Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Davem says:

1) Fix JIT code generation on x86-64 for divide by zero, from Eric Dumazet.

2) tg3 header length computation correction from Eric Dumazet.

3) More build and reference counting fixes for socket memory cgroup
   code from Glauber Costa.

4) module.h snuck back into a core header after all the hard work we
   did to remove that, from Paul Gortmaker and Jesper Dangaard Brouer.

5) Fix PHY naming regression and add some new PCI IDs in stmmac, from
   Alessandro Rubini.

6) Netlink message generation fix in new team driver, should only advertise
   the entries that changed during events, from Jiri Pirko.

7) SRIOV VF registration and unregistration fixes, and also add a
   missing PCI ID, from Roopa Prabhu.

8) Fix infinite loop in tx queue flush code of brcmsmac, from Stanislaw Gruszka.

9) ftgmac100/ftmac100 build fix, missing interrupt.h include.

10) Memory leak fix in net/hyperv do_set_multicast() handling, from Wei Yongjun.

11) Off by one fix in netem packet scheduler, from Vijay Subramanian.

12) TCP loss detection fix from Yuchung Cheng.

13) TCP reset packet MD5 calculation uses wrong address, fix from Shawn Lu.

14) skge carrier assertion and DMA mapping fixes from Stephen Hemminger.

15) Congestion recovery undo performed at the wrong spot in BIC and CUBIC
    congestion control modules, fix from Neal Cardwell.

16) Ethtool ETHTOOL_GSSET_INFO is unnecessarily restrictive, from Michał Mirosław.

17) Fix triggerable race in ipv6 sysctl handling, from Francesco Ruggeri.

18) Statistics bug fixes in mlx4 from Eugenia Emantayev.

19) rds locking bug fix during info dumps, from yours truly.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (67 commits)
  rds: Make rds_sock_lock BH rather than IRQ safe.
  netprio_cgroup.h: dont include module.h from other includes
  net: flow_dissector.c missing include linux/export.h
  team: send only changed options/ports via netlink
  net/hyperv: fix possible memory leak in do_set_multicast()
  drivers/net: dsa/mv88e6xxx.c files need linux/module.h
  stmmac: added PCI identifiers
  llc: Fix race condition in llc_ui_recvmsg
  stmmac: fix phy naming inconsistency
  dsa: Add reporting of silicon revision for Marvell 88E6123/88E6161/88E6165 switches.
  tg3: fix ipv6 header length computation
  skge: add byte queue limit support
  mv643xx_eth: Add Rx Discard and Rx Overrun statistics
  bnx2x: fix compilation error with SOE in fw_dump
  bnx2x: handle CHIP_REVISION during init_one
  bnx2x: allow user to change ring size in ISCSI SD mode
  bnx2x: fix Big-Endianess in ethtool -t
  bnx2x: fixed ethtool statistics for MF modes
  bnx2x: credit-leakage fixup on vlan_mac_del_all
  macvlan: fix a possible use after free
  ...
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index b638e50..2f7fd43 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -50,7 +50,9 @@
 
      <sect1><title>Delaying, scheduling, and timer routines</title>
 !Iinclude/linux/sched.h
-!Ekernel/sched.c
+!Ekernel/sched/core.c
+!Ikernel/sched/cpupri.c
+!Ikernel/sched/fair.c
 !Iinclude/linux/completion.h
 !Ekernel/timer.c
      </sect1>
@@ -216,7 +218,6 @@
 
   <chapter id="uart16x50">
      <title>16x50 UART Driver</title>
-!Iinclude/linux/serial_core.h
 !Edrivers/tty/serial/serial_core.c
 !Edrivers/tty/serial/8250.c
   </chapter>
diff --git a/Documentation/DocBook/deviceiobook.tmpl b/Documentation/DocBook/deviceiobook.tmpl
index c1ed6a4..54199a0 100644
--- a/Documentation/DocBook/deviceiobook.tmpl
+++ b/Documentation/DocBook/deviceiobook.tmpl
@@ -317,7 +317,7 @@
   <chapter id="pubfunctions">
      <title>Public Functions Provided</title>
 !Iarch/x86/include/asm/io.h
-!Elib/iomap.c
+!Elib/pci_iomap.c
   </chapter>
 
 </book>
diff --git a/Documentation/DocBook/media/dvb/dvbproperty.xml b/Documentation/DocBook/media/dvb/dvbproperty.xml
index ffee1fb..c7a4ca5 100644
--- a/Documentation/DocBook/media/dvb/dvbproperty.xml
+++ b/Documentation/DocBook/media/dvb/dvbproperty.xml
@@ -163,14 +163,16 @@
 	<section id="DTV-FREQUENCY">
 		<title><constant>DTV_FREQUENCY</constant></title>
 
-		<para>Central frequency of the channel, in HZ.</para>
+		<para>Central frequency of the channel.</para>
 
 		<para>Notes:</para>
-		<para>1)For ISDB-T, the channels are usually transmitted with an offset of 143kHz.
+		<para>1)For satellital delivery systems, it is measured in kHz.
+			For the other ones, it is measured in Hz.</para>
+		<para>2)For ISDB-T, the channels are usually transmitted with an offset of 143kHz.
 			E.g. a valid frequncy could be 474143 kHz. The stepping is bound to the bandwidth of
 			the channel which is 6MHz.</para>
 
-		<para>2)As in ISDB-Tsb the channel consists of only one or three segments the
+		<para>3)As in ISDB-Tsb the channel consists of only one or three segments the
 			frequency step is 429kHz, 3*429 respectively. As for ISDB-T the
 			central frequency of the channel is expected.</para>
 	</section>
@@ -735,14 +737,10 @@
 			<listitem><para><link linkend="DTV-TUNE"><constant>DTV_TUNE</constant></link></para></listitem>
 			<listitem><para><link linkend="DTV-CLEAR"><constant>DTV_CLEAR</constant></link></para></listitem>
 			<listitem><para><link linkend="DTV-FREQUENCY"><constant>DTV_FREQUENCY</constant></link></para></listitem>
-			<listitem><para><link linkend="DTV-MODULATION"><constant>DTV_MODULATION</constant></link></para></listitem>
 			<listitem><para><link linkend="DTV-BANDWIDTH-HZ"><constant>DTV_BANDWIDTH_HZ</constant></link></para></listitem>
 			<listitem><para><link linkend="DTV-INVERSION"><constant>DTV_INVERSION</constant></link></para></listitem>
-			<listitem><para><link linkend="DTV-CODE-RATE-HP"><constant>DTV_CODE_RATE_HP</constant></link></para></listitem>
-			<listitem><para><link linkend="DTV-CODE-RATE-LP"><constant>DTV_CODE_RATE_LP</constant></link></para></listitem>
 			<listitem><para><link linkend="DTV-GUARD-INTERVAL"><constant>DTV_GUARD_INTERVAL</constant></link></para></listitem>
 			<listitem><para><link linkend="DTV-TRANSMISSION-MODE"><constant>DTV_TRANSMISSION_MODE</constant></link></para></listitem>
-			<listitem><para><link linkend="DTV-HIERARCHY"><constant>DTV_HIERARCHY</constant></link></para></listitem>
 			<listitem><para><link linkend="DTV-ISDBT-LAYER-ENABLED"><constant>DTV_ISDBT_LAYER_ENABLED</constant></link></para></listitem>
 			<listitem><para><link linkend="DTV-ISDBT-PARTIAL-RECEPTION"><constant>DTV_ISDBT_PARTIAL_RECEPTION</constant></link></para></listitem>
 			<listitem><para><link linkend="DTV-ISDBT-SOUND-BROADCASTING"><constant>DTV_ISDBT_SOUND_BROADCASTING</constant></link></para></listitem>
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml b/Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml
index 6f1f9a6..b17a7aa 100644
--- a/Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml
@@ -183,7 +183,12 @@
 	    <entry>__u32</entry>
 	    <entry><structfield>ctrl_class</structfield></entry>
 	    <entry>The control class to which all controls belong, see
-<xref linkend="ctrl-class" />.</entry>
+<xref linkend="ctrl-class" />. Drivers that use a kernel framework for handling
+controls will also accept a value of 0 here, meaning that the controls can
+belong to any control class. Whether drivers support this can be tested by setting
+<structfield>ctrl_class</structfield> to 0 and calling <constant>VIDIOC_TRY_EXT_CTRLS</constant>
+with a <structfield>count</structfield> of 0. If that succeeds, then the driver
+supports this feature.</entry>
 	  </row>
 	  <row>
 	    <entry>__u32</entry>
@@ -194,10 +199,13 @@
 	  <row>
 	    <entry>__u32</entry>
 	    <entry><structfield>error_idx</structfield></entry>
-	    <entry>Set by the driver in case of an error. It is the
-index of the control causing the error or equal to 'count' when the
-error is not associated with a particular control. Undefined when the
-ioctl returns 0 (success).</entry>
+	    <entry>Set by the driver in case of an error. If it is equal
+to <structfield>count</structfield>, then no actual changes were made to
+controls. In other words, the error was not associated with setting a particular
+control. If it is another value, then only the controls up to <structfield>error_idx-1</structfield>
+were modified and control <structfield>error_idx</structfield> is the one that
+caused the error. The <structfield>error_idx</structfield> value is undefined
+if the ioctl returned 0 (success).</entry>
 	  </row>
 	  <row>
 	    <entry>__u32</entry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-fbuf.xml b/Documentation/DocBook/media/v4l/vidioc-g-fbuf.xml
index 93817f3..7c63815 100644
--- a/Documentation/DocBook/media/v4l/vidioc-g-fbuf.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-g-fbuf.xml
@@ -364,15 +364,20 @@
 	  <row>
 	    <entry><constant>V4L2_FBUF_FLAG_OVERLAY</constant></entry>
 	    <entry>0x0002</entry>
-	    <entry>The frame buffer is an overlay surface the same
-size as the capture. [?]</entry>
-	  </row>
-	  <row>
-	    <entry spanname="hspan">The purpose of
-<constant>V4L2_FBUF_FLAG_OVERLAY</constant> was never quite clear.
-Most drivers seem to ignore this flag. For compatibility with the
-<wordasword>bttv</wordasword> driver applications should set the
-<constant>V4L2_FBUF_FLAG_OVERLAY</constant> flag.</entry>
+	    <entry>If this flag is set for a video capture device, then the
+driver will set the initial overlay size to cover the full framebuffer size,
+otherwise the existing overlay size (as set by &VIDIOC-S-FMT;) will be used.
+
+Only one video capture driver (bttv) supports this flag. The use of this flag
+for capture devices is deprecated. There is no way to detect which drivers
+support this flag, so the only reliable method of setting the overlay size is
+through &VIDIOC-S-FMT;.
+
+If this flag is set for a video output device, then the video output overlay
+window is relative to the top-left corner of the framebuffer and restricted
+to the size of the framebuffer. If it is cleared, then the video output
+overlay window is relative to the video output display.
+            </entry>
 	  </row>
 	  <row>
 	    <entry><constant>V4L2_FBUF_FLAG_CHROMAKEY</constant></entry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-frequency.xml b/Documentation/DocBook/media/v4l/vidioc-g-frequency.xml
index 1643181..66e9a52 100644
--- a/Documentation/DocBook/media/v4l/vidioc-g-frequency.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-g-frequency.xml
@@ -98,8 +98,11 @@
 	    <entry>&v4l2-tuner-type;</entry>
 	    <entry><structfield>type</structfield></entry>
 	    <entry>The tuner type. This is the same value as in the
-&v4l2-tuner; <structfield>type</structfield> field. The field is not
-applicable to modulators, &ie; ignored by drivers.</entry>
+&v4l2-tuner; <structfield>type</structfield> field. The type must be set
+to <constant>V4L2_TUNER_RADIO</constant> for <filename>/dev/radioX</filename>
+device nodes, and to <constant>V4L2_TUNER_ANALOG_TV</constant>
+for all others. The field is not applicable to modulators, &ie; ignored
+by drivers.</entry>
 	  </row>
 	  <row>
 	    <entry>__u32</entry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-input.xml b/Documentation/DocBook/media/v4l/vidioc-g-input.xml
index 08ae82f..1d43065 100644
--- a/Documentation/DocBook/media/v4l/vidioc-g-input.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-g-input.xml
@@ -61,8 +61,8 @@
 <constant>VIDIOC_S_INPUT</constant> ioctl with a pointer to this
 integer. Side effects are possible. For example inputs may support
 different video standards, so the driver may implicitly switch the
-current standard. It is good practice to select an input before
-querying or negotiating any other parameters.</para>
+current standard. Because of these possible side effects applications
+must select an input before querying or negotiating any other parameters.</para>
 
     <para>Information about video inputs is available using the
 &VIDIOC-ENUMINPUT; ioctl.</para>
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-output.xml b/Documentation/DocBook/media/v4l/vidioc-g-output.xml
index fd45f1c..4533068 100644
--- a/Documentation/DocBook/media/v4l/vidioc-g-output.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-g-output.xml
@@ -61,8 +61,9 @@
 <constant>VIDIOC_S_OUTPUT</constant> ioctl with a pointer to this integer.
 Side effects are possible. For example outputs may support different
 video standards, so the driver may implicitly switch the current
-standard. It is good practice to select an output before querying or
-negotiating any other parameters.</para>
+standard.
+standard. Because of these possible side effects applications
+must select an output before querying or negotiating any other parameters.</para>
 
     <para>Information about video outputs is available using the
 &VIDIOC-ENUMOUTPUT; ioctl.</para>
diff --git a/Documentation/acpi/apei/einj.txt b/Documentation/acpi/apei/einj.txt
index 5cc699b..e7cc363 100644
--- a/Documentation/acpi/apei/einj.txt
+++ b/Documentation/acpi/apei/einj.txt
@@ -47,20 +47,53 @@
 
 - param1
   This file is used to set the first error parameter value. Effect of
-  parameter depends on error_type specified. For memory error, this is
-  physical memory address.  Only available if param_extension module
-  parameter is specified.
+  parameter depends on error_type specified.
 
 - param2
   This file is used to set the second error parameter value. Effect of
-  parameter depends on error_type specified. For memory error, this is
-  physical memory address mask.  Only available if param_extension
-  module parameter is specified.
+  parameter depends on error_type specified.
 
-Injecting parameter support is a BIOS version specific extension, that
-is, it only works on some BIOS version.  If you want to use it, please
-make sure your BIOS version has the proper support and specify
-"param_extension=y" in module parameter.
+BIOS versions based in the ACPI 4.0 specification have limited options
+to control where the errors are injected.  Your BIOS may support an
+extension (enabled with the param_extension=1 module parameter, or
+boot command line einj.param_extension=1). This allows the address
+and mask for memory injections to be specified by the param1 and
+param2 files in apei/einj.
+
+BIOS versions using the ACPI 5.0 specification have more control over
+the target of the injection. For processor related errors (type 0x1,
+0x2 and 0x4) the APICID of the target should be provided using the
+param1 file in apei/einj. For memory errors (type 0x8, 0x10 and 0x20)
+the address is set using param1 with a mask in param2 (0x0 is equivalent
+to all ones). For PCI express errors (type 0x40, 0x80 and 0x100) the
+segment, bus, device and function are specified using param1:
+
+         31     24 23    16 15    11 10      8  7        0
+	+-------------------------------------------------+
+	| segment |   bus  | device | function | reserved |
+	+-------------------------------------------------+
+
+An ACPI 5.0 BIOS may also allow vendor specific errors to be injected.
+In this case a file named vendor will contain identifying information
+from the BIOS that hopefully will allow an application wishing to use
+the vendor specific extension to tell that they are running on a BIOS
+that supports it. All vendor extensions have the 0x80000000 bit set in
+error_type. A file vendor_flags controls the interpretation of param1
+and param2 (1 = PROCESSOR, 2 = MEMORY, 4 = PCI). See your BIOS vendor
+documentation for details (and expect changes to this API if vendors
+creativity in using this feature expands beyond our expectations).
+
+Example:
+# cd /sys/kernel/debug/apei/einj
+# cat available_error_type		# See which errors can be injected
+0x00000002	Processor Uncorrectable non-fatal
+0x00000008	Memory Correctable
+0x00000010	Memory Uncorrectable non-fatal
+# echo 0x12345000 > param1		# Set memory address for injection
+# echo 0xfffffffffffff000 > param2	# Mask - anywhere in this page
+# echo 0x8 > error_type			# Choose correctable memory error
+# echo 1 > error_inject			# Inject now
+
 
 For more information about EINJ, please refer to ACPI specification
-version 4.0, section 17.5.
+version 4.0, section 17.5 and ACPI 5.0, section 18.6.
diff --git a/Documentation/devicetree/bindings/i2c/omap-i2c.txt b/Documentation/devicetree/bindings/i2c/omap-i2c.txt
new file mode 100644
index 0000000..56564aa
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/omap-i2c.txt
@@ -0,0 +1,30 @@
+I2C for OMAP platforms
+
+Required properties :
+- compatible : Must be "ti,omap3-i2c" or "ti,omap4-i2c"
+- ti,hwmods : Must be "i2c<n>", n being the instance number (1-based)
+- #address-cells = <1>;
+- #size-cells = <0>;
+
+Recommended properties :
+- clock-frequency : Desired I2C bus clock frequency in Hz. Otherwise
+  the default 100 kHz frequency will be used.
+
+Optional properties:
+- Child nodes conforming to i2c bus binding
+
+Note: Current implementation will fetch base address, irq and dma
+from omap hwmod data base during device registration.
+Future plan is to migrate hwmod data base contents into device tree
+blob so that, all the required data will be used from device tree dts
+file.
+
+Examples :
+
+i2c1: i2c@0 {
+    compatible = "ti,omap3-i2c";
+    #address-cells = <1>;
+    #size-cells = <0>;
+    ti,hwmods = "i2c1";
+    clock-frequency = <400000>;
+};
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index d725c0d..1bea46a 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -439,17 +439,6 @@
 
 ----------------------------
 
-What:	For VIDIOC_S_FREQUENCY the type field must match the device node's type.
-	If not, return -EINVAL.
-When:	3.2
-Why:	It makes no sense to switch the tuner to radio mode by calling
-	VIDIOC_S_FREQUENCY on a video node, or to switch the tuner to tv mode by
-	calling VIDIOC_S_FREQUENCY on a radio node. This is the first step of a
-	move to more consistent handling of tv and radio tuners.
-Who:	Hans Verkuil <hans.verkuil@cisco.com>
-
-----------------------------
-
 What:	Opening a radio device node will no longer automatically switch the
 	tuner mode from tv to radio.
 When:	3.3
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 54078ed..4840334 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -149,6 +149,7 @@
 'M'	01-03	drivers/scsi/megaraid/megaraid_sas.h
 'M'	00-0F	drivers/video/fsl-diu-fb.h	conflict!
 'N'	00-1F	drivers/usb/scanner.h
+'N'	40-7F	drivers/block/nvme.c
 'O'     00-06   mtd/ubi-user.h		UBI
 'P'	all	linux/soundcard.h	conflict!
 'P'	60-6F	sound/sscape_ioctl.h	conflict!
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index b29f3c4..033d4e6 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1059,6 +1059,11 @@
 			By default, super page will be supported if Intel IOMMU
 			has the capability. With this option, super page will
 			not be supported.
+
+	intel_idle.max_cstate=	[KNL,HW,ACPI,X86]
+			0	disables intel_idle and fall back on acpi_idle.
+			1 to 6	specify maximum depth of C-state.
+
 	intremap=	[X86-64, Intel-IOMMU]
 			on	enable Interrupt Remapping (default)
 			off	disable Interrupt Remapping
diff --git a/Documentation/power/basic-pm-debugging.txt b/Documentation/power/basic-pm-debugging.txt
index 40a4c65..262acf5 100644
--- a/Documentation/power/basic-pm-debugging.txt
+++ b/Documentation/power/basic-pm-debugging.txt
@@ -15,7 +15,7 @@
 because some problems only show up on a second attempt at suspending and
 resuming the system.]  Moreover, hibernating in the "reboot" and "shutdown"
 modes causes the PM core to skip some platform-related callbacks which on ACPI
-systems might be necessary to make hibernation work.  Thus, if you machine fails
+systems might be necessary to make hibernation work.  Thus, if your machine fails
 to hibernate or resume in the "reboot" mode, you should try the "platform" mode:
 
 # echo platform > /sys/power/disk
diff --git a/Documentation/power/freezing-of-tasks.txt b/Documentation/power/freezing-of-tasks.txt
index 6ccb68f..ebd7490 100644
--- a/Documentation/power/freezing-of-tasks.txt
+++ b/Documentation/power/freezing-of-tasks.txt
@@ -120,10 +120,10 @@
 freezing user threads I don't find really objectionable."
 
 Still, there are kernel threads that may want to be freezable.  For example, if
-a kernel that belongs to a device driver accesses the device directly, it in
-principle needs to know when the device is suspended, so that it doesn't try to
-access it at that time.  However, if the kernel thread is freezable, it will be
-frozen before the driver's .suspend() callback is executed and it will be
+a kernel thread that belongs to a device driver accesses the device directly, it
+in principle needs to know when the device is suspended, so that it doesn't try
+to access it at that time.  However, if the kernel thread is freezable, it will
+be frozen before the driver's .suspend() callback is executed and it will be
 thawed after the driver's .resume() callback has run, so it won't be accessing
 the device while it's suspended.
 
diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas
index 64adb98..57566ba 100644
--- a/Documentation/scsi/ChangeLog.megaraid_sas
+++ b/Documentation/scsi/ChangeLog.megaraid_sas
@@ -1,3 +1,13 @@
+Release Date    : Fri. Jan 6, 2012 17:00:00 PST 2010 -
+			(emaild-id:megaraidlinux@lsi.com)
+			Adam Radford
+Current Version : 00.00.06.14-rc1
+Old Version     : 00.00.06.12-rc1
+    1. Fix reglockFlags for degraded raid5/6 for MR 9360/9380.
+    2. Mask off flags in ioctl path to prevent memory scribble with older
+       MegaCLI versions.
+    3. Remove poll_mode_io module paramater, sysfs node, and associated code.
+-------------------------------------------------------------------------------
 Release Date    : Wed. Oct 5, 2011 17:00:00 PST 2010 -
 			(emaild-id:megaraidlinux@lsi.com)
 			Adam Radford
diff --git a/Documentation/scsi/LICENSE.qla4xxx b/Documentation/scsi/LICENSE.qla4xxx
index 494980e..ab89959 100644
--- a/Documentation/scsi/LICENSE.qla4xxx
+++ b/Documentation/scsi/LICENSE.qla4xxx
@@ -1,32 +1,11 @@
 Copyright (c) 2003-2011 QLogic Corporation
-QLogic Linux iSCSI HBA Driver
+QLogic Linux iSCSI Driver
 
 This program includes a device driver for Linux 3.x.
 You may modify and redistribute the device driver code under the
 GNU General Public License (a copy of which is attached hereto as
 Exhibit A) published by the Free Software Foundation (version 2).
 
-REGARDLESS OF WHAT LICENSING MECHANISM IS USED OR APPLICABLE,
-THIS PROGRAM IS PROVIDED BY QLOGIC CORPORATION "AS IS'' AND ANY
-EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
-PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
-BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
-TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
-OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-USER ACKNOWLEDGES AND AGREES THAT USE OF THIS PROGRAM WILL NOT
-CREATE OR GIVE GROUNDS FOR A LICENSE BY IMPLICATION, ESTOPPEL, OR
-OTHERWISE IN ANY INTELLECTUAL PROPERTY RIGHTS (PATENT, COPYRIGHT,
-TRADE SECRET, MASK WORK, OR OTHER PROPRIETARY RIGHT) EMBODIED IN
-ANY OTHER QLOGIC HARDWARE OR SOFTWARE EITHER SOLELY OR IN
-COMBINATION WITH THIS PROGRAM.
-
 
 EXHIBIT A
 
diff --git a/Documentation/target/tcm_mod_builder.py b/Documentation/target/tcm_mod_builder.py
index 7ef9b84..6e21b8b 100755
--- a/Documentation/target/tcm_mod_builder.py
+++ b/Documentation/target/tcm_mod_builder.py
@@ -230,14 +230,9 @@
 	buf += "#include <linux/ctype.h>\n"
 	buf += "#include <asm/unaligned.h>\n\n"
 	buf += "#include <target/target_core_base.h>\n"
-	buf += "#include <target/target_core_transport.h>\n"
-	buf += "#include <target/target_core_fabric_ops.h>\n"
+	buf += "#include <target/target_core_fabric.h>\n"
 	buf += "#include <target/target_core_fabric_configfs.h>\n"
-	buf += "#include <target/target_core_fabric_lib.h>\n"
-	buf += "#include <target/target_core_device.h>\n"
-	buf += "#include <target/target_core_tpg.h>\n"
 	buf += "#include <target/target_core_configfs.h>\n"
-	buf += "#include <target/target_core_base.h>\n"
 	buf += "#include <target/configfs_macros.h>\n\n"
 	buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
 	buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
@@ -260,7 +255,7 @@
 	buf += "	/* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
 	buf += "		return ERR_PTR(-EINVAL); */\n"
 	buf += "	se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
-	buf += "	if (!(se_nacl_new))\n"
+	buf += "	if (!se_nacl_new)\n"
 	buf += "		return ERR_PTR(-ENOMEM);\n"
 	buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
 	buf += "	nexus_depth = 1;\n"
@@ -308,7 +303,7 @@
 	buf += "	if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
 	buf += "		return ERR_PTR(-EINVAL);\n\n"
 	buf += "	tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
-	buf += "	if (!(tpg)) {\n"
+	buf += "	if (!tpg) {\n"
 	buf += "		printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
 	buf += "		return ERR_PTR(-ENOMEM);\n"
 	buf += "	}\n"
@@ -344,7 +339,7 @@
 	buf += "	/* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
 	buf += "		return ERR_PTR(-EINVAL); */\n\n"
 	buf += "	" + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
-	buf += "	if (!(" + fabric_mod_port + ")) {\n"
+	buf += "	if (!" + fabric_mod_port + ") {\n"
 	buf += "		printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
 	buf += "		return ERR_PTR(-ENOMEM);\n"
 	buf += "	}\n"
@@ -352,7 +347,7 @@
 	if proto_ident == "FC" or proto_ident == "SAS":
 		buf += "	" + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
 
-	buf += "	/* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "__NAMELEN, wwpn); */\n\n"
+	buf += "	/* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
 	buf += "	return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
 	buf += "}\n\n"
 	buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
@@ -391,8 +386,7 @@
 	buf += "	.tpg_alloc_fabric_acl		= " + fabric_mod_name + "_alloc_fabric_acl,\n"
 	buf += "	.tpg_release_fabric_acl		= " + fabric_mod_name + "_release_fabric_acl,\n"
 	buf += "	.tpg_get_inst_index		= " + fabric_mod_name + "_tpg_get_inst_index,\n"
-	buf += "	.release_cmd_to_pool		= " + fabric_mod_name + "_release_cmd,\n"
-	buf += "	.release_cmd_direct		= " + fabric_mod_name + "_release_cmd,\n"
+	buf += "	.release_cmd			= " + fabric_mod_name + "_release_cmd,\n"
 	buf += "	.shutdown_session		= " + fabric_mod_name + "_shutdown_session,\n"
 	buf += "	.close_session			= " + fabric_mod_name + "_close_session,\n"
 	buf += "	.stop_session			= " + fabric_mod_name + "_stop_session,\n"
@@ -405,14 +399,12 @@
 	buf += "	.set_default_node_attributes	= " + fabric_mod_name + "_set_default_node_attrs,\n"
 	buf += "	.get_task_tag			= " + fabric_mod_name + "_get_task_tag,\n"
 	buf += "	.get_cmd_state			= " + fabric_mod_name + "_get_cmd_state,\n"
-	buf += "	.new_cmd_failure		= " + fabric_mod_name + "_new_cmd_failure,\n"
 	buf += "	.queue_data_in			= " + fabric_mod_name + "_queue_data_in,\n"
 	buf += "	.queue_status			= " + fabric_mod_name + "_queue_status,\n"
 	buf += "	.queue_tm_rsp			= " + fabric_mod_name + "_queue_tm_rsp,\n"
 	buf += "	.get_fabric_sense_len		= " + fabric_mod_name + "_get_fabric_sense_len,\n"
 	buf += "	.set_fabric_sense_len		= " + fabric_mod_name + "_set_fabric_sense_len,\n"
 	buf += "	.is_state_remove		= " + fabric_mod_name + "_is_state_remove,\n"
-	buf += "	.pack_lun			= " + fabric_mod_name + "_pack_lun,\n"
 	buf += "	/*\n"
 	buf += "	 * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
 	buf += "	 */\n"
@@ -439,9 +431,9 @@
 	buf += "	 * Register the top level struct config_item_type with TCM core\n"
 	buf += "	 */\n"
 	buf += "	fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
-	buf += "	if (!(fabric)) {\n"
+	buf += "	if (IS_ERR(fabric)) {\n"
 	buf += "		printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
-	buf += "		return -ENOMEM;\n"
+	buf += "		return PTR_ERR(fabric);\n"
 	buf += "	}\n"
 	buf += "	/*\n"
 	buf += "	 * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
@@ -475,9 +467,9 @@
 	buf += "	printk(KERN_INFO \"" +  fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
 	buf += "	return 0;\n"
 	buf += "};\n\n"
-	buf += "static void " + fabric_mod_name + "_deregister_configfs(void)\n"
+	buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
 	buf += "{\n"
-	buf += "	if (!(" + fabric_mod_name + "_fabric_configfs))\n"
+	buf += "	if (!" + fabric_mod_name + "_fabric_configfs)\n"
 	buf += "		return;\n\n"
 	buf += "	target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
 	buf += "	" + fabric_mod_name + "_fabric_configfs = NULL;\n"
@@ -492,17 +484,15 @@
 	buf += "		return ret;\n\n"
 	buf += "	return 0;\n"
 	buf += "};\n\n"
-	buf += "static void " + fabric_mod_name + "_exit(void)\n"
+	buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
 	buf += "{\n"
 	buf += "	" + fabric_mod_name + "_deregister_configfs();\n"
 	buf += "};\n\n"
 
-	buf += "#ifdef MODULE\n"
 	buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
 	buf += "MODULE_LICENSE(\"GPL\");\n"
 	buf += "module_init(" + fabric_mod_name + "_init);\n"
 	buf += "module_exit(" + fabric_mod_name + "_exit);\n"
-	buf += "#endif\n"
 
 	ret = p.write(buf)
 	if ret:
@@ -514,7 +504,7 @@
 
 def tcm_mod_scan_fabric_ops(tcm_dir):
 
-	fabric_ops_api = tcm_dir + "include/target/target_core_fabric_ops.h"
+	fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
 
 	print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
 	process_fo = 0;
@@ -579,11 +569,7 @@
 	buf += "#include <scsi/scsi_cmnd.h>\n"
 	buf += "#include <scsi/libfc.h>\n\n"
 	buf += "#include <target/target_core_base.h>\n"
-	buf += "#include <target/target_core_transport.h>\n"
-	buf += "#include <target/target_core_fabric_ops.h>\n"
-	buf += "#include <target/target_core_fabric_lib.h>\n"
-	buf += "#include <target/target_core_device.h>\n"
-	buf += "#include <target/target_core_tpg.h>\n"
+	buf += "#include <target/target_core_fabric.h>\n"
 	buf += "#include <target/target_core_configfs.h>\n\n"
 	buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
 	buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
@@ -788,7 +774,7 @@
 			buf += "{\n"
 			buf += "	struct " + fabric_mod_name + "_nacl *nacl;\n\n"
 			buf += "	nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
-			buf += "	if (!(nacl)) {\n"
+			buf += "	if (!nacl) {\n"
 			buf += "		printk(KERN_ERR \"Unable to alocate struct " + fabric_mod_name + "_nacl\\n\");\n"
 			buf += "		return NULL;\n"
 			buf += "	}\n\n"
@@ -815,7 +801,7 @@
 			buf += "}\n\n"
 			bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
 
-		if re.search('release_cmd_to_pool', fo):
+		if re.search('\*release_cmd\)\(', fo):
 			buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
 			buf += "{\n"
 			buf += "	return;\n"
@@ -899,13 +885,6 @@
 			buf += "}\n\n"
 			bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
 
-		if re.search('new_cmd_failure\)\(', fo):
-			buf += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *se_cmd)\n"
-			buf += "{\n"
-			buf += "	return;\n"
-			buf += "}\n\n"
-			bufi += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *);\n"
-
 		if re.search('queue_data_in\)\(', fo):
 			buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
 			buf += "{\n"
@@ -948,15 +927,6 @@
 			buf += "}\n\n"
 			bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
 
-		if re.search('pack_lun\)\(', fo):
-			buf += "u64 " + fabric_mod_name + "_pack_lun(unsigned int lun)\n"
-			buf += "{\n"
-			buf += "	WARN_ON(lun >= 256);\n"
-			buf += "	/* Caller wants this byte-swapped */\n"
-			buf += "	return cpu_to_le64((lun & 0xff) << 8);\n"
-			buf += "}\n\n"
-			bufi += "u64 " + fabric_mod_name + "_pack_lun(unsigned int);\n"
-
 
 	ret = p.write(buf)
 	if ret:
diff --git a/Documentation/video4linux/v4l2-controls.txt b/Documentation/video4linux/v4l2-controls.txt
index 26aa057..e2492a9 100644
--- a/Documentation/video4linux/v4l2-controls.txt
+++ b/Documentation/video4linux/v4l2-controls.txt
@@ -666,27 +666,6 @@
 class is added.
 
 
-Differences from the Spec
-=========================
-
-There are a few places where the framework acts slightly differently from the
-V4L2 Specification. Those differences are described in this section. We will
-have to see whether we need to adjust the spec or not.
-
-1) It is no longer required to have all controls contained in a
-v4l2_ext_control array be from the same control class. The framework will be
-able to handle any type of control in the array. You need to set ctrl_class
-to 0 in order to enable this. If ctrl_class is non-zero, then it will still
-check that all controls belong to that control class.
-
-If you set ctrl_class to 0 and count to 0, then it will only return an error
-if there are no controls at all.
-
-2) Clarified the way error_idx works. For get and set it will be equal to
-count if nothing was done yet. If it is less than count then only the controls
-up to error_idx-1 were successfully applied.
-
-
 Proposals for Extensions
 ========================
 
diff --git a/MAINTAINERS b/MAINTAINERS
index fa3f5e6..93c68d5 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2246,6 +2246,17 @@
 S:	Supported
 F:	fs/dlm/
 
+DMA BUFFER SHARING FRAMEWORK
+M:	Sumit Semwal <sumit.semwal@linaro.org>
+S:	Maintained
+L:	linux-media@vger.kernel.org
+L:	dri-devel@lists.freedesktop.org
+L:	linaro-mm-sig@lists.linaro.org
+F:	drivers/base/dma-buf*
+F:	include/linux/dma-buf*
+F:	Documentation/dma-buf-sharing.txt
+T:	git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git
+
 DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
 M:	Vinod Koul <vinod.koul@intel.com>
 M:	Dan Williams <dan.j.williams@intel.com>
@@ -7200,7 +7211,7 @@
 F:	drivers/net/vmxnet3/
 
 VMware PVSCSI driver
-M:	Alok Kataria <akataria@vmware.com>
+M:	Arvind Kumar <arvindkumar@vmware.com>
 M:	VMware PV-Drivers <pv-drivers@vmware.com>
 L:	linux-scsi@vger.kernel.org
 S:	Maintained
diff --git a/Makefile b/Makefile
index 156ac69..71e6ed2 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
-PATCHLEVEL = 2
+PATCHLEVEL = 3
 SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc1
 NAME = Saber-toothed Squirrel
 
 # *DOCUMENTATION*
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 40319d9..1683bfb 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -160,7 +160,6 @@
 machine-$(CONFIG_ARCH_MV78XX0)		:= mv78xx0
 machine-$(CONFIG_ARCH_IMX_V4_V5)	:= imx
 machine-$(CONFIG_ARCH_IMX_V6_V7)	:= imx
-machine-$(CONFIG_ARCH_MX5)		:= mx5
 machine-$(CONFIG_ARCH_MXS)		:= mxs
 machine-$(CONFIG_ARCH_NETX)		:= netx
 machine-$(CONFIG_ARCH_NOMADIK)		:= nomadik
diff --git a/arch/arm/configs/mx5_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
similarity index 80%
rename from arch/arm/configs/mx5_defconfig
rename to arch/arm/configs/imx_v6_v7_defconfig
index d0d8dfe..3a4fb2e 100644
--- a/arch/arm/configs/mx5_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -3,6 +3,7 @@
 CONFIG_KERNEL_LZO=y
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=18
+CONFIG_CGROUPS=y
 CONFIG_RELAY=y
 CONFIG_EXPERT=y
 # CONFIG_SLUB_DEBUG is not set
@@ -14,20 +15,31 @@
 # CONFIG_LBDAF is not set
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_ARCH_MXC=y
-CONFIG_ARCH_MX5=y
-CONFIG_MACH_MX51_BABBAGE=y
+CONFIG_MACH_MX31LILLY=y
+CONFIG_MACH_MX31LITE=y
+CONFIG_MACH_PCM037=y
+CONFIG_MACH_PCM037_EET=y
+CONFIG_MACH_MX31_3DS=y
+CONFIG_MACH_MX31MOBOARD=y
+CONFIG_MACH_QONG=y
+CONFIG_MACH_ARMADILLO5X0=y
+CONFIG_MACH_KZM_ARM11_01=y
+CONFIG_MACH_PCM043=y
+CONFIG_MACH_MX35_3DS=y
+CONFIG_MACH_EUKREA_CPUIMX35=y
+CONFIG_MACH_VPR200=y
+CONFIG_MACH_IMX51_DT=y
 CONFIG_MACH_MX51_3DS=y
 CONFIG_MACH_EUKREA_CPUIMX51=y
 CONFIG_MACH_EUKREA_CPUIMX51SD=y
 CONFIG_MACH_MX51_EFIKAMX=y
 CONFIG_MACH_MX51_EFIKASB=y
-CONFIG_MACH_MX53_EVK=y
-CONFIG_MACH_MX53_SMD=y
-CONFIG_MACH_MX53_LOCO=y
-CONFIG_MACH_MX53_ARD=y
+CONFIG_MACH_IMX53_DT=y
+CONFIG_SOC_IMX6Q=y
 CONFIG_MXC_PWM=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
+CONFIG_SMP=y
 CONFIG_VMSPLIT_2G=y
 CONFIG_PREEMPT_VOLUNTARY=y
 CONFIG_AEABI=y
@@ -49,7 +61,7 @@
 # CONFIG_INET_XFRM_MODE_TUNNEL is not set
 # CONFIG_INET_XFRM_MODE_BEET is not set
 # CONFIG_INET_LRO is not set
-# CONFIG_IPV6 is not set
+CONFIG_IPV6=y
 # CONFIG_WIRELESS is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -68,24 +80,20 @@
 CONFIG_ATA=y
 CONFIG_PATA_IMX=y
 CONFIG_NETDEVICES=y
-CONFIG_MII=m
-CONFIG_MARVELL_PHY=y
-CONFIG_DAVICOM_PHY=y
-CONFIG_QSEMI_PHY=y
-CONFIG_LXT_PHY=y
-CONFIG_CICADA_PHY=y
-CONFIG_VITESSE_PHY=y
-CONFIG_SMSC_PHY=y
-CONFIG_BROADCOM_PHY=y
-CONFIG_ICPLUS_PHY=y
-CONFIG_REALTEK_PHY=y
-CONFIG_NATIONAL_PHY=y
-CONFIG_STE10XP=y
-CONFIG_LSI_ET1011C_PHY=y
-CONFIG_MICREL_PHY=y
-CONFIG_NET_ETHERNET=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_FARADAY is not set
+CONFIG_FEC=y
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+CONFIG_SMC91X=y
+CONFIG_SMC911X=y
+CONFIG_SMSC911X=y
+# CONFIG_NET_VENDOR_STMICRO is not set
 # CONFIG_WLAN is not set
 # CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 CONFIG_INPUT_EVDEV=y
@@ -124,7 +132,6 @@
 CONFIG_USB_EHCI_MXC=y
 CONFIG_USB_STORAGE=y
 CONFIG_MMC=y
-CONFIG_MMC_BLOCK=m
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_ESDHC_IMX=y
@@ -133,6 +140,8 @@
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_INTF_DEV_UIE_EMUL=y
 CONFIG_RTC_MXC=y
+CONFIG_DMADEVICES=y
+CONFIG_IMX_SDMA=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
 CONFIG_EXT2_FS_POSIX_ACL=y
diff --git a/arch/arm/configs/mx3_defconfig b/arch/arm/configs/mx3_defconfig
deleted file mode 100644
index cb0717f..0000000
--- a/arch/arm/configs/mx3_defconfig
+++ /dev/null
@@ -1,144 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-CONFIG_SYSVIPC=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EXPERT=y
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_ARCH_MXC=y
-CONFIG_MACH_MX31ADS_WM1133_EV1=y
-CONFIG_MACH_MX31LILLY=y
-CONFIG_MACH_MX31LITE=y
-CONFIG_MACH_PCM037=y
-CONFIG_MACH_PCM037_EET=y
-CONFIG_MACH_MX31_3DS=y
-CONFIG_MACH_MX31MOBOARD=y
-CONFIG_MACH_QONG=y
-CONFIG_MACH_ARMADILLO5X0=y
-CONFIG_MACH_KZM_ARM11_01=y
-CONFIG_MACH_PCM043=y
-CONFIG_MACH_MX35_3DS=y
-CONFIG_MACH_EUKREA_CPUIMX35=y
-CONFIG_MXC_IRQ_PRIOR=y
-CONFIG_MXC_PWM=y
-CONFIG_ARM_ERRATA_411920=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_PREEMPT=y
-CONFIG_AEABI=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="noinitrd console=ttymxc0,115200 root=/dev/mtdblock2 rw ip=off"
-CONFIG_VFP=y
-CONFIG_PM_DEBUG=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-# CONFIG_INET_DIAG is not set
-# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_FW_LOADER=m
-CONFIG_MTD=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_MXC=y
-CONFIG_MTD_UBI=y
-# CONFIG_BLK_DEV is not set
-CONFIG_MISC_DEVICES=y
-CONFIG_EEPROM_AT24=y
-CONFIG_NETDEVICES=y
-CONFIG_SMSC_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_SMSC911X=y
-CONFIG_DNET=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_KEYBOARD_IMX=y
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-# CONFIG_VT is not set
-# CONFIG_LEGACY_PTYS is not set
-CONFIG_SERIAL_8250=m
-CONFIG_SERIAL_8250_EXTENDED=y
-CONFIG_SERIAL_8250_SHARE_IRQ=y
-CONFIG_SERIAL_IMX=y
-CONFIG_SERIAL_IMX_CONSOLE=y
-# CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_IMX=y
-CONFIG_SPI=y
-CONFIG_W1=y
-CONFIG_W1_MASTER_MXC=y
-CONFIG_W1_SLAVE_THERM=y
-# CONFIG_HWMON is not set
-CONFIG_WATCHDOG=y
-CONFIG_IMX2_WDT=y
-CONFIG_MFD_WM8350_I2C=y
-CONFIG_REGULATOR=y
-CONFIG_REGULATOR_WM8350=y
-CONFIG_MEDIA_SUPPORT=y
-CONFIG_VIDEO_DEV=y
-# CONFIG_RC_CORE is not set
-# CONFIG_MEDIA_TUNER_CUSTOMISE is not set
-CONFIG_SOC_CAMERA=y
-CONFIG_SOC_CAMERA_MT9M001=y
-CONFIG_SOC_CAMERA_MT9M111=y
-CONFIG_SOC_CAMERA_MT9T031=y
-CONFIG_SOC_CAMERA_MT9V022=y
-CONFIG_SOC_CAMERA_TW9910=y
-CONFIG_SOC_CAMERA_OV772X=y
-CONFIG_VIDEO_MX3=y
-# CONFIG_RADIO_ADAPTERS is not set
-CONFIG_FB=y
-CONFIG_SOUND=y
-CONFIG_SND=y
-# CONFIG_SND_ARM is not set
-# CONFIG_SND_SPI is not set
-CONFIG_SND_SOC=y
-CONFIG_SND_IMX_SOC=y
-CONFIG_SND_MXC_SOC_WM1133_EV1=y
-CONFIG_SND_SOC_PHYCORE_AC97=y
-CONFIG_SND_SOC_EUKREA_TLV320=y
-CONFIG_USB=y
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_MXC=y
-CONFIG_USB_GADGET=m
-CONFIG_USB_FSL_USB2=m
-CONFIG_USB_G_SERIAL=m
-CONFIG_USB_ULPI=y
-CONFIG_MMC=y
-CONFIG_MMC_MXC=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_MXC=y
-CONFIG_DMADEVICES=y
-# CONFIG_DNOTIFY is not set
-CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_UBIFS_FS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_NFS_V4=y
-CONFIG_ROOT_NFS=y
-# CONFIG_ENABLE_WARN_DEPRECATED is not set
-# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index 0e6de36..09f357b 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -22,6 +22,18 @@
 config MACH_MX27
 	bool
 
+config ARCH_MX5
+	bool
+
+config ARCH_MX50
+	bool
+
+config ARCH_MX51
+	bool
+
+config ARCH_MX53
+	bool
+
 config SOC_IMX1
 	bool
 	select ARCH_MX1
@@ -73,6 +85,32 @@
 	select MXC_AVIC
 	select SMP_ON_UP if SMP
 
+config SOC_IMX5
+	select CPU_V7
+	select ARM_L1_CACHE_SHIFT_6
+	select MXC_TZIC
+	select ARCH_MXC_IOMUX_V3
+	select ARCH_MXC_AUDMUX_V2
+	select ARCH_HAS_CPUFREQ
+	select ARCH_MX5
+	bool
+
+config SOC_IMX50
+	bool
+	select SOC_IMX5
+	select ARCH_MX50
+
+config	SOC_IMX51
+	bool
+	select SOC_IMX5
+	select ARCH_MX5
+	select ARCH_MX51
+
+config	SOC_IMX53
+	bool
+	select SOC_IMX5
+	select ARCH_MX5
+	select ARCH_MX53
 
 if ARCH_IMX_V4_V5
 
@@ -592,6 +630,207 @@
 	  Include support for VPR200 platform. This includes specific
 	  configurations for the board and its peripherals.
 
+comment "i.MX5 platforms:"
+
+config MACH_MX50_RDP
+	bool "Support MX50 reference design platform"
+	depends on BROKEN
+	select SOC_IMX50
+	select IMX_HAVE_PLATFORM_IMX_I2C
+	select IMX_HAVE_PLATFORM_IMX_UART
+	select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
+	select IMX_HAVE_PLATFORM_SPI_IMX
+	help
+	  Include support for MX50 reference design platform (RDP) board. This
+	  includes specific configurations for the board and its peripherals.
+
+comment "i.MX51 machines:"
+
+config MACH_IMX51_DT
+	bool "Support i.MX51 platforms from device tree"
+	select SOC_IMX51
+	select USE_OF
+	select MACH_MX51_BABBAGE
+	help
+	  Include support for Freescale i.MX51 based platforms
+	  using the device tree for discovery
+
+config MACH_MX51_BABBAGE
+	bool "Support MX51 BABBAGE platforms"
+	select SOC_IMX51
+	select IMX_HAVE_PLATFORM_FSL_USB2_UDC
+	select IMX_HAVE_PLATFORM_IMX2_WDT
+	select IMX_HAVE_PLATFORM_IMX_I2C
+	select IMX_HAVE_PLATFORM_IMX_UART
+	select IMX_HAVE_PLATFORM_MXC_EHCI
+	select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
+	select IMX_HAVE_PLATFORM_SPI_IMX
+	help
+	  Include support for MX51 Babbage platform, also known as MX51EVK in
+	  u-boot. This includes specific configurations for the board and its
+	  peripherals.
+
+config MACH_MX51_3DS
+	bool "Support MX51PDK (3DS)"
+	select SOC_IMX51
+	select IMX_HAVE_PLATFORM_IMX2_WDT
+	select IMX_HAVE_PLATFORM_IMX_KEYPAD
+	select IMX_HAVE_PLATFORM_IMX_UART
+	select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
+	select IMX_HAVE_PLATFORM_SPI_IMX
+	select MXC_DEBUG_BOARD
+	help
+	  Include support for MX51PDK (3DS) platform. This includes specific
+	  configurations for the board and its peripherals.
+
+config MACH_EUKREA_CPUIMX51
+	bool "Support Eukrea CPUIMX51 module"
+	select SOC_IMX51
+	select IMX_HAVE_PLATFORM_FSL_USB2_UDC
+	select IMX_HAVE_PLATFORM_IMX_I2C
+	select IMX_HAVE_PLATFORM_IMX_UART
+	select IMX_HAVE_PLATFORM_MXC_EHCI
+	select IMX_HAVE_PLATFORM_MXC_NAND
+	select IMX_HAVE_PLATFORM_SPI_IMX
+	help
+	  Include support for Eukrea CPUIMX51 platform. This includes
+	  specific configurations for the module and its peripherals.
+
+choice
+	prompt "Baseboard"
+	depends on MACH_EUKREA_CPUIMX51
+	default MACH_EUKREA_MBIMX51_BASEBOARD
+
+config MACH_EUKREA_MBIMX51_BASEBOARD
+	prompt "Eukrea MBIMX51 development board"
+	bool
+	select IMX_HAVE_PLATFORM_IMX_KEYPAD
+	select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
+	select LEDS_GPIO_REGISTER
+	help
+	  This adds board specific devices that can be found on Eukrea's
+	  MBIMX51 evaluation board.
+
+endchoice
+
+config MACH_EUKREA_CPUIMX51SD
+	bool "Support Eukrea CPUIMX51SD module"
+	select SOC_IMX51
+	select IMX_HAVE_PLATFORM_FSL_USB2_UDC
+	select IMX_HAVE_PLATFORM_IMX_I2C
+	select IMX_HAVE_PLATFORM_IMX_UART
+	select IMX_HAVE_PLATFORM_MXC_EHCI
+	select IMX_HAVE_PLATFORM_MXC_NAND
+	select IMX_HAVE_PLATFORM_SPI_IMX
+	help
+	  Include support for Eukrea CPUIMX51SD platform. This includes
+	  specific configurations for the module and its peripherals.
+
+choice
+	prompt "Baseboard"
+	depends on MACH_EUKREA_CPUIMX51SD
+	default MACH_EUKREA_MBIMXSD51_BASEBOARD
+
+config MACH_EUKREA_MBIMXSD51_BASEBOARD
+	prompt "Eukrea MBIMXSD development board"
+	bool
+	select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
+	select LEDS_GPIO_REGISTER
+	help
+	  This adds board specific devices that can be found on Eukrea's
+	  MBIMXSD evaluation board.
+
+endchoice
+
+config MX51_EFIKA_COMMON
+	bool
+	select SOC_IMX51
+	select IMX_HAVE_PLATFORM_IMX_UART
+	select IMX_HAVE_PLATFORM_MXC_EHCI
+	select IMX_HAVE_PLATFORM_PATA_IMX
+	select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
+	select IMX_HAVE_PLATFORM_SPI_IMX
+	select MXC_ULPI if USB_ULPI
+
+config MACH_MX51_EFIKAMX
+	bool "Support MX51 Genesi Efika MX nettop"
+	select LEDS_GPIO_REGISTER
+	select MX51_EFIKA_COMMON
+	help
+	  Include support for Genesi Efika MX nettop. This includes specific
+	  configurations for the board and its peripherals.
+
+config MACH_MX51_EFIKASB
+	bool "Support MX51 Genesi Efika Smartbook"
+	select LEDS_GPIO_REGISTER
+	select MX51_EFIKA_COMMON
+	help
+	  Include support for Genesi Efika Smartbook. This includes specific
+	  configurations for the board and its peripherals.
+
+comment "i.MX53 machines:"
+
+config MACH_IMX53_DT
+	bool "Support i.MX53 platforms from device tree"
+	select SOC_IMX53
+	select USE_OF
+	select MACH_MX53_ARD
+	select MACH_MX53_EVK
+	select MACH_MX53_LOCO
+	select MACH_MX53_SMD
+	help
+	  Include support for Freescale i.MX53 based platforms
+	  using the device tree for discovery
+
+config MACH_MX53_EVK
+	bool "Support MX53 EVK platforms"
+	select SOC_IMX53
+	select IMX_HAVE_PLATFORM_IMX2_WDT
+	select IMX_HAVE_PLATFORM_IMX_UART
+	select IMX_HAVE_PLATFORM_IMX_I2C
+	select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
+	select IMX_HAVE_PLATFORM_SPI_IMX
+	select LEDS_GPIO_REGISTER
+	help
+	  Include support for MX53 EVK platform. This includes specific
+	  configurations for the board and its peripherals.
+
+config MACH_MX53_SMD
+	bool "Support MX53 SMD platforms"
+	select SOC_IMX53
+	select IMX_HAVE_PLATFORM_IMX2_WDT
+	select IMX_HAVE_PLATFORM_IMX_I2C
+	select IMX_HAVE_PLATFORM_IMX_UART
+	select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
+	help
+	  Include support for MX53 SMD platform. This includes specific
+	  configurations for the board and its peripherals.
+
+config MACH_MX53_LOCO
+	bool "Support MX53 LOCO platforms"
+	select SOC_IMX53
+	select IMX_HAVE_PLATFORM_IMX2_WDT
+	select IMX_HAVE_PLATFORM_IMX_I2C
+	select IMX_HAVE_PLATFORM_IMX_UART
+	select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
+	select IMX_HAVE_PLATFORM_GPIO_KEYS
+	select LEDS_GPIO_REGISTER
+	help
+	  Include support for MX53 LOCO platform. This includes specific
+	  configurations for the board and its peripherals.
+
+config MACH_MX53_ARD
+	bool "Support MX53 ARD platforms"
+	select SOC_IMX53
+	select IMX_HAVE_PLATFORM_IMX2_WDT
+	select IMX_HAVE_PLATFORM_IMX_I2C
+	select IMX_HAVE_PLATFORM_IMX_UART
+	select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
+	select IMX_HAVE_PLATFORM_GPIO_KEYS
+	help
+	  Include support for MX53 ARD platform. This includes specific
+	  configurations for the board and its peripherals.
+
 comment "i.MX6 family:"
 
 config SOC_IMX6Q
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index f5920c2..55db9c4 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -11,6 +11,8 @@
 obj-$(CONFIG_SOC_IMX31) += mm-imx3.o cpu-imx31.o clock-imx31.o iomux-imx31.o ehci-imx31.o
 obj-$(CONFIG_SOC_IMX35) += mm-imx3.o cpu-imx35.o clock-imx35.o ehci-imx35.o
 
+obj-$(CONFIG_SOC_IMX5) += cpu-imx5.o mm-imx5.o clock-mx51-mx53.o ehci-imx5.o pm-imx5.o cpu_op-mx51.o
+
 # Support for CMOS sensor interface
 obj-$(CONFIG_MX1_VIDEO) += mx1-camera-fiq.o mx1-camera-fiq-ksym.o
 
@@ -75,3 +77,22 @@
 ifeq ($(CONFIG_PM),y)
 obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o
 endif
+
+# i.MX5 based machines
+obj-$(CONFIG_MACH_MX51_BABBAGE) += mach-mx51_babbage.o
+obj-$(CONFIG_MACH_MX51_3DS) += mach-mx51_3ds.o
+obj-$(CONFIG_MACH_MX53_EVK) += mach-mx53_evk.o
+obj-$(CONFIG_MACH_MX53_SMD) += mach-mx53_smd.o
+obj-$(CONFIG_MACH_MX53_LOCO) += mach-mx53_loco.o
+obj-$(CONFIG_MACH_MX53_ARD) += mach-mx53_ard.o
+obj-$(CONFIG_MACH_EUKREA_CPUIMX51) += mach-cpuimx51.o
+obj-$(CONFIG_MACH_EUKREA_MBIMX51_BASEBOARD) += eukrea_mbimx51-baseboard.o
+obj-$(CONFIG_MACH_EUKREA_CPUIMX51SD) += mach-cpuimx51sd.o
+obj-$(CONFIG_MACH_EUKREA_MBIMXSD51_BASEBOARD) += eukrea_mbimxsd-baseboard.o
+obj-$(CONFIG_MX51_EFIKA_COMMON) += mx51_efika.o
+obj-$(CONFIG_MACH_MX51_EFIKAMX) += mach-mx51_efikamx.o
+obj-$(CONFIG_MACH_MX51_EFIKASB) += mach-mx51_efikasb.o
+obj-$(CONFIG_MACH_MX50_RDP) += mach-mx50_rdp.o
+
+obj-$(CONFIG_MACH_IMX51_DT) += imx51-dt.o
+obj-$(CONFIG_MACH_IMX53_DT) += imx53-dt.o
diff --git a/arch/arm/mach-imx/Makefile.boot b/arch/arm/mach-imx/Makefile.boot
index 5f4d06a..6dfdbcc 100644
--- a/arch/arm/mach-imx/Makefile.boot
+++ b/arch/arm/mach-imx/Makefile.boot
@@ -22,6 +22,18 @@
 params_phys-$(CONFIG_SOC_IMX35)	:= 0x80000100
 initrd_phys-$(CONFIG_SOC_IMX35)	:= 0x80800000
 
+zreladdr-$(CONFIG_SOC_IMX50)	+= 0x70008000
+params_phys-$(CONFIG_SOC_IMX50)	:= 0x70000100
+initrd_phys-$(CONFIG_SOC_IMX50)	:= 0x70800000
+
+zreladdr-$(CONFIG_SOC_IMX51)	+= 0x90008000
+params_phys-$(CONFIG_SOC_IMX51)	:= 0x90000100
+initrd_phys-$(CONFIG_SOC_IMX51)	:= 0x90800000
+
+zreladdr-$(CONFIG_SOC_IMX53)	+= 0x70008000
+params_phys-$(CONFIG_SOC_IMX53)	:= 0x70000100
+initrd_phys-$(CONFIG_SOC_IMX53)	:= 0x70800000
+
 zreladdr-$(CONFIG_SOC_IMX6Q)	+= 0x10008000
 params_phys-$(CONFIG_SOC_IMX6Q)	:= 0x10000100
 initrd_phys-$(CONFIG_SOC_IMX6Q)	:= 0x10800000
diff --git a/arch/arm/mach-mx5/clock-mx51-mx53.c b/arch/arm/mach-imx/clock-mx51-mx53.c
similarity index 99%
rename from arch/arm/mach-mx5/clock-mx51-mx53.c
rename to arch/arm/mach-imx/clock-mx51-mx53.c
index 4cb2769..0847050 100644
--- a/arch/arm/mach-mx5/clock-mx51-mx53.c
+++ b/arch/arm/mach-imx/clock-mx51-mx53.c
@@ -23,7 +23,7 @@
 #include <mach/common.h>
 #include <mach/clock.h>
 
-#include "crm_regs.h"
+#include "crm-regs-imx5.h"
 
 /* External clock values passed-in by the board code */
 static unsigned long external_high_reference, external_low_reference;
diff --git a/arch/arm/mach-mx5/cpu.c b/arch/arm/mach-imx/cpu-imx5.c
similarity index 100%
rename from arch/arm/mach-mx5/cpu.c
rename to arch/arm/mach-imx/cpu-imx5.c
diff --git a/arch/arm/mach-mx5/cpu_op-mx51.c b/arch/arm/mach-imx/cpu_op-mx51.c
similarity index 100%
rename from arch/arm/mach-mx5/cpu_op-mx51.c
rename to arch/arm/mach-imx/cpu_op-mx51.c
diff --git a/arch/arm/mach-mx5/cpu_op-mx51.h b/arch/arm/mach-imx/cpu_op-mx51.h
similarity index 100%
rename from arch/arm/mach-mx5/cpu_op-mx51.h
rename to arch/arm/mach-imx/cpu_op-mx51.h
diff --git a/arch/arm/mach-mx5/crm_regs.h b/arch/arm/mach-imx/crm-regs-imx5.h
similarity index 100%
rename from arch/arm/mach-mx5/crm_regs.h
rename to arch/arm/mach-imx/crm-regs-imx5.h
diff --git a/arch/arm/mach-mx5/devices-imx50.h b/arch/arm/mach-imx/devices-imx50.h
similarity index 100%
rename from arch/arm/mach-mx5/devices-imx50.h
rename to arch/arm/mach-imx/devices-imx50.h
diff --git a/arch/arm/mach-mx5/devices-imx51.h b/arch/arm/mach-imx/devices-imx51.h
similarity index 100%
rename from arch/arm/mach-mx5/devices-imx51.h
rename to arch/arm/mach-imx/devices-imx51.h
diff --git a/arch/arm/mach-mx5/devices-imx53.h b/arch/arm/mach-imx/devices-imx53.h
similarity index 100%
rename from arch/arm/mach-mx5/devices-imx53.h
rename to arch/arm/mach-imx/devices-imx53.h
diff --git a/arch/arm/mach-mx5/efika.h b/arch/arm/mach-imx/efika.h
similarity index 100%
rename from arch/arm/mach-mx5/efika.h
rename to arch/arm/mach-imx/efika.h
diff --git a/arch/arm/mach-mx5/ehci.c b/arch/arm/mach-imx/ehci-imx5.c
similarity index 100%
rename from arch/arm/mach-mx5/ehci.c
rename to arch/arm/mach-imx/ehci-imx5.c
diff --git a/arch/arm/mach-mx5/eukrea_mbimx51-baseboard.c b/arch/arm/mach-imx/eukrea_mbimx51-baseboard.c
similarity index 100%
rename from arch/arm/mach-mx5/eukrea_mbimx51-baseboard.c
rename to arch/arm/mach-imx/eukrea_mbimx51-baseboard.c
diff --git a/arch/arm/mach-mx5/eukrea_mbimxsd-baseboard.c b/arch/arm/mach-imx/eukrea_mbimxsd-baseboard.c
similarity index 100%
rename from arch/arm/mach-mx5/eukrea_mbimxsd-baseboard.c
rename to arch/arm/mach-imx/eukrea_mbimxsd-baseboard.c
diff --git a/arch/arm/mach-mx5/imx51-dt.c b/arch/arm/mach-imx/imx51-dt.c
similarity index 100%
rename from arch/arm/mach-mx5/imx51-dt.c
rename to arch/arm/mach-imx/imx51-dt.c
diff --git a/arch/arm/mach-mx5/imx53-dt.c b/arch/arm/mach-imx/imx53-dt.c
similarity index 100%
rename from arch/arm/mach-mx5/imx53-dt.c
rename to arch/arm/mach-imx/imx53-dt.c
diff --git a/arch/arm/mach-mx5/board-cpuimx51.c b/arch/arm/mach-imx/mach-cpuimx51.c
similarity index 100%
rename from arch/arm/mach-mx5/board-cpuimx51.c
rename to arch/arm/mach-imx/mach-cpuimx51.c
diff --git a/arch/arm/mach-mx5/board-cpuimx51sd.c b/arch/arm/mach-imx/mach-cpuimx51sd.c
similarity index 100%
rename from arch/arm/mach-mx5/board-cpuimx51sd.c
rename to arch/arm/mach-imx/mach-cpuimx51sd.c
diff --git a/arch/arm/mach-mx5/board-mx50_rdp.c b/arch/arm/mach-imx/mach-mx50_rdp.c
similarity index 100%
rename from arch/arm/mach-mx5/board-mx50_rdp.c
rename to arch/arm/mach-imx/mach-mx50_rdp.c
diff --git a/arch/arm/mach-mx5/board-mx51_3ds.c b/arch/arm/mach-imx/mach-mx51_3ds.c
similarity index 100%
rename from arch/arm/mach-mx5/board-mx51_3ds.c
rename to arch/arm/mach-imx/mach-mx51_3ds.c
diff --git a/arch/arm/mach-mx5/board-mx51_babbage.c b/arch/arm/mach-imx/mach-mx51_babbage.c
similarity index 100%
rename from arch/arm/mach-mx5/board-mx51_babbage.c
rename to arch/arm/mach-imx/mach-mx51_babbage.c
diff --git a/arch/arm/mach-mx5/board-mx51_efikamx.c b/arch/arm/mach-imx/mach-mx51_efikamx.c
similarity index 100%
rename from arch/arm/mach-mx5/board-mx51_efikamx.c
rename to arch/arm/mach-imx/mach-mx51_efikamx.c
diff --git a/arch/arm/mach-mx5/board-mx51_efikasb.c b/arch/arm/mach-imx/mach-mx51_efikasb.c
similarity index 100%
rename from arch/arm/mach-mx5/board-mx51_efikasb.c
rename to arch/arm/mach-imx/mach-mx51_efikasb.c
diff --git a/arch/arm/mach-mx5/board-mx53_ard.c b/arch/arm/mach-imx/mach-mx53_ard.c
similarity index 99%
rename from arch/arm/mach-mx5/board-mx53_ard.c
rename to arch/arm/mach-imx/mach-mx53_ard.c
index 5f224f1..08dfb76 100644
--- a/arch/arm/mach-mx5/board-mx53_ard.c
+++ b/arch/arm/mach-imx/mach-mx53_ard.c
@@ -32,7 +32,6 @@
 #include <asm/mach/arch.h>
 #include <asm/mach/time.h>
 
-#include "crm_regs.h"
 #include "devices-imx53.h"
 
 #define ARD_ETHERNET_INT_B	IMX_GPIO_NR(2, 31)
diff --git a/arch/arm/mach-mx5/board-mx53_evk.c b/arch/arm/mach-imx/mach-mx53_evk.c
similarity index 99%
rename from arch/arm/mach-mx5/board-mx53_evk.c
rename to arch/arm/mach-imx/mach-mx53_evk.c
index d6ce137..5a72188 100644
--- a/arch/arm/mach-mx5/board-mx53_evk.c
+++ b/arch/arm/mach-imx/mach-mx53_evk.c
@@ -37,7 +37,6 @@
 #define EVK_ECSPI1_CS1		IMX_GPIO_NR(3, 19)
 #define MX53EVK_LED		IMX_GPIO_NR(7, 7)
 
-#include "crm_regs.h"
 #include "devices-imx53.h"
 
 static iomux_v3_cfg_t mx53_evk_pads[] = {
diff --git a/arch/arm/mach-mx5/board-mx53_loco.c b/arch/arm/mach-imx/mach-mx53_loco.c
similarity index 99%
rename from arch/arm/mach-mx5/board-mx53_loco.c
rename to arch/arm/mach-imx/mach-mx53_loco.c
index fd8b524..37f67ca 100644
--- a/arch/arm/mach-mx5/board-mx53_loco.c
+++ b/arch/arm/mach-imx/mach-mx53_loco.c
@@ -32,7 +32,6 @@
 #include <asm/mach/arch.h>
 #include <asm/mach/time.h>
 
-#include "crm_regs.h"
 #include "devices-imx53.h"
 
 #define MX53_LOCO_POWER			IMX_GPIO_NR(1, 8)
diff --git a/arch/arm/mach-mx5/board-mx53_smd.c b/arch/arm/mach-imx/mach-mx53_smd.c
similarity index 99%
rename from arch/arm/mach-mx5/board-mx53_smd.c
rename to arch/arm/mach-imx/mach-mx53_smd.c
index 22c53c9..8e972c5 100644
--- a/arch/arm/mach-mx5/board-mx53_smd.c
+++ b/arch/arm/mach-imx/mach-mx53_smd.c
@@ -31,7 +31,6 @@
 #include <asm/mach/arch.h>
 #include <asm/mach/time.h>
 
-#include "crm_regs.h"
 #include "devices-imx53.h"
 
 #define SMD_FEC_PHY_RST		IMX_GPIO_NR(7, 6)
diff --git a/arch/arm/mach-mx5/mm.c b/arch/arm/mach-imx/mm-imx5.c
similarity index 100%
rename from arch/arm/mach-mx5/mm.c
rename to arch/arm/mach-imx/mm-imx5.c
diff --git a/arch/arm/mach-mx5/mx51_efika.c b/arch/arm/mach-imx/mx51_efika.c
similarity index 100%
rename from arch/arm/mach-mx5/mx51_efika.c
rename to arch/arm/mach-imx/mx51_efika.c
diff --git a/arch/arm/mach-imx/pm-imx5.c b/arch/arm/mach-imx/pm-imx5.c
new file mode 100644
index 0000000..6dc0934
--- /dev/null
+++ b/arch/arm/mach-imx/pm-imx5.c
@@ -0,0 +1,153 @@
+/*
+ *  Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+#include <linux/suspend.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <mach/common.h>
+#include <mach/hardware.h>
+#include "crm-regs-imx5.h"
+
+static struct clk *gpc_dvfs_clk;
+
+/*
+ * set cpu low power mode before WFI instruction. This function is called
+ * mx5 because it can be used for mx50, mx51, and mx53.
+ */
+void mx5_cpu_lp_set(enum mxc_cpu_pwr_mode mode)
+{
+	u32 plat_lpc, arm_srpgcr, ccm_clpcr;
+	u32 empgc0, empgc1;
+	int stop_mode = 0;
+
+	/* always allow platform to issue a deep sleep mode request */
+	plat_lpc = __raw_readl(MXC_CORTEXA8_PLAT_LPC) &
+	    ~(MXC_CORTEXA8_PLAT_LPC_DSM);
+	ccm_clpcr = __raw_readl(MXC_CCM_CLPCR) & ~(MXC_CCM_CLPCR_LPM_MASK);
+	arm_srpgcr = __raw_readl(MXC_SRPG_ARM_SRPGCR) & ~(MXC_SRPGCR_PCR);
+	empgc0 = __raw_readl(MXC_SRPG_EMPGC0_SRPGCR) & ~(MXC_SRPGCR_PCR);
+	empgc1 = __raw_readl(MXC_SRPG_EMPGC1_SRPGCR) & ~(MXC_SRPGCR_PCR);
+
+	switch (mode) {
+	case WAIT_CLOCKED:
+		break;
+	case WAIT_UNCLOCKED:
+		ccm_clpcr |= 0x1 << MXC_CCM_CLPCR_LPM_OFFSET;
+		break;
+	case WAIT_UNCLOCKED_POWER_OFF:
+	case STOP_POWER_OFF:
+		plat_lpc |= MXC_CORTEXA8_PLAT_LPC_DSM
+			    | MXC_CORTEXA8_PLAT_LPC_DBG_DSM;
+		if (mode == WAIT_UNCLOCKED_POWER_OFF) {
+			ccm_clpcr |= 0x1 << MXC_CCM_CLPCR_LPM_OFFSET;
+			ccm_clpcr &= ~MXC_CCM_CLPCR_VSTBY;
+			ccm_clpcr &= ~MXC_CCM_CLPCR_SBYOS;
+			stop_mode = 0;
+		} else {
+			ccm_clpcr |= 0x2 << MXC_CCM_CLPCR_LPM_OFFSET;
+			ccm_clpcr |= 0x3 << MXC_CCM_CLPCR_STBY_COUNT_OFFSET;
+			ccm_clpcr |= MXC_CCM_CLPCR_VSTBY;
+			ccm_clpcr |= MXC_CCM_CLPCR_SBYOS;
+			stop_mode = 1;
+		}
+		arm_srpgcr |= MXC_SRPGCR_PCR;
+		break;
+	case STOP_POWER_ON:
+		ccm_clpcr |= 0x2 << MXC_CCM_CLPCR_LPM_OFFSET;
+		break;
+	default:
+		printk(KERN_WARNING "UNKNOWN cpu power mode: %d\n", mode);
+		return;
+	}
+
+	__raw_writel(plat_lpc, MXC_CORTEXA8_PLAT_LPC);
+	__raw_writel(ccm_clpcr, MXC_CCM_CLPCR);
+	__raw_writel(arm_srpgcr, MXC_SRPG_ARM_SRPGCR);
+
+	/* Enable NEON SRPG for all but MX50TO1.0. */
+	if (mx50_revision() != IMX_CHIP_REVISION_1_0)
+		__raw_writel(arm_srpgcr, MXC_SRPG_NEON_SRPGCR);
+
+	if (stop_mode) {
+		empgc0 |= MXC_SRPGCR_PCR;
+		empgc1 |= MXC_SRPGCR_PCR;
+
+		__raw_writel(empgc0, MXC_SRPG_EMPGC0_SRPGCR);
+		__raw_writel(empgc1, MXC_SRPG_EMPGC1_SRPGCR);
+	}
+}
+
+static int mx5_suspend_prepare(void)
+{
+	return clk_enable(gpc_dvfs_clk);
+}
+
+static int mx5_suspend_enter(suspend_state_t state)
+{
+	switch (state) {
+	case PM_SUSPEND_MEM:
+		mx5_cpu_lp_set(STOP_POWER_OFF);
+		break;
+	case PM_SUSPEND_STANDBY:
+		mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (state == PM_SUSPEND_MEM) {
+		local_flush_tlb_all();
+		flush_cache_all();
+
+		/*clear the EMPGC0/1 bits */
+		__raw_writel(0, MXC_SRPG_EMPGC0_SRPGCR);
+		__raw_writel(0, MXC_SRPG_EMPGC1_SRPGCR);
+	}
+	cpu_do_idle();
+	return 0;
+}
+
+static void mx5_suspend_finish(void)
+{
+	clk_disable(gpc_dvfs_clk);
+}
+
+static int mx5_pm_valid(suspend_state_t state)
+{
+	return (state > PM_SUSPEND_ON && state <= PM_SUSPEND_MAX);
+}
+
+static const struct platform_suspend_ops mx5_suspend_ops = {
+	.valid = mx5_pm_valid,
+	.prepare = mx5_suspend_prepare,
+	.enter = mx5_suspend_enter,
+	.finish = mx5_suspend_finish,
+};
+
+static int __init mx5_pm_init(void)
+{
+	if (!cpu_is_mx51() && !cpu_is_mx53())
+		return 0;
+
+	if (gpc_dvfs_clk == NULL)
+		gpc_dvfs_clk = clk_get(NULL, "gpc_dvfs");
+
+	if (!IS_ERR(gpc_dvfs_clk)) {
+		if (cpu_is_mx51())
+			suspend_set_ops(&mx5_suspend_ops);
+	} else
+		return -EPERM;
+
+	return 0;
+}
+device_initcall(mx5_pm_init);
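The new pm-imx5.c above folds the old mach-mx5 system.c and pm code into a single file under mach-imx. For reference, a minimal sketch of the suspend pattern it follows — gate a clock in prepare/finish and register platform_suspend_ops from an initcall. The example_* names are hypothetical; the clock name and the ops fields mirror the file above.

    #include <linux/suspend.h>
    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/init.h>
    #include <asm/proc-fns.h>	/* cpu_do_idle() */

    static struct clk *example_clk;

    static int example_prepare(void)
    {
        return clk_enable(example_clk);
    }

    static int example_enter(suspend_state_t state)
    {
        if (state != PM_SUSPEND_MEM)
            return -EINVAL;
        /* program the SoC low-power mode here, then stop the core */
        cpu_do_idle();
        return 0;
    }

    static void example_finish(void)
    {
        clk_disable(example_clk);
    }

    static const struct platform_suspend_ops example_suspend_ops = {
        .valid   = suspend_valid_only_mem,
        .prepare = example_prepare,
        .enter   = example_enter,
        .finish  = example_finish,
    };

    static int __init example_pm_init(void)
    {
        example_clk = clk_get(NULL, "gpc_dvfs");
        if (IS_ERR(example_clk))
            return PTR_ERR(example_clk);
        suspend_set_ops(&example_suspend_ops);
        return 0;
    }
    device_initcall(example_pm_init);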
diff --git a/arch/arm/mach-mx5/Kconfig b/arch/arm/mach-mx5/Kconfig
deleted file mode 100644
index af0c212..0000000
--- a/arch/arm/mach-mx5/Kconfig
+++ /dev/null
@@ -1,244 +0,0 @@
-if ARCH_MX5
-
-# ARCH_MX5/50/53 are left to mark places where prevent multi-soc in single
-# image. So for most time, SOC_IMX50/51/53 should be used.
-
-config ARCH_MX51
-	bool
-
-config ARCH_MX50
-	bool
-
-config ARCH_MX53
-	bool
-
-config SOC_IMX50
-	bool
-	select CPU_V7
-	select ARM_L1_CACHE_SHIFT_6
-	select MXC_TZIC
-	select ARCH_MXC_IOMUX_V3
-	select ARCH_MXC_AUDMUX_V2
-	select ARCH_HAS_CPUFREQ
-	select ARCH_MX50
-
-config	SOC_IMX51
-	bool
-	select CPU_V7
-	select ARM_L1_CACHE_SHIFT_6
-	select MXC_TZIC
-	select ARCH_MXC_IOMUX_V3
-	select ARCH_MXC_AUDMUX_V2
-	select ARCH_HAS_CPUFREQ
-	select ARCH_MX51
-
-config	SOC_IMX53
-	bool
-	select CPU_V7
-	select ARM_L1_CACHE_SHIFT_6
-	select MXC_TZIC
-	select ARCH_MXC_IOMUX_V3
-	select ARCH_MX53
-
-#comment "i.MX50 machines:"
-
-config MACH_MX50_RDP
-	bool "Support MX50 reference design platform"
-	depends on BROKEN
-	select SOC_IMX50
-	select IMX_HAVE_PLATFORM_IMX_I2C
-	select IMX_HAVE_PLATFORM_IMX_UART
-	select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
-	select IMX_HAVE_PLATFORM_SPI_IMX
-	help
-	  Include support for MX50 reference design platform (RDP) board. This
-	  includes specific configurations for the board and its peripherals.
-
-comment "i.MX51 machines:"
-
-config MACH_IMX51_DT
-	bool "Support i.MX51 platforms from device tree"
-	select SOC_IMX51
-	select USE_OF
-	select MACH_MX51_BABBAGE
-	help
-	  Include support for Freescale i.MX51 based platforms
-	  using the device tree for discovery
-
-config MACH_MX51_BABBAGE
-	bool "Support MX51 BABBAGE platforms"
-	select SOC_IMX51
-	select IMX_HAVE_PLATFORM_FSL_USB2_UDC
-	select IMX_HAVE_PLATFORM_IMX2_WDT
-	select IMX_HAVE_PLATFORM_IMX_I2C
-	select IMX_HAVE_PLATFORM_IMX_UART
-	select IMX_HAVE_PLATFORM_MXC_EHCI
-	select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
-	select IMX_HAVE_PLATFORM_SPI_IMX
-	help
-	  Include support for MX51 Babbage platform, also known as MX51EVK in
-	  u-boot. This includes specific configurations for the board and its
-	  peripherals.
-
-config MACH_MX51_3DS
-	bool "Support MX51PDK (3DS)"
-	select SOC_IMX51
-	select IMX_HAVE_PLATFORM_IMX2_WDT
-	select IMX_HAVE_PLATFORM_IMX_KEYPAD
-	select IMX_HAVE_PLATFORM_IMX_UART
-	select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
-	select IMX_HAVE_PLATFORM_SPI_IMX
-	select MXC_DEBUG_BOARD
-	help
-	  Include support for MX51PDK (3DS) platform. This includes specific
-	  configurations for the board and its peripherals.
-
-config MACH_EUKREA_CPUIMX51
-	bool "Support Eukrea CPUIMX51 module"
-	select SOC_IMX51
-	select IMX_HAVE_PLATFORM_FSL_USB2_UDC
-	select IMX_HAVE_PLATFORM_IMX_I2C
-	select IMX_HAVE_PLATFORM_IMX_UART
-	select IMX_HAVE_PLATFORM_MXC_EHCI
-	select IMX_HAVE_PLATFORM_MXC_NAND
-	select IMX_HAVE_PLATFORM_SPI_IMX
-	help
-	  Include support for Eukrea CPUIMX51 platform. This includes
-	  specific configurations for the module and its peripherals.
-
-choice
-	prompt "Baseboard"
-	depends on MACH_EUKREA_CPUIMX51
-	default MACH_EUKREA_MBIMX51_BASEBOARD
-
-config MACH_EUKREA_MBIMX51_BASEBOARD
-	prompt "Eukrea MBIMX51 development board"
-	bool
-	select IMX_HAVE_PLATFORM_IMX_KEYPAD
-	select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
-	select LEDS_GPIO_REGISTER
-	help
-	  This adds board specific devices that can be found on Eukrea's
-	  MBIMX51 evaluation board.
-
-endchoice
-
-config MACH_EUKREA_CPUIMX51SD
-	bool "Support Eukrea CPUIMX51SD module"
-	select SOC_IMX51
-	select IMX_HAVE_PLATFORM_FSL_USB2_UDC
-	select IMX_HAVE_PLATFORM_IMX_I2C
-	select IMX_HAVE_PLATFORM_IMX_UART
-	select IMX_HAVE_PLATFORM_MXC_EHCI
-	select IMX_HAVE_PLATFORM_MXC_NAND
-	select IMX_HAVE_PLATFORM_SPI_IMX
-	help
-	  Include support for Eukrea CPUIMX51SD platform. This includes
-	  specific configurations for the module and its peripherals.
-
-choice
-	prompt "Baseboard"
-	depends on MACH_EUKREA_CPUIMX51SD
-	default MACH_EUKREA_MBIMXSD51_BASEBOARD
-
-config MACH_EUKREA_MBIMXSD51_BASEBOARD
-	prompt "Eukrea MBIMXSD development board"
-	bool
-	select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
-	select LEDS_GPIO_REGISTER
-	help
-	  This adds board specific devices that can be found on Eukrea's
-	  MBIMXSD evaluation board.
-
-endchoice
-
-config MX51_EFIKA_COMMON
-	bool
-	select SOC_IMX51
-	select IMX_HAVE_PLATFORM_IMX_UART
-	select IMX_HAVE_PLATFORM_MXC_EHCI
-	select IMX_HAVE_PLATFORM_PATA_IMX
-	select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
-	select IMX_HAVE_PLATFORM_SPI_IMX
-	select MXC_ULPI if USB_ULPI
-
-config MACH_MX51_EFIKAMX
-	bool "Support MX51 Genesi Efika MX nettop"
-	select LEDS_GPIO_REGISTER
-	select MX51_EFIKA_COMMON
-	help
-	  Include support for Genesi Efika MX nettop. This includes specific
-	  configurations for the board and its peripherals.
-
-config MACH_MX51_EFIKASB
-	bool "Support MX51 Genesi Efika Smartbook"
-	select LEDS_GPIO_REGISTER
-	select MX51_EFIKA_COMMON
-	help
-	  Include support for Genesi Efika Smartbook. This includes specific
-	  configurations for the board and its peripherals.
-
-comment "i.MX53 machines:"
-
-config MACH_IMX53_DT
-	bool "Support i.MX53 platforms from device tree"
-	select SOC_IMX53
-	select USE_OF
-	select MACH_MX53_ARD
-	select MACH_MX53_EVK
-	select MACH_MX53_LOCO
-	select MACH_MX53_SMD
-	help
-	  Include support for Freescale i.MX53 based platforms
-	  using the device tree for discovery
-
-config MACH_MX53_EVK
-	bool "Support MX53 EVK platforms"
-	select SOC_IMX53
-	select IMX_HAVE_PLATFORM_IMX2_WDT
-	select IMX_HAVE_PLATFORM_IMX_UART
-	select IMX_HAVE_PLATFORM_IMX_I2C
-	select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
-	select IMX_HAVE_PLATFORM_SPI_IMX
-	select LEDS_GPIO_REGISTER
-	help
-	  Include support for MX53 EVK platform. This includes specific
-	  configurations for the board and its peripherals.
-
-config MACH_MX53_SMD
-	bool "Support MX53 SMD platforms"
-	select SOC_IMX53
-	select IMX_HAVE_PLATFORM_IMX2_WDT
-	select IMX_HAVE_PLATFORM_IMX_I2C
-	select IMX_HAVE_PLATFORM_IMX_UART
-	select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
-	help
-	  Include support for MX53 SMD platform. This includes specific
-	  configurations for the board and its peripherals.
-
-config MACH_MX53_LOCO
-	bool "Support MX53 LOCO platforms"
-	select SOC_IMX53
-	select IMX_HAVE_PLATFORM_IMX2_WDT
-	select IMX_HAVE_PLATFORM_IMX_I2C
-	select IMX_HAVE_PLATFORM_IMX_UART
-	select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
-	select IMX_HAVE_PLATFORM_GPIO_KEYS
-	select LEDS_GPIO_REGISTER
-	help
-	  Include support for MX53 LOCO platform. This includes specific
-	  configurations for the board and its peripherals.
-
-config MACH_MX53_ARD
-	bool "Support MX53 ARD platforms"
-	select SOC_IMX53
-	select IMX_HAVE_PLATFORM_IMX2_WDT
-	select IMX_HAVE_PLATFORM_IMX_I2C
-	select IMX_HAVE_PLATFORM_IMX_UART
-	select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
-	select IMX_HAVE_PLATFORM_GPIO_KEYS
-	help
-	  Include support for MX53 ARD platform. This includes specific
-	  configurations for the board and its peripherals.
-
-endif
diff --git a/arch/arm/mach-mx5/Makefile b/arch/arm/mach-mx5/Makefile
deleted file mode 100644
index 0fc6080..0000000
--- a/arch/arm/mach-mx5/Makefile
+++ /dev/null
@@ -1,26 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-
-# Object file lists.
-obj-y   := cpu.o mm.o clock-mx51-mx53.o ehci.o system.o
-
-obj-$(CONFIG_PM) += pm-imx5.o
-obj-$(CONFIG_CPU_FREQ_IMX)    += cpu_op-mx51.o
-obj-$(CONFIG_MACH_MX51_BABBAGE) += board-mx51_babbage.o
-obj-$(CONFIG_MACH_MX51_3DS) += board-mx51_3ds.o
-obj-$(CONFIG_MACH_MX53_EVK) += board-mx53_evk.o
-obj-$(CONFIG_MACH_MX53_SMD) += board-mx53_smd.o
-obj-$(CONFIG_MACH_MX53_LOCO) += board-mx53_loco.o
-obj-$(CONFIG_MACH_MX53_ARD) += board-mx53_ard.o
-obj-$(CONFIG_MACH_EUKREA_CPUIMX51) += board-cpuimx51.o
-obj-$(CONFIG_MACH_EUKREA_MBIMX51_BASEBOARD) += eukrea_mbimx51-baseboard.o
-obj-$(CONFIG_MACH_EUKREA_CPUIMX51SD) += board-cpuimx51sd.o
-obj-$(CONFIG_MACH_EUKREA_MBIMXSD51_BASEBOARD) += eukrea_mbimxsd-baseboard.o
-obj-$(CONFIG_MX51_EFIKA_COMMON) += mx51_efika.o
-obj-$(CONFIG_MACH_MX51_EFIKAMX) += board-mx51_efikamx.o
-obj-$(CONFIG_MACH_MX51_EFIKASB) += board-mx51_efikasb.o
-obj-$(CONFIG_MACH_MX50_RDP) += board-mx50_rdp.o
-
-obj-$(CONFIG_MACH_IMX51_DT) += imx51-dt.o
-obj-$(CONFIG_MACH_IMX53_DT) += imx53-dt.o
diff --git a/arch/arm/mach-mx5/Makefile.boot b/arch/arm/mach-mx5/Makefile.boot
deleted file mode 100644
index ca207ca..0000000
--- a/arch/arm/mach-mx5/Makefile.boot
+++ /dev/null
@@ -1,9 +0,0 @@
-   zreladdr-$(CONFIG_ARCH_MX50)	+= 0x70008000
-params_phys-$(CONFIG_ARCH_MX50)	:= 0x70000100
-initrd_phys-$(CONFIG_ARCH_MX50)	:= 0x70800000
-   zreladdr-$(CONFIG_ARCH_MX51)	+= 0x90008000
-params_phys-$(CONFIG_ARCH_MX51)	:= 0x90000100
-initrd_phys-$(CONFIG_ARCH_MX51)	:= 0x90800000
-   zreladdr-$(CONFIG_ARCH_MX53)	+= 0x70008000
-params_phys-$(CONFIG_ARCH_MX53)	:= 0x70000100
-initrd_phys-$(CONFIG_ARCH_MX53)	:= 0x70800000
diff --git a/arch/arm/mach-mx5/pm-imx5.c b/arch/arm/mach-mx5/pm-imx5.c
deleted file mode 100644
index 98052fc..0000000
--- a/arch/arm/mach-mx5/pm-imx5.c
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- *  Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
- */
-#include <linux/suspend.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/err.h>
-#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
-#include <mach/common.h>
-#include <mach/hardware.h>
-#include "crm_regs.h"
-
-static struct clk *gpc_dvfs_clk;
-
-static int mx5_suspend_prepare(void)
-{
-	return clk_enable(gpc_dvfs_clk);
-}
-
-static int mx5_suspend_enter(suspend_state_t state)
-{
-	switch (state) {
-	case PM_SUSPEND_MEM:
-		mx5_cpu_lp_set(STOP_POWER_OFF);
-		break;
-	case PM_SUSPEND_STANDBY:
-		mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF);
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	if (state == PM_SUSPEND_MEM) {
-		local_flush_tlb_all();
-		flush_cache_all();
-
-		/*clear the EMPGC0/1 bits */
-		__raw_writel(0, MXC_SRPG_EMPGC0_SRPGCR);
-		__raw_writel(0, MXC_SRPG_EMPGC1_SRPGCR);
-	}
-	cpu_do_idle();
-	return 0;
-}
-
-static void mx5_suspend_finish(void)
-{
-	clk_disable(gpc_dvfs_clk);
-}
-
-static int mx5_pm_valid(suspend_state_t state)
-{
-	return (state > PM_SUSPEND_ON && state <= PM_SUSPEND_MAX);
-}
-
-static const struct platform_suspend_ops mx5_suspend_ops = {
-	.valid = mx5_pm_valid,
-	.prepare = mx5_suspend_prepare,
-	.enter = mx5_suspend_enter,
-	.finish = mx5_suspend_finish,
-};
-
-static int __init mx5_pm_init(void)
-{
-	if (gpc_dvfs_clk == NULL)
-		gpc_dvfs_clk = clk_get(NULL, "gpc_dvfs");
-
-	if (!IS_ERR(gpc_dvfs_clk)) {
-		if (cpu_is_mx51())
-			suspend_set_ops(&mx5_suspend_ops);
-	} else
-		return -EPERM;
-
-	return 0;
-}
-device_initcall(mx5_pm_init);
diff --git a/arch/arm/mach-mx5/system.c b/arch/arm/mach-mx5/system.c
deleted file mode 100644
index 5eebfaa..0000000
--- a/arch/arm/mach-mx5/system.c
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
- */
-
-/*
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
- */
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <mach/hardware.h>
-#include <mach/common.h>
-#include "crm_regs.h"
-
-/* set cpu low power mode before WFI instruction. This function is called
-  * mx5 because it can be used for mx50, mx51, and mx53.*/
-void mx5_cpu_lp_set(enum mxc_cpu_pwr_mode mode)
-{
-	u32 plat_lpc, arm_srpgcr, ccm_clpcr;
-	u32 empgc0, empgc1;
-	int stop_mode = 0;
-
-	/* always allow platform to issue a deep sleep mode request */
-	plat_lpc = __raw_readl(MXC_CORTEXA8_PLAT_LPC) &
-	    ~(MXC_CORTEXA8_PLAT_LPC_DSM);
-	ccm_clpcr = __raw_readl(MXC_CCM_CLPCR) & ~(MXC_CCM_CLPCR_LPM_MASK);
-	arm_srpgcr = __raw_readl(MXC_SRPG_ARM_SRPGCR) & ~(MXC_SRPGCR_PCR);
-	empgc0 = __raw_readl(MXC_SRPG_EMPGC0_SRPGCR) & ~(MXC_SRPGCR_PCR);
-	empgc1 = __raw_readl(MXC_SRPG_EMPGC1_SRPGCR) & ~(MXC_SRPGCR_PCR);
-
-	switch (mode) {
-	case WAIT_CLOCKED:
-		break;
-	case WAIT_UNCLOCKED:
-		ccm_clpcr |= 0x1 << MXC_CCM_CLPCR_LPM_OFFSET;
-		break;
-	case WAIT_UNCLOCKED_POWER_OFF:
-	case STOP_POWER_OFF:
-		plat_lpc |= MXC_CORTEXA8_PLAT_LPC_DSM
-			    | MXC_CORTEXA8_PLAT_LPC_DBG_DSM;
-		if (mode == WAIT_UNCLOCKED_POWER_OFF) {
-			ccm_clpcr |= 0x1 << MXC_CCM_CLPCR_LPM_OFFSET;
-			ccm_clpcr &= ~MXC_CCM_CLPCR_VSTBY;
-			ccm_clpcr &= ~MXC_CCM_CLPCR_SBYOS;
-			stop_mode = 0;
-		} else {
-			ccm_clpcr |= 0x2 << MXC_CCM_CLPCR_LPM_OFFSET;
-			ccm_clpcr |= 0x3 << MXC_CCM_CLPCR_STBY_COUNT_OFFSET;
-			ccm_clpcr |= MXC_CCM_CLPCR_VSTBY;
-			ccm_clpcr |= MXC_CCM_CLPCR_SBYOS;
-			stop_mode = 1;
-		}
-		arm_srpgcr |= MXC_SRPGCR_PCR;
-		break;
-	case STOP_POWER_ON:
-		ccm_clpcr |= 0x2 << MXC_CCM_CLPCR_LPM_OFFSET;
-		break;
-	default:
-		printk(KERN_WARNING "UNKNOWN cpu power mode: %d\n", mode);
-		return;
-	}
-
-	__raw_writel(plat_lpc, MXC_CORTEXA8_PLAT_LPC);
-	__raw_writel(ccm_clpcr, MXC_CCM_CLPCR);
-	__raw_writel(arm_srpgcr, MXC_SRPG_ARM_SRPGCR);
-
-	/* Enable NEON SRPG for all but MX50TO1.0. */
-	if (mx50_revision() != IMX_CHIP_REVISION_1_0)
-		__raw_writel(arm_srpgcr, MXC_SRPG_NEON_SRPGCR);
-
-	if (stop_mode) {
-		empgc0 |= MXC_SRPGCR_PCR;
-		empgc1 |= MXC_SRPGCR_PCR;
-
-		__raw_writel(empgc0, MXC_SRPG_EMPGC0_SRPGCR);
-		__raw_writel(empgc1, MXC_SRPG_EMPGC1_SRPGCR);
-	}
-}
diff --git a/arch/arm/plat-mxc/Kconfig b/arch/arm/plat-mxc/Kconfig
index b30708e..dcebb12 100644
--- a/arch/arm/plat-mxc/Kconfig
+++ b/arch/arm/plat-mxc/Kconfig
@@ -17,26 +17,17 @@
 	  and ARMv5 SoCs
 
 config ARCH_IMX_V6_V7
-	bool "i.MX3, i.MX6"
+	bool "i.MX3, i.MX5, i.MX6"
 	select AUTO_ZRELADDR if !ZBOOT_ROM
 	select ARM_PATCH_PHYS_VIRT
 	select MIGHT_HAVE_CACHE_L2X0
 	help
-	  This enables support for systems based on the Freescale i.MX3 and i.MX6
-	  family.
-
-config ARCH_MX5
-	bool "i.MX50, i.MX51, i.MX53"
-	select AUTO_ZRELADDR if !ZBOOT_ROM
-	select ARM_PATCH_PHYS_VIRT
-	help
-	  This enables support for machines using Freescale's i.MX50 and i.MX53
-	  processors.
+	  This enables support for systems based on the Freescale i.MX3, i.MX5
+	  and i.MX6 family.
 
 endchoice
 
 source "arch/arm/mach-imx/Kconfig"
-source "arch/arm/mach-mx5/Kconfig"
 
 endmenu
 
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index bfb4d01..5207035 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -429,22 +429,24 @@
 static struct acpi_table_slit __initdata *slit_table;
 cpumask_t early_cpu_possible_map = CPU_MASK_NONE;
 
-static int get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
+static int __init
+get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
 {
 	int pxm;
 
 	pxm = pa->proximity_domain_lo;
-	if (ia64_platform_is("sn2"))
+	if (ia64_platform_is("sn2") || acpi_srat_revision >= 2)
 		pxm += pa->proximity_domain_hi[0] << 8;
 	return pxm;
 }
 
-static int get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
+static int __init
+get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
 {
 	int pxm;
 
 	pxm = ma->proximity_domain;
-	if (!ia64_platform_is("sn2"))
+	if (!ia64_platform_is("sn2") && acpi_srat_revision <= 1)
 		pxm &= 0xff;
 
 	return pxm;
diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h
index cf4e47b..3f30dac 100644
--- a/arch/s390/include/asm/kexec.h
+++ b/arch/s390/include/asm/kexec.h
@@ -42,6 +42,24 @@
 /* The native architecture */
 #define KEXEC_ARCH KEXEC_ARCH_S390
 
+/*
+ * Size for s390x ELF notes per CPU
+ *
+ * Seven notes plus zero note at the end: prstatus, fpregset, timer,
+ * tod_cmp, tod_reg, control regs, and prefix
+ */
+#define KEXEC_NOTE_BYTES \
+	(ALIGN(sizeof(struct elf_note), 4) * 8 + \
+	 ALIGN(sizeof("CORE"), 4) * 7 + \
+	 ALIGN(sizeof(struct elf_prstatus), 4) + \
+	 ALIGN(sizeof(elf_fpregset_t), 4) + \
+	 ALIGN(sizeof(u64), 4) + \
+	 ALIGN(sizeof(u64), 4) + \
+	 ALIGN(sizeof(u32), 4) + \
+	 ALIGN(sizeof(u64) * 16, 4) + \
+	 ALIGN(sizeof(u32), 4) \
+	)
+
 /* Provide a dummy definition to avoid build failures. */
 static inline void crash_setup_regs(struct pt_regs *newregs,
 					struct pt_regs *oldregs) { }
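The KEXEC_NOTE_BYTES sum above follows the standard ELF note layout: each note is a 4-byte-aligned header, the 4-byte-aligned name ("CORE"), and the 4-byte-aligned descriptor, and the buffer ends with one empty note (header only) — hence eight headers but only seven name/descriptor pairs. A hedged per-note sketch using the same helpers:

    /* Size of one "CORE" note carrying a descsz-byte payload (sketch). */
    #define EXAMPLE_NOTE_BYTES(descsz)		\
        (ALIGN(sizeof(struct elf_note), 4) +	\
         ALIGN(sizeof("CORE"), 4) +		\
         ALIGN((descsz), 4))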
diff --git a/arch/score/kernel/entry.S b/arch/score/kernel/entry.S
index 577abba..83bb960 100644
--- a/arch/score/kernel/entry.S
+++ b/arch/score/kernel/entry.S
@@ -408,7 +408,7 @@
 	sw	r9, [r0, PT_EPC]
 
 	cmpi.c	r27, __NR_syscalls 	# check syscall number
-	bgtu	illegal_syscall
+	bgeu	illegal_syscall
 
 	slli	r8, r27, 2		# get syscall routine
 	la	r11, sys_call_table
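The score change above tightens the syscall bounds check: bgtu branches to illegal_syscall only for numbers strictly greater than __NR_syscalls, so the value __NR_syscalls itself slipped through and indexed one entry past sys_call_table; bgeu also rejects the equal case. A hedged C model of the corrected check (the table pointer and error value are illustrative only):

    /* Valid indices are 0 .. nr_syscalls - 1; the old strictly-greater
     * comparison let nr == nr_syscalls read past the end of the table. */
    static long example_dispatch(unsigned long nr,
                                 long (*const table[])(void),
                                 unsigned long nr_syscalls)
    {
        if (nr >= nr_syscalls)
            return -ENOSYS;	/* illegal_syscall */
        return table[nr]();
    }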
diff --git a/arch/x86/.gitignore b/arch/x86/.gitignore
index 0280790..7cab8c0 100644
--- a/arch/x86/.gitignore
+++ b/arch/x86/.gitignore
@@ -1,3 +1,4 @@
 boot/compressed/vmlinux
 tools/test_get_len
+tools/insn_sanity
 
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 6c14ecd..864cc6e 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -125,16 +125,6 @@
 config MMU
 	def_bool y
 
-config ZONE_DMA
-	bool "DMA memory allocation support" if EXPERT
-	default y
-	help
-	  DMA memory allocation support allows devices with less than 32-bit
-	  addressing to allocate within the first 16MB of address space.
-	  Disable if no such devices will be used.
-
-	  If unsure, say Y.
-
 config SBUS
 	bool
 
@@ -255,6 +245,16 @@
 
 menu "Processor type and features"
 
+config ZONE_DMA
+	bool "DMA memory allocation support" if EXPERT
+	default y
+	help
+	  DMA memory allocation support allows devices with less than 32-bit
+	  addressing to allocate within the first 16MB of address space.
+	  Disable if no such devices will be used.
+
+	  If unsure, say Y.
+
 source "kernel/time/Kconfig"
 
 config SMP
diff --git a/arch/x86/include/asm/unistd.h b/arch/x86/include/asm/unistd.h
index b4a3db7..21f77b8 100644
--- a/arch/x86/include/asm/unistd.h
+++ b/arch/x86/include/asm/unistd.h
@@ -7,6 +7,7 @@
 #  include <asm/unistd_32.h>
 #  define __ARCH_WANT_IPC_PARSE_VERSION
 #  define __ARCH_WANT_STAT64
+#  define __ARCH_WANT_SYS_IPC
 #  define __ARCH_WANT_SYS_OLD_MMAP
 #  define __ARCH_WANT_SYS_OLD_SELECT
 
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index 8e862aa..becf47b 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -65,7 +65,7 @@
  * UV2: Bit 19 selects between
  *  (0): 10 microsecond timebase and
  *  (1): 80 microseconds
- *  we're using 655us, similar to UV1: 65 units of 10us
+ *  we're using 560us, similar to UV1: 65 units of 10us
  */
 #define UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD (9UL)
 #define UV2_INTD_SOFT_ACK_TIMEOUT_PERIOD (15UL)
@@ -167,6 +167,7 @@
 #define FLUSH_RETRY_TIMEOUT		2
 #define FLUSH_GIVEUP			3
 #define FLUSH_COMPLETE			4
+#define FLUSH_RETRY_BUSYBUG		5
 
 /*
  * tuning the action when the numalink network is extremely delayed
@@ -235,10 +236,10 @@
 
 
 /*
- * Message header:  16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
+ * UV1 Message header:  16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
  * see table 4.2.3.0.1 in broacast_assist spec.
  */
-struct bau_msg_header {
+struct uv1_bau_msg_header {
 	unsigned int	dest_subnodeid:6;	/* must be 0x10, for the LB */
 	/* bits 5:0 */
 	unsigned int	base_dest_nasid:15;	/* nasid of the first bit */
@@ -318,19 +319,87 @@
 };
 
 /*
+ * UV2 Message header:  16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
+ * see figure 9-2 of harp_sys.pdf
+ */
+struct uv2_bau_msg_header {
+	unsigned int	base_dest_nasid:15;	/* nasid of the first bit */
+	/* bits 14:0 */				/* in uvhub map */
+	unsigned int	dest_subnodeid:5;	/* must be 0x10, for the LB */
+	/* bits 19:15 */
+	unsigned int	rsvd_1:1;		/* must be zero */
+	/* bit 20 */
+	/* Address bits 59:21 */
+	/* bits 25:2 of address (44:21) are payload */
+	/* these next 24 bits become bytes 12-14 of msg */
+	/* bits 28:21 land in byte 12 */
+	unsigned int	replied_to:1;		/* sent as 0 by the source to
+						   byte 12 */
+	/* bit 21 */
+	unsigned int	msg_type:3;		/* software type of the
+						   message */
+	/* bits 24:22 */
+	unsigned int	canceled:1;		/* message canceled, resource
+						   is to be freed*/
+	/* bit 25 */
+	unsigned int	payload_1:3;		/* not currently used */
+	/* bits 28:26 */
+
+	/* bits 36:29 land in byte 13 */
+	unsigned int	payload_2a:3;		/* not currently used */
+	unsigned int	payload_2b:5;		/* not currently used */
+	/* bits 36:29 */
+
+	/* bits 44:37 land in byte 14 */
+	unsigned int	payload_3:8;		/* not currently used */
+	/* bits 44:37 */
+
+	unsigned int	rsvd_2:7;		/* reserved */
+	/* bits 51:45 */
+	unsigned int	swack_flag:1;		/* software acknowledge flag */
+	/* bit 52 */
+	unsigned int	rsvd_3a:3;		/* must be zero */
+	unsigned int	rsvd_3b:8;		/* must be zero */
+	unsigned int	rsvd_3c:8;		/* must be zero */
+	unsigned int	rsvd_3d:3;		/* must be zero */
+	/* bits 74:53 */
+	unsigned int	fairness:3;		/* usually zero */
+	/* bits 77:75 */
+
+	unsigned int	sequence:16;		/* message sequence number */
+	/* bits 93:78  Suppl_A  */
+	unsigned int	chaining:1;		/* next descriptor is part of
+						   this activation*/
+	/* bit 94 */
+	unsigned int	multilevel:1;		/* multi-level multicast
+						   format */
+	/* bit 95 */
+	unsigned int	rsvd_4:24;		/* ordered / source node /
+						   source subnode / aging
+						   must be zero */
+	/* bits 119:96 */
+	unsigned int	command:8;		/* message type */
+	/* bits 127:120 */
+};
+
+/*
  * The activation descriptor:
  * The format of the message to send, plus all accompanying control
  * Should be 64 bytes
  */
 struct bau_desc {
-	struct pnmask			distribution;
+	struct pnmask				distribution;
 	/*
 	 * message template, consisting of header and payload:
 	 */
-	struct bau_msg_header		header;
-	struct bau_msg_payload		payload;
+	union bau_msg_header {
+		struct uv1_bau_msg_header	uv1_hdr;
+		struct uv2_bau_msg_header	uv2_hdr;
+	} header;
+
+	struct bau_msg_payload			payload;
 };
-/*
+/* UV1:
  *   -payload--    ---------header------
  *   bytes 0-11    bits 41-56  bits 58-81
  *       A           B  (2)      C (3)
@@ -340,6 +409,16 @@
  *   bytes 0-11  bytes 12-14  bytes 16-17  (byte 15 filled in by hw as vector)
  *   ------------payload queue-----------
  */
+/* UV2:
+ *   -payload--    ---------header------
+ *   bytes 0-11    bits 70-78  bits 21-44
+ *       A           B  (2)      C (3)
+ *
+ *            A/B/C are moved to:
+ *       A            C          B
+ *   bytes 0-11  bytes 12-14  bytes 16-17  (byte 15 filled in by hw as vector)
+ *   ------------payload queue-----------
+ */
 
 /*
  * The payload queue on the destination side is an array of these.
@@ -385,7 +464,6 @@
 struct msg_desc {
 	struct bau_pq_entry	*msg;
 	int			msg_slot;
-	int			swack_slot;
 	struct bau_pq_entry	*queue_first;
 	struct bau_pq_entry	*queue_last;
 };
@@ -405,6 +483,7 @@
 						   requests */
 	unsigned long	s_stimeout;		/* source side timeouts */
 	unsigned long	s_dtimeout;		/* destination side timeouts */
+	unsigned long	s_strongnacks;		/* number of strong nack's */
 	unsigned long	s_time;			/* time spent in sending side */
 	unsigned long	s_retriesok;		/* successful retries */
 	unsigned long	s_ntargcpu;		/* total number of cpu's
@@ -439,6 +518,9 @@
 	unsigned long	s_retry_messages;	/* retry broadcasts */
 	unsigned long	s_bau_reenabled;	/* for bau enable/disable */
 	unsigned long	s_bau_disabled;		/* for bau enable/disable */
+	unsigned long	s_uv2_wars;		/* uv2 workaround, perm. busy */
+	unsigned long	s_uv2_wars_hw;		/* uv2 workaround, hiwater */
+	unsigned long	s_uv2_war_waits;	/* uv2 workaround, long waits */
 	/* destination statistics */
 	unsigned long	d_alltlb;		/* times all tlb's on this
 						   cpu were flushed */
@@ -511,9 +593,12 @@
 	short			osnode;
 	short			uvhub_cpu;
 	short			uvhub;
+	short			uvhub_version;
 	short			cpus_in_socket;
 	short			cpus_in_uvhub;
 	short			partition_base_pnode;
+	short			using_desc; /* an index, like uvhub_cpu */
+	unsigned int		inuse_map;
 	unsigned short		message_number;
 	unsigned short		uvhub_quiesce;
 	short			socket_acknowledge_count[DEST_Q_SIZE];
@@ -531,6 +616,7 @@
 	int			cong_response_us;
 	int			cong_reps;
 	int			cong_period;
+	unsigned long		clocks_per_100_usec;
 	cycles_t		period_time;
 	long			period_requests;
 	struct hub_and_pnode	*thp;
@@ -591,6 +677,11 @@
 	uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr);
 }
 
+static inline void write_gmmr_sw_ack(int pnode, unsigned long mr)
+{
+	write_gmmr(pnode, UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr);
+}
+
 static inline unsigned long read_mmr_sw_ack(void)
 {
 	return read_lmmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
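With the message header now a union of the UV1 and UV2 layouts, senders pick the member that matches the hub generation instead of filling one shared struct; the tlb_uv.c changes below do exactly that, keyed off the new uvhub_version field in bau_control. A minimal sketch of that selection, using the field and constant names from this patch:

    static void example_fill_header(struct bau_desc *bd, int uvhub_version,
                                    int base_pnode)
    {
        if (uvhub_version == 1) {
            struct uv1_bau_msg_header *h = &bd->header.uv1_hdr;

            h->swack_flag      = 1;
            h->base_dest_nasid = UV_PNODE_TO_NASID(base_pnode);
            h->dest_subnodeid  = UV_LB_SUBNODEID;
            h->command         = UV_NET_ENDPOINT_INTD;
            h->int_both        = 1;	/* UV1-only */
        } else {
            struct uv2_bau_msg_header *h = &bd->header.uv2_hdr;

            h->swack_flag      = 1;
            h->base_dest_nasid = UV_PNODE_TO_NASID(base_pnode);
            h->dest_subnodeid  = UV_LB_SUBNODEID;
            h->command         = UV_NET_ENDPOINT_INTD;
        }
    }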
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 174d938..62d61e9 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -703,7 +703,7 @@
 }
 #endif
 
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_ACPI
 /**
  * Mark ACPI NVS memory region, so that we can save/restore it during
  * hibernation and the subsequent resume.
@@ -716,7 +716,7 @@
 		struct e820entry *ei = &e820.map[i];
 
 		if (ei->type == E820_NVS)
-			suspend_nvs_register(ei->addr, ei->size);
+			acpi_nvs_register(ei->addr, ei->size);
 	}
 
 	return 0;
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index c0dd5b6..a62c201 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -290,14 +290,15 @@
 static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap)
 {
 	int count;
-	u64 tsc = 0;
+	u64 tsc = 0, prev_tsc = 0;
 
 	for (count = 0; count < 50000; count++) {
 		if (!pit_verify_msb(val))
 			break;
+		prev_tsc = tsc;
 		tsc = get_cycles();
 	}
-	*deltap = get_cycles() - tsc;
+	*deltap = get_cycles() - prev_tsc;
 	*tscp = tsc;
 
 	/*
@@ -311,9 +312,9 @@
  * How many MSB values do we want to see? We aim for
  * a maximum error rate of 500ppm (in practice the
  * real error is much smaller), but refuse to spend
- * more than 25ms on it.
+ * more than 50ms on it.
  */
-#define MAX_QUICK_PIT_MS 25
+#define MAX_QUICK_PIT_MS 50
 #define MAX_QUICK_PIT_ITERATIONS (MAX_QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)
 
 static unsigned long quick_pit_calibrate(void)
@@ -383,15 +384,12 @@
 	 *
 	 * As a result, we can depend on there not being
 	 * any odd delays anywhere, and the TSC reads are
-	 * reliable (within the error). We also adjust the
-	 * delta to the middle of the error bars, just
-	 * because it looks nicer.
+	 * reliable (within the error).
 	 *
 	 * kHz = ticks / time-in-seconds / 1000;
 	 * kHz = (t2 - t1) / (I * 256 / PIT_TICK_RATE) / 1000
 	 * kHz = ((t2 - t1) * PIT_TICK_RATE) / (I * 256 * 1000)
 	 */
-	delta += (long)(d2 - d1)/2;
 	delta *= PIT_TICK_RATE;
 	do_div(delta, i*256*1000);
 	printk("Fast TSC calibration using PIT\n");
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
index 5b83c51..8191379 100644
--- a/arch/x86/lib/x86-opcode-map.txt
+++ b/arch/x86/lib/x86-opcode-map.txt
@@ -219,7 +219,9 @@
 ac: LODS/B AL,Xb
 ad: LODS/W/D/Q rAX,Xv
 ae: SCAS/B AL,Yb
-af: SCAS/W/D/Q rAX,Xv
+# Note: The May 2011 Intel manual shows Xv for the second parameter of the
+# next instruction but Yv is correct
+af: SCAS/W/D/Q rAX,Yv
 # 0xb0 - 0xbf
 b0: MOV AL/R8L,Ib
 b1: MOV CL/R9L,Ib
@@ -729,8 +731,8 @@
 df: VAESDECLAST Vdq,Hdq,Wdq (66),(v1)
 f0: MOVBE Gy,My | MOVBE Gw,Mw (66) | CRC32 Gd,Eb (F2)
 f1: MOVBE My,Gy | MOVBE Mw,Gw (66) | CRC32 Gd,Ey (F2)
-f3: ANDN Gy,By,Ey (v)
-f4: Grp17 (1A)
+f2: ANDN Gy,By,Ey (v)
+f3: Grp17 (1A)
 f5: BZHI Gy,Ey,By (v) | PEXT Gy,By,Ey (F3),(v) | PDEP Gy,By,Ey (F2),(v)
 f6: MULX By,Gy,rDX,Ey (F2),(v)
 f7: BEXTR Gy,Ey,By (v) | SHLX Gy,Ey,By (66),(v) | SARX Gy,Ey,By (F3),(v) | SHRX Gy,Ey,By (F2),(v)
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index fd61b3f..1c1c4f4 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -109,6 +109,8 @@
 	if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
 		return;
 	pxm = pa->proximity_domain_lo;
+	if (acpi_srat_revision >= 2)
+		pxm |= *((unsigned int*)pa->proximity_domain_hi) << 8;
 	node = setup_node(pxm);
 	if (node < 0) {
 		printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
@@ -160,6 +162,8 @@
 	start = ma->base_address;
 	end = start + ma->length;
 	pxm = ma->proximity_domain;
+	if (acpi_srat_revision <= 1)
+		pxm &= 0xff;
 	node = setup_node(pxm);
 	if (node < 0) {
 		printk(KERN_ERR "SRAT: Too many proximity domains.\n");
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 5b55219..9be4cff 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -157,13 +157,14 @@
  * clear of the Timeout bit (as well) will free the resource. No reply will
  * be sent (the hardware will only do one reply per message).
  */
-static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp)
+static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp,
+						int do_acknowledge)
 {
 	unsigned long dw;
 	struct bau_pq_entry *msg;
 
 	msg = mdp->msg;
-	if (!msg->canceled) {
+	if (!msg->canceled && do_acknowledge) {
 		dw = (msg->swack_vec << UV_SW_ACK_NPENDING) | msg->swack_vec;
 		write_mmr_sw_ack(dw);
 	}
@@ -212,8 +213,8 @@
 			if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
 				unsigned long mr;
 				/*
-				 * is the resource timed out?
-				 * make everyone ignore the cancelled message.
+				 * Is the resource timed out?
+				 * Make everyone ignore the cancelled message.
 				 */
 				msg2->canceled = 1;
 				stat->d_canceled++;
@@ -231,8 +232,8 @@
  * Do all the things a cpu should do for a TLB shootdown message.
  * Other cpu's may come here at the same time for this message.
  */
-static void bau_process_message(struct msg_desc *mdp,
-					struct bau_control *bcp)
+static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
+						int do_acknowledge)
 {
 	short socket_ack_count = 0;
 	short *sp;
@@ -284,8 +285,9 @@
 		if (msg_ack_count == bcp->cpus_in_uvhub) {
 			/*
 			 * All cpus in uvhub saw it; reply
+			 * (unless we are in the UV2 workaround)
 			 */
-			reply_to_message(mdp, bcp);
+			reply_to_message(mdp, bcp, do_acknowledge);
 		}
 	}
 
@@ -491,27 +493,138 @@
 /*
  * UV2 has an extra bit of status in the ACTIVATION_STATUS_2 register.
  */
-static unsigned long uv2_read_status(unsigned long offset, int rshft, int cpu)
+static unsigned long uv2_read_status(unsigned long offset, int rshft, int desc)
 {
 	unsigned long descriptor_status;
 	unsigned long descriptor_status2;
 
 	descriptor_status = ((read_lmmr(offset) >> rshft) & UV_ACT_STATUS_MASK);
-	descriptor_status2 = (read_mmr_uv2_status() >> cpu) & 0x1UL;
+	descriptor_status2 = (read_mmr_uv2_status() >> desc) & 0x1UL;
 	descriptor_status = (descriptor_status << 1) | descriptor_status2;
 	return descriptor_status;
 }
 
+/*
+ * Return whether the status of the descriptor that is normally used for this
+ * cpu (the one indexed by its hub-relative cpu number) is busy.
+ * The status of the original 32 descriptors is always reflected in the 64
+ * bits of UVH_LB_BAU_SB_ACTIVATION_STATUS_0.
+ * The bit provided by the activation_status_2 register is irrelevant to
+ * the status if it is only being tested for busy or not busy.
+ */
+int normal_busy(struct bau_control *bcp)
+{
+	int cpu = bcp->uvhub_cpu;
+	int mmr_offset;
+	int right_shift;
+
+	mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
+	right_shift = cpu * UV_ACT_STATUS_SIZE;
+	return (((((read_lmmr(mmr_offset) >> right_shift) &
+				UV_ACT_STATUS_MASK)) << 1) == UV2H_DESC_BUSY);
+}
+
+/*
+ * Entered when a bau descriptor has gone into a permanent busy wait because
+ * of a hardware bug.
+ * Workaround the bug.
+ */
+int handle_uv2_busy(struct bau_control *bcp)
+{
+	int busy_one = bcp->using_desc;
+	int normal = bcp->uvhub_cpu;
+	int selected = -1;
+	int i;
+	unsigned long descriptor_status;
+	unsigned long status;
+	int mmr_offset;
+	struct bau_desc *bau_desc_old;
+	struct bau_desc *bau_desc_new;
+	struct bau_control *hmaster = bcp->uvhub_master;
+	struct ptc_stats *stat = bcp->statp;
+	cycles_t ttm;
+
+	stat->s_uv2_wars++;
+	spin_lock(&hmaster->uvhub_lock);
+	/* try for the original first */
+	if (busy_one != normal) {
+		if (!normal_busy(bcp))
+			selected = normal;
+	}
+	if (selected < 0) {
+		/* can't use the normal, select an alternate */
+		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
+		descriptor_status = read_lmmr(mmr_offset);
+
+		/* scan available descriptors 32-63 */
+		for (i = 0; i < UV_CPUS_PER_AS; i++) {
+			if ((hmaster->inuse_map & (1 << i)) == 0) {
+				status = ((descriptor_status >>
+						(i * UV_ACT_STATUS_SIZE)) &
+						UV_ACT_STATUS_MASK) << 1;
+				if (status != UV2H_DESC_BUSY) {
+					selected = i + UV_CPUS_PER_AS;
+					break;
+				}
+			}
+		}
+	}
+
+	if (busy_one != normal)
+		/* mark the busy alternate as not in-use */
+		hmaster->inuse_map &= ~(1 << (busy_one - UV_CPUS_PER_AS));
+
+	if (selected >= 0) {
+		/* switch to the selected descriptor */
+		if (selected != normal) {
+			/* set the selected alternate as in-use */
+			hmaster->inuse_map |=
+					(1 << (selected - UV_CPUS_PER_AS));
+			if (selected > stat->s_uv2_wars_hw)
+				stat->s_uv2_wars_hw = selected;
+		}
+		bau_desc_old = bcp->descriptor_base;
+		bau_desc_old += (ITEMS_PER_DESC * busy_one);
+		bcp->using_desc = selected;
+		bau_desc_new = bcp->descriptor_base;
+		bau_desc_new += (ITEMS_PER_DESC * selected);
+		*bau_desc_new = *bau_desc_old;
+	} else {
+		/*
+		 * All are busy. Wait for the normal one for this cpu to
+		 * free up.
+		 */
+		stat->s_uv2_war_waits++;
+		spin_unlock(&hmaster->uvhub_lock);
+		ttm = get_cycles();
+		do {
+			cpu_relax();
+		} while (normal_busy(bcp));
+		spin_lock(&hmaster->uvhub_lock);
+		/* switch to the original descriptor */
+		bcp->using_desc = normal;
+		bau_desc_old = bcp->descriptor_base;
+		bau_desc_old += (ITEMS_PER_DESC * bcp->using_desc);
+		bcp->using_desc = (ITEMS_PER_DESC * normal);
+		bau_desc_new = bcp->descriptor_base;
+		bau_desc_new += (ITEMS_PER_DESC * normal);
+		*bau_desc_new = *bau_desc_old; /* copy the entire descriptor */
+	}
+	spin_unlock(&hmaster->uvhub_lock);
+	return FLUSH_RETRY_BUSYBUG;
+}
+
 static int uv2_wait_completion(struct bau_desc *bau_desc,
 				unsigned long mmr_offset, int right_shift,
 				struct bau_control *bcp, long try)
 {
 	unsigned long descriptor_stat;
 	cycles_t ttm;
-	int cpu = bcp->uvhub_cpu;
+	int desc = bcp->using_desc;
+	long busy_reps = 0;
 	struct ptc_stats *stat = bcp->statp;
 
-	descriptor_stat = uv2_read_status(mmr_offset, right_shift, cpu);
+	descriptor_stat = uv2_read_status(mmr_offset, right_shift, desc);
 
 	/* spin on the status MMR, waiting for it to go idle */
 	while (descriptor_stat != UV2H_DESC_IDLE) {
@@ -522,32 +635,35 @@
 		 * our message and its state will stay IDLE.
 		 */
 		if ((descriptor_stat == UV2H_DESC_SOURCE_TIMEOUT) ||
-		    (descriptor_stat == UV2H_DESC_DEST_STRONG_NACK) ||
 		    (descriptor_stat == UV2H_DESC_DEST_PUT_ERR)) {
 			stat->s_stimeout++;
 			return FLUSH_GIVEUP;
+		} else if (descriptor_stat == UV2H_DESC_DEST_STRONG_NACK) {
+			stat->s_strongnacks++;
+			bcp->conseccompletes = 0;
+			return FLUSH_GIVEUP;
 		} else if (descriptor_stat == UV2H_DESC_DEST_TIMEOUT) {
 			stat->s_dtimeout++;
-			ttm = get_cycles();
-			/*
-			 * Our retries may be blocked by all destination
-			 * swack resources being consumed, and a timeout
-			 * pending.  In that case hardware returns the
-			 * ERROR that looks like a destination timeout.
-			 */
-			if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
-				bcp->conseccompletes = 0;
-				return FLUSH_RETRY_PLUGGED;
-			}
 			bcp->conseccompletes = 0;
 			return FLUSH_RETRY_TIMEOUT;
 		} else {
+			busy_reps++;
+			if (busy_reps > 1000000) {
+				/* not to hammer on the clock */
+				busy_reps = 0;
+				ttm = get_cycles();
+				if ((ttm - bcp->send_message) >
+					(bcp->clocks_per_100_usec)) {
+					return handle_uv2_busy(bcp);
+				}
+			}
 			/*
 			 * descriptor_stat is still BUSY
 			 */
 			cpu_relax();
 		}
-		descriptor_stat = uv2_read_status(mmr_offset, right_shift, cpu);
+		descriptor_stat = uv2_read_status(mmr_offset, right_shift,
+									desc);
 	}
 	bcp->conseccompletes++;
 	return FLUSH_COMPLETE;
@@ -563,17 +679,17 @@
 {
 	int right_shift;
 	unsigned long mmr_offset;
-	int cpu = bcp->uvhub_cpu;
+	int desc = bcp->using_desc;
 
-	if (cpu < UV_CPUS_PER_AS) {
+	if (desc < UV_CPUS_PER_AS) {
 		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
-		right_shift = cpu * UV_ACT_STATUS_SIZE;
+		right_shift = desc * UV_ACT_STATUS_SIZE;
 	} else {
 		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
-		right_shift = ((cpu - UV_CPUS_PER_AS) * UV_ACT_STATUS_SIZE);
+		right_shift = ((desc - UV_CPUS_PER_AS) * UV_ACT_STATUS_SIZE);
 	}
 
-	if (is_uv1_hub())
+	if (bcp->uvhub_version == 1)
 		return uv1_wait_completion(bau_desc, mmr_offset, right_shift,
 								bcp, try);
 	else
@@ -752,19 +868,22 @@
  * Returns 1 if it gives up entirely and the original cpu mask is to be
  * returned to the kernel.
  */
-int uv_flush_send_and_wait(struct bau_desc *bau_desc,
-			struct cpumask *flush_mask, struct bau_control *bcp)
+int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp)
 {
 	int seq_number = 0;
 	int completion_stat = 0;
+	int uv1 = 0;
 	long try = 0;
 	unsigned long index;
 	cycles_t time1;
 	cycles_t time2;
 	struct ptc_stats *stat = bcp->statp;
 	struct bau_control *hmaster = bcp->uvhub_master;
+	struct uv1_bau_msg_header *uv1_hdr = NULL;
+	struct uv2_bau_msg_header *uv2_hdr = NULL;
+	struct bau_desc *bau_desc;
 
-	if (is_uv1_hub())
+	if (bcp->uvhub_version == 1)
 		uv1_throttle(hmaster, stat);
 
 	while (hmaster->uvhub_quiesce)
@@ -772,22 +891,39 @@
 
 	time1 = get_cycles();
 	do {
-		if (try == 0) {
-			bau_desc->header.msg_type = MSG_REGULAR;
+		bau_desc = bcp->descriptor_base;
+		bau_desc += (ITEMS_PER_DESC * bcp->using_desc);
+		if (bcp->uvhub_version == 1) {
+			uv1 = 1;
+			uv1_hdr = &bau_desc->header.uv1_hdr;
+		} else
+			uv2_hdr = &bau_desc->header.uv2_hdr;
+		if ((try == 0) || (completion_stat == FLUSH_RETRY_BUSYBUG)) {
+			if (uv1)
+				uv1_hdr->msg_type = MSG_REGULAR;
+			else
+				uv2_hdr->msg_type = MSG_REGULAR;
 			seq_number = bcp->message_number++;
 		} else {
-			bau_desc->header.msg_type = MSG_RETRY;
+			if (uv1)
+				uv1_hdr->msg_type = MSG_RETRY;
+			else
+				uv2_hdr->msg_type = MSG_RETRY;
 			stat->s_retry_messages++;
 		}
 
-		bau_desc->header.sequence = seq_number;
-		index = (1UL << AS_PUSH_SHIFT) | bcp->uvhub_cpu;
+		if (uv1)
+			uv1_hdr->sequence = seq_number;
+		else
+			uv2_hdr->sequence = seq_number;
+		index = (1UL << AS_PUSH_SHIFT) | bcp->using_desc;
 		bcp->send_message = get_cycles();
 
 		write_mmr_activation(index);
 
 		try++;
 		completion_stat = wait_completion(bau_desc, bcp, try);
+		/* UV2: wait_completion() may change the bcp->using_desc */
 
 		handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat);
 
@@ -798,6 +934,7 @@
 		}
 		cpu_relax();
 	} while ((completion_stat == FLUSH_RETRY_PLUGGED) ||
+		 (completion_stat == FLUSH_RETRY_BUSYBUG) ||
 		 (completion_stat == FLUSH_RETRY_TIMEOUT));
 
 	time2 = get_cycles();
@@ -812,6 +949,7 @@
 	record_send_stats(time1, time2, bcp, stat, completion_stat, try);
 
 	if (completion_stat == FLUSH_GIVEUP)
+		/* FLUSH_GIVEUP will fall back to using IPI's for tlb flush */
 		return 1;
 	return 0;
 }
@@ -967,7 +1105,7 @@
 		stat->s_ntargself++;
 
 	bau_desc = bcp->descriptor_base;
-	bau_desc += ITEMS_PER_DESC * bcp->uvhub_cpu;
+	bau_desc += (ITEMS_PER_DESC * bcp->using_desc);
 	bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
 	if (set_distrib_bits(flush_mask, bcp, bau_desc, &locals, &remotes))
 		return NULL;
@@ -980,13 +1118,86 @@
 	 * uv_flush_send_and_wait returns 0 if all cpu's were messaged,
 	 * or 1 if it gave up and the original cpumask should be returned.
 	 */
-	if (!uv_flush_send_and_wait(bau_desc, flush_mask, bcp))
+	if (!uv_flush_send_and_wait(flush_mask, bcp))
 		return NULL;
 	else
 		return cpumask;
 }
 
 /*
+ * Search the message queue for any 'other' message with the same software
+ * acknowledge resource bit vector.
+ */
+struct bau_pq_entry *find_another_by_swack(struct bau_pq_entry *msg,
+			struct bau_control *bcp, unsigned char swack_vec)
+{
+	struct bau_pq_entry *msg_next = msg + 1;
+
+	if (msg_next > bcp->queue_last)
+		msg_next = bcp->queue_first;
+	while ((msg_next->swack_vec != 0) && (msg_next != msg)) {
+		if (msg_next->swack_vec == swack_vec)
+			return msg_next;
+		msg_next++;
+		if (msg_next > bcp->queue_last)
+			msg_next = bcp->queue_first;
+	}
+	return NULL;
+}
+
+/*
+ * UV2 needs to work around a bug in which an arriving message has not
+ * set a bit in the UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE register.
+ * Such a message must be ignored.
+ */
+void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp)
+{
+	unsigned long mmr_image;
+	unsigned char swack_vec;
+	struct bau_pq_entry *msg = mdp->msg;
+	struct bau_pq_entry *other_msg;
+
+	mmr_image = read_mmr_sw_ack();
+	swack_vec = msg->swack_vec;
+
+	if ((swack_vec & mmr_image) == 0) {
+		/*
+		 * This message was assigned a swack resource, but no
+		 * reserved acknowledgment is pending.
+		 * The bug has prevented this message from setting the MMR.
+		 * And no other message has used the same sw_ack resource.
+		 * Do the requested shootdown but do not reply to the msg.
+		 * (the 0 means make no acknowledge)
+		 */
+		bau_process_message(mdp, bcp, 0);
+		return;
+	}
+
+	/*
+	 * Some message has set the MMR 'pending' bit; it might have been
+	 * another message.  Look for that message.
+	 */
+	other_msg = find_another_by_swack(msg, bcp, msg->swack_vec);
+	if (other_msg) {
+		/* There is another.  Do not ack the current one. */
+		bau_process_message(mdp, bcp, 0);
+		/*
+		 * Let the natural processing of that message acknowledge
+		 * it. Don't get the processing of sw_ack's out of order.
+		 */
+		return;
+	}
+
+	/*
+	 * There is no other message using this sw_ack, so it is safe to
+	 * acknowledge it.
+	 */
+	bau_process_message(mdp, bcp, 1);
+
+	return;
+}
+
+/*
  * The BAU message interrupt comes here. (registered by set_intr_gate)
  * See entry_64.S
  *
@@ -1009,6 +1220,7 @@
 	struct ptc_stats *stat;
 	struct msg_desc msgdesc;
 
+	ack_APIC_irq();
 	time_start = get_cycles();
 
 	bcp = &per_cpu(bau_control, smp_processor_id());
@@ -1022,9 +1234,11 @@
 		count++;
 
 		msgdesc.msg_slot = msg - msgdesc.queue_first;
-		msgdesc.swack_slot = ffs(msg->swack_vec) - 1;
 		msgdesc.msg = msg;
-		bau_process_message(&msgdesc, bcp);
+		if (bcp->uvhub_version == 2)
+			process_uv2_message(&msgdesc, bcp);
+		else
+			bau_process_message(&msgdesc, bcp, 1);
 
 		msg++;
 		if (msg > msgdesc.queue_last)
@@ -1036,8 +1250,6 @@
 		stat->d_nomsg++;
 	else if (count > 1)
 		stat->d_multmsg++;
-
-	ack_APIC_irq();
 }
 
 /*
@@ -1083,7 +1295,7 @@
 		 */
 		mmr_image |= (1L << SOFTACK_MSHIFT);
 		if (is_uv2_hub()) {
-			mmr_image |= (1L << UV2_LEG_SHFT);
+			mmr_image &= ~(1L << UV2_LEG_SHFT);
 			mmr_image |= (1L << UV2_EXT_SHFT);
 		}
 		write_mmr_misc_control(pnode, mmr_image);
@@ -1136,13 +1348,13 @@
 		seq_printf(file,
 			"remotehub numuvhubs numuvhubs16 numuvhubs8 ");
 		seq_printf(file,
-			"numuvhubs4 numuvhubs2 numuvhubs1 dto retries rok ");
+		    "numuvhubs4 numuvhubs2 numuvhubs1 dto snacks retries rok ");
 		seq_printf(file,
 			"resetp resett giveup sto bz throt swack recv rtime ");
 		seq_printf(file,
 			"all one mult none retry canc nocan reset rcan ");
 		seq_printf(file,
-			"disable enable\n");
+			"disable enable wars warshw warwaits\n");
 	}
 	if (cpu < num_possible_cpus() && cpu_online(cpu)) {
 		stat = &per_cpu(ptcstats, cpu);
@@ -1154,10 +1366,10 @@
 			   stat->s_ntargremotes, stat->s_ntargcpu,
 			   stat->s_ntarglocaluvhub, stat->s_ntargremoteuvhub,
 			   stat->s_ntarguvhub, stat->s_ntarguvhub16);
-		seq_printf(file, "%ld %ld %ld %ld %ld ",
+		seq_printf(file, "%ld %ld %ld %ld %ld %ld ",
 			   stat->s_ntarguvhub8, stat->s_ntarguvhub4,
 			   stat->s_ntarguvhub2, stat->s_ntarguvhub1,
-			   stat->s_dtimeout);
+			   stat->s_dtimeout, stat->s_strongnacks);
 		seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ",
 			   stat->s_retry_messages, stat->s_retriesok,
 			   stat->s_resets_plug, stat->s_resets_timeout,
@@ -1173,8 +1385,10 @@
 			   stat->d_nomsg, stat->d_retries, stat->d_canceled,
 			   stat->d_nocanceled, stat->d_resets,
 			   stat->d_rcanceled);
-		seq_printf(file, "%ld %ld\n",
-			stat->s_bau_disabled, stat->s_bau_reenabled);
+		seq_printf(file, "%ld %ld %ld %ld %ld\n",
+			stat->s_bau_disabled, stat->s_bau_reenabled,
+			stat->s_uv2_wars, stat->s_uv2_wars_hw,
+			stat->s_uv2_war_waits);
 	}
 	return 0;
 }
@@ -1432,12 +1646,15 @@
 {
 	int i;
 	int cpu;
+	int uv1 = 0;
 	unsigned long gpa;
 	unsigned long m;
 	unsigned long n;
 	size_t dsize;
 	struct bau_desc *bau_desc;
 	struct bau_desc *bd2;
+	struct uv1_bau_msg_header *uv1_hdr;
+	struct uv2_bau_msg_header *uv2_hdr;
 	struct bau_control *bcp;
 
 	/*
@@ -1451,6 +1668,8 @@
 	gpa = uv_gpa(bau_desc);
 	n = uv_gpa_to_gnode(gpa);
 	m = uv_gpa_to_offset(gpa);
+	if (is_uv1_hub())
+		uv1 = 1;
 
 	/* the 14-bit pnode */
 	write_mmr_descriptor_base(pnode, (n << UV_DESC_PSHIFT | m));
@@ -1461,21 +1680,33 @@
 	 */
 	for (i = 0, bd2 = bau_desc; i < (ADP_SZ * ITEMS_PER_DESC); i++, bd2++) {
 		memset(bd2, 0, sizeof(struct bau_desc));
-		bd2->header.swack_flag =	1;
-		/*
-		 * The base_dest_nasid set in the message header is the nasid
-		 * of the first uvhub in the partition. The bit map will
-		 * indicate destination pnode numbers relative to that base.
-		 * They may not be consecutive if nasid striding is being used.
-		 */
-		bd2->header.base_dest_nasid =	UV_PNODE_TO_NASID(base_pnode);
-		bd2->header.dest_subnodeid =	UV_LB_SUBNODEID;
-		bd2->header.command =		UV_NET_ENDPOINT_INTD;
-		bd2->header.int_both =		1;
-		/*
-		 * all others need to be set to zero:
-		 *   fairness chaining multilevel count replied_to
-		 */
+		if (uv1) {
+			uv1_hdr = &bd2->header.uv1_hdr;
+			uv1_hdr->swack_flag =	1;
+			/*
+			 * The base_dest_nasid set in the message header
+			 * is the nasid of the first uvhub in the partition.
+			 * The bit map will indicate destination pnode numbers
+			 * relative to that base. They may not be consecutive
+			 * if nasid striding is being used.
+			 */
+			uv1_hdr->base_dest_nasid =
+						UV_PNODE_TO_NASID(base_pnode);
+			uv1_hdr->dest_subnodeid =	UV_LB_SUBNODEID;
+			uv1_hdr->command =		UV_NET_ENDPOINT_INTD;
+			uv1_hdr->int_both =		1;
+			/*
+			 * all others need to be set to zero:
+			 *   fairness chaining multilevel count replied_to
+			 */
+		} else {
+			uv2_hdr = &bd2->header.uv2_hdr;
+			uv2_hdr->swack_flag =	1;
+			uv2_hdr->base_dest_nasid =
+						UV_PNODE_TO_NASID(base_pnode);
+			uv2_hdr->dest_subnodeid =	UV_LB_SUBNODEID;
+			uv2_hdr->command =		UV_NET_ENDPOINT_INTD;
+		}
 	}
 	for_each_present_cpu(cpu) {
 		if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
@@ -1531,6 +1762,7 @@
 	write_mmr_payload_first(pnode, pn_first);
 	write_mmr_payload_tail(pnode, first);
 	write_mmr_payload_last(pnode, last);
+	write_gmmr_sw_ack(pnode, 0xffffUL);
 
 	/* in effect, all msg_type's are set to MSG_NOOP */
 	memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE);
@@ -1584,14 +1816,14 @@
 		ts_ns = base * mult1 * mult2;
 		ret = ts_ns / 1000;
 	} else {
-		/* 4 bits  0/1 for 10/80us, 3 bits of multiplier */
-		mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
+		/* 4 bits  0/1 for 10/80us base, 3 bits of multiplier */
+		mmr_image = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
 		mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
 		if (mmr_image & (1L << UV2_ACK_UNITS_SHFT))
-			mult1 = 80;
+			base = 80;
 		else
-			mult1 = 10;
-		base = mmr_image & UV2_ACK_MASK;
+			base = 10;
+		mult1 = mmr_image & UV2_ACK_MASK;
 		ret = mult1 * base;
 	}
 	return ret;
@@ -1618,6 +1850,7 @@
 		bcp->cong_response_us		= congested_respns_us;
 		bcp->cong_reps			= congested_reps;
 		bcp->cong_period		= congested_period;
+		bcp->clocks_per_100_usec =	usec_2_cycles(100);
 	}
 }
 
@@ -1728,8 +1961,17 @@
 		bcp->cpus_in_socket = sdp->num_cpus;
 		bcp->socket_master = *smasterp;
 		bcp->uvhub = bdp->uvhub;
+		if (is_uv1_hub())
+			bcp->uvhub_version = 1;
+		else if (is_uv2_hub())
+			bcp->uvhub_version = 2;
+		else {
+			printk(KERN_EMERG "uvhub version not 1 or 2\n");
+			return 1;
+		}
 		bcp->uvhub_master = *hmasterp;
 		bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->blade_processor_id;
+		bcp->using_desc = bcp->uvhub_cpu;
 		if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
 			printk(KERN_EMERG "%d cpus per uvhub invalid\n",
 				bcp->uvhub_cpu);
@@ -1845,6 +2087,8 @@
 			uv_base_pnode = uv_blade_to_pnode(uvhub);
 	}
 
+	enable_timeouts();
+
 	if (init_per_cpu(nuvhubs, uv_base_pnode)) {
 		nobau = 1;
 		return 0;
@@ -1855,7 +2099,6 @@
 		if (uv_blade_nr_possible_cpus(uvhub))
 			init_uvhub(uvhub, vector, uv_base_pnode);
 
-	enable_timeouts();
 	alloc_intr_gate(vector, uv_bau_message_intr1);
 
 	for_each_possible_blade(uvhub) {
@@ -1867,7 +2110,8 @@
 			val = 1L << 63;
 			write_gmmr_activation(pnode, val);
 			mmr = 1; /* should be 1 to broadcast to both sockets */
-			write_mmr_data_broadcast(pnode, mmr);
+			if (!is_uv1_hub())
+				write_mmr_data_broadcast(pnode, mmr);
 		}
 	}
 
diff --git a/arch/x86/um/shared/sysdep/ptrace.h b/arch/x86/um/shared/sysdep/ptrace.h
index 5ef9344..2bbe1ec2 100644
--- a/arch/x86/um/shared/sysdep/ptrace.h
+++ b/arch/x86/um/shared/sysdep/ptrace.h
@@ -1,3 +1,6 @@
+#ifndef __SYSDEP_X86_PTRACE_H
+#define __SYSDEP_X86_PTRACE_H
+
 #ifdef __i386__
 #include "ptrace_32.h"
 #else
@@ -8,3 +11,5 @@
 {
 	return UPT_SYSCALL_RET(regs);
 }
+
+#endif /* __SYSDEP_X86_PTRACE_H */
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index ecb26b4..c07f44f 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -20,11 +20,12 @@
 # All the builtin files are in the "acpi." module_param namespace.
 acpi-y				+= osl.o utils.o reboot.o
 acpi-y				+= atomicio.o
+acpi-y				+= nvs.o
 
 # sleep related files
 acpi-y				+= wakeup.o
 acpi-y				+= sleep.o
-acpi-$(CONFIG_ACPI_SLEEP)	+= proc.o nvs.o
+acpi-$(CONFIG_ACPI_SLEEP)	+= proc.o
 
 
 #
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index 301bd2d..0ca208b 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -8,41 +8,151 @@
 # use acpi.o to put all files here into acpi.o modparam namespace
 obj-y	+= acpi.o
 
-acpi-y := dsfield.o   dsmthdat.o  dsopcode.o  dswexec.o  dswscope.o \
-	 dsmethod.o  dsobject.o  dsutils.o   dswload.o  dswstate.o \
-	 dsinit.o dsargs.o dscontrol.o dswload2.o
+acpi-y :=		\
+	dsargs.o	\
+	dscontrol.o	\
+	dsfield.o	\
+	dsinit.o	\
+	dsmethod.o	\
+	dsmthdat.o	\
+	dsobject.o	\
+	dsopcode.o	\
+	dsutils.o	\
+	dswexec.o	\
+	dswload.o	\
+	dswload2.o	\
+	dswscope.o	\
+	dswstate.o
 
-acpi-y += evevent.o  evregion.o  evsci.o    evxfevnt.o \
-	 evmisc.o   evrgnini.o  evxface.o  evxfregn.o \
-	 evgpe.o    evgpeblk.o evgpeinit.o  evgpeutil.o evxfgpe.o evglock.o
+acpi-y +=		\
+	evevent.o	\
+	evgpe.o		\
+	evgpeblk.o	\
+	evgpeinit.o	\
+	evgpeutil.o	\
+	evglock.o	\
+	evmisc.o	\
+	evregion.o	\
+	evrgnini.o	\
+	evsci.o		\
+	evxface.o	\
+	evxfevnt.o	\
+	evxfgpe.o	\
+	evxfregn.o
 
-acpi-y += exconfig.o  exfield.o  exnames.o   exoparg6.o  exresolv.o  exstorob.o\
-	 exconvrt.o  exfldio.o  exoparg1.o  exprep.o    exresop.o   exsystem.o\
-	 excreate.o  exmisc.o   exoparg2.o  exregion.o  exstore.o   exutils.o \
-	 exdump.o    exmutex.o  exoparg3.o  exresnte.o  exstoren.o  exdebug.o
+acpi-y +=		\
+	exconfig.o	\
+	exconvrt.o	\
+	excreate.o	\
+	exdebug.o	\
+	exdump.o	\
+	exfield.o	\
+	exfldio.o	\
+	exmutex.o	\
+	exnames.o	\
+	exoparg1.o	\
+	exoparg2.o	\
+	exoparg3.o	\
+	exoparg6.o	\
+	exprep.o	\
+	exmisc.o	\
+	exregion.o	\
+	exresnte.o	\
+	exresolv.o	\
+	exresop.o	\
+	exstore.o	\
+	exstoren.o	\
+	exstorob.o	\
+	exsystem.o	\
+	exutils.o
 
-acpi-y += hwacpi.o  hwgpe.o  hwregs.o  hwsleep.o hwxface.o hwvalid.o hwpci.o
+acpi-y +=		\
+	hwacpi.o	\
+	hwgpe.o		\
+	hwpci.o		\
+	hwregs.o	\
+	hwsleep.o	\
+	hwvalid.o	\
+	hwxface.o
 
 acpi-$(ACPI_FUTURE_USAGE) += hwtimer.o
 
-acpi-y += nsaccess.o  nsload.o    nssearch.o  nsxfeval.o \
-	 nsalloc.o   nseval.o    nsnames.o   nsutils.o   nsxfname.o \
-	 nsdump.o    nsinit.o    nsobject.o  nswalk.o    nsxfobj.o  \
-	 nsparse.o   nspredef.o  nsrepair.o  nsrepair2.o
+acpi-y +=		\
+	nsaccess.o	\
+	nsalloc.o	\
+	nsdump.o	\
+	nseval.o	\
+	nsinit.o	\
+	nsload.o	\
+	nsnames.o	\
+	nsobject.o	\
+	nsparse.o	\
+	nspredef.o	\
+	nsrepair.o	\
+	nsrepair2.o	\
+	nssearch.o	\
+	nsutils.o	\
+	nswalk.o	\
+	nsxfeval.o	\
+	nsxfname.o	\
+	nsxfobj.o
 
 acpi-$(ACPI_FUTURE_USAGE) += nsdumpdv.o
 
-acpi-y += psargs.o    psparse.o  psloop.o pstree.o   pswalk.o  \
-	 psopcode.o  psscope.o  psutils.o  psxface.o
+acpi-y +=		\
+	psargs.o	\
+	psloop.o	\
+	psopcode.o	\
+	psparse.o	\
+	psscope.o	\
+	pstree.o	\
+	psutils.o	\
+	pswalk.o	\
+	psxface.o
 
-acpi-y += rsaddr.o rscreate.o rsinfo.o rsio.o rslist.o rsmisc.o rsxface.o \
-	 rscalc.o  rsirq.o  rsmemory.o  rsutils.o
+acpi-y +=		\
+	rsaddr.o	\
+	rscalc.o	\
+	rscreate.o	\
+	rsinfo.o	\
+	rsio.o		\
+	rsirq.o		\
+	rslist.o	\
+	rsmemory.o	\
+	rsmisc.o	\
+	rsserial.o	\
+	rsutils.o	\
+	rsxface.o
 
 acpi-$(ACPI_FUTURE_USAGE) += rsdump.o
 
-acpi-y += tbxface.o tbinstal.o tbutils.o tbfind.o tbfadt.o tbxfroot.o
+acpi-y +=		\
+	tbfadt.o	\
+	tbfind.o	\
+	tbinstal.o	\
+	tbutils.o	\
+	tbxface.o	\
+	tbxfroot.o
 
-acpi-y += utalloc.o utdebug.o uteval.o utinit.o utmisc.o utxface.o \
-		utcopy.o utdelete.o utglobal.o utmath.o utobject.o \
-		utstate.o utmutex.o utobject.o utresrc.o utlock.o utids.o \
-		utosi.o utxferror.o utdecode.o
+acpi-y +=		\
+	utaddress.o	\
+	utalloc.o	\
+	utcopy.o	\
+	utdebug.o	\
+	utdecode.o	\
+	utdelete.o	\
+	uteval.o	\
+	utglobal.o	\
+	utids.o		\
+	utinit.o	\
+	utlock.o	\
+	utmath.o	\
+	utmisc.o	\
+	utmutex.o	\
+	utobject.o	\
+	utosi.o		\
+	utresrc.o	\
+	utstate.o	\
+	utxface.o	\
+	utxferror.o	\
+	utxfmutex.o
diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h
index e0ba17f..a44bd42 100644
--- a/drivers/acpi/acpica/accommon.h
+++ b/drivers/acpi/acpica/accommon.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acconfig.h b/drivers/acpi/acpica/acconfig.h
index f895a24..1f30af6 100644
--- a/drivers/acpi/acpica/acconfig.h
+++ b/drivers/acpi/acpica/acconfig.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -123,6 +123,10 @@
 
 #define ACPI_MAX_SLEEP                  2000	/* Two seconds */
 
+/* Address Range lists are per-space_id (Memory and I/O only) */
+
+#define ACPI_ADDRESS_RANGE_MAX          2
+
 /******************************************************************************
  *
  * ACPI Specification constants (Do not change unless the specification changes)
@@ -202,9 +206,10 @@
 #define ACPI_RSDP_CHECKSUM_LENGTH       20
 #define ACPI_RSDP_XCHECKSUM_LENGTH      36
 
-/* SMBus and IPMI bidirectional buffer size */
+/* SMBus, GSBus and IPMI bidirectional buffer size */
 
 #define ACPI_SMBUS_BUFFER_SIZE          34
+#define ACPI_GSBUS_BUFFER_SIZE          34
 #define ACPI_IPMI_BUFFER_SIZE           66
 
 /* _sx_d and _sx_w control methods */
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index eb0b1f8..deaa819 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index 2d1b7ff..5935ba6 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index bea3b48..c53caa5 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -162,6 +162,7 @@
 
 acpi_status
 acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
+			       union acpi_operand_object *field_obj,
 			       u32 function,
 			       u32 region_offset, u32 bit_width, u64 *value);
 
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index e6652d7..2853f76 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -140,8 +140,19 @@
 acpi_name acpi_gbl_trace_method_name;
 u8 acpi_gbl_system_awake_and_running;
 
+/*
+ * ACPI 5.0 introduces the concept of a "reduced hardware platform", meaning
+ * that the ACPI hardware is no longer required. A flag in the FADT indicates
+ * a reduced HW machine, and that flag is duplicated here for convenience.
+ */
+u8 acpi_gbl_reduced_hardware;
+
 #endif
 
+/* Do not disassemble buffers to resource descriptors */
+
+ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_no_resource_disassembly, FALSE);
+
 /*****************************************************************************
  *
  * Debug support
@@ -207,7 +218,7 @@
 
 /*****************************************************************************
  *
- * Mutual exlusion within ACPICA subsystem
+ * Mutual exclusion within ACPICA subsystem
  *
  ****************************************************************************/
 
@@ -295,6 +306,8 @@
 ACPI_EXTERN u8 acpi_gbl_events_initialized;
 ACPI_EXTERN u8 acpi_gbl_osi_data;
 ACPI_EXTERN struct acpi_interface_info *acpi_gbl_supported_interfaces;
+ACPI_EXTERN struct acpi_address_range
+    *acpi_gbl_address_range_list[ACPI_ADDRESS_RANGE_MAX];
 
 #ifndef DEFINE_ACPI_GLOBALS
 
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index e7213be..677793e 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index 3731e1c..eb30863 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -468,6 +468,8 @@
 
 void acpi_ex_integer_to_string(char *dest, u64 value);
 
+u8 acpi_is_valid_space_id(u8 space_id);
+
 /*
  * exregion - default op_region handlers
  */
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 5552125..3f24068 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -53,7 +53,7 @@
 
 /* Total number of aml opcodes defined */
 
-#define AML_NUM_OPCODES                 0x7F
+#define AML_NUM_OPCODES                 0x81
 
 /* Forward declarations */
 
@@ -249,12 +249,16 @@
 	struct acpi_namespace_node *field_node;
 	struct acpi_namespace_node *register_node;
 	struct acpi_namespace_node *data_register_node;
+	struct acpi_namespace_node *connection_node;
+	u8 *resource_buffer;
 	u32 bank_value;
 	u32 field_bit_position;
 	u32 field_bit_length;
+	u16 resource_length;
 	u8 field_flags;
 	u8 attribute;
 	u8 field_type;
+	u8 access_length;
 };
 
 typedef
@@ -315,7 +319,8 @@
 
 /*
  * Used for ACPI_PTYPE1_FIXED, ACPI_PTYPE1_VAR, ACPI_PTYPE2,
- * ACPI_PTYPE2_MIN, ACPI_PTYPE2_PKG_COUNT, ACPI_PTYPE2_COUNT
+ * ACPI_PTYPE2_MIN, ACPI_PTYPE2_PKG_COUNT, ACPI_PTYPE2_COUNT,
+ * ACPI_PTYPE2_FIX_VAR
  */
 struct acpi_package_info {
 	u8 type;
@@ -625,6 +630,15 @@
 
 typedef acpi_status(*ACPI_EXECUTE_OP) (struct acpi_walk_state * walk_state);
 
+/* Address Range info block */
+
+struct acpi_address_range {
+	struct acpi_address_range *next;
+	struct acpi_namespace_node *region_node;
+	acpi_physical_address start_address;
+	acpi_physical_address end_address;
+};
+
 /*****************************************************************************
  *
  * Parser typedefs and structs
@@ -951,7 +965,7 @@
 #define ACPI_RESOURCE_NAME_END_DEPENDENT        0x38
 #define ACPI_RESOURCE_NAME_IO                   0x40
 #define ACPI_RESOURCE_NAME_FIXED_IO             0x48
-#define ACPI_RESOURCE_NAME_RESERVED_S1          0x50
+#define ACPI_RESOURCE_NAME_FIXED_DMA            0x50
 #define ACPI_RESOURCE_NAME_RESERVED_S2          0x58
 #define ACPI_RESOURCE_NAME_RESERVED_S3          0x60
 #define ACPI_RESOURCE_NAME_RESERVED_S4          0x68
@@ -973,7 +987,9 @@
 #define ACPI_RESOURCE_NAME_EXTENDED_IRQ         0x89
 #define ACPI_RESOURCE_NAME_ADDRESS64            0x8A
 #define ACPI_RESOURCE_NAME_EXTENDED_ADDRESS64   0x8B
-#define ACPI_RESOURCE_NAME_LARGE_MAX            0x8B
+#define ACPI_RESOURCE_NAME_GPIO                 0x8C
+#define ACPI_RESOURCE_NAME_SERIAL_BUS           0x8E
+#define ACPI_RESOURCE_NAME_LARGE_MAX            0x8E
 
 /*****************************************************************************
  *
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index b7491ee..ef338a9 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 79a598c..2c9e0f0 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 1055769..c065078 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -254,6 +254,7 @@
 	u32                             base_byte_offset;   /* Byte offset within containing object */\
 	u32                             value;              /* Value to store into the Bank or Index register */\
 	u8                              start_field_bit_offset;/* Bit offset within first field datum (0-63) */\
+	u8                              access_length;	/* For serial regions/fields */
 
 
 struct acpi_object_field_common {	/* COMMON FIELD (for BUFFER, REGION, BANK, and INDEX fields) */
@@ -261,7 +262,9 @@
 };
 
 struct acpi_object_region_field {
-	ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO union acpi_operand_object *region_obj;	/* Containing op_region object */
+	ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO u16 resource_length;
+	union acpi_operand_object *region_obj;	/* Containing op_region object */
+	u8 *resource_buffer;	/* resource_template for serial regions/fields */
 };
 
 struct acpi_object_bank_field {
@@ -358,6 +361,7 @@
  */
 struct acpi_object_extra {
 	ACPI_OBJECT_COMMON_HEADER struct acpi_namespace_node *method_REG;	/* _REG method for this region (if any) */
+	struct acpi_namespace_node *scope_node;
 	void *region_context;	/* Region-specific data */
 	u8 *aml_start;
 	u32 aml_length;
diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h
index bb2ccfa..9440d05 100644
--- a/drivers/acpi/acpica/acopcode.h
+++ b/drivers/acpi/acpica/acopcode.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -93,6 +93,7 @@
 #define ARGP_CONCAT_OP                  ARGP_LIST3 (ARGP_TERMARG,    ARGP_TERMARG,       ARGP_TARGET)
 #define ARGP_CONCAT_RES_OP              ARGP_LIST3 (ARGP_TERMARG,    ARGP_TERMARG,       ARGP_TARGET)
 #define ARGP_COND_REF_OF_OP             ARGP_LIST2 (ARGP_SUPERNAME,  ARGP_SUPERNAME)
+#define ARGP_CONNECTFIELD_OP            ARGP_LIST1 (ARGP_NAMESTRING)
 #define ARGP_CONTINUE_OP                ARG_NONE
 #define ARGP_COPY_OP                    ARGP_LIST2 (ARGP_TERMARG,    ARGP_SIMPLENAME)
 #define ARGP_CREATE_BIT_FIELD_OP        ARGP_LIST3 (ARGP_TERMARG,    ARGP_TERMARG,       ARGP_NAME)
@@ -164,6 +165,7 @@
 #define ARGP_RETURN_OP                  ARGP_LIST1 (ARGP_TERMARG)
 #define ARGP_REVISION_OP                ARG_NONE
 #define ARGP_SCOPE_OP                   ARGP_LIST3 (ARGP_PKGLENGTH,  ARGP_NAME,          ARGP_TERMLIST)
+#define ARGP_SERIALFIELD_OP             ARGP_LIST1 (ARGP_NAMESTRING)
 #define ARGP_SHIFT_LEFT_OP              ARGP_LIST3 (ARGP_TERMARG,    ARGP_TERMARG,       ARGP_TARGET)
 #define ARGP_SHIFT_RIGHT_OP             ARGP_LIST3 (ARGP_TERMARG,    ARGP_TERMARG,       ARGP_TARGET)
 #define ARGP_SIGNAL_OP                  ARGP_LIST1 (ARGP_SUPERNAME)
@@ -223,6 +225,7 @@
 #define ARGI_CONCAT_OP                  ARGI_LIST3 (ARGI_COMPUTEDATA,ARGI_COMPUTEDATA,   ARGI_TARGETREF)
 #define ARGI_CONCAT_RES_OP              ARGI_LIST3 (ARGI_BUFFER,     ARGI_BUFFER,        ARGI_TARGETREF)
 #define ARGI_COND_REF_OF_OP             ARGI_LIST2 (ARGI_OBJECT_REF, ARGI_TARGETREF)
+#define ARGI_CONNECTFIELD_OP            ARGI_INVALID_OPCODE
 #define ARGI_CONTINUE_OP                ARGI_INVALID_OPCODE
 #define ARGI_COPY_OP                    ARGI_LIST2 (ARGI_ANYTYPE,    ARGI_SIMPLE_TARGET)
 #define ARGI_CREATE_BIT_FIELD_OP        ARGI_LIST3 (ARGI_BUFFER,     ARGI_INTEGER,       ARGI_REFERENCE)
@@ -294,6 +297,7 @@
 #define ARGI_RETURN_OP                  ARGI_INVALID_OPCODE
 #define ARGI_REVISION_OP                ARG_NONE
 #define ARGI_SCOPE_OP                   ARGI_INVALID_OPCODE
+#define ARGI_SERIALFIELD_OP             ARGI_INVALID_OPCODE
 #define ARGI_SHIFT_LEFT_OP              ARGI_LIST3 (ARGI_INTEGER,    ARGI_INTEGER,       ARGI_TARGETREF)
 #define ARGI_SHIFT_RIGHT_OP             ARGI_LIST3 (ARGI_INTEGER,    ARGI_INTEGER,       ARGI_TARGETREF)
 #define ARGI_SIGNAL_OP                  ARGI_LIST1 (ARGI_EVENT)
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h
index 5ea1e06..b725d78 100644
--- a/drivers/acpi/acpica/acparser.h
+++ b/drivers/acpi/acpica/acparser.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index c445cca..bbb34c9 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -94,6 +94,14 @@
  * ACPI_PTYPE2_REV_FIXED: Revision at start, each subpackage is Fixed-length
  *      (Used for _ART, _FPS)
  *
+ * ACPI_PTYPE2_FIX_VAR: Each subpackage consists of some fixed-length elements
+ *      followed by an optional element
+ *      object type
+ *      count
+ *      object type
+ *      count = 0 (optional)
+ *      (Used for _DLM)
+ *
  *****************************************************************************/
 
 enum acpi_return_package_types {
@@ -105,7 +113,8 @@
 	ACPI_PTYPE2_PKG_COUNT = 6,
 	ACPI_PTYPE2_FIXED = 7,
 	ACPI_PTYPE2_MIN = 8,
-	ACPI_PTYPE2_REV_FIXED = 9
+	ACPI_PTYPE2_REV_FIXED = 9,
+	ACPI_PTYPE2_FIX_VAR = 10
 };
 
 #ifdef ACPI_CREATE_PREDEFINED_TABLE
@@ -154,6 +163,7 @@
 	{{"_AC8", 0, ACPI_RTYPE_INTEGER}},
 	{{"_AC9", 0, ACPI_RTYPE_INTEGER}},
 	{{"_ADR", 0, ACPI_RTYPE_INTEGER}},
+	{{"_AEI", 0, ACPI_RTYPE_BUFFER}},
 	{{"_AL0", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
 			  {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
 
@@ -229,6 +239,13 @@
 	{{"_CID", 0, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING | ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints/Strs) */
 			  {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING, 0,0}, 0,0}},
 
+	{{"_CLS", 0, ACPI_RTYPE_PACKAGE}},	/* Fixed-length (3 Int) */
+	{{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3, 0}, 0, 0}},
+
+	{{"_CPC", 0, ACPI_RTYPE_PACKAGE}},	/* Variable-length (Ints/Bufs) */
+	{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER | ACPI_RTYPE_BUFFER, 0, 0}, 0,
+	  0}},
+
 	{{"_CRS", 0, ACPI_RTYPE_BUFFER}},
 	{{"_CRT", 0, ACPI_RTYPE_INTEGER}},
 	{{"_CSD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (1 Int(n), n-1 Int) */
@@ -237,12 +254,21 @@
 	{{"_CST", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (1 Int(n), n Pkg (1 Buf/3 Int) */
 			  {{{ACPI_PTYPE2_PKG_COUNT,ACPI_RTYPE_BUFFER, 1, ACPI_RTYPE_INTEGER}, 3,0}},
 
+	{{"_CWS", 1, ACPI_RTYPE_INTEGER}},
 	{{"_DCK", 1, ACPI_RTYPE_INTEGER}},
 	{{"_DCS", 0, ACPI_RTYPE_INTEGER}},
 	{{"_DDC", 1, ACPI_RTYPE_INTEGER | ACPI_RTYPE_BUFFER}},
 	{{"_DDN", 0, ACPI_RTYPE_STRING}},
+	{{"_DEP", 0, ACPI_RTYPE_PACKAGE}},	/* Variable-length (Refs) */
+	{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},
+
 	{{"_DGS", 0, ACPI_RTYPE_INTEGER}},
 	{{"_DIS", 0, 0}},
+
+	{{"_DLM", 0, ACPI_RTYPE_PACKAGE}},	/* Variable-length (Pkgs) each (1 Ref, 0/1 Optional Buf/Ref) */
+	{{{ACPI_PTYPE2_FIX_VAR, ACPI_RTYPE_REFERENCE, 1,
+	   ACPI_RTYPE_REFERENCE | ACPI_RTYPE_BUFFER}, 0, 0}},
+
 	{{"_DMA", 0, ACPI_RTYPE_BUFFER}},
 	{{"_DOD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints) */
 			  {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 0,0}, 0,0}},
@@ -262,6 +288,7 @@
 	{{"_EJ3", 1, 0}},
 	{{"_EJ4", 1, 0}},
 	{{"_EJD", 0, ACPI_RTYPE_STRING}},
+	{{"_EVT", 1, 0}},
 	{{"_FDE", 0, ACPI_RTYPE_BUFFER}},
 	{{"_FDI", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (16 Int) */
 			  {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 16,0}, 0,0}},
@@ -281,14 +308,17 @@
 	{{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3, 0}, 0, 0}},
 
 	{{"_GAI", 0, ACPI_RTYPE_INTEGER}},
+	{{"_GCP", 0, ACPI_RTYPE_INTEGER}},
 	{{"_GHL", 0, ACPI_RTYPE_INTEGER}},
 	{{"_GLK", 0, ACPI_RTYPE_INTEGER}},
 	{{"_GPD", 0, ACPI_RTYPE_INTEGER}},
 	{{"_GPE", 0, ACPI_RTYPE_INTEGER}}, /* _GPE method, not _GPE scope */
+	{{"_GRT", 0, ACPI_RTYPE_BUFFER}},
 	{{"_GSB", 0, ACPI_RTYPE_INTEGER}},
 	{{"_GTF", 0, ACPI_RTYPE_BUFFER}},
 	{{"_GTM", 0, ACPI_RTYPE_BUFFER}},
 	{{"_GTS", 1, 0}},
+	{{"_GWS", 1, ACPI_RTYPE_INTEGER}},
 	{{"_HID", 0, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING}},
 	{{"_HOT", 0, ACPI_RTYPE_INTEGER}},
 	{{"_HPP", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (4 Int) */
@@ -303,6 +333,7 @@
 	{{"_HPX", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (var Ints) */
 			  {{{ACPI_PTYPE2_MIN, ACPI_RTYPE_INTEGER, 5,0}, 0,0}},
 
+	{{"_HRV", 0, ACPI_RTYPE_INTEGER}},
 	{{"_IFT", 0, ACPI_RTYPE_INTEGER}}, /* See IPMI spec */
 	{{"_INI", 0, 0}},
 	{{"_IRC", 0, 0}},
@@ -361,6 +392,9 @@
 	{{"_PR3", 0, ACPI_RTYPE_PACKAGE}},	/* Variable-length (Refs) */
 	{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},
 
+	{{"_PRE", 0, ACPI_RTYPE_PACKAGE}},	/* Variable-length (Refs) */
+	{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},
+
 	{{"_PRL", 0, ACPI_RTYPE_PACKAGE}},	/* Variable-length (Refs) */
 	{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},
 
@@ -391,6 +425,7 @@
 	{{"_PSD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (5 Int) with count */
 			  {{{ACPI_PTYPE2_COUNT, ACPI_RTYPE_INTEGER,0,0}, 0,0}},
 
+	{{"_PSE", 1, 0}},
 	{{"_PSL", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
 			  {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
 
@@ -457,6 +492,7 @@
 	{{"_SLI", 0, ACPI_RTYPE_BUFFER}},
 	{{"_SPD", 1, ACPI_RTYPE_INTEGER}},
 	{{"_SRS", 1, 0}},
+	{{"_SRT", 1, ACPI_RTYPE_INTEGER}},
 	{{"_SRV", 0, ACPI_RTYPE_INTEGER}}, /* See IPMI spec */
 	{{"_SST", 1, 0}},
 	{{"_STA", 0, ACPI_RTYPE_INTEGER}},
@@ -464,6 +500,7 @@
 	{{"_STP", 2, ACPI_RTYPE_INTEGER}},
 	{{"_STR", 0, ACPI_RTYPE_BUFFER}},
 	{{"_STV", 2, ACPI_RTYPE_INTEGER}},
+	{{"_SUB", 0, ACPI_RTYPE_STRING}},
 	{{"_SUN", 0, ACPI_RTYPE_INTEGER}},
 	{{"_SWS", 0, ACPI_RTYPE_INTEGER}},
 	{{"_TC1", 0, ACPI_RTYPE_INTEGER}},
diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h
index f08b55b..0347d09 100644
--- a/drivers/acpi/acpica/acresrc.h
+++ b/drivers/acpi/acpica/acresrc.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -73,28 +73,40 @@
 
 /* Resource conversion opcodes */
 
-#define ACPI_RSC_INITGET                0
-#define ACPI_RSC_INITSET                1
-#define ACPI_RSC_FLAGINIT               2
-#define ACPI_RSC_1BITFLAG               3
-#define ACPI_RSC_2BITFLAG               4
-#define ACPI_RSC_COUNT                  5
-#define ACPI_RSC_COUNT16                6
-#define ACPI_RSC_LENGTH                 7
-#define ACPI_RSC_MOVE8                  8
-#define ACPI_RSC_MOVE16                 9
-#define ACPI_RSC_MOVE32                 10
-#define ACPI_RSC_MOVE64                 11
-#define ACPI_RSC_SET8                   12
-#define ACPI_RSC_DATA8                  13
-#define ACPI_RSC_ADDRESS                14
-#define ACPI_RSC_SOURCE                 15
-#define ACPI_RSC_SOURCEX                16
-#define ACPI_RSC_BITMASK                17
-#define ACPI_RSC_BITMASK16              18
-#define ACPI_RSC_EXIT_NE                19
-#define ACPI_RSC_EXIT_LE                20
-#define ACPI_RSC_EXIT_EQ                21
+typedef enum {
+	ACPI_RSC_INITGET = 0,
+	ACPI_RSC_INITSET,
+	ACPI_RSC_FLAGINIT,
+	ACPI_RSC_1BITFLAG,
+	ACPI_RSC_2BITFLAG,
+	ACPI_RSC_3BITFLAG,
+	ACPI_RSC_ADDRESS,
+	ACPI_RSC_BITMASK,
+	ACPI_RSC_BITMASK16,
+	ACPI_RSC_COUNT,
+	ACPI_RSC_COUNT16,
+	ACPI_RSC_COUNT_GPIO_PIN,
+	ACPI_RSC_COUNT_GPIO_RES,
+	ACPI_RSC_COUNT_GPIO_VEN,
+	ACPI_RSC_COUNT_SERIAL_RES,
+	ACPI_RSC_COUNT_SERIAL_VEN,
+	ACPI_RSC_DATA8,
+	ACPI_RSC_EXIT_EQ,
+	ACPI_RSC_EXIT_LE,
+	ACPI_RSC_EXIT_NE,
+	ACPI_RSC_LENGTH,
+	ACPI_RSC_MOVE_GPIO_PIN,
+	ACPI_RSC_MOVE_GPIO_RES,
+	ACPI_RSC_MOVE_SERIAL_RES,
+	ACPI_RSC_MOVE_SERIAL_VEN,
+	ACPI_RSC_MOVE8,
+	ACPI_RSC_MOVE16,
+	ACPI_RSC_MOVE32,
+	ACPI_RSC_MOVE64,
+	ACPI_RSC_SET8,
+	ACPI_RSC_SOURCE,
+	ACPI_RSC_SOURCEX
+} ACPI_RSCONVERT_OPCODES;
 
 /* Resource Conversion sub-opcodes */
 
@@ -106,6 +118,9 @@
 #define ACPI_RS_OFFSET(f)               (u8) ACPI_OFFSET (struct acpi_resource,f)
 #define AML_OFFSET(f)                   (u8) ACPI_OFFSET (union aml_resource,f)
 
+/*
+ * Individual entry for the resource dump tables
+ */
 typedef const struct acpi_rsdump_info {
 	u8 opcode;
 	u8 offset;
@@ -116,20 +131,25 @@
 
 /* Values for the Opcode field above */
 
-#define ACPI_RSD_TITLE                  0
-#define ACPI_RSD_LITERAL                1
-#define ACPI_RSD_STRING                 2
-#define ACPI_RSD_UINT8                  3
-#define ACPI_RSD_UINT16                 4
-#define ACPI_RSD_UINT32                 5
-#define ACPI_RSD_UINT64                 6
-#define ACPI_RSD_1BITFLAG               7
-#define ACPI_RSD_2BITFLAG               8
-#define ACPI_RSD_SHORTLIST              9
-#define ACPI_RSD_LONGLIST               10
-#define ACPI_RSD_DWORDLIST              11
-#define ACPI_RSD_ADDRESS                12
-#define ACPI_RSD_SOURCE                 13
+typedef enum {
+	ACPI_RSD_TITLE = 0,
+	ACPI_RSD_1BITFLAG,
+	ACPI_RSD_2BITFLAG,
+	ACPI_RSD_3BITFLAG,
+	ACPI_RSD_ADDRESS,
+	ACPI_RSD_DWORDLIST,
+	ACPI_RSD_LITERAL,
+	ACPI_RSD_LONGLIST,
+	ACPI_RSD_SHORTLIST,
+	ACPI_RSD_SHORTLISTX,
+	ACPI_RSD_SOURCE,
+	ACPI_RSD_STRING,
+	ACPI_RSD_UINT8,
+	ACPI_RSD_UINT16,
+	ACPI_RSD_UINT32,
+	ACPI_RSD_UINT64,
+	ACPI_RSD_WORDLIST
+} ACPI_RSDUMP_OPCODES;
 
 /* restore default alignment */
 
@@ -138,13 +158,18 @@
 /* Resource tables indexed by internal resource type */
 
 extern const u8 acpi_gbl_aml_resource_sizes[];
+extern const u8 acpi_gbl_aml_resource_serial_bus_sizes[];
 extern struct acpi_rsconvert_info *acpi_gbl_set_resource_dispatch[];
 
 /* Resource tables indexed by raw AML resource descriptor type */
 
 extern const u8 acpi_gbl_resource_struct_sizes[];
+extern const u8 acpi_gbl_resource_struct_serial_bus_sizes[];
 extern struct acpi_rsconvert_info *acpi_gbl_get_resource_dispatch[];
 
+extern struct acpi_rsconvert_info
+    *acpi_gbl_convert_resource_serial_bus_dispatch[];
+
 struct acpi_vendor_walk_info {
 	struct acpi_vendor_uuid *uuid;
 	struct acpi_buffer *buffer;
@@ -190,6 +215,10 @@
 acpi_rs_set_srs_method_data(struct acpi_namespace_node *node,
 			    struct acpi_buffer *ret_buffer);
 
+acpi_status
+acpi_rs_get_aei_method_data(struct acpi_namespace_node *node,
+			    struct acpi_buffer *ret_buffer);
+
 /*
  * rscalc
  */
@@ -293,6 +322,11 @@
 extern struct acpi_rsconvert_info acpi_rs_convert_ext_irq[];
 extern struct acpi_rsconvert_info acpi_rs_convert_address64[];
 extern struct acpi_rsconvert_info acpi_rs_convert_ext_address64[];
+extern struct acpi_rsconvert_info acpi_rs_convert_gpio[];
+extern struct acpi_rsconvert_info acpi_rs_convert_fixed_dma[];
+extern struct acpi_rsconvert_info acpi_rs_convert_i2c_serial_bus[];
+extern struct acpi_rsconvert_info acpi_rs_convert_spi_serial_bus[];
+extern struct acpi_rsconvert_info acpi_rs_convert_uart_serial_bus[];
 
 /* These resources require separate get/set tables */
 
@@ -310,6 +344,7 @@
  * rsinfo
  */
 extern struct acpi_rsdump_info *acpi_gbl_dump_resource_dispatch[];
+extern struct acpi_rsdump_info *acpi_gbl_dump_serial_bus_dispatch[];
 
 /*
  * rsdump
@@ -331,6 +366,12 @@
 extern struct acpi_rsdump_info acpi_rs_dump_ext_address64[];
 extern struct acpi_rsdump_info acpi_rs_dump_ext_irq[];
 extern struct acpi_rsdump_info acpi_rs_dump_generic_reg[];
+extern struct acpi_rsdump_info acpi_rs_dump_gpio[];
+extern struct acpi_rsdump_info acpi_rs_dump_fixed_dma[];
+extern struct acpi_rsdump_info acpi_rs_dump_common_serial_bus[];
+extern struct acpi_rsdump_info acpi_rs_dump_i2c_serial_bus[];
+extern struct acpi_rsdump_info acpi_rs_dump_spi_serial_bus[];
+extern struct acpi_rsdump_info acpi_rs_dump_uart_serial_bus[];
 #endif
 
 #endif				/* __ACRESRC_H__ */
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index 1623b24..0404df6 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index 967f081..d5bec30 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 99c140d..925ccf2 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -45,6 +45,7 @@
 #define _ACUTILS_H
 
 extern const u8 acpi_gbl_resource_aml_sizes[];
+extern const u8 acpi_gbl_resource_aml_serial_bus_sizes[];
 
 /* Strings used by the disassembler and debugger resource dump routines */
 
@@ -579,6 +580,24 @@
 #endif				/* ACPI_DBG_TRACK_ALLOCATIONS */
 
 /*
+ * utaddress - address range check
+ */
+acpi_status
+acpi_ut_add_address_range(acpi_adr_space_type space_id,
+			  acpi_physical_address address,
+			  u32 length, struct acpi_namespace_node *region_node);
+
+void
+acpi_ut_remove_address_range(acpi_adr_space_type space_id,
+			     struct acpi_namespace_node *region_node);
+
+u32
+acpi_ut_check_address_range(acpi_adr_space_type space_id,
+			    acpi_physical_address address, u32 length, u8 warn);
+
+void acpi_ut_delete_address_lists(void);
+
+/*
  * utxferror - various error/warning output functions
  */
 void ACPI_INTERNAL_VAR_XFACE
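The new utaddress interface declared above tracks operation-region address ranges in the per-space-id lists added to acglobal.h, using the struct acpi_address_range layout from aclocal.h. A minimal sketch of the kind of overlap walk acpi_ut_check_address_range could perform is shown here; the helper name and the list-head parameter are assumptions, and the real implementation lives in the new utaddress.c, which is not part of this diff:

/*
 * Illustrative sketch only (assumes the ACPICA headers above for the
 * acpi_physical_address, u32 and struct acpi_address_range types):
 * count how many entries of one per-space-id range list overlap the
 * given address window.
 */
u32 example_count_overlaps(struct acpi_address_range *head,
			   acpi_physical_address address, u32 length)
{
	acpi_physical_address end = address + length - 1;
	u32 overlaps = 0;

	while (head) {
		/* Two ranges overlap unless one ends before the other starts */
		if (address <= head->end_address && end >= head->start_address)
			overlaps++;
		head = head->next;
	}
	return overlaps;
}
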
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index 1077f17..905280f 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -7,7 +7,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -189,6 +189,14 @@
 #define AML_LNOTEQUAL_OP            (u16) 0x9293
 
 /*
+ * Opcodes for "Field" operators
+ */
+#define AML_FIELD_OFFSET_OP         (u8) 0x00
+#define AML_FIELD_ACCESS_OP         (u8) 0x01
+#define AML_FIELD_CONNECTION_OP     (u8) 0x02	/* ACPI 5.0 */
+#define AML_FIELD_EXT_ACCESS_OP     (u8) 0x03	/* ACPI 5.0 */
+
+/*
  * Internal opcodes
  * Use only "Unknown" AML opcodes, don't attempt to use
  * any valid ACPI ASCII values (A-Z, 0-9, '-')
@@ -202,6 +210,8 @@
 #define AML_INT_METHODCALL_OP       (u16) 0x0035
 #define AML_INT_RETURN_VALUE_OP     (u16) 0x0036
 #define AML_INT_EVAL_SUBTREE_OP     (u16) 0x0037
+#define AML_INT_CONNECTION_OP       (u16) 0x0038
+#define AML_INT_EXTACCESSFIELD_OP   (u16) 0x0039
 
 #define ARG_NONE                    0x0
 
@@ -456,13 +466,16 @@
  * access_as keyword
  */
 typedef enum {
-	AML_FIELD_ATTRIB_SMB_QUICK = 0x02,
-	AML_FIELD_ATTRIB_SMB_SEND_RCV = 0x04,
-	AML_FIELD_ATTRIB_SMB_BYTE = 0x06,
-	AML_FIELD_ATTRIB_SMB_WORD = 0x08,
-	AML_FIELD_ATTRIB_SMB_BLOCK = 0x0A,
-	AML_FIELD_ATTRIB_SMB_WORD_CALL = 0x0C,
-	AML_FIELD_ATTRIB_SMB_BLOCK_CALL = 0x0D
+	AML_FIELD_ATTRIB_QUICK = 0x02,
+	AML_FIELD_ATTRIB_SEND_RCV = 0x04,
+	AML_FIELD_ATTRIB_BYTE = 0x06,
+	AML_FIELD_ATTRIB_WORD = 0x08,
+	AML_FIELD_ATTRIB_BLOCK = 0x0A,
+	AML_FIELD_ATTRIB_MULTIBYTE = 0x0B,
+	AML_FIELD_ATTRIB_WORD_CALL = 0x0C,
+	AML_FIELD_ATTRIB_BLOCK_CALL = 0x0D,
+	AML_FIELD_ATTRIB_RAW_BYTES = 0x0E,
+	AML_FIELD_ATTRIB_RAW_PROCESS = 0x0F
 } AML_ACCESS_ATTRIBUTE;
 
 /* Bit fields in the AML method_flags byte */
diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h
index 59122cd..7b2128f 100644
--- a/drivers/acpi/acpica/amlresrc.h
+++ b/drivers/acpi/acpica/amlresrc.h
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -58,29 +58,48 @@
 #define ACPI_RESTAG_TYPESPECIFICATTRIBUTES      "_ATT"
 #define ACPI_RESTAG_BASEADDRESS                 "_BAS"
 #define ACPI_RESTAG_BUSMASTER                   "_BM_"	/* Master(1), Slave(0) */
+#define ACPI_RESTAG_DEBOUNCETIME                "_DBT"
 #define ACPI_RESTAG_DECODE                      "_DEC"
+#define ACPI_RESTAG_DEVICEPOLARITY              "_DPL"
 #define ACPI_RESTAG_DMA                         "_DMA"
 #define ACPI_RESTAG_DMATYPE                     "_TYP"	/* Compatible(0), A(1), B(2), F(3) */
+#define ACPI_RESTAG_DRIVESTRENGTH               "_DRS"
+#define ACPI_RESTAG_ENDIANNESS                  "_END"
+#define ACPI_RESTAG_FLOWCONTROL                 "_FLC"
 #define ACPI_RESTAG_GRANULARITY                 "_GRA"
 #define ACPI_RESTAG_INTERRUPT                   "_INT"
 #define ACPI_RESTAG_INTERRUPTLEVEL              "_LL_"	/* active_lo(1), active_hi(0) */
 #define ACPI_RESTAG_INTERRUPTSHARE              "_SHR"	/* Shareable(1), no_share(0) */
 #define ACPI_RESTAG_INTERRUPTTYPE               "_HE_"	/* Edge(1), Level(0) */
+#define ACPI_RESTAG_IORESTRICTION               "_IOR"
 #define ACPI_RESTAG_LENGTH                      "_LEN"
+#define ACPI_RESTAG_LINE                        "_LIN"
 #define ACPI_RESTAG_MEMATTRIBUTES               "_MTP"	/* Memory(0), Reserved(1), ACPI(2), NVS(3) */
 #define ACPI_RESTAG_MEMTYPE                     "_MEM"	/* non_cache(0), Cacheable(1) Cache+combine(2), Cache+prefetch(3) */
 #define ACPI_RESTAG_MAXADDR                     "_MAX"
 #define ACPI_RESTAG_MINADDR                     "_MIN"
 #define ACPI_RESTAG_MAXTYPE                     "_MAF"
 #define ACPI_RESTAG_MINTYPE                     "_MIF"
+#define ACPI_RESTAG_MODE                        "_MOD"
+#define ACPI_RESTAG_PARITY                      "_PAR"
+#define ACPI_RESTAG_PHASE                       "_PHA"
+#define ACPI_RESTAG_PIN                         "_PIN"
+#define ACPI_RESTAG_PINCONFIG                   "_PPI"
+#define ACPI_RESTAG_POLARITY                    "_POL"
 #define ACPI_RESTAG_REGISTERBITOFFSET           "_RBO"
 #define ACPI_RESTAG_REGISTERBITWIDTH            "_RBW"
 #define ACPI_RESTAG_RANGETYPE                   "_RNG"
 #define ACPI_RESTAG_READWRITETYPE               "_RW_"	/* read_only(0), Writeable (1) */
+#define ACPI_RESTAG_LENGTH_RX                   "_RXL"
+#define ACPI_RESTAG_LENGTH_TX                   "_TXL"
+#define ACPI_RESTAG_SLAVEMODE                   "_SLV"
+#define ACPI_RESTAG_SPEED                       "_SPE"
+#define ACPI_RESTAG_STOPBITS                    "_STB"
 #define ACPI_RESTAG_TRANSLATION                 "_TRA"
 #define ACPI_RESTAG_TRANSTYPE                   "_TRS"	/* Sparse(1), Dense(0) */
 #define ACPI_RESTAG_TYPE                        "_TTP"	/* Translation(1), Static (0) */
 #define ACPI_RESTAG_XFERTYPE                    "_SIZ"	/* 8(0), 8_and16(1), 16(2) */
+#define ACPI_RESTAG_VENDORDATA                  "_VEN"
 
 /* Default sizes for "small" resource descriptors */
 
@@ -90,6 +109,7 @@
 #define ASL_RDESC_END_DEPEND_SIZE               0x00
 #define ASL_RDESC_IO_SIZE                       0x07
 #define ASL_RDESC_FIXED_IO_SIZE                 0x03
+#define ASL_RDESC_FIXED_DMA_SIZE                0x05
 #define ASL_RDESC_END_TAG_SIZE                  0x01
 
 struct asl_resource_node {
@@ -164,6 +184,12 @@
 	AML_RESOURCE_SMALL_HEADER_COMMON u8 checksum;
 };
 
+struct aml_resource_fixed_dma {
+	AML_RESOURCE_SMALL_HEADER_COMMON u16 request_lines;
+	u16 channels;
+	u8 width;
+};
+
 /*
  * LARGE descriptors
  */
@@ -263,6 +289,110 @@
 	u64 address;
 };
 
+/* Common descriptor for gpio_int and gpio_io (ACPI 5.0) */
+
+struct aml_resource_gpio {
+	AML_RESOURCE_LARGE_HEADER_COMMON u8 revision_id;
+	u8 connection_type;
+	u16 flags;
+	u16 int_flags;
+	u8 pin_config;
+	u16 drive_strength;
+	u16 debounce_timeout;
+	u16 pin_table_offset;
+	u8 res_source_index;
+	u16 res_source_offset;
+	u16 vendor_offset;
+	u16 vendor_length;
+	/*
+	 * Optional fields follow immediately:
+	 * 1) PIN list (Words)
+	 * 2) Resource Source String
+	 * 3) Vendor Data bytes
+	 */
+};
+
+#define AML_RESOURCE_GPIO_REVISION              1	/* ACPI 5.0 */
+
+/* Values for connection_type above */
+
+#define AML_RESOURCE_GPIO_TYPE_INT              0
+#define AML_RESOURCE_GPIO_TYPE_IO               1
+#define AML_RESOURCE_MAX_GPIOTYPE               1
+
+/* Common preamble for all serial descriptors (ACPI 5.0) */
+
+#define AML_RESOURCE_SERIAL_COMMON \
+	u8                              revision_id; \
+	u8                              res_source_index; \
+	u8                              type; \
+	u8                              flags; \
+	u16                             type_specific_flags; \
+	u8                              type_revision_id; \
+	u16                             type_data_length; \
+
+/* Values for the type field above */
+
+#define AML_RESOURCE_I2C_SERIALBUSTYPE          1
+#define AML_RESOURCE_SPI_SERIALBUSTYPE          2
+#define AML_RESOURCE_UART_SERIALBUSTYPE         3
+#define AML_RESOURCE_MAX_SERIALBUSTYPE          3
+#define AML_RESOURCE_VENDOR_SERIALBUSTYPE       192	/* Vendor defined is 0xC0-0xFF (NOT SUPPORTED) */
+
+struct aml_resource_common_serialbus {
+AML_RESOURCE_LARGE_HEADER_COMMON AML_RESOURCE_SERIAL_COMMON};
+
+struct aml_resource_i2c_serialbus {
+	AML_RESOURCE_LARGE_HEADER_COMMON
+	    AML_RESOURCE_SERIAL_COMMON u32 connection_speed;
+	u16 slave_address;
+	/*
+	 * Optional fields follow immediately:
+	 * 1) Vendor Data bytes
+	 * 2) Resource Source String
+	 */
+};
+
+#define AML_RESOURCE_I2C_REVISION               1	/* ACPI 5.0 */
+#define AML_RESOURCE_I2C_TYPE_REVISION          1	/* ACPI 5.0 */
+#define AML_RESOURCE_I2C_MIN_DATA_LEN           6
+
+struct aml_resource_spi_serialbus {
+	AML_RESOURCE_LARGE_HEADER_COMMON
+	    AML_RESOURCE_SERIAL_COMMON u32 connection_speed;
+	u8 data_bit_length;
+	u8 clock_phase;
+	u8 clock_polarity;
+	u16 device_selection;
+	/*
+	 * Optional fields follow immediately:
+	 * 1) Vendor Data bytes
+	 * 2) Resource Source String
+	 */
+};
+
+#define AML_RESOURCE_SPI_REVISION               1	/* ACPI 5.0 */
+#define AML_RESOURCE_SPI_TYPE_REVISION          1	/* ACPI 5.0 */
+#define AML_RESOURCE_SPI_MIN_DATA_LEN           9
+
+struct aml_resource_uart_serialbus {
+	AML_RESOURCE_LARGE_HEADER_COMMON
+	    AML_RESOURCE_SERIAL_COMMON u32 default_baud_rate;
+	u16 rx_fifo_size;
+	u16 tx_fifo_size;
+	u8 parity;
+	u8 lines_enabled;
+	/*
+	 * Optional fields follow immediately:
+	 * 1) Vendor Data bytes
+	 * 2) Resource Source String
+	 */
+};
+
+#define AML_RESOURCE_UART_REVISION              1	/* ACPI 5.0 */
+#define AML_RESOURCE_UART_TYPE_REVISION         1	/* ACPI 5.0 */
+#define AML_RESOURCE_UART_MIN_DATA_LEN          10
+
 /* restore default alignment */
 
 #pragma pack()
@@ -284,6 +414,7 @@
 	struct aml_resource_end_dependent end_dpf;
 	struct aml_resource_io io;
 	struct aml_resource_fixed_io fixed_io;
+	struct aml_resource_fixed_dma fixed_dma;
 	struct aml_resource_vendor_small vendor_small;
 	struct aml_resource_end_tag end_tag;
 
@@ -299,6 +430,11 @@
 	struct aml_resource_address64 address64;
 	struct aml_resource_extended_address64 ext_address64;
 	struct aml_resource_extended_irq extended_irq;
+	struct aml_resource_gpio gpio;
+	struct aml_resource_i2c_serialbus i2c_serial_bus;
+	struct aml_resource_spi_serialbus spi_serial_bus;
+	struct aml_resource_uart_serialbus uart_serial_bus;
+	struct aml_resource_common_serialbus common_serial_bus;
 
 	/* Utility overlays */
 
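As an illustration only (not part of the patch), the fixed portion of the new ACPI 5.0 I2C serial-bus descriptor declared above could be populated as sketched below; the connection speed and slave address are arbitrary example values, and the ACPICA header above is assumed for the struct and constants:

/*
 * Illustrative sketch: fill the fixed fields of the new I2C serial-bus
 * AML descriptor. Vendor data and the Resource Source string would
 * follow the structure immediately, as noted in the header comments.
 */
void example_fill_i2c(struct aml_resource_i2c_serialbus *i2c)
{
	i2c->revision_id = AML_RESOURCE_I2C_REVISION;
	i2c->type = AML_RESOURCE_I2C_SERIALBUSTYPE;
	i2c->type_revision_id = AML_RESOURCE_I2C_TYPE_REVISION;
	i2c->type_data_length = AML_RESOURCE_I2C_MIN_DATA_LEN;
	i2c->connection_speed = 400000;	/* Hz, example value */
	i2c->slave_address = 0x50;	/* example value */
}
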
diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
index 8c7b997..80eb190 100644
--- a/drivers/acpi/acpica/dsargs.c
+++ b/drivers/acpi/acpica/dsargs.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -250,6 +250,13 @@
 	status = acpi_ds_execute_arguments(node, node->parent,
 					   extra_desc->extra.aml_length,
 					   extra_desc->extra.aml_start);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	status = acpi_ut_add_address_range(obj_desc->region.space_id,
+					   obj_desc->region.address,
+					   obj_desc->region.length, node);
 	return_ACPI_STATUS(status);
 }
 
@@ -384,8 +391,15 @@
 
 	/* Execute the argument AML */
 
-	status = acpi_ds_execute_arguments(node, node->parent,
+	status = acpi_ds_execute_arguments(node, extra_desc->extra.scope_node,
 					   extra_desc->extra.aml_length,
 					   extra_desc->extra.aml_start);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	status = acpi_ut_add_address_range(obj_desc->region.space_id,
+					   obj_desc->region.address,
+					   obj_desc->region.length, node);
 	return_ACPI_STATUS(status);
 }
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index 26c49ff..effe4ca 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index 34be60c..cd243cf 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -221,6 +221,7 @@
 {
 	acpi_status status;
 	u64 position;
+	union acpi_parse_object *child;
 
 	ACPI_FUNCTION_TRACE_PTR(ds_get_field_names, info);
 
@@ -232,10 +233,11 @@
 
 	while (arg) {
 		/*
-		 * Three types of field elements are handled:
-		 * 1) Offset - specifies a bit offset
-		 * 2) access_as - changes the access mode
-		 * 3) Name - Enters a new named field into the namespace
+		 * Four types of field elements are handled:
+		 * 1) Name - Enters a new named field into the namespace
+		 * 2) Offset - specifies a bit offset
+		 * 3) access_as - changes the access mode/attributes
+		 * 4) Connection - Associate a resource template with the field
 		 */
 		switch (arg->common.aml_opcode) {
 		case AML_INT_RESERVEDFIELD_OP:
@@ -253,21 +255,70 @@
 			break;
 
 		case AML_INT_ACCESSFIELD_OP:
-
+		case AML_INT_EXTACCESSFIELD_OP:
 			/*
-			 * Get a new access_type and access_attribute -- to be used for all
-			 * field units that follow, until field end or another access_as
-			 * keyword.
+			 * Get new access_type, access_attribute, and access_length fields
+			 * -- to be used for all field units that follow, until the
+			 * end-of-field or another access_as keyword is encountered.
+			 * NOTE. These three bytes are encoded in the integer value
+			 * of the parseop for convenience.
 			 *
 			 * In field_flags, preserve the flag bits other than the
-			 * ACCESS_TYPE bits
+			 * ACCESS_TYPE bits.
 			 */
+
+			/* access_type (byte_acc, word_acc, etc.) */
+
 			info->field_flags = (u8)
 			    ((info->
 			      field_flags & ~(AML_FIELD_ACCESS_TYPE_MASK)) |
-			     ((u8) ((u32) arg->common.value.integer >> 8)));
+			     ((u8)((u32)(arg->common.value.integer & 0x07))));
 
-			info->attribute = (u8) (arg->common.value.integer);
+			/* access_attribute (attrib_quick, attrib_byte, etc.) */
+
+			info->attribute =
+			    (u8)((arg->common.value.integer >> 8) & 0xFF);
+
+			/* access_length (for serial/buffer protocols) */
+
+			info->access_length =
+			    (u8)((arg->common.value.integer >> 16) & 0xFF);
+			break;
+
+		case AML_INT_CONNECTION_OP:
+			/*
+			 * Clear any previous connection. New connection is used for all
+			 * fields that follow, similar to access_as
+			 */
+			info->resource_buffer = NULL;
+			info->connection_node = NULL;
+
+			/*
+			 * A Connection() is either an actual resource descriptor (buffer)
+			 * or a named reference to a resource template
+			 */
+			child = arg->common.value.arg;
+			if (child->common.aml_opcode == AML_INT_BYTELIST_OP) {
+				info->resource_buffer = child->named.data;
+				info->resource_length =
+				    (u16)child->named.value.integer;
+			} else {
+				/* Lookup the Connection() namepath, it should already exist */
+
+				status = acpi_ns_lookup(walk_state->scope_info,
+							child->common.value.
+							name, ACPI_TYPE_ANY,
+							ACPI_IMODE_EXECUTE,
+							ACPI_NS_DONT_OPEN_SCOPE,
+							walk_state,
+							&info->connection_node);
+				if (ACPI_FAILURE(status)) {
+					ACPI_ERROR_NAMESPACE(child->common.
+							     value.name,
+							     status);
+					return_ACPI_STATUS(status);
+				}
+			}
 			break;
 
 		case AML_INT_NAMEDFIELD_OP:
@@ -374,6 +425,8 @@
 		}
 	}
 
+	ACPI_MEMSET(&info, 0, sizeof(struct acpi_create_field_info));
+
 	/* Second arg is the field flags */
 
 	arg = arg->common.next;
@@ -386,7 +439,6 @@
 	info.region_node = region_node;
 
 	status = acpi_ds_get_field_names(&info, walk_state, arg->common.next);
-
 	return_ACPI_STATUS(status);
 }
 
@@ -474,8 +526,8 @@
 	 */
 	while (arg) {
 		/*
-		 * Ignore OFFSET and ACCESSAS terms here; we are only interested in the
-		 * field names in order to enter them into the namespace.
+		 * Ignore OFFSET/ACCESSAS/CONNECTION terms here; we are only interested
+		 * in the field names in order to enter them into the namespace.
 		 */
 		if (arg->common.aml_opcode == AML_INT_NAMEDFIELD_OP) {
 			status = acpi_ns_lookup(walk_state->scope_info,
@@ -651,6 +703,5 @@
 	info.region_node = region_node;
 
 	status = acpi_ds_get_field_names(&info, walk_state, arg->common.next);
-
 	return_ACPI_STATUS(status);
 }
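The access_as handling above now unpacks three bytes that the parser packs into the parse-op integer (access type, access attribute, access length). A standalone sketch of that decode, mirroring the masks and shifts used in the hunk (the function name is hypothetical):

/*
 * Illustrative sketch: unpack the access_as bytes exactly as
 * acpi_ds_get_field_names does above. Bits 0-2 carry the access type
 * (byte_acc, word_acc, ...), bits 8-15 the access attribute
 * (attrib_quick, attrib_byte, ...), bits 16-23 the access length used
 * by the serial/buffer protocols.
 */
void example_unpack_access_as(unsigned long long packed,
			      unsigned char *access_type,
			      unsigned char *access_attribute,
			      unsigned char *access_length)
{
	*access_type = (unsigned char)(packed & 0x07);
	*access_attribute = (unsigned char)((packed >> 8) & 0xFF);
	*access_length = (unsigned char)((packed >> 16) & 0xFF);
}
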
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index a7718bf..9e5ac7f 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 5d79775..00f5dab 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c
index 905ce29..b40bd50 100644
--- a/drivers/acpi/acpica/dsmthdat.c
+++ b/drivers/acpi/acpica/dsmthdat.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index f42e17e..d7045ca 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index c627a28..e5eff75 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index 2c477ce..1abcda3 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index fe40e4c..642f3c0 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index 324acec..552aa3a 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 9763181..ae71477 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswscope.c b/drivers/acpi/acpica/dswscope.c
index 76a661f..9e9490a 100644
--- a/drivers/acpi/acpica/dswscope.c
+++ b/drivers/acpi/acpica/dswscope.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index a6c374e..c9c2ac1 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index d458b04..6729ebe 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -71,6 +71,12 @@
 
 	ACPI_FUNCTION_TRACE(ev_initialize_events);
 
+	/* If Hardware Reduced flag is set, there are no fixed events */
+
+	if (acpi_gbl_reduced_hardware) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
 	/*
 	 * Initialize the Fixed and General Purpose Events. This is done prior to
 	 * enabling SCIs to prevent interrupts from occurring before the handlers
@@ -111,6 +117,12 @@
 
 	ACPI_FUNCTION_TRACE(ev_install_xrupt_handlers);
 
+	/* If Hardware Reduced flag is set, there is no ACPI h/w */
+
+	if (acpi_gbl_reduced_hardware) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
 	/* Install the SCI handler */
 
 	status = acpi_ev_install_sci_handler();
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
index 56a562a..5e5683c 100644
--- a/drivers/acpi/acpica/evglock.c
+++ b/drivers/acpi/acpica/evglock.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -70,6 +70,12 @@
 
 	ACPI_FUNCTION_TRACE(ev_init_global_lock_handler);
 
+	/* If Hardware Reduced flag is set, there is no global lock */
+
+	if (acpi_gbl_reduced_hardware) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
 	/* Attempt installation of the global lock handler */
 
 	status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL,
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 65c79ad..9e88cb6 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index ca2c41a..be75339 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index ce9aa9f..adf7494 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index 80a81d0..2507393 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index d0b3318..84966f4 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index f0edf5c..1b0180a 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -329,6 +329,7 @@
  * FUNCTION:    acpi_ev_address_space_dispatch
  *
  * PARAMETERS:  region_obj          - Internal region object
+ *              field_obj           - Corresponding field. Can be NULL.
  *              Function            - Read or Write operation
  *              region_offset       - Where in the region to read or write
  *              bit_width           - Field width in bits (8, 16, 32, or 64)
@@ -344,6 +345,7 @@
 
 acpi_status
 acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
+			       union acpi_operand_object *field_obj,
 			       u32 function,
 			       u32 region_offset, u32 bit_width, u64 *value)
 {
@@ -353,6 +355,7 @@
 	union acpi_operand_object *handler_desc;
 	union acpi_operand_object *region_obj2;
 	void *region_context = NULL;
+	struct acpi_connection_info *context;
 
 	ACPI_FUNCTION_TRACE(ev_address_space_dispatch);
 
@@ -375,6 +378,8 @@
 		return_ACPI_STATUS(AE_NOT_EXIST);
 	}
 
+	context = handler_desc->address_space.context;
+
 	/*
 	 * It may be the case that the region has never been initialized.
 	 * Some types of regions require special init code
@@ -404,8 +409,7 @@
 		acpi_ex_exit_interpreter();
 
 		status = region_setup(region_obj, ACPI_REGION_ACTIVATE,
-				      handler_desc->address_space.context,
-				      &region_context);
+				      context, &region_context);
 
 		/* Re-enter the interpreter */
 
@@ -455,6 +459,25 @@
 			  acpi_ut_get_region_name(region_obj->region.
 						  space_id)));
 
+	/*
+	 * Special handling for generic_serial_bus and general_purpose_io:
+	 * There are three extra parameters that must be passed to the
+	 * handler via the context:
+	 *   1) Connection buffer, a resource template from Connection() op.
+	 *   2) Length of the above buffer.
+	 *   3) Actual access length from the access_as() op.
+	 */
+	if (((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) ||
+	     (region_obj->region.space_id == ACPI_ADR_SPACE_GPIO)) &&
+	    context && field_obj) {
+
+		/* Get the Connection (resource_template) buffer */
+
+		context->connection = field_obj->field.resource_buffer;
+		context->length = field_obj->field.resource_length;
+		context->access_length = field_obj->field.access_length;
+	}
+
 	if (!(handler_desc->address_space.handler_flags &
 	      ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
 		/*
@@ -469,7 +492,7 @@
 
 	status = handler(function,
 			 (region_obj->region.address + region_offset),
-			 bit_width, value, handler_desc->address_space.context,
+			 bit_width, value, context,
 			 region_obj2->extra.region_context);
 
 	if (ACPI_FAILURE(status)) {
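
The new field_obj parameter lets acpi_ev_address_space_dispatch() forward the Connection() buffer and AccessAs() length to GenericSerialBus and GeneralPurposeIO handlers through the handler context, so a handler for those spaces is expected to register a context that begins with a struct acpi_connection_info (connection, length, access_length). A minimal handler sketch under that assumption; my_gsbus_handler is a hypothetical name:

static acpi_status
my_gsbus_handler(u32 function, acpi_physical_address address,
		 u32 bit_width, u64 *value, void *handler_context,
		 void *region_context)
{
	/*
	 * handler_context is whatever was passed to
	 * acpi_install_address_space_handler(); the dispatcher above fills
	 * in the connection fields before each GSBus/GPIO access.
	 */
	struct acpi_connection_info *info = handler_context;

	if (!info || !info->connection) {
		return (AE_BAD_PARAMETER);
	}

	/*
	 * info->connection:    raw Connection() resource template bytes
	 * info->length:        length of that buffer
	 * info->access_length: length byte from an extended AccessAs()
	 */

	/* ... decode the connection descriptor, then transfer data via value ... */
	(void)address;
	(void)bit_width;
	(void)region_context;
	return (AE_OK);
}
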
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 55a5d35..819c17f 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evsci.c b/drivers/acpi/acpica/evsci.c
index 2ebd40e..26065c6 100644
--- a/drivers/acpi/acpica/evsci.c
+++ b/drivers/acpi/acpica/evsci.c
@@ -6,7 +6,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index f4f523b..61944e8 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 20516e5..1768bbe 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index f06a3ee..33388fd 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index aee887e..6019208c 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 745a42b..c86d44e 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -297,9 +297,9 @@
 	/* Bytewise reads */
 
 	for (i = 0; i < length; i++) {
-		status = acpi_ev_address_space_dispatch(obj_desc, ACPI_READ,
-							region_offset, 8,
-							&value);
+		status =
+		    acpi_ev_address_space_dispatch(obj_desc, NULL, ACPI_READ,
+						   region_offset, 8, &value);
 		if (ACPI_FAILURE(status)) {
 			return status;
 		}
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index 74162a1..e385436 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index 110711a..3f5bc99 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -267,7 +267,7 @@
  *
  * PARAMETERS:  aml_start           - Pointer to the region declaration AML
  *              aml_length          - Max length of the declaration AML
- *              region_space        - space_iD for the region
+ *              space_id            - Address space ID for the region
  *              walk_state          - Current state
  *
  * RETURN:      Status
@@ -279,7 +279,7 @@
 acpi_status
 acpi_ex_create_region(u8 * aml_start,
 		      u32 aml_length,
-		      u8 region_space, struct acpi_walk_state *walk_state)
+		      u8 space_id, struct acpi_walk_state *walk_state)
 {
 	acpi_status status;
 	union acpi_operand_object *obj_desc;
@@ -304,16 +304,19 @@
 	 * Space ID must be one of the predefined IDs, or in the user-defined
 	 * range
 	 */
-	if ((region_space >= ACPI_NUM_PREDEFINED_REGIONS) &&
-	    (region_space < ACPI_USER_REGION_BEGIN) &&
-	    (region_space != ACPI_ADR_SPACE_DATA_TABLE)) {
-		ACPI_ERROR((AE_INFO, "Invalid AddressSpace type 0x%X",
-			    region_space));
-		return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
+	if (!acpi_is_valid_space_id(space_id)) {
+		/*
+		 * Print an error message, but continue. We don't want to abort
+		 * a table load for this exception. Instead, if the region is
+		 * actually used at runtime, abort the executing method.
+		 */
+		ACPI_ERROR((AE_INFO,
+			    "Invalid/unknown Address Space ID: 0x%2.2X",
+			    space_id));
 	}
 
 	ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "Region Type - %s (0x%X)\n",
-			  acpi_ut_get_region_name(region_space), region_space));
+			  acpi_ut_get_region_name(space_id), space_id));
 
 	/* Create the region descriptor */
 
@@ -330,10 +333,16 @@
 	region_obj2 = obj_desc->common.next_object;
 	region_obj2->extra.aml_start = aml_start;
 	region_obj2->extra.aml_length = aml_length;
+	if (walk_state->scope_info) {
+		region_obj2->extra.scope_node =
+		    walk_state->scope_info->scope.node;
+	} else {
+		region_obj2->extra.scope_node = node;
+	}
 
 	/* Init the region from the operands */
 
-	obj_desc->region.space_id = region_space;
+	obj_desc->region.space_id = space_id;
 	obj_desc->region.address = 0;
 	obj_desc->region.length = 0;
 	obj_desc->region.node = node;
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
index c7a2f1e..e211e9c 100644
--- a/drivers/acpi/acpica/exdebug.c
+++ b/drivers/acpi/acpica/exdebug.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 61b8c0e..2a6ac0a 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -192,10 +192,13 @@
 	 "Buffer Object"}
 };
 
-static struct acpi_exdump_info acpi_ex_dump_region_field[3] = {
+static struct acpi_exdump_info acpi_ex_dump_region_field[5] = {
 	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_region_field), NULL},
 	{ACPI_EXD_FIELD, 0, NULL},
-	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(field.region_obj), "Region Object"}
+	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(field.access_length), "AccessLength"},
+	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(field.region_obj), "Region Object"},
+	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(field.resource_buffer),
+	 "ResourceBuffer"}
 };
 
 static struct acpi_exdump_info acpi_ex_dump_bank_field[5] = {
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index 0bde223..dc092f5 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -100,18 +100,25 @@
 		   (obj_desc->field.region_obj->region.space_id ==
 		    ACPI_ADR_SPACE_SMBUS
 		    || obj_desc->field.region_obj->region.space_id ==
+		    ACPI_ADR_SPACE_GSBUS
+		    || obj_desc->field.region_obj->region.space_id ==
 		    ACPI_ADR_SPACE_IPMI)) {
 		/*
-		 * This is an SMBus or IPMI read. We must create a buffer to hold
+		 * This is an SMBus, GSBus or IPMI read. We must create a buffer to hold
 		 * the data and then directly access the region handler.
 		 *
-		 * Note: Smbus protocol value is passed in upper 16-bits of Function
+		 * Note: SMBus and GSBus protocol value is passed in upper 16-bits of Function
 		 */
 		if (obj_desc->field.region_obj->region.space_id ==
 		    ACPI_ADR_SPACE_SMBUS) {
 			length = ACPI_SMBUS_BUFFER_SIZE;
 			function =
 			    ACPI_READ | (obj_desc->field.attribute << 16);
+		} else if (obj_desc->field.region_obj->region.space_id ==
+			   ACPI_ADR_SPACE_GSBUS) {
+			length = ACPI_GSBUS_BUFFER_SIZE;
+			function =
+			    ACPI_READ | (obj_desc->field.attribute << 16);
 		} else {	/* IPMI */
 
 			length = ACPI_IPMI_BUFFER_SIZE;
@@ -248,21 +255,23 @@
 		   (obj_desc->field.region_obj->region.space_id ==
 		    ACPI_ADR_SPACE_SMBUS
 		    || obj_desc->field.region_obj->region.space_id ==
+		    ACPI_ADR_SPACE_GSBUS
+		    || obj_desc->field.region_obj->region.space_id ==
 		    ACPI_ADR_SPACE_IPMI)) {
 		/*
-		 * This is an SMBus or IPMI write. We will bypass the entire field
+		 * This is an SMBus, GSBus or IPMI write. We will bypass the entire field
 		 * mechanism and handoff the buffer directly to the handler. For
 		 * these address spaces, the buffer is bi-directional; on a write,
 		 * return data is returned in the same buffer.
 		 *
 		 * Source must be a buffer of sufficient size:
-		 * ACPI_SMBUS_BUFFER_SIZE or ACPI_IPMI_BUFFER_SIZE.
+		 * ACPI_SMBUS_BUFFER_SIZE, ACPI_GSBUS_BUFFER_SIZE, or ACPI_IPMI_BUFFER_SIZE.
 		 *
-		 * Note: SMBus protocol type is passed in upper 16-bits of Function
+		 * Note: SMBus and GSBus protocol type is passed in upper 16-bits of Function
 		 */
 		if (source_desc->common.type != ACPI_TYPE_BUFFER) {
 			ACPI_ERROR((AE_INFO,
-				    "SMBus or IPMI write requires Buffer, found type %s",
+				    "SMBus/IPMI/GenericSerialBus write requires Buffer, found type %s",
 				    acpi_ut_get_object_type_name(source_desc)));
 
 			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
@@ -273,6 +282,11 @@
 			length = ACPI_SMBUS_BUFFER_SIZE;
 			function =
 			    ACPI_WRITE | (obj_desc->field.attribute << 16);
+		} else if (obj_desc->field.region_obj->region.space_id ==
+			   ACPI_ADR_SPACE_GSBUS) {
+			length = ACPI_GSBUS_BUFFER_SIZE;
+			function =
+			    ACPI_WRITE | (obj_desc->field.attribute << 16);
 		} else {	/* IPMI */
 
 			length = ACPI_IPMI_BUFFER_SIZE;
@@ -281,7 +295,7 @@
 
 		if (source_desc->buffer.length < length) {
 			ACPI_ERROR((AE_INFO,
-				    "SMBus or IPMI write requires Buffer of length %u, found length %u",
+				    "SMBus/IPMI/GenericSerialBus write requires Buffer of length %u, found length %u",
 				    length, source_desc->buffer.length));
 
 			return_ACPI_STATUS(AE_AML_BUFFER_LIMIT);
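
For SMBus, GSBus and IPMI fields the value argument that reaches the address-space handler is not an integer but the whole transfer buffer (ACPI_SMBUS_BUFFER_SIZE, ACPI_GSBUS_BUFFER_SIZE or ACPI_IPMI_BUFFER_SIZE bytes, bi-directional on writes), and the AccessAs() protocol travels in the upper 16 bits of function. A handler-side decoding sketch under those assumptions; the function name is illustrative:

static acpi_status
example_buffer_protocol(u32 function, u64 *value)
{
	u8 *buffer = (u8 *)value;		/* whole transfer buffer */
	u8 protocol = (u8)(function >> 16);	/* AccessAs() attribute */

	if ((function & ACPI_IO_MASK) == ACPI_WRITE) {
		/* consume the buffer, then write the status/reply back into it */
	} else {
		/* fill the buffer: status, length, then data bytes */
	}

	(void)buffer;
	(void)protocol;
	return (AE_OK);
}
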
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index f915a7f..149de45 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -86,6 +86,7 @@
 {
 	acpi_status status = AE_OK;
 	union acpi_operand_object *rgn_desc;
+	u8 space_id;
 
 	ACPI_FUNCTION_TRACE_U32(ex_setup_region, field_datum_byte_offset);
 
@@ -101,6 +102,17 @@
 		return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
 	}
 
+	space_id = rgn_desc->region.space_id;
+
+	/* Validate the Space ID */
+
+	if (!acpi_is_valid_space_id(space_id)) {
+		ACPI_ERROR((AE_INFO,
+			    "Invalid/unknown Address Space ID: 0x%2.2X",
+			    space_id));
+		return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
+	}
+
 	/*
 	 * If the Region Address and Length have not been previously evaluated,
 	 * evaluate them now and save the results.
@@ -119,11 +131,12 @@
 	}
 
 	/*
-	 * Exit now for SMBus or IPMI address space, it has a non-linear
+	 * Exit now for SMBus, GSBus or IPMI address space, it has a non-linear
 	 * address space and the request cannot be directly validated
 	 */
-	if (rgn_desc->region.space_id == ACPI_ADR_SPACE_SMBUS ||
-	    rgn_desc->region.space_id == ACPI_ADR_SPACE_IPMI) {
+	if (space_id == ACPI_ADR_SPACE_SMBUS ||
+	    space_id == ACPI_ADR_SPACE_GSBUS ||
+	    space_id == ACPI_ADR_SPACE_IPMI) {
 
 		/* SMBus or IPMI has a non-linear address space */
 
@@ -271,11 +284,12 @@
 
 	/* Invoke the appropriate address_space/op_region handler */
 
-	status =
-	    acpi_ev_address_space_dispatch(rgn_desc, function, region_offset,
-					   ACPI_MUL_8(obj_desc->common_field.
-						      access_byte_width),
-					   value);
+	status = acpi_ev_address_space_dispatch(rgn_desc, obj_desc,
+						function, region_offset,
+						ACPI_MUL_8(obj_desc->
+							   common_field.
+							   access_byte_width),
+						value);
 
 	if (ACPI_FAILURE(status)) {
 		if (status == AE_NOT_IMPLEMENTED) {
@@ -316,6 +330,7 @@
 static u8
 acpi_ex_register_overflow(union acpi_operand_object *obj_desc, u64 value)
 {
+	ACPI_FUNCTION_NAME(ex_register_overflow);
 
 	if (obj_desc->common_field.bit_length >= ACPI_INTEGER_BIT_SIZE) {
 		/*
@@ -330,6 +345,11 @@
 		 * The Value is larger than the maximum value that can fit into
 		 * the register.
 		 */
+		ACPI_ERROR((AE_INFO,
+			    "Index value 0x%8.8X%8.8X overflows field width 0x%X",
+			    ACPI_FORMAT_UINT64(value),
+			    obj_desc->common_field.bit_length));
+
 		return (TRUE);
 	}
 
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index 703d88e..0a08933 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index be1c56e..60933e9 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index 49ec049..fcc75fa 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index 236ead1..9ba8c73 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index 2571b4a..879e8a2 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index 1b48d9d..71fcc65 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index f4a2787..0786b86 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index cc95e20..30157f5 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -47,6 +47,7 @@
 #include "acinterp.h"
 #include "amlcode.h"
 #include "acnamesp.h"
+#include "acdispat.h"
 
 #define _COMPONENT          ACPI_EXECUTER
 ACPI_MODULE_NAME("exprep")
@@ -455,6 +456,30 @@
 		obj_desc->field.region_obj =
 		    acpi_ns_get_attached_object(info->region_node);
 
+		/* Fields specific to generic_serial_bus fields */
+
+		obj_desc->field.access_length = info->access_length;
+
+		if (info->connection_node) {
+			second_desc = info->connection_node->object;
+			if (!(second_desc->common.flags & AOPOBJ_DATA_VALID)) {
+				status =
+				    acpi_ds_get_buffer_arguments(second_desc);
+				if (ACPI_FAILURE(status)) {
+					acpi_ut_delete_object_desc(obj_desc);
+					return_ACPI_STATUS(status);
+				}
+			}
+
+			obj_desc->field.resource_buffer =
+			    second_desc->buffer.pointer;
+			obj_desc->field.resource_length =
+			    (u16)second_desc->buffer.length;
+		} else if (info->resource_buffer) {
+			obj_desc->field.resource_buffer = info->resource_buffer;
+			obj_desc->field.resource_length = info->resource_length;
+		}
+
 		/* Allow full data read from EC address space */
 
 		if ((obj_desc->field.region_obj->region.space_id ==
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index f0d5e14..12d51df 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index 55997e4..fa50e77 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index db502cd..6e335dc 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index e3bb00c..a67b1d9 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index c0c8842..c6cf843 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c
index a979017..b35bed5 100644
--- a/drivers/acpi/acpica/exstoren.c
+++ b/drivers/acpi/acpica/exstoren.c
@@ -7,7 +7,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c
index dc665cc..65a45d8 100644
--- a/drivers/acpi/acpica/exstorob.c
+++ b/drivers/acpi/acpica/exstorob.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c
index df66e7b..191a129 100644
--- a/drivers/acpi/acpica/exsystem.c
+++ b/drivers/acpi/acpica/exsystem.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index 8ad9314..eb6798b 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -435,4 +435,29 @@
 	}
 }
 
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_is_valid_space_id
+ *
+ * PARAMETERS:  space_id            - ID to be validated
+ *
+ * RETURN:      TRUE if valid/supported ID.
+ *
+ * DESCRIPTION: Validate an operation region space_id.
+ *
+ ******************************************************************************/
+
+u8 acpi_is_valid_space_id(u8 space_id)
+{
+
+	if ((space_id >= ACPI_NUM_PREDEFINED_REGIONS) &&
+	    (space_id < ACPI_USER_REGION_BEGIN) &&
+	    (space_id != ACPI_ADR_SPACE_DATA_TABLE) &&
+	    (space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
+		return (FALSE);
+	}
+
+	return (TRUE);
+}
+
 #endif
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index fc380d3..d21ec5f 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index f610d88..1a6894a 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwpci.c b/drivers/acpi/acpica/hwpci.c
index 050fd22..1455ddc 100644
--- a/drivers/acpi/acpica/hwpci.c
+++ b/drivers/acpi/acpica/hwpci.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index cc70f3f..4ea4eeb5 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -7,7 +7,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index d52da30..3c4a922 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index 50d21c4..d4973d9 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index 5f16058..6e5c43a 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -134,6 +134,8 @@
 	/* Supported widths are 8/16/32 */
 
 	if ((bit_width != 8) && (bit_width != 16) && (bit_width != 32)) {
+		ACPI_ERROR((AE_INFO,
+			    "Bad BitWidth parameter: %8.8X", bit_width));
 		return AE_BAD_PARAMETER;
 	}
 
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index d707756..9d38eb6 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index d93172f..61623f3 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c
index 1d0ef15..7c3d3ce 100644
--- a/drivers/acpi/acpica/nsalloc.c
+++ b/drivers/acpi/acpica/nsalloc.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index b683cc2..b7f2b3b 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
index 2ed294b..30ea5bc 100644
--- a/drivers/acpi/acpica/nsdumpdv.c
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
index c1bd02b..f375cb8 100644
--- a/drivers/acpi/acpica/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index fd7c638..9d84ec2 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index 5f7dc69..5cbf15f 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index d5fa520..b20e7c8 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c
index 3bb8bf1..dd77a3c 100644
--- a/drivers/acpi/acpica/nsobject.c
+++ b/drivers/acpi/acpica/nsobject.c
@@ -6,7 +6,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index b3234fa..ec7ba2d 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index c845c80..bbe46a4 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -620,6 +620,7 @@
 	case ACPI_PTYPE2_FIXED:
 	case ACPI_PTYPE2_MIN:
 	case ACPI_PTYPE2_COUNT:
+	case ACPI_PTYPE2_FIX_VAR:
 
 		/*
 		 * These types all return a single Package that consists of a
@@ -759,6 +760,34 @@
 			}
 			break;
 
+		case ACPI_PTYPE2_FIX_VAR:
+			/*
+			 * Each subpackage has a fixed number of elements and an
+			 * optional element
+			 */
+			expected_count =
+			    package->ret_info.count1 + package->ret_info.count2;
+			if (sub_package->package.count < expected_count) {
+				goto package_too_small;
+			}
+
+			status =
+			    acpi_ns_check_package_elements(data, sub_elements,
+							   package->ret_info.
+							   object_type1,
+							   package->ret_info.
+							   count1,
+							   package->ret_info.
+							   object_type2,
+							   sub_package->package.
+							   count -
+							   package->ret_info.
+							   count1, 0);
+			if (ACPI_FAILURE(status)) {
+				return (status);
+			}
+			break;
+
 		case ACPI_PTYPE2_FIXED:
 
 			/* Each sub-package has a fixed length */
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index ac7b854..9c35d20 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -634,6 +634,7 @@
 	case ACPI_PTYPE2_FIXED:
 	case ACPI_PTYPE2_MIN:
 	case ACPI_PTYPE2_REV_FIXED:
+	case ACPI_PTYPE2_FIX_VAR:
 		break;
 
 	default:
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index 024c4f2..726bc8e 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -467,11 +467,12 @@
 	}
 
 	/*
-	 * Copy and uppercase the string. From the ACPI specification:
+	 * Copy and uppercase the string. From the ACPI 5.0 specification:
 	 *
 	 * A valid PNP ID must be of the form "AAA####" where A is an uppercase
 	 * letter and # is a hex digit. A valid ACPI ID must be of the form
-	 * "ACPI####" where # is a hex digit.
+	 * "NNNN####" where N is an uppercase letter or decimal digit, and
+	 * # is a hex digit.
 	 */
 	for (dest = new_string->string.pointer; *source; dest++, source++) {
 		*dest = (char)ACPI_TOUPPER(*source);
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index 28b0d7a..507043d 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index cb1b104..a535b7a 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index 345f0c3..f69895a 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index e7f016d..71d15f6 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -6,7 +6,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 83bf930..af401c9 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c
index 57e6d82..880a605 100644
--- a/drivers/acpi/acpica/nsxfobj.c
+++ b/drivers/acpi/acpica/nsxfobj.c
@@ -6,7 +6,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index e1fad0e..5ac36ab 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -484,34 +484,54 @@
 static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
 						       *parser_state)
 {
-	u32 aml_offset = (u32)
-	    ACPI_PTR_DIFF(parser_state->aml,
-			  parser_state->aml_start);
+	u32 aml_offset;
 	union acpi_parse_object *field;
+	union acpi_parse_object *arg = NULL;
 	u16 opcode;
 	u32 name;
+	u8 access_type;
+	u8 access_attribute;
+	u8 access_length;
+	u32 pkg_length;
+	u8 *pkg_end;
+	u32 buffer_length;
 
 	ACPI_FUNCTION_TRACE(ps_get_next_field);
 
+	aml_offset =
+	    (u32)ACPI_PTR_DIFF(parser_state->aml, parser_state->aml_start);
+
 	/* Determine field type */
 
 	switch (ACPI_GET8(parser_state->aml)) {
-	default:
-
-		opcode = AML_INT_NAMEDFIELD_OP;
-		break;
-
-	case 0x00:
+	case AML_FIELD_OFFSET_OP:
 
 		opcode = AML_INT_RESERVEDFIELD_OP;
 		parser_state->aml++;
 		break;
 
-	case 0x01:
+	case AML_FIELD_ACCESS_OP:
 
 		opcode = AML_INT_ACCESSFIELD_OP;
 		parser_state->aml++;
 		break;
+
+	case AML_FIELD_CONNECTION_OP:
+
+		opcode = AML_INT_CONNECTION_OP;
+		parser_state->aml++;
+		break;
+
+	case AML_FIELD_EXT_ACCESS_OP:
+
+		opcode = AML_INT_EXTACCESSFIELD_OP;
+		parser_state->aml++;
+		break;
+
+	default:
+
+		opcode = AML_INT_NAMEDFIELD_OP;
+		break;
 	}
 
 	/* Allocate a new field op */
@@ -549,16 +569,111 @@
 		break;
 
 	case AML_INT_ACCESSFIELD_OP:
+	case AML_INT_EXTACCESSFIELD_OP:
 
 		/*
 		 * Get access_type and access_attrib and merge into the field Op
-		 * access_type is first operand, access_attribute is second
+		 * access_type is first operand, access_attribute is second. Stuff
+		 * these bytes into the node integer value for convenience.
 		 */
-		field->common.value.integer =
-		    (((u32) ACPI_GET8(parser_state->aml) << 8));
+
+		/* Get the two bytes (Type/Attribute) */
+
+		access_type = ACPI_GET8(parser_state->aml);
 		parser_state->aml++;
-		field->common.value.integer |= ACPI_GET8(parser_state->aml);
+		access_attribute = ACPI_GET8(parser_state->aml);
 		parser_state->aml++;
+
+		field->common.value.integer = (u8)access_type;
+		field->common.value.integer |= (u16)(access_attribute << 8);
+
+		/* This opcode has a third byte, access_length */
+
+		if (opcode == AML_INT_EXTACCESSFIELD_OP) {
+			access_length = ACPI_GET8(parser_state->aml);
+			parser_state->aml++;
+
+			field->common.value.integer |=
+			    (u32)(access_length << 16);
+		}
+		break;
+
+	case AML_INT_CONNECTION_OP:
+
+		/*
+		 * Argument for Connection operator can be either a Buffer
+		 * (resource descriptor), or a name_string.
+		 */
+		if (ACPI_GET8(parser_state->aml) == AML_BUFFER_OP) {
+			parser_state->aml++;
+
+			pkg_end = parser_state->aml;
+			pkg_length =
+			    acpi_ps_get_next_package_length(parser_state);
+			pkg_end += pkg_length;
+
+			if (parser_state->aml < pkg_end) {
+
+				/* Non-empty list */
+
+				arg = acpi_ps_alloc_op(AML_INT_BYTELIST_OP);
+				if (!arg) {
+					return_PTR(NULL);
+				}
+
+				/* Get the actual buffer length argument */
+
+				opcode = ACPI_GET8(parser_state->aml);
+				parser_state->aml++;
+
+				switch (opcode) {
+				case AML_BYTE_OP:	/* AML_BYTEDATA_ARG */
+					buffer_length =
+					    ACPI_GET8(parser_state->aml);
+					parser_state->aml += 1;
+					break;
+
+				case AML_WORD_OP:	/* AML_WORDDATA_ARG */
+					buffer_length =
+					    ACPI_GET16(parser_state->aml);
+					parser_state->aml += 2;
+					break;
+
+				case AML_DWORD_OP:	/* AML_DWORDATA_ARG */
+					buffer_length =
+					    ACPI_GET32(parser_state->aml);
+					parser_state->aml += 4;
+					break;
+
+				default:
+					buffer_length = 0;
+					break;
+				}
+
+				/* Fill in bytelist data */
+
+				arg->named.value.size = buffer_length;
+				arg->named.data = parser_state->aml;
+			}
+
+			/* Skip to End of byte data */
+
+			parser_state->aml = pkg_end;
+		} else {
+			arg = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP);
+			if (!arg) {
+				return_PTR(NULL);
+			}
+
+			/* Get the Namestring argument */
+
+			arg->common.value.name =
+			    acpi_ps_get_next_namestring(parser_state);
+		}
+
+		/* Link the buffer/namestring to parent (CONNECTION_OP) */
+
+		acpi_ps_append_arg(field, arg);
 		break;
 
 	default:
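
For AccessField, and the new ExtAccessField, the parser now packs the operand bytes into the op's integer value: access_type in bits 0-7, access_attribute in bits 8-15 and, for ExtAccessField only, access_length in bits 16-23. A small sketch of unpacking that layout on the consumer side; the function name is hypothetical:

static void example_unpack_access_field(u64 packed)
{
	u8 access_type = (u8)(packed & 0xFF);
	u8 access_attribute = (u8)((packed >> 8) & 0xFF);
	u8 access_length = (u8)((packed >> 16) & 0xFF);	/* 0 for plain AccessField */

	(void)access_type;
	(void)access_attribute;
	(void)access_length;
}
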
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index 01dd70d..9547ad8 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
index bed08de..a0226fd 100644
--- a/drivers/acpi/acpica/psopcode.c
+++ b/drivers/acpi/acpica/psopcode.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -638,7 +638,16 @@
 
 /* 7E */ ACPI_OP("Timer", ARGP_TIMER_OP, ARGI_TIMER_OP, ACPI_TYPE_ANY,
 		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_0A_0T_1R,
-		 AML_FLAGS_EXEC_0A_0T_1R)
+			 AML_FLAGS_EXEC_0A_0T_1R),
+
+/* ACPI 5.0 opcodes */
+
+/* 7F */ ACPI_OP("-ConnectField-", ARGP_CONNECTFIELD_OP,
+			 ARGI_CONNECTFIELD_OP, ACPI_TYPE_ANY,
+			 AML_CLASS_INTERNAL, AML_TYPE_BOGUS, AML_HAS_ARGS),
+/* 80 */ ACPI_OP("-ExtAccessField-", ARGP_CONNECTFIELD_OP,
+			 ARGI_CONNECTFIELD_OP, ACPI_TYPE_ANY,
+			 AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0)
 
 /*! [End] no source code translation !*/
 };
@@ -657,7 +666,7 @@
 /* 0x20 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
 /* 0x28 */ _UNK, _UNK, _UNK, _UNK, _UNK, 0x63, _PFX, _PFX,
 /* 0x30 */ 0x67, 0x66, 0x68, 0x65, 0x69, 0x64, 0x6A, 0x7D,
-/* 0x38 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x38 */ 0x7F, 0x80, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
 /* 0x40 */ _UNK, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
 /* 0x48 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
 /* 0x50 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index 9bb0cbd..2ff9c35 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psscope.c b/drivers/acpi/acpica/psscope.c
index a5faa13..c872aa4 100644
--- a/drivers/acpi/acpica/psscope.c
+++ b/drivers/acpi/acpica/psscope.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c
index f1464c0..2b03cdb 100644
--- a/drivers/acpi/acpica/pstree.c
+++ b/drivers/acpi/acpica/pstree.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -74,6 +74,12 @@
 
 	ACPI_FUNCTION_ENTRY();
 
+/*
+	if (Op->Common.aml_opcode == AML_INT_CONNECTION_OP)
+	{
+		return (Op->Common.Value.Arg);
+	}
+*/
 	/* Get the info structure for this opcode */
 
 	op_info = acpi_ps_get_opcode_info(op->common.aml_opcode);
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index 7eda785..13bb131 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/pswalk.c b/drivers/acpi/acpica/pswalk.c
index 3312d63..ab96cf4 100644
--- a/drivers/acpi/acpica/pswalk.c
+++ b/drivers/acpi/acpica/pswalk.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index 8086805..9d98c5f 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsaddr.c b/drivers/acpi/acpica/rsaddr.c
index 9e66f90..a030565 100644
--- a/drivers/acpi/acpica/rsaddr.c
+++ b/drivers/acpi/acpica/rsaddr.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c
index 3a8a89e..3c6df4b 100644
--- a/drivers/acpi/acpica/rscalc.c
+++ b/drivers/acpi/acpica/rscalc.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -313,6 +313,38 @@
 							  resource_source));
 			break;
 
+		case ACPI_RESOURCE_TYPE_GPIO:
+
+			total_size =
+			    (acpi_rs_length) (total_size +
+					      (resource->data.gpio.
+					       pin_table_length * 2) +
+					      resource->data.gpio.
+					      resource_source.string_length +
+					      resource->data.gpio.
+					      vendor_length);
+
+			break;
+
+		case ACPI_RESOURCE_TYPE_SERIAL_BUS:
+
+			total_size =
+			    acpi_gbl_aml_resource_serial_bus_sizes[resource->
+								   data.
+								   common_serial_bus.
+								   type];
+
+			total_size = (acpi_rs_length) (total_size +
+						       resource->data.
+						       i2c_serial_bus.
+						       resource_source.
+						       string_length +
+						       resource->data.
+						       i2c_serial_bus.
+						       vendor_length);
+
+			break;
+
 		default:
 			break;
 		}
@@ -362,10 +394,11 @@
 	u32 extra_struct_bytes;
 	u8 resource_index;
 	u8 minimum_aml_resource_length;
+	union aml_resource *aml_resource;
 
 	ACPI_FUNCTION_TRACE(rs_get_list_length);
 
-	*size_needed = 0;
+	*size_needed = ACPI_RS_SIZE_MIN;	/* Minimum size is one end_tag */
 	end_aml = aml_buffer + aml_buffer_length;
 
 	/* Walk the list of AML resource descriptors */
@@ -376,9 +409,15 @@
 
 		status = acpi_ut_validate_resource(aml_buffer, &resource_index);
 		if (ACPI_FAILURE(status)) {
+			/*
+			 * Exit on failure. Cannot continue because the descriptor length
+			 * may be bogus also.
+			 */
 			return_ACPI_STATUS(status);
 		}
 
+		aml_resource = (void *)aml_buffer;
+
 		/* Get the resource length and base (minimum) AML size */
 
 		resource_length = acpi_ut_get_resource_length(aml_buffer);
@@ -422,10 +461,8 @@
 
 		case ACPI_RESOURCE_NAME_END_TAG:
 			/*
-			 * End Tag:
-			 * This is the normal exit, add size of end_tag
+			 * End Tag: This is the normal exit
 			 */
-			*size_needed += ACPI_RS_SIZE_MIN;
 			return_ACPI_STATUS(AE_OK);
 
 		case ACPI_RESOURCE_NAME_ADDRESS32:
@@ -457,6 +494,33 @@
 							 minimum_aml_resource_length);
 			break;
 
+		case ACPI_RESOURCE_NAME_GPIO:
+
+			/* Vendor data is optional */
+
+			if (aml_resource->gpio.vendor_length) {
+				extra_struct_bytes +=
+				    aml_resource->gpio.vendor_offset -
+				    aml_resource->gpio.pin_table_offset +
+				    aml_resource->gpio.vendor_length;
+			} else {
+				extra_struct_bytes +=
+				    aml_resource->large_header.resource_length +
+				    sizeof(struct aml_resource_large_header) -
+				    aml_resource->gpio.pin_table_offset;
+			}
+			break;
+
+		case ACPI_RESOURCE_NAME_SERIAL_BUS:
+
+			minimum_aml_resource_length =
+			    acpi_gbl_resource_aml_serial_bus_sizes
+			    [aml_resource->common_serial_bus.type];
+			extra_struct_bytes +=
+			    aml_resource->common_serial_bus.resource_length -
+			    minimum_aml_resource_length;
+			break;
+
 		default:
 			break;
 		}
@@ -467,9 +531,18 @@
 		 * Important: Round the size up for the appropriate alignment. This
 		 * is a requirement on IA64.
 		 */
-		buffer_size = acpi_gbl_resource_struct_sizes[resource_index] +
-		    extra_struct_bytes;
-		buffer_size = (u32) ACPI_ROUND_UP_TO_NATIVE_WORD(buffer_size);
+		if (acpi_ut_get_resource_type(aml_buffer) ==
+		    ACPI_RESOURCE_NAME_SERIAL_BUS) {
+			buffer_size =
+			    acpi_gbl_resource_struct_serial_bus_sizes
+			    [aml_resource->common_serial_bus.type] +
+			    extra_struct_bytes;
+		} else {
+			buffer_size =
+			    acpi_gbl_resource_struct_sizes[resource_index] +
+			    extra_struct_bytes;
+		}
+		buffer_size = (u32)ACPI_ROUND_UP_TO_NATIVE_WORD(buffer_size);
 
 		*size_needed += buffer_size;
 
diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c
index 4ce6e11..46d6eb38 100644
--- a/drivers/acpi/acpica/rscreate.c
+++ b/drivers/acpi/acpica/rscreate.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -51,6 +51,70 @@
 
 /*******************************************************************************
  *
+ * FUNCTION:    acpi_buffer_to_resource
+ *
+ * PARAMETERS:  aml_buffer          - Pointer to the resource byte stream
+ *              aml_buffer_length   - Length of the aml_buffer
+ *              resource_ptr        - Where the converted resource is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Convert a raw AML buffer to a resource list
+ *
+ ******************************************************************************/
+acpi_status
+acpi_buffer_to_resource(u8 *aml_buffer,
+			u16 aml_buffer_length,
+			struct acpi_resource **resource_ptr)
+{
+	acpi_status status;
+	acpi_size list_size_needed;
+	void *resource;
+	void *current_resource_ptr;
+
+	/*
+	 * Note: we allow AE_AML_NO_RESOURCE_END_TAG, since an end tag
+	 * is not required here.
+	 */
+
+	/* Get the required length for the converted resource */
+
+	status = acpi_rs_get_list_length(aml_buffer, aml_buffer_length,
+					 &list_size_needed);
+	if (status == AE_AML_NO_RESOURCE_END_TAG) {
+		status = AE_OK;
+	}
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
+
+	/* Allocate a buffer for the converted resource */
+
+	resource = ACPI_ALLOCATE_ZEROED(list_size_needed);
+	current_resource_ptr = resource;
+	if (!resource) {
+		return (AE_NO_MEMORY);
+	}
+
+	/* Perform the AML-to-Resource conversion */
+
+	status = acpi_ut_walk_aml_resources(aml_buffer, aml_buffer_length,
+					    acpi_rs_convert_aml_to_resources,
+					    &current_resource_ptr);
+	if (status == AE_AML_NO_RESOURCE_END_TAG) {
+		status = AE_OK;
+	}
+	if (ACPI_FAILURE(status)) {
+		ACPI_FREE(resource);
+	} else {
+		*resource_ptr = resource;
+	}
+
+	return (status);
+}
+
+/*******************************************************************************
+ *
  * FUNCTION:    acpi_rs_create_resource_list
  *
  * PARAMETERS:  aml_buffer          - Pointer to the resource byte stream
@@ -66,9 +130,10 @@
  *              of device resources.
  *
  ******************************************************************************/
+
 acpi_status
 acpi_rs_create_resource_list(union acpi_operand_object *aml_buffer,
-			     struct acpi_buffer *output_buffer)
+			     struct acpi_buffer * output_buffer)
 {
 
 	acpi_status status;
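For context, a minimal usage sketch of the acpi_buffer_to_resource() interface added above. The example_decode() wrapper, the origin of the raw AML byte stream, and the use of ACPI_FREE on the returned list are assumptions; only the acpi_buffer_to_resource() prototype comes from the patch.

#include <acpi/acpi.h>

/* Hypothetical caller: decode a raw AML resource byte stream */
static acpi_status example_decode(u8 *aml, u16 aml_len)
{
	struct acpi_resource *resource;
	acpi_status status;

	/* Convert the AML buffer into a linked list of acpi_resource structs */
	status = acpi_buffer_to_resource(aml, aml_len, &resource);
	if (ACPI_FAILURE(status))
		return status;

	/* ... walk the returned resource list here ... */

	/* The list is a single allocation; release it when done (assumed) */
	ACPI_FREE(resource);
	return AE_OK;
}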
diff --git a/drivers/acpi/acpica/rsdump.c b/drivers/acpi/acpica/rsdump.c
index 33db752..b4c5811 100644
--- a/drivers/acpi/acpica/rsdump.c
+++ b/drivers/acpi/acpica/rsdump.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -61,11 +61,13 @@
 
 static void acpi_rs_out_title(char *title);
 
-static void acpi_rs_dump_byte_list(u16 length, u8 * data);
+static void acpi_rs_dump_byte_list(u16 length, u8 *data);
 
-static void acpi_rs_dump_dword_list(u8 length, u32 * data);
+static void acpi_rs_dump_word_list(u16 length, u16 *data);
 
-static void acpi_rs_dump_short_byte_list(u8 length, u8 * data);
+static void acpi_rs_dump_dword_list(u8 length, u32 *data);
+
+static void acpi_rs_dump_short_byte_list(u8 length, u8 *data);
 
 static void
 acpi_rs_dump_resource_source(struct acpi_resource_source *resource_source);
@@ -309,6 +311,125 @@
 	{ACPI_RSD_UINT64, ACPI_RSD_OFFSET(generic_reg.address), "Address", NULL}
 };
 
+struct acpi_rsdump_info acpi_rs_dump_gpio[16] = {
+	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_gpio), "GPIO", NULL},
+	{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.revision_id), "RevisionId", NULL},
+	{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.connection_type),
+	 "ConnectionType", acpi_gbl_ct_decode},
+	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(gpio.producer_consumer),
+	 "ProducerConsumer", acpi_gbl_consume_decode},
+	{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.pin_config), "PinConfig",
+	 acpi_gbl_ppc_decode},
+	{ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.sharable), "Sharable",
+	 acpi_gbl_shr_decode},
+	{ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.io_restriction),
+	 "IoRestriction", acpi_gbl_ior_decode},
+	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(gpio.triggering), "Triggering",
+	 acpi_gbl_he_decode},
+	{ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.polarity), "Polarity",
+	 acpi_gbl_ll_decode},
+	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.drive_strength), "DriveStrength",
+	 NULL},
+	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.debounce_timeout),
+	 "DebounceTimeout", NULL},
+	{ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(gpio.resource_source),
+	 "ResourceSource", NULL},
+	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.pin_table_length),
+	 "PinTableLength", NULL},
+	{ACPI_RSD_WORDLIST, ACPI_RSD_OFFSET(gpio.pin_table), "PinTable", NULL},
+	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.vendor_length), "VendorLength",
+	 NULL},
+	{ACPI_RSD_SHORTLISTX, ACPI_RSD_OFFSET(gpio.vendor_data), "VendorData",
+	 NULL},
+};
+
+struct acpi_rsdump_info acpi_rs_dump_fixed_dma[4] = {
+	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_fixed_dma),
+	 "FixedDma", NULL},
+	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(fixed_dma.request_lines),
+	 "RequestLines", NULL},
+	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(fixed_dma.channels), "Channels",
+	 NULL},
+	{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(fixed_dma.width), "TransferWidth",
+	 acpi_gbl_dts_decode},
+};
+
+#define ACPI_RS_DUMP_COMMON_SERIAL_BUS \
+	{ACPI_RSD_UINT8,    ACPI_RSD_OFFSET (common_serial_bus.revision_id),    "RevisionId",               NULL}, \
+	{ACPI_RSD_UINT8,    ACPI_RSD_OFFSET (common_serial_bus.type),           "Type",                     acpi_gbl_sbt_decode}, \
+	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET (common_serial_bus.producer_consumer), "ProducerConsumer",      acpi_gbl_consume_decode}, \
+	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET (common_serial_bus.slave_mode),     "SlaveMode",                acpi_gbl_sm_decode}, \
+	{ACPI_RSD_UINT8,    ACPI_RSD_OFFSET (common_serial_bus.type_revision_id), "TypeRevisionId",         NULL}, \
+	{ACPI_RSD_UINT16,   ACPI_RSD_OFFSET (common_serial_bus.type_data_length), "TypeDataLength",         NULL}, \
+	{ACPI_RSD_SOURCE,   ACPI_RSD_OFFSET (common_serial_bus.resource_source), "ResourceSource",          NULL}, \
+	{ACPI_RSD_UINT16,   ACPI_RSD_OFFSET (common_serial_bus.vendor_length),  "VendorLength",             NULL}, \
+	{ACPI_RSD_SHORTLISTX,ACPI_RSD_OFFSET (common_serial_bus.vendor_data),   "VendorData",               NULL},
+
+struct acpi_rsdump_info acpi_rs_dump_common_serial_bus[10] = {
+	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_common_serial_bus),
+	 "Common Serial Bus", NULL},
+	ACPI_RS_DUMP_COMMON_SERIAL_BUS
+};
+
+struct acpi_rsdump_info acpi_rs_dump_i2c_serial_bus[13] = {
+	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_i2c_serial_bus),
+	 "I2C Serial Bus", NULL},
+	ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_1BITFLAG,
+					ACPI_RSD_OFFSET(i2c_serial_bus.
+							access_mode),
+					"AccessMode", acpi_gbl_am_decode},
+	{ACPI_RSD_UINT32, ACPI_RSD_OFFSET(i2c_serial_bus.connection_speed),
+	 "ConnectionSpeed", NULL},
+	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(i2c_serial_bus.slave_address),
+	 "SlaveAddress", NULL},
+};
+
+struct acpi_rsdump_info acpi_rs_dump_spi_serial_bus[17] = {
+	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_spi_serial_bus),
+	 "Spi Serial Bus", NULL},
+	ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_1BITFLAG,
+					ACPI_RSD_OFFSET(spi_serial_bus.
+							wire_mode), "WireMode",
+					acpi_gbl_wm_decode},
+	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(spi_serial_bus.device_polarity),
+	 "DevicePolarity", acpi_gbl_dp_decode},
+	{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.data_bit_length),
+	 "DataBitLength", NULL},
+	{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.clock_phase),
+	 "ClockPhase", acpi_gbl_cph_decode},
+	{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.clock_polarity),
+	 "ClockPolarity", acpi_gbl_cpo_decode},
+	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(spi_serial_bus.device_selection),
+	 "DeviceSelection", NULL},
+	{ACPI_RSD_UINT32, ACPI_RSD_OFFSET(spi_serial_bus.connection_speed),
+	 "ConnectionSpeed", NULL},
+};
+
+struct acpi_rsdump_info acpi_rs_dump_uart_serial_bus[19] = {
+	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_uart_serial_bus),
+	 "Uart Serial Bus", NULL},
+	ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_2BITFLAG,
+					ACPI_RSD_OFFSET(uart_serial_bus.
+							flow_control),
+					"FlowControl", acpi_gbl_fc_decode},
+	{ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.stop_bits),
+	 "StopBits", acpi_gbl_sb_decode},
+	{ACPI_RSD_3BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.data_bits),
+	 "DataBits", acpi_gbl_bpb_decode},
+	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.endian), "Endian",
+	 acpi_gbl_ed_decode},
+	{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(uart_serial_bus.parity), "Parity",
+	 acpi_gbl_pt_decode},
+	{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(uart_serial_bus.lines_enabled),
+	 "LinesEnabled", NULL},
+	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(uart_serial_bus.rx_fifo_size),
+	 "RxFifoSize", NULL},
+	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(uart_serial_bus.tx_fifo_size),
+	 "TxFifoSize", NULL},
+	{ACPI_RSD_UINT32, ACPI_RSD_OFFSET(uart_serial_bus.default_baud_rate),
+	 "ConnectionSpeed", NULL},
+};
+
 /*
  * Tables used for common address descriptor flag fields
  */
@@ -413,7 +534,14 @@
 			/* Data items, 8/16/32/64 bit */
 
 		case ACPI_RSD_UINT8:
-			acpi_rs_out_integer8(name, ACPI_GET8(target));
+			if (table->pointer) {
+				acpi_rs_out_string(name, ACPI_CAST_PTR(char,
+								       table->
+								       pointer
+								       [*target]));
+			} else {
+				acpi_rs_out_integer8(name, ACPI_GET8(target));
+			}
 			break;
 
 		case ACPI_RSD_UINT16:
@@ -444,6 +572,13 @@
 								       0x03]));
 			break;
 
+		case ACPI_RSD_3BITFLAG:
+			acpi_rs_out_string(name, ACPI_CAST_PTR(char,
+							       table->
+							       pointer[*target &
+								       0x07]));
+			break;
+
 		case ACPI_RSD_SHORTLIST:
 			/*
 			 * Short byte list (single line output) for DMA and IRQ resources
@@ -456,6 +591,20 @@
 			}
 			break;
 
+		case ACPI_RSD_SHORTLISTX:
+			/*
+			 * Short byte list (single line output) for GPIO vendor data
+			 * Note: The list length is obtained from the previous table entry
+			 */
+			if (previous_target) {
+				acpi_rs_out_title(name);
+				acpi_rs_dump_short_byte_list(*previous_target,
+							     *
+							     (ACPI_CAST_INDIRECT_PTR
+							      (u8, target)));
+			}
+			break;
+
 		case ACPI_RSD_LONGLIST:
 			/*
 			 * Long byte list for Vendor resource data
@@ -480,6 +629,18 @@
 			}
 			break;
 
+		case ACPI_RSD_WORDLIST:
+			/*
+			 * Word list for GPIO Pin Table
+			 * Note: The list length is obtained from the previous table entry
+			 */
+			if (previous_target) {
+				acpi_rs_dump_word_list(*previous_target,
+						       *(ACPI_CAST_INDIRECT_PTR
+							 (u16, target)));
+			}
+			break;
+
 		case ACPI_RSD_ADDRESS:
 			/*
 			 * Common flags for all Address resources
@@ -627,14 +788,20 @@
 
 		/* Dump the resource descriptor */
 
-		acpi_rs_dump_descriptor(&resource_list->data,
-					acpi_gbl_dump_resource_dispatch[type]);
+		if (type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
+			acpi_rs_dump_descriptor(&resource_list->data,
+						acpi_gbl_dump_serial_bus_dispatch
+						[resource_list->data.
+						 common_serial_bus.type]);
+		} else {
+			acpi_rs_dump_descriptor(&resource_list->data,
+						acpi_gbl_dump_resource_dispatch
+						[type]);
+		}
 
 		/* Point to the next resource structure */
 
-		resource_list =
-		    ACPI_ADD_PTR(struct acpi_resource, resource_list,
-				 resource_list->length);
+		resource_list = ACPI_NEXT_RESOURCE(resource_list);
 
 		/* Exit when END_TAG descriptor is reached */
 
@@ -768,4 +935,13 @@
 	}
 }
 
+static void acpi_rs_dump_word_list(u16 length, u16 *data)
+{
+	u16 i;
+
+	for (i = 0; i < length; i++) {
+		acpi_os_printf("%25s%2.2X : %4.4X\n", "Word", i, data[i]);
+	}
+}
+
 #endif
diff --git a/drivers/acpi/acpica/rsinfo.c b/drivers/acpi/acpica/rsinfo.c
index f9ea608..a9fa515 100644
--- a/drivers/acpi/acpica/rsinfo.c
+++ b/drivers/acpi/acpica/rsinfo.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -76,7 +76,10 @@
 	acpi_rs_convert_address64,	/* 0x0D, ACPI_RESOURCE_TYPE_ADDRESS64 */
 	acpi_rs_convert_ext_address64,	/* 0x0E, ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */
 	acpi_rs_convert_ext_irq,	/* 0x0F, ACPI_RESOURCE_TYPE_EXTENDED_IRQ */
-	acpi_rs_convert_generic_reg	/* 0x10, ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
+	acpi_rs_convert_generic_reg,	/* 0x10, ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
+	acpi_rs_convert_gpio,	/* 0x11, ACPI_RESOURCE_TYPE_GPIO */
+	acpi_rs_convert_fixed_dma,	/* 0x12, ACPI_RESOURCE_TYPE_FIXED_DMA */
+	NULL,			/* 0x13, ACPI_RESOURCE_TYPE_SERIAL_BUS - Use subtype table below */
 };
 
 /* Dispatch tables for AML-to-resource (Get Resource) conversion functions */
@@ -94,7 +97,7 @@
 	acpi_rs_convert_end_dpf,	/* 0x07, ACPI_RESOURCE_NAME_END_DEPENDENT */
 	acpi_rs_convert_io,	/* 0x08, ACPI_RESOURCE_NAME_IO */
 	acpi_rs_convert_fixed_io,	/* 0x09, ACPI_RESOURCE_NAME_FIXED_IO */
-	NULL,			/* 0x0A, Reserved */
+	acpi_rs_convert_fixed_dma,	/* 0x0A, ACPI_RESOURCE_NAME_FIXED_DMA */
 	NULL,			/* 0x0B, Reserved */
 	NULL,			/* 0x0C, Reserved */
 	NULL,			/* 0x0D, Reserved */
@@ -114,7 +117,19 @@
 	acpi_rs_convert_address16,	/* 0x08, ACPI_RESOURCE_NAME_ADDRESS16 */
 	acpi_rs_convert_ext_irq,	/* 0x09, ACPI_RESOURCE_NAME_EXTENDED_IRQ */
 	acpi_rs_convert_address64,	/* 0x0A, ACPI_RESOURCE_NAME_ADDRESS64 */
-	acpi_rs_convert_ext_address64	/* 0x0B, ACPI_RESOURCE_NAME_EXTENDED_ADDRESS64 */
+	acpi_rs_convert_ext_address64,	/* 0x0B, ACPI_RESOURCE_NAME_EXTENDED_ADDRESS64 */
+	acpi_rs_convert_gpio,	/* 0x0C, ACPI_RESOURCE_NAME_GPIO */
+	NULL,			/* 0x0D, Reserved */
+	NULL,			/* 0x0E, ACPI_RESOURCE_NAME_SERIAL_BUS - Use subtype table below */
+};
+
+/* Subtype table for serial_bus -- I2C, SPI, and UART */
+
+struct acpi_rsconvert_info *acpi_gbl_convert_resource_serial_bus_dispatch[] = {
+	NULL,
+	acpi_rs_convert_i2c_serial_bus,
+	acpi_rs_convert_spi_serial_bus,
+	acpi_rs_convert_uart_serial_bus,
 };
 
 #ifdef ACPI_FUTURE_USAGE
@@ -140,6 +155,16 @@
 	acpi_rs_dump_ext_address64,	/* ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */
 	acpi_rs_dump_ext_irq,	/* ACPI_RESOURCE_TYPE_EXTENDED_IRQ */
 	acpi_rs_dump_generic_reg,	/* ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
+	acpi_rs_dump_gpio,	/* ACPI_RESOURCE_TYPE_GPIO */
+	acpi_rs_dump_fixed_dma,	/* ACPI_RESOURCE_TYPE_FIXED_DMA */
+	NULL,			/* ACPI_RESOURCE_TYPE_SERIAL_BUS */
+};
+
+struct acpi_rsdump_info *acpi_gbl_dump_serial_bus_dispatch[] = {
+	NULL,
+	acpi_rs_dump_i2c_serial_bus,	/* AML_RESOURCE_I2C_BUS_TYPE */
+	acpi_rs_dump_spi_serial_bus,	/* AML_RESOURCE_SPI_BUS_TYPE */
+	acpi_rs_dump_uart_serial_bus,	/* AML_RESOURCE_UART_BUS_TYPE */
 };
 #endif
 
@@ -166,7 +191,10 @@
 	sizeof(struct aml_resource_address64),	/* ACPI_RESOURCE_TYPE_ADDRESS64 */
 	sizeof(struct aml_resource_extended_address64),	/*ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */
 	sizeof(struct aml_resource_extended_irq),	/* ACPI_RESOURCE_TYPE_EXTENDED_IRQ */
-	sizeof(struct aml_resource_generic_register)	/* ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
+	sizeof(struct aml_resource_generic_register),	/* ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
+	sizeof(struct aml_resource_gpio),	/* ACPI_RESOURCE_TYPE_GPIO */
+	sizeof(struct aml_resource_fixed_dma),	/* ACPI_RESOURCE_TYPE_FIXED_DMA */
+	sizeof(struct aml_resource_common_serialbus),	/* ACPI_RESOURCE_TYPE_SERIAL_BUS */
 };
 
 const u8 acpi_gbl_resource_struct_sizes[] = {
@@ -182,7 +210,7 @@
 	ACPI_RS_SIZE_MIN,
 	ACPI_RS_SIZE(struct acpi_resource_io),
 	ACPI_RS_SIZE(struct acpi_resource_fixed_io),
-	0,
+	ACPI_RS_SIZE(struct acpi_resource_fixed_dma),
 	0,
 	0,
 	0,
@@ -202,5 +230,21 @@
 	ACPI_RS_SIZE(struct acpi_resource_address16),
 	ACPI_RS_SIZE(struct acpi_resource_extended_irq),
 	ACPI_RS_SIZE(struct acpi_resource_address64),
-	ACPI_RS_SIZE(struct acpi_resource_extended_address64)
+	ACPI_RS_SIZE(struct acpi_resource_extended_address64),
+	ACPI_RS_SIZE(struct acpi_resource_gpio),
+	ACPI_RS_SIZE(struct acpi_resource_common_serialbus)
+};
+
+const u8 acpi_gbl_aml_resource_serial_bus_sizes[] = {
+	0,
+	sizeof(struct aml_resource_i2c_serialbus),
+	sizeof(struct aml_resource_spi_serialbus),
+	sizeof(struct aml_resource_uart_serialbus),
+};
+
+const u8 acpi_gbl_resource_struct_serial_bus_sizes[] = {
+	0,
+	ACPI_RS_SIZE(struct acpi_resource_i2c_serialbus),
+	ACPI_RS_SIZE(struct acpi_resource_spi_serialbus),
+	ACPI_RS_SIZE(struct acpi_resource_uart_serialbus),
 };
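The serial-bus tables added above are indexed by the descriptor subtype, with index 0 unused and 1 = I2C, 2 = SPI, 3 = UART. A small sketch of that lookup, assuming it is compiled inside the ACPICA core so the internal headers and globals are visible; example_serial_bus_struct_size() itself is hypothetical.

#include <acpi/acpi.h>
#include "accommon.h"
#include "acresrc.h"

/* Hypothetical helper: internal-struct size for a serial-bus subtype */
static u8 example_serial_bus_struct_size(u8 type)
{
	if (type > AML_RESOURCE_MAX_SERIALBUSTYPE) {
		return 0;	/* Unknown subtype, no table entry */
	}

	/* Index 0 is unused; 1 = I2C, 2 = SPI, 3 = UART */
	return acpi_gbl_resource_struct_serial_bus_sizes[type];
}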
diff --git a/drivers/acpi/acpica/rsio.c b/drivers/acpi/acpica/rsio.c
index 0c7efef..f6a0810 100644
--- a/drivers/acpi/acpica/rsio.c
+++ b/drivers/acpi/acpica/rsio.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsirq.c b/drivers/acpi/acpica/rsirq.c
index 50b8ad2..e23a9ec 100644
--- a/drivers/acpi/acpica/rsirq.c
+++ b/drivers/acpi/acpica/rsirq.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -264,3 +264,34 @@
 	 AML_OFFSET(dma.dma_channel_mask),
 	 ACPI_RS_OFFSET(data.dma.channel_count)}
 };
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_fixed_dma
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_fixed_dma[4] = {
+	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_FIXED_DMA,
+	 ACPI_RS_SIZE(struct acpi_resource_fixed_dma),
+	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_fixed_dma)},
+
+	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_FIXED_DMA,
+	 sizeof(struct aml_resource_fixed_dma),
+	 0},
+
+	/*
+	 * These fields are contiguous in both the source and destination:
+	 * request_lines
+	 * Channels
+	 */
+
+	{ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.fixed_dma.request_lines),
+	 AML_OFFSET(fixed_dma.request_lines),
+	 2},
+
+	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.fixed_dma.width),
+	 AML_OFFSET(fixed_dma.width),
+	 1},
+
+};
diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c
index 1bfcef7..9be129f 100644
--- a/drivers/acpi/acpica/rslist.c
+++ b/drivers/acpi/acpica/rslist.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -70,6 +70,8 @@
 	struct acpi_resource **resource_ptr =
 	    ACPI_CAST_INDIRECT_PTR(struct acpi_resource, context);
 	struct acpi_resource *resource;
+	union aml_resource *aml_resource;
+	struct acpi_rsconvert_info *conversion_table;
 	acpi_status status;
 
 	ACPI_FUNCTION_TRACE(rs_convert_aml_to_resources);
@@ -84,14 +86,37 @@
 			      "Misaligned resource pointer %p", resource));
 	}
 
+	/* Get the appropriate conversion info table */
+
+	aml_resource = ACPI_CAST_PTR(union aml_resource, aml);
+	if (acpi_ut_get_resource_type(aml) == ACPI_RESOURCE_NAME_SERIAL_BUS) {
+		if (aml_resource->common_serial_bus.type >
+		    AML_RESOURCE_MAX_SERIALBUSTYPE) {
+			conversion_table = NULL;
+		} else {
+			/* This is an I2C, SPI, or UART serial_bus descriptor */
+
+			conversion_table =
+			    acpi_gbl_convert_resource_serial_bus_dispatch
+			    [aml_resource->common_serial_bus.type];
+		}
+	} else {
+		conversion_table =
+		    acpi_gbl_get_resource_dispatch[resource_index];
+	}
+
+	if (!conversion_table) {
+		ACPI_ERROR((AE_INFO,
+			    "Invalid/unsupported resource descriptor: Type 0x%2.2X",
+			    resource_index));
+		return (AE_AML_INVALID_RESOURCE_TYPE);
+	}
+
 	/* Convert the AML byte stream resource to a local resource struct */
 
 	status =
-	    acpi_rs_convert_aml_to_resource(resource,
-					    ACPI_CAST_PTR(union aml_resource,
-							  aml),
-					    acpi_gbl_get_resource_dispatch
-					    [resource_index]);
+	    acpi_rs_convert_aml_to_resource(resource, aml_resource,
+					    conversion_table);
 	if (ACPI_FAILURE(status)) {
 		ACPI_EXCEPTION((AE_INFO, status,
 				"Could not convert AML resource (Type 0x%X)",
@@ -106,7 +131,7 @@
 
 	/* Point to the next structure in the output buffer */
 
-	*resource_ptr = ACPI_ADD_PTR(void, resource, resource->length);
+	*resource_ptr = ACPI_NEXT_RESOURCE(resource);
 	return_ACPI_STATUS(AE_OK);
 }
 
@@ -135,6 +160,7 @@
 {
 	u8 *aml = output_buffer;
 	u8 *end_aml = output_buffer + aml_size_needed;
+	struct acpi_rsconvert_info *conversion_table;
 	acpi_status status;
 
 	ACPI_FUNCTION_TRACE(rs_convert_resources_to_aml);
@@ -154,11 +180,34 @@
 
 		/* Perform the conversion */
 
-		status = acpi_rs_convert_resource_to_aml(resource, ACPI_CAST_PTR(union
-										 aml_resource,
-										 aml),
-							 acpi_gbl_set_resource_dispatch
-							 [resource->type]);
+		if (resource->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
+			if (resource->data.common_serial_bus.type >
+			    AML_RESOURCE_MAX_SERIALBUSTYPE) {
+				conversion_table = NULL;
+			} else {
+				/* This is an I2C, SPI, or UART serial_bus descriptor */
+
+				conversion_table =
+				    acpi_gbl_convert_resource_serial_bus_dispatch
+				    [resource->data.common_serial_bus.type];
+			}
+		} else {
+			conversion_table =
+			    acpi_gbl_set_resource_dispatch[resource->type];
+		}
+
+		if (!conversion_table) {
+			ACPI_ERROR((AE_INFO,
+				    "Invalid/unsupported resource descriptor: Type 0x%2.2X",
+				    resource->type));
+			return (AE_AML_INVALID_RESOURCE_TYPE);
+		}
+
+		status = acpi_rs_convert_resource_to_aml(resource,
+						         ACPI_CAST_PTR(union
+								       aml_resource,
+								       aml),
+							 conversion_table);
 		if (ACPI_FAILURE(status)) {
 			ACPI_EXCEPTION((AE_INFO, status,
 					"Could not convert resource (type 0x%X) to AML",
@@ -192,9 +241,7 @@
 
 		/* Point to the next input resource descriptor */
 
-		resource =
-		    ACPI_ADD_PTR(struct acpi_resource, resource,
-				 resource->length);
+		resource = ACPI_NEXT_RESOURCE(resource);
 	}
 
 	/* Completed buffer, but did not find an end_tag resource descriptor */
diff --git a/drivers/acpi/acpica/rsmemory.c b/drivers/acpi/acpica/rsmemory.c
index 7cc6d86..4fd611a 100644
--- a/drivers/acpi/acpica/rsmemory.c
+++ b/drivers/acpi/acpica/rsmemory.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsmisc.c b/drivers/acpi/acpica/rsmisc.c
index 410264b..8073b37 100644
--- a/drivers/acpi/acpica/rsmisc.c
+++ b/drivers/acpi/acpica/rsmisc.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -83,6 +83,10 @@
 
 	ACPI_FUNCTION_TRACE(rs_convert_aml_to_resource);
 
+	if (!info) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
 	if (((acpi_size) resource) & 0x3) {
 
 		/* Each internal resource struct is expected to be 32-bit aligned */
@@ -101,7 +105,6 @@
 	 * table length (# of table entries)
 	 */
 	count = INIT_TABLE_LENGTH(info);
-
 	while (count) {
 		/*
 		 * Source is the external AML byte stream buffer,
@@ -145,6 +148,14 @@
 			    ((ACPI_GET8(source) >> info->value) & 0x03);
 			break;
 
+		case ACPI_RSC_3BITFLAG:
+			/*
+			 * Mask and shift the flag bits
+			 */
+			ACPI_SET8(destination) = (u8)
+			    ((ACPI_GET8(source) >> info->value) & 0x07);
+			break;
+
 		case ACPI_RSC_COUNT:
 
 			item_count = ACPI_GET8(source);
@@ -163,6 +174,69 @@
 			    (info->value * (item_count - 1));
 			break;
 
+		case ACPI_RSC_COUNT_GPIO_PIN:
+
+			target = ACPI_ADD_PTR(void, aml, info->value);
+			item_count = ACPI_GET16(target) - ACPI_GET16(source);
+
+			resource->length = resource->length + item_count;
+			item_count = item_count / 2;
+			ACPI_SET16(destination) = item_count;
+			break;
+
+		case ACPI_RSC_COUNT_GPIO_VEN:
+
+			item_count = ACPI_GET8(source);
+			ACPI_SET8(destination) = (u8)item_count;
+
+			resource->length = resource->length +
+			    (info->value * item_count);
+			break;
+
+		case ACPI_RSC_COUNT_GPIO_RES:
+
+			/*
+			 * Vendor data is optional (length/offset may both be zero)
+			 * Examine vendor data length field first
+			 */
+			target = ACPI_ADD_PTR(void, aml, (info->value + 2));
+			if (ACPI_GET16(target)) {
+
+				/* Use vendor offset to get resource source length */
+
+				target = ACPI_ADD_PTR(void, aml, info->value);
+				item_count =
+				    ACPI_GET16(target) - ACPI_GET16(source);
+			} else {
+				/* No vendor data to worry about */
+
+				item_count = aml->large_header.resource_length +
+				    sizeof(struct aml_resource_large_header) -
+				    ACPI_GET16(source);
+			}
+
+			resource->length = resource->length + item_count;
+			ACPI_SET16(destination) = item_count;
+			break;
+
+		case ACPI_RSC_COUNT_SERIAL_VEN:
+
+			item_count = ACPI_GET16(source) - info->value;
+
+			resource->length = resource->length + item_count;
+			ACPI_SET16(destination) = item_count;
+			break;
+
+		case ACPI_RSC_COUNT_SERIAL_RES:
+
+			item_count = (aml_resource_length +
+				      sizeof(struct aml_resource_large_header))
+			    - ACPI_GET16(source) - info->value;
+
+			resource->length = resource->length + item_count;
+			ACPI_SET16(destination) = item_count;
+			break;
+
 		case ACPI_RSC_LENGTH:
 
 			resource->length = resource->length + info->value;
@@ -183,6 +257,72 @@
 					  info->opcode);
 			break;
 
+		case ACPI_RSC_MOVE_GPIO_PIN:
+
+			/* Generate and set the PIN data pointer */
+
+			target = (char *)ACPI_ADD_PTR(void, resource,
+						      (resource->length -
+						       item_count * 2));
+			*(u16 **)destination = ACPI_CAST_PTR(u16, target);
+
+			/* Copy the PIN data */
+
+			source = ACPI_ADD_PTR(void, aml, ACPI_GET16(source));
+			acpi_rs_move_data(target, source, item_count,
+					  info->opcode);
+			break;
+
+		case ACPI_RSC_MOVE_GPIO_RES:
+
+			/* Generate and set the resource_source string pointer */
+
+			target = (char *)ACPI_ADD_PTR(void, resource,
+						      (resource->length -
+						       item_count));
+			*(u8 **)destination = ACPI_CAST_PTR(u8, target);
+
+			/* Copy the resource_source string */
+
+			source = ACPI_ADD_PTR(void, aml, ACPI_GET16(source));
+			acpi_rs_move_data(target, source, item_count,
+					  info->opcode);
+			break;
+
+		case ACPI_RSC_MOVE_SERIAL_VEN:
+
+			/* Generate and set the Vendor Data pointer */
+
+			target = (char *)ACPI_ADD_PTR(void, resource,
+						      (resource->length -
+						       item_count));
+			*(u8 **)destination = ACPI_CAST_PTR(u8, target);
+
+			/* Copy the Vendor Data */
+
+			source = ACPI_ADD_PTR(void, aml, info->value);
+			acpi_rs_move_data(target, source, item_count,
+					  info->opcode);
+			break;
+
+		case ACPI_RSC_MOVE_SERIAL_RES:
+
+			/* Generate and set the resource_source string pointer */
+
+			target = (char *)ACPI_ADD_PTR(void, resource,
+						      (resource->length -
+						       item_count));
+			*(u8 **)destination = ACPI_CAST_PTR(u8, target);
+
+			/* Copy the resource_source string */
+
+			source =
+			    ACPI_ADD_PTR(void, aml,
+					 (ACPI_GET16(source) + info->value));
+			acpi_rs_move_data(target, source, item_count,
+					  info->opcode);
+			break;
+
 		case ACPI_RSC_SET8:
 
 			ACPI_MEMSET(destination, info->aml_offset, info->value);
@@ -219,13 +359,18 @@
 			 * Optional resource_source (Index and String). This is the more
 			 * complicated case used by the Interrupt() macro
 			 */
-			target =
-			    ACPI_ADD_PTR(char, resource,
-					 info->aml_offset + (item_count * 4));
+			target = ACPI_ADD_PTR(char, resource,
+					      info->aml_offset +
+					      (item_count * 4));
 
 			resource->length +=
 			    acpi_rs_get_resource_source(aml_resource_length,
-							(acpi_rs_length) (((item_count - 1) * sizeof(u32)) + info->value), destination, aml, target);
+							(acpi_rs_length)
+							(((item_count -
+							   1) * sizeof(u32)) +
+							 info->value),
+							destination, aml,
+							target);
 			break;
 
 		case ACPI_RSC_BITMASK:
@@ -327,6 +472,7 @@
 {
 	void *source = NULL;
 	void *destination;
+	char *target;
 	acpi_rsdesc_size aml_length = 0;
 	u8 count;
 	u16 temp16 = 0;
@@ -334,6 +480,10 @@
 
 	ACPI_FUNCTION_TRACE(rs_convert_resource_to_aml);
 
+	if (!info) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
 	/*
 	 * First table entry must be ACPI_RSC_INITxxx and must contain the
 	 * table length (# of table entries)
@@ -383,6 +533,14 @@
 			    ((ACPI_GET8(source) & 0x03) << info->value);
 			break;
 
+		case ACPI_RSC_3BITFLAG:
+			/*
+			 * Mask and shift the flag bits
+			 */
+			ACPI_SET8(destination) |= (u8)
+			    ((ACPI_GET8(source) & 0x07) << info->value);
+			break;
+
 		case ACPI_RSC_COUNT:
 
 			item_count = ACPI_GET8(source);
@@ -400,6 +558,63 @@
 			acpi_rs_set_resource_length(aml_length, aml);
 			break;
 
+		case ACPI_RSC_COUNT_GPIO_PIN:
+
+			item_count = ACPI_GET16(source);
+			ACPI_SET16(destination) = (u16)aml_length;
+
+			aml_length = (u16)(aml_length + item_count * 2);
+			target = ACPI_ADD_PTR(void, aml, info->value);
+			ACPI_SET16(target) = (u16)aml_length;
+			acpi_rs_set_resource_length(aml_length, aml);
+			break;
+
+		case ACPI_RSC_COUNT_GPIO_VEN:
+
+			item_count = ACPI_GET16(source);
+			ACPI_SET16(destination) = (u16)item_count;
+
+			aml_length =
+			    (u16)(aml_length + (info->value * item_count));
+			acpi_rs_set_resource_length(aml_length, aml);
+			break;
+
+		case ACPI_RSC_COUNT_GPIO_RES:
+
+			/* Set resource source string length */
+
+			item_count = ACPI_GET16(source);
+			ACPI_SET16(destination) = (u16)aml_length;
+
+			/* Compute offset for the Vendor Data */
+
+			aml_length = (u16)(aml_length + item_count);
+			target = ACPI_ADD_PTR(void, aml, info->value);
+
+			/* Set vendor offset only if there is vendor data */
+
+			if (resource->data.gpio.vendor_length) {
+				ACPI_SET16(target) = (u16)aml_length;
+			}
+
+			acpi_rs_set_resource_length(aml_length, aml);
+			break;
+
+		case ACPI_RSC_COUNT_SERIAL_VEN:
+
+			item_count = ACPI_GET16(source);
+			ACPI_SET16(destination) = item_count + info->value;
+			aml_length = (u16)(aml_length + item_count);
+			acpi_rs_set_resource_length(aml_length, aml);
+			break;
+
+		case ACPI_RSC_COUNT_SERIAL_RES:
+
+			item_count = ACPI_GET16(source);
+			aml_length = (u16)(aml_length + item_count);
+			acpi_rs_set_resource_length(aml_length, aml);
+			break;
+
 		case ACPI_RSC_LENGTH:
 
 			acpi_rs_set_resource_length(info->value, aml);
@@ -417,6 +632,48 @@
 					  info->opcode);
 			break;
 
+		case ACPI_RSC_MOVE_GPIO_PIN:
+
+			destination = (char *)ACPI_ADD_PTR(void, aml,
+							   ACPI_GET16
+							   (destination));
+			source = *(u16 **)source;
+			acpi_rs_move_data(destination, source, item_count,
+					  info->opcode);
+			break;
+
+		case ACPI_RSC_MOVE_GPIO_RES:
+
+			/* Used for both resource_source string and vendor_data */
+
+			destination = (char *)ACPI_ADD_PTR(void, aml,
+							   ACPI_GET16
+							   (destination));
+			source = *(u8 **)source;
+			acpi_rs_move_data(destination, source, item_count,
+					  info->opcode);
+			break;
+
+		case ACPI_RSC_MOVE_SERIAL_VEN:
+
+			destination = (char *)ACPI_ADD_PTR(void, aml,
+							   (aml_length -
+							    item_count));
+			source = *(u8 **)source;
+			acpi_rs_move_data(destination, source, item_count,
+					  info->opcode);
+			break;
+
+		case ACPI_RSC_MOVE_SERIAL_RES:
+
+			destination = (char *)ACPI_ADD_PTR(void, aml,
+							   (aml_length -
+							    item_count));
+			source = *(u8 **)source;
+			acpi_rs_move_data(destination, source, item_count,
+					  info->opcode);
+			break;
+
 		case ACPI_RSC_ADDRESS:
 
 			/* Set the Resource Type, General Flags, and Type-Specific Flags */
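The new ACPI_RSC_3BITFLAG opcode handled above is a plain mask-and-shift of a three-bit field. A standalone sketch for the UART DataBits field, whose conversion-table entry places it at bit position 4 of type_specific_flags; both helpers are hypothetical.

#include <linux/types.h>

/* Hypothetical helper: extract the 3-bit DataBits field (bits 6:4) */
static u8 example_get_data_bits(u8 type_specific_flags)
{
	return (u8)((type_specific_flags >> 4) & 0x07);
}

/* Hypothetical helper: the reverse mask-and-shift used when encoding to AML */
static u8 example_set_data_bits(u8 data_bits)
{
	return (u8)((data_bits & 0x07) << 4);
}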
diff --git a/drivers/acpi/acpica/rsserial.c b/drivers/acpi/acpica/rsserial.c
new file mode 100644
index 0000000..9aa5e68
--- /dev/null
+++ b/drivers/acpi/acpica/rsserial.c
@@ -0,0 +1,441 @@
+/*******************************************************************************
+ *
+ * Module Name: rsserial - GPIO/serial_bus resource descriptors
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2012, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acresrc.h"
+
+#define _COMPONENT          ACPI_RESOURCES
+ACPI_MODULE_NAME("rsserial")
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_gpio
+ *
+ ******************************************************************************/
+struct acpi_rsconvert_info acpi_rs_convert_gpio[17] = {
+	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_GPIO,
+	 ACPI_RS_SIZE(struct acpi_resource_gpio),
+	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_gpio)},
+
+	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_GPIO,
+	 sizeof(struct aml_resource_gpio),
+	 0},
+
+	/*
+	 * These fields are contiguous in both the source and destination:
+	 * revision_id
+	 * connection_type
+	 */
+	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.gpio.revision_id),
+	 AML_OFFSET(gpio.revision_id),
+	 2},
+
+	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.gpio.producer_consumer),
+	 AML_OFFSET(gpio.flags),
+	 0},
+
+	{ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.gpio.sharable),
+	 AML_OFFSET(gpio.int_flags),
+	 3},
+
+	{ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.gpio.io_restriction),
+	 AML_OFFSET(gpio.int_flags),
+	 0},
+
+	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.gpio.triggering),
+	 AML_OFFSET(gpio.int_flags),
+	 0},
+
+	{ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.gpio.polarity),
+	 AML_OFFSET(gpio.int_flags),
+	 1},
+
+	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.gpio.pin_config),
+	 AML_OFFSET(gpio.pin_config),
+	 1},
+
+	/*
+	 * These fields are contiguous in both the source and destination:
+	 * drive_strength
+	 * debounce_timeout
+	 */
+	{ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.gpio.drive_strength),
+	 AML_OFFSET(gpio.drive_strength),
+	 2},
+
+	/* Pin Table */
+
+	{ACPI_RSC_COUNT_GPIO_PIN, ACPI_RS_OFFSET(data.gpio.pin_table_length),
+	 AML_OFFSET(gpio.pin_table_offset),
+	 AML_OFFSET(gpio.res_source_offset)},
+
+	{ACPI_RSC_MOVE_GPIO_PIN, ACPI_RS_OFFSET(data.gpio.pin_table),
+	 AML_OFFSET(gpio.pin_table_offset),
+	 0},
+
+	/* Resource Source */
+
+	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.gpio.resource_source.index),
+	 AML_OFFSET(gpio.res_source_index),
+	 1},
+
+	{ACPI_RSC_COUNT_GPIO_RES,
+	 ACPI_RS_OFFSET(data.gpio.resource_source.string_length),
+	 AML_OFFSET(gpio.res_source_offset),
+	 AML_OFFSET(gpio.vendor_offset)},
+
+	{ACPI_RSC_MOVE_GPIO_RES,
+	 ACPI_RS_OFFSET(data.gpio.resource_source.string_ptr),
+	 AML_OFFSET(gpio.res_source_offset),
+	 0},
+
+	/* Vendor Data */
+
+	{ACPI_RSC_COUNT_GPIO_VEN, ACPI_RS_OFFSET(data.gpio.vendor_length),
+	 AML_OFFSET(gpio.vendor_length),
+	 1},
+
+	{ACPI_RSC_MOVE_GPIO_RES, ACPI_RS_OFFSET(data.gpio.vendor_data),
+	 AML_OFFSET(gpio.vendor_offset),
+	 0},
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_i2c_serial_bus
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_i2c_serial_bus[16] = {
+	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_SERIAL_BUS,
+	 ACPI_RS_SIZE(struct acpi_resource_i2c_serialbus),
+	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_i2c_serial_bus)},
+
+	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_SERIAL_BUS,
+	 sizeof(struct aml_resource_i2c_serialbus),
+	 0},
+
+	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.revision_id),
+	 AML_OFFSET(common_serial_bus.revision_id),
+	 1},
+
+	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.type),
+	 AML_OFFSET(common_serial_bus.type),
+	 1},
+
+	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.common_serial_bus.slave_mode),
+	 AML_OFFSET(common_serial_bus.flags),
+	 0},
+
+	{ACPI_RSC_1BITFLAG,
+	 ACPI_RS_OFFSET(data.common_serial_bus.producer_consumer),
+	 AML_OFFSET(common_serial_bus.flags),
+	 1},
+
+	{ACPI_RSC_MOVE8,
+	 ACPI_RS_OFFSET(data.common_serial_bus.type_revision_id),
+	 AML_OFFSET(common_serial_bus.type_revision_id),
+	 1},
+
+	{ACPI_RSC_MOVE16,
+	 ACPI_RS_OFFSET(data.common_serial_bus.type_data_length),
+	 AML_OFFSET(common_serial_bus.type_data_length),
+	 1},
+
+	/* Vendor data */
+
+	{ACPI_RSC_COUNT_SERIAL_VEN,
+	 ACPI_RS_OFFSET(data.common_serial_bus.vendor_length),
+	 AML_OFFSET(common_serial_bus.type_data_length),
+	 AML_RESOURCE_I2C_MIN_DATA_LEN},
+
+	{ACPI_RSC_MOVE_SERIAL_VEN,
+	 ACPI_RS_OFFSET(data.common_serial_bus.vendor_data),
+	 0,
+	 sizeof(struct aml_resource_i2c_serialbus)},
+
+	/* Resource Source */
+
+	{ACPI_RSC_MOVE8,
+	 ACPI_RS_OFFSET(data.common_serial_bus.resource_source.index),
+	 AML_OFFSET(common_serial_bus.res_source_index),
+	 1},
+
+	{ACPI_RSC_COUNT_SERIAL_RES,
+	 ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_length),
+	 AML_OFFSET(common_serial_bus.type_data_length),
+	 sizeof(struct aml_resource_common_serialbus)},
+
+	{ACPI_RSC_MOVE_SERIAL_RES,
+	 ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_ptr),
+	 AML_OFFSET(common_serial_bus.type_data_length),
+	 sizeof(struct aml_resource_common_serialbus)},
+
+	/* I2C bus type specific */
+
+	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.i2c_serial_bus.access_mode),
+	 AML_OFFSET(i2c_serial_bus.type_specific_flags),
+	 0},
+
+	{ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.i2c_serial_bus.connection_speed),
+	 AML_OFFSET(i2c_serial_bus.connection_speed),
+	 1},
+
+	{ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.i2c_serial_bus.slave_address),
+	 AML_OFFSET(i2c_serial_bus.slave_address),
+	 1},
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_spi_serial_bus
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_spi_serial_bus[20] = {
+	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_SERIAL_BUS,
+	 ACPI_RS_SIZE(struct acpi_resource_spi_serialbus),
+	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_spi_serial_bus)},
+
+	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_SERIAL_BUS,
+	 sizeof(struct aml_resource_spi_serialbus),
+	 0},
+
+	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.revision_id),
+	 AML_OFFSET(common_serial_bus.revision_id),
+	 1},
+
+	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.type),
+	 AML_OFFSET(common_serial_bus.type),
+	 1},
+
+	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.common_serial_bus.slave_mode),
+	 AML_OFFSET(common_serial_bus.flags),
+	 0},
+
+	{ACPI_RSC_1BITFLAG,
+	 ACPI_RS_OFFSET(data.common_serial_bus.producer_consumer),
+	 AML_OFFSET(common_serial_bus.flags),
+	 1},
+
+	{ACPI_RSC_MOVE8,
+	 ACPI_RS_OFFSET(data.common_serial_bus.type_revision_id),
+	 AML_OFFSET(common_serial_bus.type_revision_id),
+	 1},
+
+	{ACPI_RSC_MOVE16,
+	 ACPI_RS_OFFSET(data.common_serial_bus.type_data_length),
+	 AML_OFFSET(common_serial_bus.type_data_length),
+	 1},
+
+	/* Vendor data */
+
+	{ACPI_RSC_COUNT_SERIAL_VEN,
+	 ACPI_RS_OFFSET(data.common_serial_bus.vendor_length),
+	 AML_OFFSET(common_serial_bus.type_data_length),
+	 AML_RESOURCE_SPI_MIN_DATA_LEN},
+
+	{ACPI_RSC_MOVE_SERIAL_VEN,
+	 ACPI_RS_OFFSET(data.common_serial_bus.vendor_data),
+	 0,
+	 sizeof(struct aml_resource_spi_serialbus)},
+
+	/* Resource Source */
+
+	{ACPI_RSC_MOVE8,
+	 ACPI_RS_OFFSET(data.common_serial_bus.resource_source.index),
+	 AML_OFFSET(common_serial_bus.res_source_index),
+	 1},
+
+	{ACPI_RSC_COUNT_SERIAL_RES,
+	 ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_length),
+	 AML_OFFSET(common_serial_bus.type_data_length),
+	 sizeof(struct aml_resource_common_serialbus)},
+
+	{ACPI_RSC_MOVE_SERIAL_RES,
+	 ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_ptr),
+	 AML_OFFSET(common_serial_bus.type_data_length),
+	 sizeof(struct aml_resource_common_serialbus)},
+
+	/* Spi bus type specific  */
+
+	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.spi_serial_bus.wire_mode),
+	 AML_OFFSET(spi_serial_bus.type_specific_flags),
+	 0},
+
+	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.spi_serial_bus.device_polarity),
+	 AML_OFFSET(spi_serial_bus.type_specific_flags),
+	 1},
+
+	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.spi_serial_bus.data_bit_length),
+	 AML_OFFSET(spi_serial_bus.data_bit_length),
+	 1},
+
+	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.spi_serial_bus.clock_phase),
+	 AML_OFFSET(spi_serial_bus.clock_phase),
+	 1},
+
+	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.spi_serial_bus.clock_polarity),
+	 AML_OFFSET(spi_serial_bus.clock_polarity),
+	 1},
+
+	{ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.spi_serial_bus.device_selection),
+	 AML_OFFSET(spi_serial_bus.device_selection),
+	 1},
+
+	{ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.spi_serial_bus.connection_speed),
+	 AML_OFFSET(spi_serial_bus.connection_speed),
+	 1},
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_uart_serial_bus
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_uart_serial_bus[22] = {
+	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_SERIAL_BUS,
+	 ACPI_RS_SIZE(struct acpi_resource_uart_serialbus),
+	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_uart_serial_bus)},
+
+	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_SERIAL_BUS,
+	 sizeof(struct aml_resource_uart_serialbus),
+	 0},
+
+	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.revision_id),
+	 AML_OFFSET(common_serial_bus.revision_id),
+	 1},
+
+	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.type),
+	 AML_OFFSET(common_serial_bus.type),
+	 1},
+
+	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.common_serial_bus.slave_mode),
+	 AML_OFFSET(common_serial_bus.flags),
+	 0},
+
+	{ACPI_RSC_1BITFLAG,
+	 ACPI_RS_OFFSET(data.common_serial_bus.producer_consumer),
+	 AML_OFFSET(common_serial_bus.flags),
+	 1},
+
+	{ACPI_RSC_MOVE8,
+	 ACPI_RS_OFFSET(data.common_serial_bus.type_revision_id),
+	 AML_OFFSET(common_serial_bus.type_revision_id),
+	 1},
+
+	{ACPI_RSC_MOVE16,
+	 ACPI_RS_OFFSET(data.common_serial_bus.type_data_length),
+	 AML_OFFSET(common_serial_bus.type_data_length),
+	 1},
+
+	/* Vendor data */
+
+	{ACPI_RSC_COUNT_SERIAL_VEN,
+	 ACPI_RS_OFFSET(data.common_serial_bus.vendor_length),
+	 AML_OFFSET(common_serial_bus.type_data_length),
+	 AML_RESOURCE_UART_MIN_DATA_LEN},
+
+	{ACPI_RSC_MOVE_SERIAL_VEN,
+	 ACPI_RS_OFFSET(data.common_serial_bus.vendor_data),
+	 0,
+	 sizeof(struct aml_resource_uart_serialbus)},
+
+	/* Resource Source */
+
+	{ACPI_RSC_MOVE8,
+	 ACPI_RS_OFFSET(data.common_serial_bus.resource_source.index),
+	 AML_OFFSET(common_serial_bus.res_source_index),
+	 1},
+
+	{ACPI_RSC_COUNT_SERIAL_RES,
+	 ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_length),
+	 AML_OFFSET(common_serial_bus.type_data_length),
+	 sizeof(struct aml_resource_common_serialbus)},
+
+	{ACPI_RSC_MOVE_SERIAL_RES,
+	 ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_ptr),
+	 AML_OFFSET(common_serial_bus.type_data_length),
+	 sizeof(struct aml_resource_common_serialbus)},
+
+	/* Uart bus type specific  */
+
+	{ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.uart_serial_bus.flow_control),
+	 AML_OFFSET(uart_serial_bus.type_specific_flags),
+	 0},
+
+	{ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.uart_serial_bus.stop_bits),
+	 AML_OFFSET(uart_serial_bus.type_specific_flags),
+	 2},
+
+	{ACPI_RSC_3BITFLAG, ACPI_RS_OFFSET(data.uart_serial_bus.data_bits),
+	 AML_OFFSET(uart_serial_bus.type_specific_flags),
+	 4},
+
+	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.uart_serial_bus.endian),
+	 AML_OFFSET(uart_serial_bus.type_specific_flags),
+	 7},
+
+	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.uart_serial_bus.parity),
+	 AML_OFFSET(uart_serial_bus.parity),
+	 1},
+
+	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.uart_serial_bus.lines_enabled),
+	 AML_OFFSET(uart_serial_bus.lines_enabled),
+	 1},
+
+	{ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.uart_serial_bus.rx_fifo_size),
+	 AML_OFFSET(uart_serial_bus.rx_fifo_size),
+	 1},
+
+	{ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.uart_serial_bus.tx_fifo_size),
+	 AML_OFFSET(uart_serial_bus.tx_fifo_size),
+	 1},
+
+	{ACPI_RSC_MOVE32,
+	 ACPI_RS_OFFSET(data.uart_serial_bus.default_baud_rate),
+	 AML_OFFSET(uart_serial_bus.default_baud_rate),
+	 1},
+};
diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c
index 231811e..433a375 100644
--- a/drivers/acpi/acpica/rsutils.c
+++ b/drivers/acpi/acpica/rsutils.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -144,6 +144,9 @@
 			 * since there are no alignment or endian issues
 			 */
 		case ACPI_RSC_MOVE8:
+		case ACPI_RSC_MOVE_GPIO_RES:
+		case ACPI_RSC_MOVE_SERIAL_VEN:
+		case ACPI_RSC_MOVE_SERIAL_RES:
 			ACPI_MEMCPY(destination, source, item_count);
 			return;
 
@@ -153,6 +156,7 @@
 			 * misaligned memory transfers
 			 */
 		case ACPI_RSC_MOVE16:
+		case ACPI_RSC_MOVE_GPIO_PIN:
 			ACPI_MOVE_16_TO_16(&ACPI_CAST_PTR(u16, destination)[i],
 					   &ACPI_CAST_PTR(u16, source)[i]);
 			break;
@@ -590,6 +594,56 @@
 
 /*******************************************************************************
  *
+ * FUNCTION:    acpi_rs_get_aei_method_data
+ *
+ * PARAMETERS:  Node            - Device node
+ *              ret_buffer      - Pointer to a buffer structure for the
+ *                                results
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: This function is called to get the _AEI resource list of the
+ *              device specified by the node passed in
+ *
+ *              If the function fails, an appropriate status will be returned
+ *              and the contents of the caller's buffer are undefined.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_rs_get_aei_method_data(struct acpi_namespace_node *node,
+			    struct acpi_buffer *ret_buffer)
+{
+	union acpi_operand_object *obj_desc;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(rs_get_aei_method_data);
+
+	/* Parameters guaranteed valid by caller */
+
+	/* Execute the method, no parameters */
+
+	status = acpi_ut_evaluate_object(node, METHOD_NAME__AEI,
+					 ACPI_BTYPE_BUFFER, &obj_desc);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/*
+	 * Make the call to create a resource linked list from the
+	 * byte stream buffer that comes back from the _AEI method
+	 * execution.
+	 */
+	status = acpi_rs_create_resource_list(obj_desc, ret_buffer);
+
+	/* On exit, we must delete the object returned by evaluate_object */
+
+	acpi_ut_remove_reference(obj_desc);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
  * FUNCTION:    acpi_rs_get_method_data
  *
  * PARAMETERS:  Handle          - Handle to the containing object
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index fe86b37..f58c098 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -307,6 +307,46 @@
 
 ACPI_EXPORT_SYMBOL(acpi_set_current_resources)
 
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_get_event_resources
+ *
+ * PARAMETERS:  device_handle   - Handle to the device object for the
+ *                                device we are getting event resources for
+ *              ret_buffer      - Pointer to a buffer that will receive the
+ *                                returned resource list
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: This function is called to get the event resources for a
+ *              specific device. The caller must first acquire a handle for
+ *              the desired device. The resource data is returned in the
+ *              buffer pointed to by the ret_buffer variable. Uses the
+ *              _AEI method.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_get_event_resources(acpi_handle device_handle,
+			 struct acpi_buffer *ret_buffer)
+{
+	acpi_status status;
+	struct acpi_namespace_node *node;
+
+	ACPI_FUNCTION_TRACE(acpi_get_event_resources);
+
+	/* Validate parameters then dispatch to internal routine */
+
+	status = acpi_rs_validate_parameters(device_handle, ret_buffer, &node);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	status = acpi_rs_get_aei_method_data(node, ret_buffer);
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_event_resources)
+
 /******************************************************************************
  *
  * FUNCTION:    acpi_resource_to_address64
@@ -486,8 +526,9 @@
  *
  * PARAMETERS:  device_handle   - Handle to the device object for the
  *                                device we are querying
- *              Name            - Method name of the resources we want
- *                                (METHOD_NAME__CRS or METHOD_NAME__PRS)
+ *              Name            - Method name of the resources we want.
+ *                                (METHOD_NAME__CRS, METHOD_NAME__PRS, or
+ *                                METHOD_NAME__AEI)
  *              user_function   - Called for each resource
  *              Context         - Passed to user_function
  *
@@ -514,11 +555,12 @@
 
 	if (!device_handle || !user_function || !name ||
 	    (!ACPI_COMPARE_NAME(name, METHOD_NAME__CRS) &&
-	     !ACPI_COMPARE_NAME(name, METHOD_NAME__PRS))) {
+	     !ACPI_COMPARE_NAME(name, METHOD_NAME__PRS) &&
+	     !ACPI_COMPARE_NAME(name, METHOD_NAME__AEI))) {
 		return_ACPI_STATUS(AE_BAD_PARAMETER);
 	}
 
-	/* Get the _CRS or _PRS resource list */
+	/* Get the _CRS/_PRS/_AEI resource list */
 
 	buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
 	status = acpi_rs_get_method_data(device_handle, name, &buffer);
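A minimal sketch of how a driver might call the new acpi_get_event_resources() interface to read a device's _AEI (GPIO-signaled event) resources. The device handle, the example_read_aei() wrapper, and the ACPI_ALLOCATE_BUFFER / ACPI_FREE handling are assumptions; only the acpi_get_event_resources() prototype comes from the patch.

#include <acpi/acpi.h>

/* Hypothetical caller: fetch and release a device's _AEI resource list */
static acpi_status example_read_aei(acpi_handle device)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	acpi_status status;

	status = acpi_get_event_resources(device, &buffer);
	if (ACPI_FAILURE(status))
		return status;

	/* buffer.pointer holds a struct acpi_resource list ending in an end_tag */

	/* Caller owns the buffer; free it when finished (assumed) */
	ACPI_FREE(buffer.pointer);
	return AE_OK;
}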
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 6f5588e..c5d8704 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -63,14 +63,15 @@
 
 typedef struct acpi_fadt_info {
 	char *name;
-	u8 address64;
-	u8 address32;
-	u8 length;
+	u16 address64;
+	u16 address32;
+	u16 length;
 	u8 default_length;
 	u8 type;
 
 } acpi_fadt_info;
 
+#define ACPI_FADT_OPTIONAL          0
 #define ACPI_FADT_REQUIRED          1
 #define ACPI_FADT_SEPARATE_LENGTH   2
 
@@ -87,7 +88,7 @@
 	 ACPI_FADT_OFFSET(pm1b_event_block),
 	 ACPI_FADT_OFFSET(pm1_event_length),
 	 ACPI_PM1_REGISTER_WIDTH * 2,	/* Enable + Status register */
-	 0},
+	 ACPI_FADT_OPTIONAL},
 
 	{"Pm1aControlBlock",
 	 ACPI_FADT_OFFSET(xpm1a_control_block),
@@ -101,7 +102,7 @@
 	 ACPI_FADT_OFFSET(pm1b_control_block),
 	 ACPI_FADT_OFFSET(pm1_control_length),
 	 ACPI_PM1_REGISTER_WIDTH,
-	 0},
+	 ACPI_FADT_OPTIONAL},
 
 	{"Pm2ControlBlock",
 	 ACPI_FADT_OFFSET(xpm2_control_block),
@@ -139,7 +140,7 @@
 
 typedef struct acpi_fadt_pm_info {
 	struct acpi_generic_address *target;
-	u8 source;
+	u16 source;
 	u8 register_num;
 
 } acpi_fadt_pm_info;
@@ -253,8 +254,13 @@
 	acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT.Xdsdt,
 			      ACPI_SIG_DSDT, ACPI_TABLE_INDEX_DSDT);
 
-	acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT.Xfacs,
-			      ACPI_SIG_FACS, ACPI_TABLE_INDEX_FACS);
+	/* If Hardware Reduced flag is set, there is no FACS */
+
+	if (!acpi_gbl_reduced_hardware) {
+		acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT.
+				      Xfacs, ACPI_SIG_FACS,
+				      ACPI_TABLE_INDEX_FACS);
+	}
 }
 
 /*******************************************************************************
@@ -277,12 +283,12 @@
 {
 	/*
 	 * Check if the FADT is larger than the largest table that we expect
-	 * (the ACPI 2.0/3.0 version). If so, truncate the table, and issue
+	 * (the ACPI 5.0 version). If so, truncate the table, and issue
 	 * a warning.
 	 */
 	if (length > sizeof(struct acpi_table_fadt)) {
 		ACPI_WARNING((AE_INFO,
-			      "FADT (revision %u) is longer than ACPI 2.0 version, "
+			      "FADT (revision %u) is longer than ACPI 5.0 version, "
 			      "truncating length %u to %u",
 			      table->revision, length,
 			      (u32)sizeof(struct acpi_table_fadt)));
@@ -297,6 +303,13 @@
 	ACPI_MEMCPY(&acpi_gbl_FADT, table,
 		    ACPI_MIN(length, sizeof(struct acpi_table_fadt)));
 
+	/* Take a copy of the Hardware Reduced flag */
+
+	acpi_gbl_reduced_hardware = FALSE;
+	if (acpi_gbl_FADT.flags & ACPI_FADT_HW_REDUCED) {
+		acpi_gbl_reduced_hardware = TRUE;
+	}
+
 	/* Convert the local copy of the FADT to the common internal format */
 
 	acpi_tb_convert_fadt();
@@ -502,6 +515,12 @@
 		acpi_gbl_FADT.Xdsdt = (u64) acpi_gbl_FADT.dsdt;
 	}
 
+	/* If Hardware Reduced flag is set, we are all done */
+
+	if (acpi_gbl_reduced_hardware) {
+		return;
+	}
+
 	/* Examine all of the 64-bit extended address fields (X fields) */
 
 	for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) {
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index a55cb2b..4903e36 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 62365f6..1aecf7b 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 0f2d395..09ca39e 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -135,6 +135,13 @@
 {
 	acpi_status status;
 
+	/* If Hardware Reduced flag is set, there is no FACS */
+
+	if (acpi_gbl_reduced_hardware) {
+		acpi_gbl_FACS = NULL;
+		return (AE_OK);
+	}
+
 	status = acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
 					 ACPI_CAST_INDIRECT_PTR(struct
 								acpi_table_header,
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index e7d13f5..abcc641 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index 7eb6c6c..4258f64 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c
new file mode 100644
index 0000000..67932ae
--- /dev/null
+++ b/drivers/acpi/acpica/utaddress.c
@@ -0,0 +1,294 @@
+/******************************************************************************
+ *
+ * Module Name: utaddress - op_region address range check
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2012, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+
+#define _COMPONENT          ACPI_UTILITIES
+ACPI_MODULE_NAME("utaddress")
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_add_address_range
+ *
+ * PARAMETERS:  space_id            - Address space ID
+ *              Address             - op_region start address
+ *              Length              - op_region length
+ *              region_node         - op_region namespace node
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Add the Operation Region address range to the global list.
+ *              The only supported Space IDs are Memory and I/O. Called when
+ *              the op_region address/length operands are fully evaluated.
+ *
+ * MUTEX:       Locks the namespace
+ *
+ * NOTE: Because this interface is only called when an op_region argument
+ * list is evaluated, there cannot be any duplicate region_nodes.
+ * Duplicate Address/Length values are allowed, however, so that multiple
+ * address conflicts can be detected.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ut_add_address_range(acpi_adr_space_type space_id,
+			  acpi_physical_address address,
+			  u32 length, struct acpi_namespace_node *region_node)
+{
+	struct acpi_address_range *range_info;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ut_add_address_range);
+
+	if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
+	    (space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/* Allocate/init a new info block, add it to the appropriate list */
+
+	range_info = ACPI_ALLOCATE(sizeof(struct acpi_address_range));
+	if (!range_info) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	range_info->start_address = address;
+	range_info->end_address = (address + length - 1);
+	range_info->region_node = region_node;
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		ACPI_FREE(range_info);
+		return_ACPI_STATUS(status);
+	}
+
+	range_info->next = acpi_gbl_address_range_list[space_id];
+	acpi_gbl_address_range_list[space_id] = range_info;
+
+	ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+			  "\nAdded [%4.4s] address range: 0x%p-0x%p\n",
+			  acpi_ut_get_node_name(range_info->region_node),
+			  ACPI_CAST_PTR(void, address),
+			  ACPI_CAST_PTR(void, range_info->end_address)));
+
+	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_remove_address_range
+ *
+ * PARAMETERS:  space_id            - Address space ID
+ *              region_node         - op_region namespace node
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Remove the Operation Region from the global list. The only
+ *              supported Space IDs are Memory and I/O. Called when an
+ *              op_region is deleted.
+ *
+ * MUTEX:       Assumes the namespace is locked
+ *
+ ******************************************************************************/
+
+void
+acpi_ut_remove_address_range(acpi_adr_space_type space_id,
+			     struct acpi_namespace_node *region_node)
+{
+	struct acpi_address_range *range_info;
+	struct acpi_address_range *prev;
+
+	ACPI_FUNCTION_TRACE(ut_remove_address_range);
+
+	if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
+	    (space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
+		return_VOID;
+	}
+
+	/* Get the appropriate list head and check the list */
+
+	range_info = prev = acpi_gbl_address_range_list[space_id];
+	while (range_info) {
+		if (range_info->region_node == region_node) {
+			if (range_info == prev) {	/* Found at list head */
+				acpi_gbl_address_range_list[space_id] =
+				    range_info->next;
+			} else {
+				prev->next = range_info->next;
+			}
+
+			ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+					  "\nRemoved [%4.4s] address range: 0x%p-0x%p\n",
+					  acpi_ut_get_node_name(range_info->
+								region_node),
+					  ACPI_CAST_PTR(void,
+							range_info->
+							start_address),
+					  ACPI_CAST_PTR(void,
+							range_info->
+							end_address)));
+
+			ACPI_FREE(range_info);
+			return_VOID;
+		}
+
+		prev = range_info;
+		range_info = range_info->next;
+	}
+
+	return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_check_address_range
+ *
+ * PARAMETERS:  space_id            - Address space ID
+ *              Address             - Start address
+ *              Length              - Length of address range
+ *              Warn                - TRUE if warning on overlap desired
+ *
+ * RETURN:      Count of the number of conflicts detected. Zero is always
+ *              returned for Space IDs other than Memory or I/O.
+ *
+ * DESCRIPTION: Check if the input address range overlaps any of the
+ *              ASL operation region address ranges. The only supported
+ *              Space IDs are Memory and I/O.
+ *
+ * MUTEX:       Assumes the namespace is locked.
+ *
+ ******************************************************************************/
+
+u32
+acpi_ut_check_address_range(acpi_adr_space_type space_id,
+			    acpi_physical_address address, u32 length, u8 warn)
+{
+	struct acpi_address_range *range_info;
+	acpi_physical_address end_address;
+	char *pathname;
+	u32 overlap_count = 0;
+
+	ACPI_FUNCTION_TRACE(ut_check_address_range);
+
+	if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
+	    (space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
+		return_UINT32(0);
+	}
+
+	range_info = acpi_gbl_address_range_list[space_id];
+	end_address = address + length - 1;
+
+	/* Check entire list for all possible conflicts */
+
+	while (range_info) {
+		/*
+		 * Check if the requested Address/Length overlaps this address_range.
+		 * Four cases to consider:
+		 *
+		 * 1) Input address/length is contained completely in the address range
+		 * 2) Input address/length overlaps range at the range start
+		 * 3) Input address/length overlaps range at the range end
+		 * 4) Input address/length completely encompasses the range
+		 */
+		if ((address <= range_info->end_address) &&
+		    (end_address >= range_info->start_address)) {
+
+			/* Found an address range overlap */
+
+			overlap_count++;
+			if (warn) {	/* Optional warning message */
+				pathname =
+				    acpi_ns_get_external_pathname(range_info->
+								  region_node);
+
+				ACPI_WARNING((AE_INFO,
+					      "0x%p-0x%p %s conflicts with Region %s %d",
+					      ACPI_CAST_PTR(void, address),
+					      ACPI_CAST_PTR(void, end_address),
+					      acpi_ut_get_region_name(space_id),
+					      pathname, overlap_count));
+				ACPI_FREE(pathname);
+			}
+		}
+
+		range_info = range_info->next;
+	}
+
+	return_UINT32(overlap_count);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_delete_address_lists
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Delete all global address range lists (called during
+ *              subsystem shutdown).
+ *
+ ******************************************************************************/
+
+void acpi_ut_delete_address_lists(void)
+{
+	struct acpi_address_range *next;
+	struct acpi_address_range *range_info;
+	int i;
+
+	/* Delete all elements in all address range lists */
+
+	for (i = 0; i < ACPI_ADDRESS_RANGE_MAX; i++) {
+		next = acpi_gbl_address_range_list[i];
+
+		while (next) {
+			range_info = next;
+			next = range_info->next;
+			ACPI_FREE(range_info);
+		}
+
+		acpi_gbl_address_range_list[i] = NULL;
+	}
+}
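
[ Illustrative usage sketch -- not part of this patch. The address-range
  tracking added above is exposed to drivers through acpi_check_address_range()
  (exported from utxface.c later in this diff). Assuming its prototype is
  visible via <linux/acpi.h>, a hypothetical caller could check a candidate
  MMIO window against AML SystemMemory OperationRegions like this: ]

#include <linux/types.h>
#include <linux/acpi.h>

/*
 * Hypothetical helper: returns true if [start, start + len) overlaps any
 * SystemMemory OperationRegion; warn (last argument = 1) on each conflict.
 */
static bool mmio_overlaps_opregion(acpi_physical_address start, acpi_size len)
{
	u32 conflicts;

	conflicts = acpi_check_address_range(ACPI_ADR_SPACE_SYSTEM_MEMORY,
					     start, len, 1);
	return conflicts != 0;
}
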
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index 0a69735..9982d2ea 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index aded299..3317c0a 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index a1f8d75..a0998a8 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 8b087e2..d42ede52 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -171,7 +171,9 @@
 	"SMBus",
 	"SystemCMOS",
 	"PCIBARTarget",
-	"IPMI"
+	"IPMI",
+	"GeneralPurposeIo",
+	"GenericSerialBus"
 };
 
 char *acpi_ut_get_region_name(u8 space_id)
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index 31f5a78..2a6c3e1 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -215,11 +215,14 @@
 		ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
 				  "***** Region %p\n", object));
 
-		/* Invalidate the region address/length via the host OS */
-
-		acpi_os_invalidate_address(object->region.space_id,
-					  object->region.address,
-					  (acpi_size) object->region.length);
+		/*
+		 * Update the address_range list. However, only permanent regions
+		 * (not those created within a method) are installed in this list.
+		 */
+		if (!(object->region.node->flags & ANOBJ_TEMPORARY)) {
+			acpi_ut_remove_address_range(object->region.space_id,
+						     object->region.node);
+		}
 
 		second_desc = acpi_ns_get_secondary_object(object);
 		if (second_desc) {
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index 18f73c9..479f32b 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index ffba0a3..4153584 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -264,6 +264,12 @@
 		return_ACPI_STATUS(status);
 	}
 
+	/* Address Range lists */
+
+	for (i = 0; i < ACPI_ADDRESS_RANGE_MAX; i++) {
+		acpi_gbl_address_range_list[i] = NULL;
+	}
+
 	/* Mutex locked flags */
 
 	for (i = 0; i < ACPI_NUM_MUTEX; i++) {
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index b679ea6..c92eb1d 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index 191b682..8359c0c 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -92,6 +92,7 @@
 		gpe_xrupt_info = next_gpe_xrupt_info;
 	}
 
+	acpi_ut_delete_address_lists();
 	return_VOID;
 }
 
diff --git a/drivers/acpi/acpica/utlock.c b/drivers/acpi/acpica/utlock.c
index f6bb75c..155fd78 100644
--- a/drivers/acpi/acpica/utlock.c
+++ b/drivers/acpi/acpica/utlock.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c
index ce481da..2491a55 100644
--- a/drivers/acpi/acpica/utmath.c
+++ b/drivers/acpi/acpica/utmath.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index c33a852..86f19db 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index 7d797e2..43174df 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -293,14 +293,10 @@
 
 acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
 {
-	acpi_thread_id this_thread_id;
-
 	ACPI_FUNCTION_NAME(ut_release_mutex);
 
-	this_thread_id = acpi_os_get_thread_id();
-
 	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Thread %u releasing Mutex [%s]\n",
-			  (u32)this_thread_id,
+			  (u32)acpi_os_get_thread_id(),
 			  acpi_ut_get_mutex_name(mutex_id)));
 
 	if (mutex_id > ACPI_MAX_MUTEX) {
@@ -329,7 +325,8 @@
 		 * the ACPI subsystem code.
 		 */
 		for (i = mutex_id; i < ACPI_NUM_MUTEX; i++) {
-			if (acpi_gbl_mutex_info[i].thread_id == this_thread_id) {
+			if (acpi_gbl_mutex_info[i].thread_id ==
+			    acpi_os_get_thread_id()) {
 				if (i == mutex_id) {
 					continue;
 				}
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index 188340a..b112744 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 1fb10cb..2360cf7 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index 6ffd3a8..9d441ea 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -43,7 +43,7 @@
 
 #include <acpi/acpi.h>
 #include "accommon.h"
-#include "amlresrc.h"
+#include "acresrc.h"
 
 #define _COMPONENT          ACPI_UTILITIES
 ACPI_MODULE_NAME("utresrc")
@@ -154,6 +154,138 @@
 	"TypeF"
 };
 
+const char *acpi_gbl_ppc_decode[] = {
+	"PullDefault",
+	"PullUp",
+	"PullDown",
+	"PullNone"
+};
+
+const char *acpi_gbl_ior_decode[] = {
+	"IoRestrictionNone",
+	"IoRestrictionInputOnly",
+	"IoRestrictionOutputOnly",
+	"IoRestrictionNoneAndPreserve"
+};
+
+const char *acpi_gbl_dts_decode[] = {
+	"Width8bit",
+	"Width16bit",
+	"Width32bit",
+	"Width64bit",
+	"Width128bit",
+	"Width256bit",
+};
+
+/* GPIO connection type */
+
+const char *acpi_gbl_ct_decode[] = {
+	"Interrupt",
+	"I/O"
+};
+
+/* Serial bus type */
+
+const char *acpi_gbl_sbt_decode[] = {
+	"/* UNKNOWN serial bus type */",
+	"I2C",
+	"SPI",
+	"UART"
+};
+
+/* I2C serial bus access mode */
+
+const char *acpi_gbl_am_decode[] = {
+	"AddressingMode7Bit",
+	"AddressingMode10Bit"
+};
+
+/* I2C serial bus slave mode */
+
+const char *acpi_gbl_sm_decode[] = {
+	"ControllerInitiated",
+	"DeviceInitiated"
+};
+
+/* SPI serial bus wire mode */
+
+const char *acpi_gbl_wm_decode[] = {
+	"FourWireMode",
+	"ThreeWireMode"
+};
+
+/* SPI serial clock phase */
+
+const char *acpi_gbl_cph_decode[] = {
+	"ClockPhaseFirst",
+	"ClockPhaseSecond"
+};
+
+/* SPI serial bus clock polarity */
+
+const char *acpi_gbl_cpo_decode[] = {
+	"ClockPolarityLow",
+	"ClockPolarityHigh"
+};
+
+/* SPI serial bus device polarity */
+
+const char *acpi_gbl_dp_decode[] = {
+	"PolarityLow",
+	"PolarityHigh"
+};
+
+/* UART serial bus endian */
+
+const char *acpi_gbl_ed_decode[] = {
+	"LittleEndian",
+	"BigEndian"
+};
+
+/* UART serial bus bits per byte */
+
+const char *acpi_gbl_bpb_decode[] = {
+	"DataBitsFive",
+	"DataBitsSix",
+	"DataBitsSeven",
+	"DataBitsEight",
+	"DataBitsNine",
+	"/* UNKNOWN Bits per byte */",
+	"/* UNKNOWN Bits per byte */",
+	"/* UNKNOWN Bits per byte */"
+};
+
+/* UART serial bus stop bits */
+
+const char *acpi_gbl_sb_decode[] = {
+	"StopBitsNone",
+	"StopBitsOne",
+	"StopBitsOnePlusHalf",
+	"StopBitsTwo"
+};
+
+/* UART serial bus flow control */
+
+const char *acpi_gbl_fc_decode[] = {
+	"FlowControlNone",
+	"FlowControlHardware",
+	"FlowControlXON",
+	"/* UNKNOWN flow control keyword */"
+};
+
+/* UART serial bus parity type */
+
+const char *acpi_gbl_pt_decode[] = {
+	"ParityTypeNone",
+	"ParityTypeEven",
+	"ParityTypeOdd",
+	"ParityTypeMark",
+	"ParityTypeSpace",
+	"/* UNKNOWN parity keyword */",
+	"/* UNKNOWN parity keyword */",
+	"/* UNKNOWN parity keyword */"
+};
+
 #endif
 
 /*
@@ -173,7 +305,7 @@
 	ACPI_AML_SIZE_SMALL(struct aml_resource_end_dependent),
 	ACPI_AML_SIZE_SMALL(struct aml_resource_io),
 	ACPI_AML_SIZE_SMALL(struct aml_resource_fixed_io),
-	0,
+	ACPI_AML_SIZE_SMALL(struct aml_resource_fixed_dma),
 	0,
 	0,
 	0,
@@ -193,7 +325,17 @@
 	ACPI_AML_SIZE_LARGE(struct aml_resource_address16),
 	ACPI_AML_SIZE_LARGE(struct aml_resource_extended_irq),
 	ACPI_AML_SIZE_LARGE(struct aml_resource_address64),
-	ACPI_AML_SIZE_LARGE(struct aml_resource_extended_address64)
+	ACPI_AML_SIZE_LARGE(struct aml_resource_extended_address64),
+	ACPI_AML_SIZE_LARGE(struct aml_resource_gpio),
+	0,
+	ACPI_AML_SIZE_LARGE(struct aml_resource_common_serialbus),
+};
+
+const u8 acpi_gbl_resource_aml_serial_bus_sizes[] = {
+	0,
+	ACPI_AML_SIZE_LARGE(struct aml_resource_i2c_serialbus),
+	ACPI_AML_SIZE_LARGE(struct aml_resource_spi_serialbus),
+	ACPI_AML_SIZE_LARGE(struct aml_resource_uart_serialbus),
 };
 
 /*
@@ -209,35 +351,49 @@
 	0,
 	0,
 	0,
-	ACPI_SMALL_VARIABLE_LENGTH,
-	ACPI_FIXED_LENGTH,
-	ACPI_SMALL_VARIABLE_LENGTH,
-	ACPI_FIXED_LENGTH,
-	ACPI_FIXED_LENGTH,
-	ACPI_FIXED_LENGTH,
+	ACPI_SMALL_VARIABLE_LENGTH,	/* 04 IRQ */
+	ACPI_FIXED_LENGTH,	/* 05 DMA */
+	ACPI_SMALL_VARIABLE_LENGTH,	/* 06 start_dependent_functions */
+	ACPI_FIXED_LENGTH,	/* 07 end_dependent_functions */
+	ACPI_FIXED_LENGTH,	/* 08 IO */
+	ACPI_FIXED_LENGTH,	/* 09 fixed_iO */
+	ACPI_FIXED_LENGTH,	/* 0_a fixed_dMA */
 	0,
 	0,
 	0,
-	0,
-	ACPI_VARIABLE_LENGTH,
-	ACPI_FIXED_LENGTH,
+	ACPI_VARIABLE_LENGTH,	/* 0_e vendor_short */
+	ACPI_FIXED_LENGTH,	/* 0_f end_tag */
 
 	/* Large descriptors */
 
 	0,
-	ACPI_FIXED_LENGTH,
-	ACPI_FIXED_LENGTH,
+	ACPI_FIXED_LENGTH,	/* 01 Memory24 */
+	ACPI_FIXED_LENGTH,	/* 02 generic_register */
 	0,
-	ACPI_VARIABLE_LENGTH,
-	ACPI_FIXED_LENGTH,
-	ACPI_FIXED_LENGTH,
-	ACPI_VARIABLE_LENGTH,
-	ACPI_VARIABLE_LENGTH,
-	ACPI_VARIABLE_LENGTH,
-	ACPI_VARIABLE_LENGTH,
-	ACPI_FIXED_LENGTH
+	ACPI_VARIABLE_LENGTH,	/* 04 vendor_long */
+	ACPI_FIXED_LENGTH,	/* 05 Memory32 */
+	ACPI_FIXED_LENGTH,	/* 06 memory32_fixed */
+	ACPI_VARIABLE_LENGTH,	/* 07 Dword* address */
+	ACPI_VARIABLE_LENGTH,	/* 08 Word* address */
+	ACPI_VARIABLE_LENGTH,	/* 09 extended_iRQ */
+	ACPI_VARIABLE_LENGTH,	/* 0_a Qword* address */
+	ACPI_FIXED_LENGTH,	/* 0_b Extended* address */
+	ACPI_VARIABLE_LENGTH,	/* 0_c Gpio* */
+	0,
+	ACPI_VARIABLE_LENGTH	/* 0_e *serial_bus */
 };
 
+/*
+ * For the i_aSL compiler/disassembler, we don't want any error messages
+ * because the disassembler uses the resource validation code to determine
+ * if Buffer objects are actually Resource Templates.
+ */
+#ifdef ACPI_ASL_COMPILER
+#define ACPI_RESOURCE_ERROR(plist)
+#else
+#define ACPI_RESOURCE_ERROR(plist)  ACPI_ERROR(plist)
+#endif
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ut_walk_aml_resources
@@ -265,6 +421,7 @@
 	u8 resource_index;
 	u32 length;
 	u32 offset = 0;
+	u8 end_tag[2] = { 0x79, 0x00 };
 
 	ACPI_FUNCTION_TRACE(ut_walk_aml_resources);
 
@@ -286,6 +443,10 @@
 
 		status = acpi_ut_validate_resource(aml, &resource_index);
 		if (ACPI_FAILURE(status)) {
+			/*
+			 * Exit on failure. Cannot continue because the descriptor length
+			 * may be bogus also.
+			 */
 			return_ACPI_STATUS(status);
 		}
 
@@ -300,7 +461,7 @@
 			    user_function(aml, length, offset, resource_index,
 					  context);
 			if (ACPI_FAILURE(status)) {
-				return (status);
+				return_ACPI_STATUS(status);
 			}
 		}
 
@@ -333,7 +494,19 @@
 
 	/* Did not find an end_tag descriptor */
 
-	return (AE_AML_NO_RESOURCE_END_TAG);
+	if (user_function) {
+
+		/* Insert an end_tag anyway. acpi_rs_get_list_length always leaves room */
+
+		(void)acpi_ut_validate_resource(end_tag, &resource_index);
+		status =
+		    user_function(end_tag, 2, offset, resource_index, context);
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+	}
+
+	return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
 }
 
 /*******************************************************************************
@@ -354,6 +527,7 @@
 
 acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
 {
+	union aml_resource *aml_resource;
 	u8 resource_type;
 	u8 resource_index;
 	acpi_rs_length resource_length;
@@ -375,7 +549,7 @@
 		/* Verify the large resource type (name) against the max */
 
 		if (resource_type > ACPI_RESOURCE_NAME_LARGE_MAX) {
-			return (AE_AML_INVALID_RESOURCE_TYPE);
+			goto invalid_resource;
 		}
 
 		/*
@@ -392,15 +566,17 @@
 		    ((resource_type & ACPI_RESOURCE_NAME_SMALL_MASK) >> 3);
 	}
 
-	/* Check validity of the resource type, zero indicates name is invalid */
-
+	/*
+	 * Check validity of the resource type, via acpi_gbl_resource_types. Zero
+	 * indicates an invalid resource.
+	 */
 	if (!acpi_gbl_resource_types[resource_index]) {
-		return (AE_AML_INVALID_RESOURCE_TYPE);
+		goto invalid_resource;
 	}
 
 	/*
-	 * 2) Validate the resource_length field. This ensures that the length
-	 *    is at least reasonable, and guarantees that it is non-zero.
+	 * Validate the resource_length field. This ensures that the length
+	 * is at least reasonable, and guarantees that it is non-zero.
 	 */
 	resource_length = acpi_ut_get_resource_length(aml);
 	minimum_resource_length = acpi_gbl_resource_aml_sizes[resource_index];
@@ -413,7 +589,7 @@
 		/* Fixed length resource, length must match exactly */
 
 		if (resource_length != minimum_resource_length) {
-			return (AE_AML_BAD_RESOURCE_LENGTH);
+			goto bad_resource_length;
 		}
 		break;
 
@@ -422,7 +598,7 @@
 		/* Variable length resource, length must be at least the minimum */
 
 		if (resource_length < minimum_resource_length) {
-			return (AE_AML_BAD_RESOURCE_LENGTH);
+			goto bad_resource_length;
 		}
 		break;
 
@@ -432,7 +608,7 @@
 
 		if ((resource_length > minimum_resource_length) ||
 		    (resource_length < (minimum_resource_length - 1))) {
-			return (AE_AML_BAD_RESOURCE_LENGTH);
+			goto bad_resource_length;
 		}
 		break;
 
@@ -440,7 +616,23 @@
 
 		/* Shouldn't happen (because of validation earlier), but be sure */
 
-		return (AE_AML_INVALID_RESOURCE_TYPE);
+		goto invalid_resource;
+	}
+
+	aml_resource = ACPI_CAST_PTR(union aml_resource, aml);
+	if (resource_type == ACPI_RESOURCE_NAME_SERIAL_BUS) {
+
+		/* Validate the bus_type field */
+
+		if ((aml_resource->common_serial_bus.type == 0) ||
+		    (aml_resource->common_serial_bus.type >
+		     AML_RESOURCE_MAX_SERIALBUSTYPE)) {
+			ACPI_RESOURCE_ERROR((AE_INFO,
+					     "Invalid/unsupported SerialBus resource descriptor: BusType 0x%2.2X",
+					     aml_resource->common_serial_bus.
+					     type));
+			return (AE_AML_INVALID_RESOURCE_TYPE);
+		}
 	}
 
 	/* Optionally return the resource table index */
@@ -450,6 +642,22 @@
 	}
 
 	return (AE_OK);
+
+      invalid_resource:
+
+	ACPI_RESOURCE_ERROR((AE_INFO,
+			     "Invalid/unsupported resource descriptor: Type 0x%2.2X",
+			     resource_type));
+	return (AE_AML_INVALID_RESOURCE_TYPE);
+
+      bad_resource_length:
+
+	ACPI_RESOURCE_ERROR((AE_INFO,
+			     "Invalid resource descriptor length: Type "
+			     "0x%2.2X, Length 0x%4.4X, MinLength 0x%4.4X",
+			     resource_type, resource_length,
+			     minimum_resource_length));
+	return (AE_AML_BAD_RESOURCE_LENGTH);
 }
 
 /*******************************************************************************
diff --git a/drivers/acpi/acpica/utstate.c b/drivers/acpi/acpica/utstate.c
index 30c21e1..4267477 100644
--- a/drivers/acpi/acpica/utstate.c
+++ b/drivers/acpi/acpica/utstate.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 420ebfe..644e8c8 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -48,6 +48,7 @@
 #include "acnamesp.h"
 #include "acdebug.h"
 #include "actables.h"
+#include "acinterp.h"
 
 #define _COMPONENT          ACPI_UTILITIES
 ACPI_MODULE_NAME("utxface")
@@ -640,4 +641,41 @@
 }
 
 ACPI_EXPORT_SYMBOL(acpi_install_interface_handler)
+
+/*****************************************************************************
+ *
+ * FUNCTION:    acpi_check_address_range
+ *
+ * PARAMETERS:  space_id            - Address space ID
+ *              Address             - Start address
+ *              Length              - Length
+ *              Warn                - TRUE if warning on overlap desired
+ *
+ * RETURN:      Count of the number of conflicts detected.
+ *
+ * DESCRIPTION: Check if the input address range overlaps any of the
+ *              ASL operation region address ranges.
+ *
+ ****************************************************************************/
+u32
+acpi_check_address_range(acpi_adr_space_type space_id,
+			 acpi_physical_address address,
+			 acpi_size length, u8 warn)
+{
+	u32 overlaps;
+	acpi_status status;
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		return (0);
+	}
+
+	overlaps = acpi_ut_check_address_range(space_id, address,
+					       (u32)length, warn);
+
+	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+	return (overlaps);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_check_address_range)
 #endif				/* !ACPI_ASL_COMPILER */
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c
index 8d0245e..52b568a 100644
--- a/drivers/acpi/acpica/utxferror.c
+++ b/drivers/acpi/acpica/utxferror.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxfmutex.c b/drivers/acpi/acpica/utxfmutex.c
new file mode 100644
index 0000000..1427d19
--- /dev/null
+++ b/drivers/acpi/acpica/utxfmutex.c
@@ -0,0 +1,187 @@
+/*******************************************************************************
+ *
+ * Module Name: utxfmutex - external AML mutex access functions
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2012, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+
+#define _COMPONENT          ACPI_UTILITIES
+ACPI_MODULE_NAME("utxfmutex")
+
+/* Local prototypes */
+static acpi_status
+acpi_ut_get_mutex_object(acpi_handle handle,
+			 acpi_string pathname,
+			 union acpi_operand_object **ret_obj);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_get_mutex_object
+ *
+ * PARAMETERS:  Handle              - Mutex or prefix handle (optional)
+ *              Pathname            - Mutex pathname (optional)
+ *              ret_obj             - Where the mutex object is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Get an AML mutex object. The mutex node is pointed to by
+ *              Handle:Pathname. Either Handle or Pathname can be NULL, but
+ *              not both.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ut_get_mutex_object(acpi_handle handle,
+			 acpi_string pathname,
+			 union acpi_operand_object **ret_obj)
+{
+	struct acpi_namespace_node *mutex_node;
+	union acpi_operand_object *mutex_obj;
+	acpi_status status;
+
+	/* Parameter validation */
+
+	if (!ret_obj || (!handle && !pathname)) {
+		return (AE_BAD_PARAMETER);
+	}
+
+	/* Get the namespace node for the mutex */
+
+	mutex_node = handle;
+	if (pathname != NULL) {
+		status = acpi_get_handle(handle, pathname,
+					 ACPI_CAST_PTR(acpi_handle,
+						       &mutex_node));
+		if (ACPI_FAILURE(status)) {
+			return (status);
+		}
+	}
+
+	/* Ensure that we actually have a Mutex object */
+
+	if (!mutex_node || (mutex_node->type != ACPI_TYPE_MUTEX)) {
+		return (AE_TYPE);
+	}
+
+	/* Get the low-level mutex object */
+
+	mutex_obj = acpi_ns_get_attached_object(mutex_node);
+	if (!mutex_obj) {
+		return (AE_NULL_OBJECT);
+	}
+
+	*ret_obj = mutex_obj;
+	return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_acquire_mutex
+ *
+ * PARAMETERS:  Handle              - Mutex or prefix handle (optional)
+ *              Pathname            - Mutex pathname (optional)
+ *              Timeout             - Max time to wait for the lock (millisec)
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Acquire an AML mutex. This is a device driver interface to
+ *              AML mutex objects, and allows for transaction locking between
+ *              drivers and AML code. The mutex node is pointed to by
+ *              Handle:Pathname. Either Handle or Pathname can be NULL, but
+ *              not both.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_acquire_mutex(acpi_handle handle, acpi_string pathname, u16 timeout)
+{
+	acpi_status status;
+	union acpi_operand_object *mutex_obj;
+
+	/* Get the low-level mutex associated with Handle:Pathname */
+
+	status = acpi_ut_get_mutex_object(handle, pathname, &mutex_obj);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
+
+	/* Acquire the OS mutex */
+
+	status = acpi_os_acquire_mutex(mutex_obj->mutex.os_mutex, timeout);
+	return (status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_release_mutex
+ *
+ * PARAMETERS:  Handle              - Mutex or prefix handle (optional)
+ *              Pathname            - Mutex pathname (optional)
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Release an AML mutex. This is a device driver interface to
+ *              AML mutex objects, and allows for transaction locking between
+ *              drivers and AML code. The mutex node is pointed to by
+ *              Handle:Pathname. Either Handle or Pathname can be NULL, but
+ *              not both.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_release_mutex(acpi_handle handle, acpi_string pathname)
+{
+	acpi_status status;
+	union acpi_operand_object *mutex_obj;
+
+	/* Get the low-level mutex associated with Handle:Pathname */
+
+	status = acpi_ut_get_mutex_object(handle, pathname, &mutex_obj);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
+
+	/* Release the OS mutex */
+
+	acpi_os_release_mutex(mutex_obj->mutex.os_mutex);
+	return (AE_OK);
+}
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 6154036..e45350c 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -34,13 +34,13 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/acpi.h>
+#include <linux/acpi_io.h>
 #include <linux/slab.h>
 #include <linux/io.h>
 #include <linux/kref.h>
 #include <linux/rculist.h>
 #include <linux/interrupt.h>
 #include <linux/debugfs.h>
-#include <acpi/atomicio.h>
 
 #include "apei-internal.h"
 
@@ -70,7 +70,7 @@
 {
 	int rc;
 
-	rc = acpi_atomic_read(val, &entry->register_region);
+	rc = apei_read(val, &entry->register_region);
 	if (rc)
 		return rc;
 	*val >>= entry->register_region.bit_offset;
@@ -116,13 +116,13 @@
 	val <<= entry->register_region.bit_offset;
 	if (entry->flags & APEI_EXEC_PRESERVE_REGISTER) {
 		u64 valr = 0;
-		rc = acpi_atomic_read(&valr, &entry->register_region);
+		rc = apei_read(&valr, &entry->register_region);
 		if (rc)
 			return rc;
 		valr &= ~(entry->mask << entry->register_region.bit_offset);
 		val |= valr;
 	}
-	rc = acpi_atomic_write(val, &entry->register_region);
+	rc = apei_write(val, &entry->register_region);
 
 	return rc;
 }
@@ -243,7 +243,7 @@
 	u8 ins = entry->instruction;
 
 	if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
-		return acpi_pre_map_gar(&entry->register_region);
+		return acpi_os_map_generic_address(&entry->register_region);
 
 	return 0;
 }
@@ -276,7 +276,7 @@
 	u8 ins = entry->instruction;
 
 	if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
-		acpi_post_unmap_gar(&entry->register_region);
+		acpi_os_unmap_generic_address(&entry->register_region);
 
 	return 0;
 }
@@ -421,6 +421,17 @@
 	return 0;
 }
 
+int apei_resources_add(struct apei_resources *resources,
+		       unsigned long start, unsigned long size,
+		       bool iomem)
+{
+	if (iomem)
+		return apei_res_add(&resources->iomem, start, size);
+	else
+		return apei_res_add(&resources->ioport, start, size);
+}
+EXPORT_SYMBOL_GPL(apei_resources_add);
+
 /*
  * EINJ has two groups of GARs (EINJ table entry and trigger table
  * entry), so common resources are subtracted from the trigger table
@@ -438,8 +449,19 @@
 }
 EXPORT_SYMBOL_GPL(apei_resources_sub);
 
+static int apei_get_nvs_callback(__u64 start, __u64 size, void *data)
+{
+	struct apei_resources *resources = data;
+	return apei_res_add(&resources->iomem, start, size);
+}
+
+static int apei_get_nvs_resources(struct apei_resources *resources)
+{
+	return acpi_nvs_for_each_region(apei_get_nvs_callback, resources);
+}
+
 /*
- * IO memory/port rersource management mechanism is used to check
+ * IO memory/port resource management mechanism is used to check
  * whether memory/port area used by GARs conflicts with normal memory
  * or IO memory/port of devices.
  */
@@ -448,21 +470,35 @@
 {
 	struct apei_res *res, *res_bak = NULL;
 	struct resource *r;
+	struct apei_resources nvs_resources;
 	int rc;
 
 	rc = apei_resources_sub(resources, &apei_resources_all);
 	if (rc)
 		return rc;
 
+	/*
+	 * Some firmware uses the ACPI NVS region, which has been marked as
+	 * busy, so exclude it from the APEI resources to avoid a false
+	 * conflict.
+	 */
+	apei_resources_init(&nvs_resources);
+	rc = apei_get_nvs_resources(&nvs_resources);
+	if (rc)
+		goto res_fini;
+	rc = apei_resources_sub(resources, &nvs_resources);
+	if (rc)
+		goto res_fini;
+
 	rc = -EINVAL;
 	list_for_each_entry(res, &resources->iomem, list) {
 		r = request_mem_region(res->start, res->end - res->start,
 				       desc);
 		if (!r) {
 			pr_err(APEI_PFX
-		"Can not request iomem region <%016llx-%016llx> for GARs.\n",
+		"Can not request [mem %#010llx-%#010llx] for %s registers\n",
 			       (unsigned long long)res->start,
-			       (unsigned long long)res->end);
+			       (unsigned long long)res->end - 1, desc);
 			res_bak = res;
 			goto err_unmap_iomem;
 		}
@@ -472,9 +508,9 @@
 		r = request_region(res->start, res->end - res->start, desc);
 		if (!r) {
 			pr_err(APEI_PFX
-		"Can not request ioport region <%016llx-%016llx> for GARs.\n",
+		"Can not request [io  %#06llx-%#06llx] for %s registers\n",
 			       (unsigned long long)res->start,
-			       (unsigned long long)res->end);
+			       (unsigned long long)res->end - 1, desc);
 			res_bak = res;
 			goto err_unmap_ioport;
 		}
@@ -500,6 +536,8 @@
 			break;
 		release_mem_region(res->start, res->end - res->start);
 	}
+res_fini:
+	apei_resources_fini(&nvs_resources);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(apei_resources_request);
@@ -553,6 +591,96 @@
 	return 0;
 }
 
+/* read GAR in interrupt (including NMI) or process context */
+int apei_read(u64 *val, struct acpi_generic_address *reg)
+{
+	int rc;
+	u64 address;
+	u32 tmp, width = reg->bit_width;
+	acpi_status status;
+
+	rc = apei_check_gar(reg, &address);
+	if (rc)
+		return rc;
+
+	if (width == 64)
+		width = 32;	/* Break into two 32-bit transfers */
+
+	*val = 0;
+	switch(reg->space_id) {
+	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+		status = acpi_os_read_memory((acpi_physical_address)
+					     address, &tmp, width);
+		if (ACPI_FAILURE(status))
+			return -EIO;
+		*val = tmp;
+
+		if (reg->bit_width == 64) {
+			/* Read the top 32 bits */
+			status = acpi_os_read_memory((acpi_physical_address)
+						     (address + 4), &tmp, 32);
+			if (ACPI_FAILURE(status))
+				return -EIO;
+			*val |= ((u64)tmp << 32);
+		}
+		break;
+	case ACPI_ADR_SPACE_SYSTEM_IO:
+		status = acpi_os_read_port(address, (u32 *)val, reg->bit_width);
+		if (ACPI_FAILURE(status))
+			return -EIO;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(apei_read);
+
+/* write GAR in interrupt (including NMI) or process context */
+int apei_write(u64 val, struct acpi_generic_address *reg)
+{
+	int rc;
+	u64 address;
+	u32 width = reg->bit_width;
+	acpi_status status;
+
+	rc = apei_check_gar(reg, &address);
+	if (rc)
+		return rc;
+
+	if (width == 64)
+		width = 32;	/* Break into two 32-bit transfers */
+
+	switch (reg->space_id) {
+	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+		status = acpi_os_write_memory((acpi_physical_address)
+					      address, ACPI_LODWORD(val),
+					      width);
+		if (ACPI_FAILURE(status))
+			return -EIO;
+
+		if (reg->bit_width == 64) {
+			status = acpi_os_write_memory((acpi_physical_address)
+						      (address + 4),
+						      ACPI_HIDWORD(val), 32);
+			if (ACPI_FAILURE(status))
+				return -EIO;
+		}
+		break;
+	case ACPI_ADR_SPACE_SYSTEM_IO:
+		status = acpi_os_write_port(address, val, reg->bit_width);
+		if (ACPI_FAILURE(status))
+			return -EIO;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(apei_write);
+
 static int collect_res_callback(struct apei_exec_context *ctx,
 				struct acpi_whea_header *entry,
 				void *data)
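
[ Illustrative usage sketch -- not part of this patch. The new apei_read() and
  apei_write() helpers replace acpi_atomic_read()/acpi_atomic_write() and work
  in process, interrupt and NMI context. Assuming apei-internal.h is included,
  an APEI-internal read-modify-write of a GAR could look like this: ]

/* Hypothetical helper: set the given bits in a Generic Address Structure */
static int apei_set_bits(struct acpi_generic_address *reg, u64 mask)
{
	u64 val;
	int rc;

	rc = apei_read(&val, reg);
	if (rc)
		return rc;
	return apei_write(val | mask, reg);
}
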
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
index f57050e..cca240a 100644
--- a/drivers/acpi/apei/apei-internal.h
+++ b/drivers/acpi/apei/apei-internal.h
@@ -68,6 +68,9 @@
 /* IP has been set in instruction function */
 #define APEI_EXEC_SET_IP	1
 
+int apei_read(u64 *val, struct acpi_generic_address *reg);
+int apei_write(u64 val, struct acpi_generic_address *reg);
+
 int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val);
 int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val);
 int apei_exec_read_register(struct apei_exec_context *ctx,
@@ -95,6 +98,9 @@
 }
 
 void apei_resources_fini(struct apei_resources *resources);
+int apei_resources_add(struct apei_resources *resources,
+		       unsigned long start, unsigned long size,
+		       bool iomem);
 int apei_resources_sub(struct apei_resources *resources1,
 		       struct apei_resources *resources2);
 int apei_resources_request(struct apei_resources *resources,
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index 589b96c..5b898d4 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -43,6 +43,42 @@
 #define FIRMWARE_TIMEOUT	(1 * NSEC_PER_MSEC)
 
 /*
+ * ACPI version 5 provides a SET_ERROR_TYPE_WITH_ADDRESS action.
+ */
+static int acpi5;
+
+struct set_error_type_with_address {
+	u32	type;
+	u32	vendor_extension;
+	u32	flags;
+	u32	apicid;
+	u64	memory_address;
+	u64	memory_address_range;
+	u32	pcie_sbdf;
+};
+enum {
+	SETWA_FLAGS_APICID = 1,
+	SETWA_FLAGS_MEM = 2,
+	SETWA_FLAGS_PCIE_SBDF = 4,
+};
+
+/*
+ * Vendor extensions for platform specific operations
+ */
+struct vendor_error_type_extension {
+	u32	length;
+	u32	pcie_sbdf;
+	u16	vendor_id;
+	u16	device_id;
+	u8	rev_id;
+	u8	reserved[3];
+};
+
+static u32 vendor_flags;
+static struct debugfs_blob_wrapper vendor_blob;
+static char vendor_dev[64];
+
+/*
  * Some BIOSes allow parameters to the SET_ERROR_TYPE entries in the
  * EINJ table through an unpublished extension. Use with caution as
  * most will ignore the parameter and make their own choice of address
@@ -103,7 +139,14 @@
  */
 static DEFINE_MUTEX(einj_mutex);
 
-static struct einj_parameter *einj_param;
+static void *einj_param;
+
+#ifndef readq
+static inline __u64 readq(volatile void __iomem *addr)
+{
+	return ((__u64)readl(addr+4) << 32) + readl(addr);
+}
+#endif
 
 #ifndef writeq
 static inline void writeq(__u64 val, volatile void __iomem *addr)
@@ -158,10 +201,31 @@
 	return 0;
 }
 
-static u64 einj_get_parameter_address(void)
+static void check_vendor_extension(u64 paddr,
+				   struct set_error_type_with_address *v5param)
+{
+	int	offset = readl(&v5param->vendor_extension);
+	struct	vendor_error_type_extension *v;
+	u32	sbdf;
+
+	if (!offset)
+		return;
+	v = ioremap(paddr + offset, sizeof(*v));
+	if (!v)
+		return;
+	sbdf = readl(&v->pcie_sbdf);
+	sprintf(vendor_dev, "%x:%x:%x.%x vendor_id=%x device_id=%x rev_id=%x\n",
+		sbdf >> 24, (sbdf >> 16) & 0xff,
+		(sbdf >> 11) & 0x1f, (sbdf >> 8) & 0x7,
+		 readw(&v->vendor_id), readw(&v->device_id),
+		readb(&v->rev_id));
+	iounmap(v);
+}
+
+static void *einj_get_parameter_address(void)
 {
 	int i;
-	u64 paddr = 0;
+	u64 paddrv4 = 0, paddrv5 = 0;
 	struct acpi_whea_header *entry;
 
 	entry = EINJ_TAB_ENTRY(einj_tab);
@@ -170,12 +234,40 @@
 		    entry->instruction == ACPI_EINJ_WRITE_REGISTER &&
 		    entry->register_region.space_id ==
 		    ACPI_ADR_SPACE_SYSTEM_MEMORY)
-			memcpy(&paddr, &entry->register_region.address,
-			       sizeof(paddr));
+			memcpy(&paddrv4, &entry->register_region.address,
+			       sizeof(paddrv4));
+		if (entry->action == ACPI_EINJ_SET_ERROR_TYPE_WITH_ADDRESS &&
+		    entry->instruction == ACPI_EINJ_WRITE_REGISTER &&
+		    entry->register_region.space_id ==
+		    ACPI_ADR_SPACE_SYSTEM_MEMORY)
+			memcpy(&paddrv5, &entry->register_region.address,
+			       sizeof(paddrv5));
 		entry++;
 	}
+	if (paddrv5) {
+		struct set_error_type_with_address *v5param;
 
-	return paddr;
+		v5param = ioremap(paddrv5, sizeof(*v5param));
+		if (v5param) {
+			acpi5 = 1;
+			check_vendor_extension(paddrv5, v5param);
+			return v5param;
+		}
+	}
+	if (paddrv4) {
+		struct einj_parameter *v4param;
+
+		v4param = ioremap(paddrv4, sizeof(*v4param));
+		if (!v4param)
+			return 0;
+		if (readq(&v4param->reserved1) || readq(&v4param->reserved2)) {
+			iounmap(v4param);
+			return 0;
+		}
+		return v4param;
+	}
+
+	return 0;
 }
 
 /* do sanity check to trigger table */
@@ -194,8 +286,29 @@
 	return 0;
 }
 
+static struct acpi_generic_address *einj_get_trigger_parameter_region(
+	struct acpi_einj_trigger *trigger_tab, u64 param1, u64 param2)
+{
+	int i;
+	struct acpi_whea_header *entry;
+
+	entry = (struct acpi_whea_header *)
+		((char *)trigger_tab + sizeof(struct acpi_einj_trigger));
+	for (i = 0; i < trigger_tab->entry_count; i++) {
+		if (entry->action == ACPI_EINJ_TRIGGER_ERROR &&
+		entry->instruction == ACPI_EINJ_WRITE_REGISTER_VALUE &&
+		entry->register_region.space_id ==
+			ACPI_ADR_SPACE_SYSTEM_MEMORY &&
+		(entry->register_region.address & param2) == (param1 & param2))
+			return &entry->register_region;
+		entry++;
+	}
+
+	return NULL;
+}
 /* Execute instructions in trigger error action table */
-static int __einj_error_trigger(u64 trigger_paddr)
+static int __einj_error_trigger(u64 trigger_paddr, u32 type,
+				u64 param1, u64 param2)
 {
 	struct acpi_einj_trigger *trigger_tab = NULL;
 	struct apei_exec_context trigger_ctx;
@@ -204,14 +317,16 @@
 	struct resource *r;
 	u32 table_size;
 	int rc = -EIO;
+	struct acpi_generic_address *trigger_param_region = NULL;
 
 	r = request_mem_region(trigger_paddr, sizeof(*trigger_tab),
 			       "APEI EINJ Trigger Table");
 	if (!r) {
 		pr_err(EINJ_PFX
-	"Can not request iomem region <%016llx-%016llx> for Trigger table.\n",
+	"Can not request [mem %#010llx-%#010llx] for Trigger table\n",
 		       (unsigned long long)trigger_paddr,
-		       (unsigned long long)trigger_paddr+sizeof(*trigger_tab));
+		       (unsigned long long)trigger_paddr +
+			    sizeof(*trigger_tab) - 1);
 		goto out;
 	}
 	trigger_tab = ioremap_cache(trigger_paddr, sizeof(*trigger_tab));
@@ -232,9 +347,9 @@
 			       "APEI EINJ Trigger Table");
 	if (!r) {
 		pr_err(EINJ_PFX
-"Can not request iomem region <%016llx-%016llx> for Trigger Table Entry.\n",
-		       (unsigned long long)trigger_paddr+sizeof(*trigger_tab),
-		       (unsigned long long)trigger_paddr + table_size);
+"Can not request [mem %#010llx-%#010llx] for Trigger Table Entry\n",
+		       (unsigned long long)trigger_paddr + sizeof(*trigger_tab),
+		       (unsigned long long)trigger_paddr + table_size - 1);
 		goto out_rel_header;
 	}
 	iounmap(trigger_tab);
@@ -255,6 +370,30 @@
 	rc = apei_resources_sub(&trigger_resources, &einj_resources);
 	if (rc)
 		goto out_fini;
+	/*
+	 * Some firmware will access the target address specified in
+	 * param1 to trigger the error when injecting a memory error,
+	 * which will cause a resource conflict with regular memory. So
+	 * remove it from the trigger table resources.
+	 */
+	if (param_extension && (type & 0x0038) && param2) {
+		struct apei_resources addr_resources;
+		apei_resources_init(&addr_resources);
+		trigger_param_region = einj_get_trigger_parameter_region(
+			trigger_tab, param1, param2);
+		if (trigger_param_region) {
+			rc = apei_resources_add(&addr_resources,
+				trigger_param_region->address,
+				trigger_param_region->bit_width/8, true);
+			if (rc)
+				goto out_fini;
+			rc = apei_resources_sub(&trigger_resources,
+					&addr_resources);
+		}
+		apei_resources_fini(&addr_resources);
+		if (rc)
+			goto out_fini;
+	}
 	rc = apei_resources_request(&trigger_resources, "APEI EINJ Trigger");
 	if (rc)
 		goto out_fini;
@@ -293,12 +432,56 @@
 	if (rc)
 		return rc;
 	apei_exec_ctx_set_input(&ctx, type);
-	rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE);
-	if (rc)
-		return rc;
-	if (einj_param) {
-		writeq(param1, &einj_param->param1);
-		writeq(param2, &einj_param->param2);
+	if (acpi5) {
+		struct set_error_type_with_address *v5param = einj_param;
+
+		writel(type, &v5param->type);
+		if (type & 0x80000000) {
+			switch (vendor_flags) {
+			case SETWA_FLAGS_APICID:
+				writel(param1, &v5param->apicid);
+				break;
+			case SETWA_FLAGS_MEM:
+				writeq(param1, &v5param->memory_address);
+				writeq(param2, &v5param->memory_address_range);
+				break;
+			case SETWA_FLAGS_PCIE_SBDF:
+				writel(param1, &v5param->pcie_sbdf);
+				break;
+			}
+			writel(vendor_flags, &v5param->flags);
+		} else {
+			switch (type) {
+			case ACPI_EINJ_PROCESSOR_CORRECTABLE:
+			case ACPI_EINJ_PROCESSOR_UNCORRECTABLE:
+			case ACPI_EINJ_PROCESSOR_FATAL:
+				writel(param1, &v5param->apicid);
+				writel(SETWA_FLAGS_APICID, &v5param->flags);
+				break;
+			case ACPI_EINJ_MEMORY_CORRECTABLE:
+			case ACPI_EINJ_MEMORY_UNCORRECTABLE:
+			case ACPI_EINJ_MEMORY_FATAL:
+				writeq(param1, &v5param->memory_address);
+				writeq(param2, &v5param->memory_address_range);
+				writel(SETWA_FLAGS_MEM, &v5param->flags);
+				break;
+			case ACPI_EINJ_PCIX_CORRECTABLE:
+			case ACPI_EINJ_PCIX_UNCORRECTABLE:
+			case ACPI_EINJ_PCIX_FATAL:
+				writel(param1, &v5param->pcie_sbdf);
+				writel(SETWA_FLAGS_PCIE_SBDF, &v5param->flags);
+				break;
+			}
+		}
+	} else {
+		rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE);
+		if (rc)
+			return rc;
+		if (einj_param) {
+			struct einj_parameter *v4param = einj_param;
+			writeq(param1, &v4param->param1);
+			writeq(param2, &v4param->param2);
+		}
 	}
 	rc = apei_exec_run(&ctx, ACPI_EINJ_EXECUTE_OPERATION);
 	if (rc)
@@ -324,7 +507,7 @@
 	if (rc)
 		return rc;
 	trigger_paddr = apei_exec_ctx_get_output(&ctx);
-	rc = __einj_error_trigger(trigger_paddr);
+	rc = __einj_error_trigger(trigger_paddr, type, param1, param2);
 	if (rc)
 		return rc;
 	rc = apei_exec_run_optional(&ctx, ACPI_EINJ_END_OPERATION);
@@ -408,15 +591,25 @@
 {
 	int rc;
 	u32 available_error_type = 0;
+	u32 tval, vendor;
+
+	/*
+	 * Vendor defined types have 0x80000000 bit set, and
+	 * are not enumerated by ACPI_EINJ_GET_ERROR_TYPE
+	 */
+	vendor = val & 0x80000000;
+	tval = val & 0x7fffffff;
 
 	/* Only one error type can be specified */
-	if (val & (val - 1))
+	if (tval & (tval - 1))
 		return -EINVAL;
-	rc = einj_get_available_error_type(&available_error_type);
-	if (rc)
-		return rc;
-	if (!(val & available_error_type))
-		return -EINVAL;
+	if (!vendor) {
+		rc = einj_get_available_error_type(&available_error_type);
+		if (rc)
+			return rc;
+		if (!(val & available_error_type))
+			return -EINVAL;
+	}
 	error_type = val;
 
 	return 0;
@@ -455,7 +648,6 @@
 static int __init einj_init(void)
 {
 	int rc;
-	u64 param_paddr;
 	acpi_status status;
 	struct dentry *fentry;
 	struct apei_exec_context ctx;
@@ -465,10 +657,9 @@
 
 	status = acpi_get_table(ACPI_SIG_EINJ, 0,
 				(struct acpi_table_header **)&einj_tab);
-	if (status == AE_NOT_FOUND) {
-		pr_info(EINJ_PFX "Table is not found!\n");
+	if (status == AE_NOT_FOUND)
 		return -ENODEV;
-	} else if (ACPI_FAILURE(status)) {
+	else if (ACPI_FAILURE(status)) {
 		const char *msg = acpi_format_exception(status);
 		pr_err(EINJ_PFX "Failed to get table, %s\n", msg);
 		return -EINVAL;
@@ -509,23 +700,30 @@
 	rc = apei_exec_pre_map_gars(&ctx);
 	if (rc)
 		goto err_release;
-	if (param_extension) {
-		param_paddr = einj_get_parameter_address();
-		if (param_paddr) {
-			einj_param = ioremap(param_paddr, sizeof(*einj_param));
-			rc = -ENOMEM;
-			if (!einj_param)
-				goto err_unmap;
-			fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR,
-						    einj_debug_dir, &error_param1);
-			if (!fentry)
-				goto err_unmap;
-			fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR,
-						    einj_debug_dir, &error_param2);
-			if (!fentry)
-				goto err_unmap;
-		} else
-			pr_warn(EINJ_PFX "Parameter extension is not supported.\n");
+
+	einj_param = einj_get_parameter_address();
+	if ((param_extension || acpi5) && einj_param) {
+		fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR,
+					    einj_debug_dir, &error_param1);
+		if (!fentry)
+			goto err_unmap;
+		fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR,
+					    einj_debug_dir, &error_param2);
+		if (!fentry)
+			goto err_unmap;
+	}
+
+	if (vendor_dev[0]) {
+		vendor_blob.data = vendor_dev;
+		vendor_blob.size = strlen(vendor_dev);
+		fentry = debugfs_create_blob("vendor", S_IRUSR,
+					     einj_debug_dir, &vendor_blob);
+		if (!fentry)
+			goto err_unmap;
+		fentry = debugfs_create_x32("vendor_flags", S_IRUSR | S_IWUSR,
+					    einj_debug_dir, &vendor_flags);
+		if (!fentry)
+			goto err_unmap;
 	}
 
 	pr_info(EINJ_PFX "Error INJection is initialized.\n");
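
The new error_type_set() logic above treats bit 31 as a vendor-defined flag and requires that at most one standard error type be selected. A minimal standalone sketch of that validation rule follows; einj_error_type_is_valid() is an illustrative name, not a function from this patch.

#include <stdbool.h>
#include <stdint.h>

/* Sketch only: mirrors the checks performed by error_type_set() above. */
static bool einj_error_type_is_valid(uint32_t val, uint32_t available)
{
	uint32_t vendor = val & 0x80000000;	/* vendor-defined flag */
	uint32_t tval   = val & 0x7fffffff;	/* standard error type bits */

	if (tval & (tval - 1))			/* more than one type selected */
		return false;
	if (!vendor && !(val & available))	/* standard type must be advertised */
		return false;
	return true;
}
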
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 6a9e3ba..eb9fab5 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -1127,10 +1127,9 @@
 
 	status = acpi_get_table(ACPI_SIG_ERST, 0,
 				(struct acpi_table_header **)&erst_tab);
-	if (status == AE_NOT_FOUND) {
-		pr_info(ERST_PFX "Table is not found!\n");
+	if (status == AE_NOT_FOUND)
 		goto err;
-	} else if (ACPI_FAILURE(status)) {
+	else if (ACPI_FAILURE(status)) {
 		const char *msg = acpi_format_exception(status);
 		pr_err(ERST_PFX "Failed to get table, %s\n", msg);
 		rc = -EINVAL;
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index ebaf037..9b3cac0 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -33,6 +33,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/acpi.h>
+#include <linux/acpi_io.h>
 #include <linux/io.h>
 #include <linux/interrupt.h>
 #include <linux/timer.h>
@@ -45,8 +46,9 @@
 #include <linux/irq_work.h>
 #include <linux/llist.h>
 #include <linux/genalloc.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
 #include <acpi/apei.h>
-#include <acpi/atomicio.h>
 #include <acpi/hed.h>
 #include <asm/mce.h>
 #include <asm/tlbflush.h>
@@ -299,7 +301,7 @@
 	if (!ghes)
 		return ERR_PTR(-ENOMEM);
 	ghes->generic = generic;
-	rc = acpi_pre_map_gar(&generic->error_status_address);
+	rc = acpi_os_map_generic_address(&generic->error_status_address);
 	if (rc)
 		goto err_free;
 	error_block_length = generic->error_block_length;
@@ -319,7 +321,7 @@
 	return ghes;
 
 err_unmap:
-	acpi_post_unmap_gar(&generic->error_status_address);
+	acpi_os_unmap_generic_address(&generic->error_status_address);
 err_free:
 	kfree(ghes);
 	return ERR_PTR(rc);
@@ -328,7 +330,7 @@
 static void ghes_fini(struct ghes *ghes)
 {
 	kfree(ghes->estatus);
-	acpi_post_unmap_gar(&ghes->generic->error_status_address);
+	acpi_os_unmap_generic_address(&ghes->generic->error_status_address);
 }
 
 enum {
@@ -399,7 +401,7 @@
 	u32 len;
 	int rc;
 
-	rc = acpi_atomic_read(&buf_paddr, &g->error_status_address);
+	rc = apei_read(&buf_paddr, &g->error_status_address);
 	if (rc) {
 		if (!silent && printk_ratelimit())
 			pr_warning(FW_WARN GHES_PFX
@@ -476,6 +478,27 @@
 			}
 #endif
 		}
+#ifdef CONFIG_ACPI_APEI_PCIEAER
+		else if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
+				      CPER_SEC_PCIE)) {
+			struct cper_sec_pcie *pcie_err;
+			pcie_err = (struct cper_sec_pcie *)(gdata+1);
+			if (sev == GHES_SEV_RECOVERABLE &&
+			    sec_sev == GHES_SEV_RECOVERABLE &&
+			    pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
+			    pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
+				unsigned int devfn;
+				int aer_severity;
+				devfn = PCI_DEVFN(pcie_err->device_id.device,
+						  pcie_err->device_id.function);
+				aer_severity = cper_severity_to_aer(sev);
+				aer_recover_queue(pcie_err->device_id.segment,
+						  pcie_err->device_id.bus,
+						  devfn, aer_severity);
+			}
+
+		}
+#endif
 	}
 }
 
@@ -483,16 +506,22 @@
 				 const struct acpi_hest_generic *generic,
 				 const struct acpi_hest_generic_status *estatus)
 {
+	static atomic_t seqno;
+	unsigned int curr_seqno;
+	char pfx_seq[64];
+
 	if (pfx == NULL) {
 		if (ghes_severity(estatus->error_severity) <=
 		    GHES_SEV_CORRECTED)
-			pfx = KERN_WARNING HW_ERR;
+			pfx = KERN_WARNING;
 		else
-			pfx = KERN_ERR HW_ERR;
+			pfx = KERN_ERR;
 	}
+	curr_seqno = atomic_inc_return(&seqno);
+	snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
 	printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
-	       pfx, generic->header.source_id);
-	apei_estatus_print(pfx, estatus);
+	       pfx_seq, generic->header.source_id);
+	apei_estatus_print(pfx_seq, estatus);
 }
 
 static int ghes_print_estatus(const char *pfx,
@@ -711,26 +740,34 @@
 	return ret;
 }
 
-static void ghes_proc_in_irq(struct irq_work *irq_work)
+static struct llist_node *llist_nodes_reverse(struct llist_node *llnode)
 {
-	struct llist_node *llnode, *next, *tail = NULL;
-	struct ghes_estatus_node *estatus_node;
-	struct acpi_hest_generic *generic;
-	struct acpi_hest_generic_status *estatus;
-	u32 len, node_len;
+	struct llist_node *next, *tail = NULL;
 
-	/*
-	 * Because the time order of estatus in list is reversed,
-	 * revert it back to proper order.
-	 */
-	llnode = llist_del_all(&ghes_estatus_llist);
 	while (llnode) {
 		next = llnode->next;
 		llnode->next = tail;
 		tail = llnode;
 		llnode = next;
 	}
-	llnode = tail;
+
+	return tail;
+}
+
+static void ghes_proc_in_irq(struct irq_work *irq_work)
+{
+	struct llist_node *llnode, *next;
+	struct ghes_estatus_node *estatus_node;
+	struct acpi_hest_generic *generic;
+	struct acpi_hest_generic_status *estatus;
+	u32 len, node_len;
+
+	llnode = llist_del_all(&ghes_estatus_llist);
+	/*
+	 * Because the time order of estatus in list is reversed,
+	 * revert it back to proper order.
+	 */
+	llnode = llist_nodes_reverse(llnode);
 	while (llnode) {
 		next = llnode->next;
 		estatus_node = llist_entry(llnode, struct ghes_estatus_node,
@@ -750,6 +787,32 @@
 	}
 }
 
+static void ghes_print_queued_estatus(void)
+{
+	struct llist_node *llnode;
+	struct ghes_estatus_node *estatus_node;
+	struct acpi_hest_generic *generic;
+	struct acpi_hest_generic_status *estatus;
+	u32 len, node_len;
+
+	llnode = llist_del_all(&ghes_estatus_llist);
+	/*
+	 * Because the time order of estatus in list is reversed,
+	 * revert it back to proper order.
+	 */
+	llnode = llist_nodes_reverse(llnode);
+	while (llnode) {
+		estatus_node = llist_entry(llnode, struct ghes_estatus_node,
+					   llnode);
+		estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
+		len = apei_estatus_len(estatus);
+		node_len = GHES_ESTATUS_NODE_LEN(len);
+		generic = estatus_node->generic;
+		ghes_print_estatus(NULL, generic, estatus);
+		llnode = llnode->next;
+	}
+}
+
 static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
 {
 	struct ghes *ghes, *ghes_global = NULL;
@@ -775,7 +838,8 @@
 
 	if (sev_global >= GHES_SEV_PANIC) {
 		oops_begin();
-		__ghes_print_estatus(KERN_EMERG HW_ERR, ghes_global->generic,
+		ghes_print_queued_estatus();
+		__ghes_print_estatus(KERN_EMERG, ghes_global->generic,
 				     ghes_global->estatus);
 		/* reboot to log the error! */
 		if (panic_timeout == 0)
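
For context on the llist_nodes_reverse() helper introduced above: llist_add() pushes new nodes at the head, so llist_del_all() returns the chain newest-first, and both ghes consumers reverse it to handle error status blocks in arrival order. A minimal sketch with a hypothetical event type (my_event, producer and consumer are illustrative names):

#include <linux/llist.h>

struct my_event {
	struct llist_node llnode;
	int seq;
};

static LLIST_HEAD(event_list);

static void producer(struct my_event *ev)
{
	llist_add(&ev->llnode, &event_list);	/* newest entry becomes the head */
}

static void consumer(void)
{
	struct llist_node *llnode = llist_del_all(&event_list);

	llnode = llist_nodes_reverse(llnode);	/* oldest entry first again */
	while (llnode) {
		struct my_event *ev = llist_entry(llnode, struct my_event, llnode);

		/* process ev in the order it was queued ... */
		llnode = llnode->next;
	}
}
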
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index ee7fddc..7f00cf3 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -221,10 +221,9 @@
 
 	status = acpi_get_table(ACPI_SIG_HEST, 0,
 				(struct acpi_table_header **)&hest_tab);
-	if (status == AE_NOT_FOUND) {
-		pr_info(HEST_PFX "Table not found.\n");
+	if (status == AE_NOT_FOUND)
 		goto err;
-	} else if (ACPI_FAILURE(status)) {
+	else if (ACPI_FAILURE(status)) {
 		const char *msg = acpi_format_exception(status);
 		pr_err(HEST_PFX "Failed to get table, %s\n", msg);
 		rc = -EINVAL;
diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c
index cfc0cc1..d4a5b3d 100644
--- a/drivers/acpi/atomicio.c
+++ b/drivers/acpi/atomicio.c
@@ -32,6 +32,8 @@
 #include <linux/rculist.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
 #include <acpi/atomicio.h>
 
 #define ACPI_PFX "ACPI: "
@@ -97,6 +99,37 @@
 		return NULL;
 }
 
+#ifndef CONFIG_IA64
+#define should_use_kmap(pfn)	page_is_ram(pfn)
+#else
+/* ioremap will take care of cache attributes */
+#define should_use_kmap(pfn)	0
+#endif
+
+static void __iomem *acpi_map(phys_addr_t pg_off, unsigned long pg_sz)
+{
+	unsigned long pfn;
+
+	pfn = pg_off >> PAGE_SHIFT;
+	if (should_use_kmap(pfn)) {
+		if (pg_sz > PAGE_SIZE)
+			return NULL;
+		return (void __iomem __force *)kmap(pfn_to_page(pfn));
+	} else
+		return ioremap(pg_off, pg_sz);
+}
+
+static void acpi_unmap(phys_addr_t pg_off, void __iomem *vaddr)
+{
+	unsigned long pfn;
+
+	pfn = pg_off >> PAGE_SHIFT;
+	if (page_is_ram(pfn))
+		kunmap(pfn_to_page(pfn));
+	else
+		iounmap(vaddr);
+}
+
 /*
  * Used to pre-map the specified IO memory area. First try to find
  * whether the area is already pre-mapped, if it is, increase the
@@ -119,7 +152,7 @@
 
 	pg_off = paddr & PAGE_MASK;
 	pg_sz = ((paddr + size + PAGE_SIZE - 1) & PAGE_MASK) - pg_off;
-	vaddr = ioremap(pg_off, pg_sz);
+	vaddr = acpi_map(pg_off, pg_sz);
 	if (!vaddr)
 		return NULL;
 	map = kmalloc(sizeof(*map), GFP_KERNEL);
@@ -135,7 +168,7 @@
 	vaddr = __acpi_try_ioremap(paddr, size);
 	if (vaddr) {
 		spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
-		iounmap(map->vaddr);
+		acpi_unmap(pg_off, map->vaddr);
 		kfree(map);
 		return vaddr;
 	}
@@ -144,7 +177,7 @@
 
 	return map->vaddr + (paddr - map->paddr);
 err_unmap:
-	iounmap(vaddr);
+	acpi_unmap(pg_off, vaddr);
 	return NULL;
 }
 
@@ -177,7 +210,7 @@
 		return;
 
 	synchronize_rcu();
-	iounmap(map->vaddr);
+	acpi_unmap(map->paddr, map->vaddr);
 	kfree(map);
 }
 
@@ -260,6 +293,21 @@
 }
 EXPORT_SYMBOL_GPL(acpi_post_unmap_gar);
 
+#ifdef readq
+static inline u64 read64(const volatile void __iomem *addr)
+{
+	return readq(addr);
+}
+#else
+static inline u64 read64(const volatile void __iomem *addr)
+{
+	u64 l, h;
+	l = readl(addr);
+	h = readl(addr+4);
+	return l | (h << 32);
+}
+#endif
+
 /*
  * Can be used in atomic (including NMI) or process context. RCU read
  * lock can only be released after the IO memory area accessing.
@@ -280,11 +328,9 @@
 	case 32:
 		*val = readl(addr);
 		break;
-#ifdef readq
 	case 64:
-		*val = readq(addr);
+		*val = read64(addr);
 		break;
-#endif
 	default:
 		return -EINVAL;
 	}
@@ -293,6 +339,19 @@
 	return 0;
 }
 
+#ifdef writeq
+static inline void write64(u64 val, volatile void __iomem *addr)
+{
+	writeq(val, addr);
+}
+#else
+static inline void write64(u64 val, volatile void __iomem *addr)
+{
+	writel(val, addr);
+	writel(val>>32, addr+4);
+}
+#endif
+
 static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width)
 {
 	void __iomem *addr;
@@ -309,11 +368,9 @@
 	case 32:
 		writel(val, addr);
 		break;
-#ifdef writeq
 	case 64:
-		writeq(val, addr);
+		write64(val, addr);
 		break;
-#endif
 	default:
 		return -EINVAL;
 	}
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 3b5c318..e56f3be 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -45,6 +45,8 @@
 static int node_to_pxm_map[MAX_NUMNODES]
 			= { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
 
+unsigned char acpi_srat_revision __initdata;
+
 int pxm_to_node(int pxm)
 {
 	if (pxm < 0)
@@ -255,9 +257,13 @@
 
 static int __init acpi_parse_srat(struct acpi_table_header *table)
 {
+	struct acpi_table_srat *srat;
 	if (!table)
 		return -EINVAL;
 
+	srat = (struct acpi_table_srat *)table;
+	acpi_srat_revision = srat->header.revision;
+
 	/* Real work done in acpi_table_parse_srat below. */
 
 	return 0;
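
The SRAT revision saved above matters because revision 1 tables define only an 8-bit proximity domain, while later revisions widen the field; a consumer of acpi_srat_revision is expected to mask accordingly. A hedged sketch of such a consumer (srat_proximity_domain is an illustrative name, not part of this patch):

#include <linux/types.h>

extern unsigned char acpi_srat_revision;

/* Sketch: keep only the bits of a proximity-domain field the table revision defines. */
static inline u32 srat_proximity_domain(u32 raw)
{
	if (acpi_srat_revision <= 1)	/* rev 1: only the low byte is valid */
		return raw & 0xff;
	return raw;
}
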
diff --git a/drivers/acpi/nvs.c b/drivers/acpi/nvs.c
index 096787b..7a2035f 100644
--- a/drivers/acpi/nvs.c
+++ b/drivers/acpi/nvs.c
@@ -15,6 +15,56 @@
 #include <linux/acpi_io.h>
 #include <acpi/acpiosxf.h>
 
+/* ACPI NVS regions, APEI may use it */
+
+struct nvs_region {
+	__u64 phys_start;
+	__u64 size;
+	struct list_head node;
+};
+
+static LIST_HEAD(nvs_region_list);
+
+#ifdef CONFIG_ACPI_SLEEP
+static int suspend_nvs_register(unsigned long start, unsigned long size);
+#else
+static inline int suspend_nvs_register(unsigned long a, unsigned long b)
+{
+	return 0;
+}
+#endif
+
+int acpi_nvs_register(__u64 start, __u64 size)
+{
+	struct nvs_region *region;
+
+	region = kmalloc(sizeof(*region), GFP_KERNEL);
+	if (!region)
+		return -ENOMEM;
+	region->phys_start = start;
+	region->size = size;
+	list_add_tail(&region->node, &nvs_region_list);
+
+	return suspend_nvs_register(start, size);
+}
+
+int acpi_nvs_for_each_region(int (*func)(__u64 start, __u64 size, void *data),
+			     void *data)
+{
+	int rc;
+	struct nvs_region *region;
+
+	list_for_each_entry(region, &nvs_region_list, node) {
+		rc = func(region->phys_start, region->size, data);
+		if (rc)
+			return rc;
+	}
+
+	return 0;
+}
+
+
+#ifdef CONFIG_ACPI_SLEEP
 /*
  * Platforms, like ACPI, may want us to save some memory used by them during
  * suspend and to restore the contents of this memory during the subsequent
@@ -41,7 +91,7 @@
  *	things so that the data from page-aligned addresses in this region will
  *	be copied into separate RAM pages.
  */
-int suspend_nvs_register(unsigned long start, unsigned long size)
+static int suspend_nvs_register(unsigned long start, unsigned long size)
 {
 	struct nvs_page *entry, *next;
 
@@ -159,3 +209,4 @@
 		if (entry->data)
 			memcpy(entry->kaddr, entry->data, entry->size);
 }
+#endif
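
acpi_nvs_for_each_region() above walks every registered NVS region and stops as soon as the callback returns non-zero. A sketch of a hypothetical caller that tests a physical range for overlap (struct nvs_check and nvs_region_overlaps are illustrative names only):

#include <linux/types.h>

struct nvs_check {
	u64	start;
	u64	end;		/* exclusive */
	bool	conflict;
};

static int nvs_region_overlaps(__u64 region_start, __u64 region_size, void *data)
{
	struct nvs_check *c = data;

	if (region_start < c->end && region_start + region_size > c->start) {
		c->conflict = true;
		return 1;	/* non-zero stops the walk */
	}
	return 0;
}

/*
 * Usage:
 *	struct nvs_check c = { .start = addr, .end = addr + size };
 *	acpi_nvs_for_each_region(nvs_region_overlaps, &c);
 */
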
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index f31c5c5..fcc12d8 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -83,19 +83,6 @@
 struct workqueue_struct *kacpi_hotplug_wq;
 EXPORT_SYMBOL(kacpi_hotplug_wq);
 
-struct acpi_res_list {
-	resource_size_t start;
-	resource_size_t end;
-	acpi_adr_space_type resource_type; /* IO port, System memory, ...*/
-	char name[5];   /* only can have a length of 4 chars, make use of this
-			   one instead of res->name, no need to kalloc then */
-	struct list_head resource_list;
-	int count;
-};
-
-static LIST_HEAD(resource_list_head);
-static DEFINE_SPINLOCK(acpi_res_lock);
-
 /*
  * This list of permanent mappings is for memory that may be accessed from
  * interrupt context, where we can't do the ioremap().
@@ -166,17 +153,21 @@
 	return supported;
 }
 
-static void __init acpi_request_region (struct acpi_generic_address *addr,
+static void __init acpi_request_region (struct acpi_generic_address *gas,
 	unsigned int length, char *desc)
 {
-	if (!addr->address || !length)
+	u64 addr;
+
+	/* Handle possible alignment issues */
+	memcpy(&addr, &gas->address, sizeof(addr));
+	if (!addr || !length)
 		return;
 
 	/* Resources are never freed */
-	if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
-		request_region(addr->address, length, desc);
-	else if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
-		request_mem_region(addr->address, length, desc);
+	if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
+		request_region(addr, length, desc);
+	else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
+		request_mem_region(addr, length, desc);
 }
 
 static int __init acpi_reserve_resources(void)
@@ -427,35 +418,42 @@
 		__acpi_unmap_table(virt, size);
 }
 
-static int acpi_os_map_generic_address(struct acpi_generic_address *addr)
+int acpi_os_map_generic_address(struct acpi_generic_address *gas)
 {
+	u64 addr;
 	void __iomem *virt;
 
-	if (addr->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
+	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
 		return 0;
 
-	if (!addr->address || !addr->bit_width)
+	/* Handle possible alignment issues */
+	memcpy(&addr, &gas->address, sizeof(addr));
+	if (!addr || !gas->bit_width)
 		return -EINVAL;
 
-	virt = acpi_os_map_memory(addr->address, addr->bit_width / 8);
+	virt = acpi_os_map_memory(addr, gas->bit_width / 8);
 	if (!virt)
 		return -EIO;
 
 	return 0;
 }
+EXPORT_SYMBOL(acpi_os_map_generic_address);
 
-static void acpi_os_unmap_generic_address(struct acpi_generic_address *addr)
+void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
 {
+	u64 addr;
 	struct acpi_ioremap *map;
 
-	if (addr->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
+	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
 		return;
 
-	if (!addr->address || !addr->bit_width)
+	/* Handle possible alignment issues */
+	memcpy(&addr, &gas->address, sizeof(addr));
+	if (!addr || !gas->bit_width)
 		return;
 
 	mutex_lock(&acpi_ioremap_lock);
-	map = acpi_map_lookup(addr->address, addr->bit_width / 8);
+	map = acpi_map_lookup(addr, gas->bit_width / 8);
 	if (!map) {
 		mutex_unlock(&acpi_ioremap_lock);
 		return;
@@ -465,6 +463,7 @@
 
 	acpi_os_map_cleanup(map);
 }
+EXPORT_SYMBOL(acpi_os_unmap_generic_address);
 
 #ifdef ACPI_FUTURE_USAGE
 acpi_status
@@ -1278,44 +1277,28 @@
  * drivers */
 int acpi_check_resource_conflict(const struct resource *res)
 {
-	struct acpi_res_list *res_list_elem;
-	int ioport = 0, clash = 0;
+	acpi_adr_space_type space_id;
+	acpi_size length;
+	u8 warn = 0;
+	int clash = 0;
 
 	if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
 		return 0;
 	if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
 		return 0;
 
-	ioport = res->flags & IORESOURCE_IO;
+	if (res->flags & IORESOURCE_IO)
+		space_id = ACPI_ADR_SPACE_SYSTEM_IO;
+	else
+		space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
 
-	spin_lock(&acpi_res_lock);
-	list_for_each_entry(res_list_elem, &resource_list_head,
-			    resource_list) {
-		if (ioport && (res_list_elem->resource_type
-			       != ACPI_ADR_SPACE_SYSTEM_IO))
-			continue;
-		if (!ioport && (res_list_elem->resource_type
-				!= ACPI_ADR_SPACE_SYSTEM_MEMORY))
-			continue;
-
-		if (res->end < res_list_elem->start
-		    || res_list_elem->end < res->start)
-			continue;
-		clash = 1;
-		break;
-	}
-	spin_unlock(&acpi_res_lock);
+	length = res->end - res->start + 1;
+	if (acpi_enforce_resources != ENFORCE_RESOURCES_NO)
+		warn = 1;
+	clash = acpi_check_address_range(space_id, res->start, length, warn);
 
 	if (clash) {
 		if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
-			printk(KERN_WARNING "ACPI: resource %s %pR"
-			       " conflicts with ACPI region %s "
-			       "[%s 0x%zx-0x%zx]\n",
-			       res->name, res, res_list_elem->name,
-			       (res_list_elem->resource_type ==
-				ACPI_ADR_SPACE_SYSTEM_IO) ? "io" : "mem",
-			       (size_t) res_list_elem->start,
-			       (size_t) res_list_elem->end);
 			if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
 				printk(KERN_NOTICE "ACPI: This conflict may"
 				       " cause random problems and system"
@@ -1467,155 +1450,6 @@
 	kmem_cache_free(cache, object);
 	return (AE_OK);
 }
-
-static inline int acpi_res_list_add(struct acpi_res_list *res)
-{
-	struct acpi_res_list *res_list_elem;
-
-	list_for_each_entry(res_list_elem, &resource_list_head,
-			    resource_list) {
-
-		if (res->resource_type == res_list_elem->resource_type &&
-		    res->start == res_list_elem->start &&
-		    res->end == res_list_elem->end) {
-
-			/*
-			 * The Region(addr,len) already exist in the list,
-			 * just increase the count
-			 */
-
-			res_list_elem->count++;
-			return 0;
-		}
-	}
-
-	res->count = 1;
-	list_add(&res->resource_list, &resource_list_head);
-	return 1;
-}
-
-static inline void acpi_res_list_del(struct acpi_res_list *res)
-{
-	struct acpi_res_list *res_list_elem;
-
-	list_for_each_entry(res_list_elem, &resource_list_head,
-			    resource_list) {
-
-		if (res->resource_type == res_list_elem->resource_type &&
-		    res->start == res_list_elem->start &&
-		    res->end == res_list_elem->end) {
-
-			/*
-			 * If the res count is decreased to 0,
-			 * remove and free it
-			 */
-
-			if (--res_list_elem->count == 0) {
-				list_del(&res_list_elem->resource_list);
-				kfree(res_list_elem);
-			}
-			return;
-		}
-	}
-}
-
-acpi_status
-acpi_os_invalidate_address(
-    u8                   space_id,
-    acpi_physical_address   address,
-    acpi_size               length)
-{
-	struct acpi_res_list res;
-
-	switch (space_id) {
-	case ACPI_ADR_SPACE_SYSTEM_IO:
-	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
-		/* Only interference checks against SystemIO and SystemMemory
-		   are needed */
-		res.start = address;
-		res.end = address + length - 1;
-		res.resource_type = space_id;
-		spin_lock(&acpi_res_lock);
-		acpi_res_list_del(&res);
-		spin_unlock(&acpi_res_lock);
-		break;
-	case ACPI_ADR_SPACE_PCI_CONFIG:
-	case ACPI_ADR_SPACE_EC:
-	case ACPI_ADR_SPACE_SMBUS:
-	case ACPI_ADR_SPACE_CMOS:
-	case ACPI_ADR_SPACE_PCI_BAR_TARGET:
-	case ACPI_ADR_SPACE_DATA_TABLE:
-	case ACPI_ADR_SPACE_FIXED_HARDWARE:
-		break;
-	}
-	return AE_OK;
-}
-
-/******************************************************************************
- *
- * FUNCTION:    acpi_os_validate_address
- *
- * PARAMETERS:  space_id             - ACPI space ID
- *              address             - Physical address
- *              length              - Address length
- *
- * RETURN:      AE_OK if address/length is valid for the space_id. Otherwise,
- *              should return AE_AML_ILLEGAL_ADDRESS.
- *
- * DESCRIPTION: Validate a system address via the host OS. Used to validate
- *              the addresses accessed by AML operation regions.
- *
- *****************************************************************************/
-
-acpi_status
-acpi_os_validate_address (
-    u8                   space_id,
-    acpi_physical_address   address,
-    acpi_size               length,
-    char *name)
-{
-	struct acpi_res_list *res;
-	int added;
-	if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
-		return AE_OK;
-
-	switch (space_id) {
-	case ACPI_ADR_SPACE_SYSTEM_IO:
-	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
-		/* Only interference checks against SystemIO and SystemMemory
-		   are needed */
-		res = kzalloc(sizeof(struct acpi_res_list), GFP_KERNEL);
-		if (!res)
-			return AE_OK;
-		/* ACPI names are fixed to 4 bytes, still better use strlcpy */
-		strlcpy(res->name, name, 5);
-		res->start = address;
-		res->end = address + length - 1;
-		res->resource_type = space_id;
-		spin_lock(&acpi_res_lock);
-		added = acpi_res_list_add(res);
-		spin_unlock(&acpi_res_lock);
-		pr_debug("%s %s resource: start: 0x%llx, end: 0x%llx, "
-			 "name: %s\n", added ? "Added" : "Already exist",
-			 (space_id == ACPI_ADR_SPACE_SYSTEM_IO)
-			 ? "SystemIO" : "System Memory",
-			 (unsigned long long)res->start,
-			 (unsigned long long)res->end,
-			 res->name);
-		if (!added)
-			kfree(res);
-		break;
-	case ACPI_ADR_SPACE_PCI_CONFIG:
-	case ACPI_ADR_SPACE_EC:
-	case ACPI_ADR_SPACE_SMBUS:
-	case ACPI_ADR_SPACE_CMOS:
-	case ACPI_ADR_SPACE_PCI_BAR_TARGET:
-	case ACPI_ADR_SPACE_DATA_TABLE:
-	case ACPI_ADR_SPACE_FIXED_HARDWARE:
-		break;
-	}
-	return AE_OK;
-}
 #endif
 
 acpi_status __init acpi_os_initialize(void)
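
With the hunk above, acpi_check_resource_conflict() is answered by ACPICA's acpi_check_address_range() rather than the private resource list this patch removes; callers keep the same pattern. A sketch of the usual call from a platform driver probe path (the port range and helper name are illustrative):

#include <linux/acpi.h>
#include <linux/errno.h>
#include <linux/ioport.h>

static int example_check_ports(void)	/* hypothetical helper */
{
	struct resource res = {
		.name	= "example",
		.start	= 0x295,
		.end	= 0x296,
		.flags	= IORESOURCE_IO,
	};

	/* A non-zero return reports a conflict with an ACPI operation region. */
	if (acpi_check_resource_conflict(&res))
		return -EBUSY;

	return 0;
}
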
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 3a0428e..c850de4 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -173,8 +173,30 @@
 	apic_id = map_mat_entry(handle, type, acpi_id);
 	if (apic_id == -1)
 		apic_id = map_madt_entry(type, acpi_id);
-	if (apic_id == -1)
-		return apic_id;
+	if (apic_id == -1) {
+		 * On a UP processor there is no _MAT method or MADT table,
+		 * so apic_id above is always set to -1.
+		 * So above apic_id is always set to -1.
+		 *
+		 * BIOS may define multiple CPU handles even for UP processor.
+		 * For example,
+		 *
+		 * Scope (_PR)
+		 * {
+		 *     Processor (CPU0, 0x00, 0x00000410, 0x06) {}
+		 *     Processor (CPU1, 0x01, 0x00000410, 0x06) {}
+		 *     Processor (CPU2, 0x02, 0x00000410, 0x06) {}
+		 *     Processor (CPU3, 0x03, 0x00000410, 0x06) {}
+		 * }
+		 *
+		 * Ignore apic_id and always return 0 for CPU0's handle;
+		 * return -1 for the handles of the other CPUs.
+		 */
+		if (acpi_id == 0)
+			return acpi_id;
+		else
+			return apic_id;
+	}
 
 #ifdef CONFIG_SMP
 	for_each_possible_cpu(i) {
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 20a68ca..0034ede 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -82,7 +82,7 @@
 static int acpi_processor_add(struct acpi_device *device);
 static int acpi_processor_remove(struct acpi_device *device, int type);
 static void acpi_processor_notify(struct acpi_device *device, u32 event);
-static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu);
+static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr);
 static int acpi_processor_handle_eject(struct acpi_processor *pr);
 
 
@@ -324,10 +324,8 @@
 	 *  they are physically not present.
 	 */
 	if (pr->id == -1) {
-		if (ACPI_FAILURE
-		    (acpi_processor_hotadd_init(pr->handle, &pr->id))) {
+		if (ACPI_FAILURE(acpi_processor_hotadd_init(pr)))
 			return -ENODEV;
-		}
 	}
 	/*
 	 * On some boxes several processors use the same processor bus id.
@@ -539,6 +537,7 @@
 	thermal_cooling_device_unregister(pr->cdev);
 err_power_exit:
 	acpi_processor_power_exit(pr, device);
+	sysfs_remove_link(&device->dev.kobj, "sysdev");
 err_free_cpumask:
 	free_cpumask_var(pr->throttling.shared_cpu_map);
 
@@ -720,18 +719,19 @@
 	return (AE_OK);
 }
 
-static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
+static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr)
 {
+	acpi_handle handle = pr->handle;
 
 	if (!is_processor_present(handle)) {
 		return AE_ERROR;
 	}
 
-	if (acpi_map_lsapic(handle, p_cpu))
+	if (acpi_map_lsapic(handle, &pr->id))
 		return AE_ERROR;
 
-	if (arch_register_cpu(*p_cpu)) {
-		acpi_unmap_lsapic(*p_cpu);
+	if (arch_register_cpu(pr->id)) {
+		acpi_unmap_lsapic(pr->id);
 		return AE_ERROR;
 	}
 
@@ -748,7 +748,7 @@
 	return (0);
 }
 #else
-static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
+static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr)
 {
 	return AE_ERROR;
 }
@@ -827,8 +827,6 @@
 
 	acpi_bus_unregister_driver(&acpi_processor_driver);
 
-	cpuidle_unregister_driver(&acpi_idle_driver);
-
 	return;
 }
 
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 99dc592..40fb122 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -915,9 +915,10 @@
 
 /**
  * __bus_register - register a driver-core subsystem
- * @bus: bus.
+ * @bus: bus to register
+ * @key: lockdep class key
  *
- * Once we have that, we registered the bus with the kobject
+ * Once we have that, we register the bus with the kobject
  * infrastructure, then register the children subsystems it has:
  * the devices and drivers that belong to the subsystem.
  */
@@ -1220,8 +1221,8 @@
 }
 /**
  * subsys_system_register - register a subsystem at /sys/devices/system/
- * @subsys - system subsystem
- * @groups - default attributes for the root device
+ * @subsys: system subsystem
+ * @groups: default attributes for the root device
  *
  * All 'system' subsystems have a /sys/devices/system/<name> root device
  * with the name of the subsystem. The root device can carry subsystem-
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 26ab358..6c9387d 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -525,8 +525,7 @@
 	if (!firmware) {
 		dev_err(device, "%s: kmalloc(struct firmware) failed\n",
 			__func__);
-		retval = -ENOMEM;
-		goto out;
+		return -ENOMEM;
 	}
 
 	if (fw_get_builtin_firmware(firmware, name)) {
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index a30aa10..4e4c8a4 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -317,6 +317,17 @@
 
 	  If unsure, say N.
 
+config BLK_DEV_NVME
+	tristate "NVM Express block device"
+	depends on PCI
+	---help---
+	  The NVM Express driver is for solid state drives directly
+	  connected to the PCI or PCI Express bus.  If you know you
+	  don't have one of these, it is safe to answer N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called nvme.
+
 config BLK_DEV_OSD
 	tristate "OSD object-as-blkdev support"
 	depends on SCSI_OSD_ULD
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index ad7b74a..5b79505 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -23,6 +23,7 @@
 obj-$(CONFIG_CDROM_PKTCDVD)	+= pktcdvd.o
 obj-$(CONFIG_MG_DISK)		+= mg_disk.o
 obj-$(CONFIG_SUNVDC)		+= sunvdc.o
+obj-$(CONFIG_BLK_DEV_NVME)	+= nvme.o
 obj-$(CONFIG_BLK_DEV_OSD)	+= osdblk.o
 
 obj-$(CONFIG_BLK_DEV_UMEM)	+= umem.o
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
new file mode 100644
index 0000000..c1dc4d8
--- /dev/null
+++ b/drivers/block/nvme.c
@@ -0,0 +1,1739 @@
+/*
+ * NVM Express device driver
+ * Copyright (c) 2011, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/nvme.h>
+#include <linux/bio.h>
+#include <linux/bitops.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/genhd.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kdev_t.h>
+#include <linux/kthread.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/poison.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/version.h>
+
+#define NVME_Q_DEPTH 1024
+#define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
+#define CQ_SIZE(depth)		(depth * sizeof(struct nvme_completion))
+#define NVME_MINORS 64
+#define NVME_IO_TIMEOUT	(5 * HZ)
+#define ADMIN_TIMEOUT	(60 * HZ)
+
+static int nvme_major;
+module_param(nvme_major, int, 0);
+
+static int use_threaded_interrupts;
+module_param(use_threaded_interrupts, int, 0);
+
+static DEFINE_SPINLOCK(dev_list_lock);
+static LIST_HEAD(dev_list);
+static struct task_struct *nvme_thread;
+
+/*
+ * Represents an NVM Express device.  Each nvme_dev is a PCI function.
+ */
+struct nvme_dev {
+	struct list_head node;
+	struct nvme_queue **queues;
+	u32 __iomem *dbs;
+	struct pci_dev *pci_dev;
+	struct dma_pool *prp_page_pool;
+	struct dma_pool *prp_small_pool;
+	int instance;
+	int queue_count;
+	int db_stride;
+	u32 ctrl_config;
+	struct msix_entry *entry;
+	struct nvme_bar __iomem *bar;
+	struct list_head namespaces;
+	char serial[20];
+	char model[40];
+	char firmware_rev[8];
+};
+
+/*
+ * An NVM Express namespace is equivalent to a SCSI LUN
+ */
+struct nvme_ns {
+	struct list_head list;
+
+	struct nvme_dev *dev;
+	struct request_queue *queue;
+	struct gendisk *disk;
+
+	int ns_id;
+	int lba_shift;
+};
+
+/*
+ * An NVM Express queue.  Each device has at least two (one for admin
+ * commands and one for I/O commands).
+ */
+struct nvme_queue {
+	struct device *q_dmadev;
+	struct nvme_dev *dev;
+	spinlock_t q_lock;
+	struct nvme_command *sq_cmds;
+	volatile struct nvme_completion *cqes;
+	dma_addr_t sq_dma_addr;
+	dma_addr_t cq_dma_addr;
+	wait_queue_head_t sq_full;
+	wait_queue_t sq_cong_wait;
+	struct bio_list sq_cong;
+	u32 __iomem *q_db;
+	u16 q_depth;
+	u16 cq_vector;
+	u16 sq_head;
+	u16 sq_tail;
+	u16 cq_head;
+	u16 cq_phase;
+	unsigned long cmdid_data[];
+};
+
+/*
+ * Check we didn't inadvertently grow the command struct
+ */
+static inline void _nvme_check_size(void)
+{
+	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
+	BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
+	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
+	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
+	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
+	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
+	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
+	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
+	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
+}
+
+typedef void (*nvme_completion_fn)(struct nvme_dev *, void *,
+						struct nvme_completion *);
+
+struct nvme_cmd_info {
+	nvme_completion_fn fn;
+	void *ctx;
+	unsigned long timeout;
+};
+
+static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
+{
+	return (void *)&nvmeq->cmdid_data[BITS_TO_LONGS(nvmeq->q_depth)];
+}
+
+/**
+ * alloc_cmdid() - Allocate a Command ID
+ * @nvmeq: The queue that will be used for this command
+ * @ctx: A pointer that will be passed to the handler
+ * @handler: The function to call on completion
+ *
+ * Allocate a Command ID for a queue.  The data passed in will
+ * be passed to the completion handler.  The handler function and the
+ * context pointer are stored in the per-queue nvme_cmd_info array,
+ * indexed by the returned command ID, together with the command timeout.
+ *
+ * May be called with local interrupts disabled and the q_lock held,
+ * or with interrupts enabled and no locks held.
+ */
+static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx,
+				nvme_completion_fn handler, unsigned timeout)
+{
+	int depth = nvmeq->q_depth - 1;
+	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+	int cmdid;
+
+	do {
+		cmdid = find_first_zero_bit(nvmeq->cmdid_data, depth);
+		if (cmdid >= depth)
+			return -EBUSY;
+	} while (test_and_set_bit(cmdid, nvmeq->cmdid_data));
+
+	info[cmdid].fn = handler;
+	info[cmdid].ctx = ctx;
+	info[cmdid].timeout = jiffies + timeout;
+	return cmdid;
+}
+
+static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
+				nvme_completion_fn handler, unsigned timeout)
+{
+	int cmdid;
+	wait_event_killable(nvmeq->sq_full,
+		(cmdid = alloc_cmdid(nvmeq, ctx, handler, timeout)) >= 0);
+	return (cmdid < 0) ? -EINTR : cmdid;
+}
+
+/* Special values must be less than 0x1000 */
+#define CMD_CTX_BASE		((void *)POISON_POINTER_DELTA)
+#define CMD_CTX_CANCELLED	(0x30C + CMD_CTX_BASE)
+#define CMD_CTX_COMPLETED	(0x310 + CMD_CTX_BASE)
+#define CMD_CTX_INVALID		(0x314 + CMD_CTX_BASE)
+#define CMD_CTX_FLUSH		(0x318 + CMD_CTX_BASE)
+
+static void special_completion(struct nvme_dev *dev, void *ctx,
+						struct nvme_completion *cqe)
+{
+	if (ctx == CMD_CTX_CANCELLED)
+		return;
+	if (ctx == CMD_CTX_FLUSH)
+		return;
+	if (ctx == CMD_CTX_COMPLETED) {
+		dev_warn(&dev->pci_dev->dev,
+				"completed id %d twice on queue %d\n",
+				cqe->command_id, le16_to_cpup(&cqe->sq_id));
+		return;
+	}
+	if (ctx == CMD_CTX_INVALID) {
+		dev_warn(&dev->pci_dev->dev,
+				"invalid id %d completed on queue %d\n",
+				cqe->command_id, le16_to_cpup(&cqe->sq_id));
+		return;
+	}
+
+	dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx);
+}
+
+/*
+ * Called with local interrupts disabled and the q_lock held.  May not sleep.
+ */
+static void *free_cmdid(struct nvme_queue *nvmeq, int cmdid,
+						nvme_completion_fn *fn)
+{
+	void *ctx;
+	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+
+	if (cmdid >= nvmeq->q_depth) {
+		*fn = special_completion;
+		return CMD_CTX_INVALID;
+	}
+	*fn = info[cmdid].fn;
+	ctx = info[cmdid].ctx;
+	info[cmdid].fn = special_completion;
+	info[cmdid].ctx = CMD_CTX_COMPLETED;
+	clear_bit(cmdid, nvmeq->cmdid_data);
+	wake_up(&nvmeq->sq_full);
+	return ctx;
+}
+
+static void *cancel_cmdid(struct nvme_queue *nvmeq, int cmdid,
+						nvme_completion_fn *fn)
+{
+	void *ctx;
+	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+	if (fn)
+		*fn = info[cmdid].fn;
+	ctx = info[cmdid].ctx;
+	info[cmdid].fn = special_completion;
+	info[cmdid].ctx = CMD_CTX_CANCELLED;
+	return ctx;
+}
+
+static struct nvme_queue *get_nvmeq(struct nvme_dev *dev)
+{
+	return dev->queues[get_cpu() + 1];
+}
+
+static void put_nvmeq(struct nvme_queue *nvmeq)
+{
+	put_cpu();
+}
+
+/**
+ * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
+ * @nvmeq: The queue to use
+ * @cmd: The command to send
+ *
+ * Safe to use from interrupt context
+ */
+static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
+{
+	unsigned long flags;
+	u16 tail;
+	spin_lock_irqsave(&nvmeq->q_lock, flags);
+	tail = nvmeq->sq_tail;
+	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
+	if (++tail == nvmeq->q_depth)
+		tail = 0;
+	writel(tail, nvmeq->q_db);
+	nvmeq->sq_tail = tail;
+	spin_unlock_irqrestore(&nvmeq->q_lock, flags);
+
+	return 0;
+}
+
+/*
+ * The nvme_iod describes the data in an I/O, including the list of PRP
+ * entries.  You can't see it in this data structure because C doesn't let
+ * me express that.  Use nvme_alloc_iod to ensure there's enough space
+ * allocated to store the PRP list.
+ */
+struct nvme_iod {
+	void *private;		/* For the use of the submitter of the I/O */
+	int npages;		/* In the PRP list. 0 means small pool in use */
+	int offset;		/* Of PRP list */
+	int nents;		/* Used in scatterlist */
+	int length;		/* Of data, in bytes */
+	dma_addr_t first_dma;
+	struct scatterlist sg[0];
+};
+
+static __le64 **iod_list(struct nvme_iod *iod)
+{
+	return ((void *)iod) + iod->offset;
+}
+
+/*
+ * Will slightly overestimate the number of pages needed.  This is OK
+ * as it only leads to a small amount of wasted memory for the lifetime of
+ * the I/O.
+ */
+static int nvme_npages(unsigned size)
+{
+	unsigned nprps = DIV_ROUND_UP(size + PAGE_SIZE, PAGE_SIZE);
+	return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
+}
+
+static struct nvme_iod *
+nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
+{
+	struct nvme_iod *iod = kmalloc(sizeof(struct nvme_iod) +
+				sizeof(__le64 *) * nvme_npages(nbytes) +
+				sizeof(struct scatterlist) * nseg, gfp);
+
+	if (iod) {
+		iod->offset = offsetof(struct nvme_iod, sg[nseg]);
+		iod->npages = -1;
+		iod->length = nbytes;
+	}
+
+	return iod;
+}
+
+static void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
+{
+	const int last_prp = PAGE_SIZE / 8 - 1;
+	int i;
+	__le64 **list = iod_list(iod);
+	dma_addr_t prp_dma = iod->first_dma;
+
+	if (iod->npages == 0)
+		dma_pool_free(dev->prp_small_pool, list[0], prp_dma);
+	for (i = 0; i < iod->npages; i++) {
+		__le64 *prp_list = list[i];
+		dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
+		dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
+		prp_dma = next_prp_dma;
+	}
+	kfree(iod);
+}
+
+static void requeue_bio(struct nvme_dev *dev, struct bio *bio)
+{
+	struct nvme_queue *nvmeq = get_nvmeq(dev);
+	if (bio_list_empty(&nvmeq->sq_cong))
+		add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
+	bio_list_add(&nvmeq->sq_cong, bio);
+	put_nvmeq(nvmeq);
+	wake_up_process(nvme_thread);
+}
+
+static void bio_completion(struct nvme_dev *dev, void *ctx,
+						struct nvme_completion *cqe)
+{
+	struct nvme_iod *iod = ctx;
+	struct bio *bio = iod->private;
+	u16 status = le16_to_cpup(&cqe->status) >> 1;
+
+	dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
+			bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+	nvme_free_iod(dev, iod);
+	if (status) {
+		bio_endio(bio, -EIO);
+	} else if (bio->bi_vcnt > bio->bi_idx) {
+		requeue_bio(dev, bio);
+	} else {
+		bio_endio(bio, 0);
+	}
+}
+
+/* length is in bytes.  gfp flags indicates whether we may sleep. */
+static int nvme_setup_prps(struct nvme_dev *dev,
+			struct nvme_common_command *cmd, struct nvme_iod *iod,
+			int total_len, gfp_t gfp)
+{
+	struct dma_pool *pool;
+	int length = total_len;
+	struct scatterlist *sg = iod->sg;
+	int dma_len = sg_dma_len(sg);
+	u64 dma_addr = sg_dma_address(sg);
+	int offset = offset_in_page(dma_addr);
+	__le64 *prp_list;
+	__le64 **list = iod_list(iod);
+	dma_addr_t prp_dma;
+	int nprps, i;
+
+	cmd->prp1 = cpu_to_le64(dma_addr);
+	length -= (PAGE_SIZE - offset);
+	if (length <= 0)
+		return total_len;
+
+	dma_len -= (PAGE_SIZE - offset);
+	if (dma_len) {
+		dma_addr += (PAGE_SIZE - offset);
+	} else {
+		sg = sg_next(sg);
+		dma_addr = sg_dma_address(sg);
+		dma_len = sg_dma_len(sg);
+	}
+
+	if (length <= PAGE_SIZE) {
+		cmd->prp2 = cpu_to_le64(dma_addr);
+		return total_len;
+	}
+
+	nprps = DIV_ROUND_UP(length, PAGE_SIZE);
+	if (nprps <= (256 / 8)) {
+		pool = dev->prp_small_pool;
+		iod->npages = 0;
+	} else {
+		pool = dev->prp_page_pool;
+		iod->npages = 1;
+	}
+
+	prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
+	if (!prp_list) {
+		cmd->prp2 = cpu_to_le64(dma_addr);
+		iod->npages = -1;
+		return (total_len - length) + PAGE_SIZE;
+	}
+	list[0] = prp_list;
+	iod->first_dma = prp_dma;
+	cmd->prp2 = cpu_to_le64(prp_dma);
+	i = 0;
+	for (;;) {
+		if (i == PAGE_SIZE / 8) {
+			__le64 *old_prp_list = prp_list;
+			prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
+			if (!prp_list)
+				return total_len - length;
+			list[iod->npages++] = prp_list;
+			prp_list[0] = old_prp_list[i - 1];
+			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
+			i = 1;
+		}
+		prp_list[i++] = cpu_to_le64(dma_addr);
+		dma_len -= PAGE_SIZE;
+		dma_addr += PAGE_SIZE;
+		length -= PAGE_SIZE;
+		if (length <= 0)
+			break;
+		if (dma_len > 0)
+			continue;
+		BUG_ON(dma_len < 0);
+		sg = sg_next(sg);
+		dma_addr = sg_dma_address(sg);
+		dma_len = sg_dma_len(sg);
+	}
+
+	return total_len;
+}
+
+/* NVMe scatterlists require no holes in the virtual address */
+#define BIOVEC_NOT_VIRT_MERGEABLE(vec1, vec2)	((vec2)->bv_offset || \
+			(((vec1)->bv_offset + (vec1)->bv_len) % PAGE_SIZE))
+
+static int nvme_map_bio(struct device *dev, struct nvme_iod *iod,
+		struct bio *bio, enum dma_data_direction dma_dir, int psegs)
+{
+	struct bio_vec *bvec, *bvprv = NULL;
+	struct scatterlist *sg = NULL;
+	int i, old_idx, length = 0, nsegs = 0;
+
+	sg_init_table(iod->sg, psegs);
+	old_idx = bio->bi_idx;
+	bio_for_each_segment(bvec, bio, i) {
+		if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
+			sg->length += bvec->bv_len;
+		} else {
+			if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec))
+				break;
+			sg = sg ? sg + 1 : iod->sg;
+			sg_set_page(sg, bvec->bv_page, bvec->bv_len,
+							bvec->bv_offset);
+			nsegs++;
+		}
+		length += bvec->bv_len;
+		bvprv = bvec;
+	}
+	bio->bi_idx = i;
+	iod->nents = nsegs;
+	sg_mark_end(sg);
+	if (dma_map_sg(dev, iod->sg, iod->nents, dma_dir) == 0) {
+		bio->bi_idx = old_idx;
+		return -ENOMEM;
+	}
+	return length;
+}
+
+static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
+								int cmdid)
+{
+	struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
+
+	memset(cmnd, 0, sizeof(*cmnd));
+	cmnd->common.opcode = nvme_cmd_flush;
+	cmnd->common.command_id = cmdid;
+	cmnd->common.nsid = cpu_to_le32(ns->ns_id);
+
+	if (++nvmeq->sq_tail == nvmeq->q_depth)
+		nvmeq->sq_tail = 0;
+	writel(nvmeq->sq_tail, nvmeq->q_db);
+
+	return 0;
+}
+
+static int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
+{
+	int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH,
+					special_completion, NVME_IO_TIMEOUT);
+	if (unlikely(cmdid < 0))
+		return cmdid;
+
+	return nvme_submit_flush(nvmeq, ns, cmdid);
+}
+
+/*
+ * Called with local interrupts disabled and the q_lock held.  May not sleep.
+ */
+static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
+								struct bio *bio)
+{
+	struct nvme_command *cmnd;
+	struct nvme_iod *iod;
+	enum dma_data_direction dma_dir;
+	int cmdid, length, result = -ENOMEM;
+	u16 control;
+	u32 dsmgmt;
+	int psegs = bio_phys_segments(ns->queue, bio);
+
+	if ((bio->bi_rw & REQ_FLUSH) && psegs) {
+		result = nvme_submit_flush_data(nvmeq, ns);
+		if (result)
+			return result;
+	}
+
+	iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC);
+	if (!iod)
+		goto nomem;
+	iod->private = bio;
+
+	result = -EBUSY;
+	cmdid = alloc_cmdid(nvmeq, iod, bio_completion, NVME_IO_TIMEOUT);
+	if (unlikely(cmdid < 0))
+		goto free_iod;
+
+	if ((bio->bi_rw & REQ_FLUSH) && !psegs)
+		return nvme_submit_flush(nvmeq, ns, cmdid);
+
+	control = 0;
+	if (bio->bi_rw & REQ_FUA)
+		control |= NVME_RW_FUA;
+	if (bio->bi_rw & (REQ_FAILFAST_DEV | REQ_RAHEAD))
+		control |= NVME_RW_LR;
+
+	dsmgmt = 0;
+	if (bio->bi_rw & REQ_RAHEAD)
+		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
+
+	cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
+
+	memset(cmnd, 0, sizeof(*cmnd));
+	if (bio_data_dir(bio)) {
+		cmnd->rw.opcode = nvme_cmd_write;
+		dma_dir = DMA_TO_DEVICE;
+	} else {
+		cmnd->rw.opcode = nvme_cmd_read;
+		dma_dir = DMA_FROM_DEVICE;
+	}
+
+	result = nvme_map_bio(nvmeq->q_dmadev, iod, bio, dma_dir, psegs);
+	if (result < 0)
+		goto free_iod;
+	length = result;
+
+	cmnd->rw.command_id = cmdid;
+	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
+	length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
+								GFP_ATOMIC);
+	cmnd->rw.slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
+	cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
+	cmnd->rw.control = cpu_to_le16(control);
+	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
+
+	bio->bi_sector += length >> 9;
+
+	if (++nvmeq->sq_tail == nvmeq->q_depth)
+		nvmeq->sq_tail = 0;
+	writel(nvmeq->sq_tail, nvmeq->q_db);
+
+	return 0;
+
+ free_iod:
+	nvme_free_iod(nvmeq->dev, iod);
+ nomem:
+	return result;
+}
+
+static void nvme_make_request(struct request_queue *q, struct bio *bio)
+{
+	struct nvme_ns *ns = q->queuedata;
+	struct nvme_queue *nvmeq = get_nvmeq(ns->dev);
+	int result = -EBUSY;
+
+	spin_lock_irq(&nvmeq->q_lock);
+	if (bio_list_empty(&nvmeq->sq_cong))
+		result = nvme_submit_bio_queue(nvmeq, ns, bio);
+	if (unlikely(result)) {
+		if (bio_list_empty(&nvmeq->sq_cong))
+			add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
+		bio_list_add(&nvmeq->sq_cong, bio);
+	}
+
+	spin_unlock_irq(&nvmeq->q_lock);
+	put_nvmeq(nvmeq);
+}
+
+static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
+{
+	u16 head, phase;
+
+	head = nvmeq->cq_head;
+	phase = nvmeq->cq_phase;
+
+	for (;;) {
+		void *ctx;
+		nvme_completion_fn fn;
+		struct nvme_completion cqe = nvmeq->cqes[head];
+		if ((le16_to_cpu(cqe.status) & 1) != phase)
+			break;
+		nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
+		if (++head == nvmeq->q_depth) {
+			head = 0;
+			phase = !phase;
+		}
+
+		ctx = free_cmdid(nvmeq, cqe.command_id, &fn);
+		fn(nvmeq->dev, ctx, &cqe);
+	}
+
+	/* If the controller ignores the cq head doorbell and continuously
+	 * writes to the queue, it is theoretically possible to wrap around
+	 * the queue twice and mistakenly return IRQ_NONE.  Linux only
+	 * requires that 0.1% of your interrupts are handled, so this isn't
+	 * a big problem.
+	 */
+	if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
+		return IRQ_NONE;
+
+	writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride));
+	nvmeq->cq_head = head;
+	nvmeq->cq_phase = phase;
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t nvme_irq(int irq, void *data)
+{
+	irqreturn_t result;
+	struct nvme_queue *nvmeq = data;
+	spin_lock(&nvmeq->q_lock);
+	result = nvme_process_cq(nvmeq);
+	spin_unlock(&nvmeq->q_lock);
+	return result;
+}
+
+static irqreturn_t nvme_irq_check(int irq, void *data)
+{
+	struct nvme_queue *nvmeq = data;
+	struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];
+	if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
+		return IRQ_NONE;
+	return IRQ_WAKE_THREAD;
+}
+
+static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid)
+{
+	spin_lock_irq(&nvmeq->q_lock);
+	cancel_cmdid(nvmeq, cmdid, NULL);
+	spin_unlock_irq(&nvmeq->q_lock);
+}
+
+struct sync_cmd_info {
+	struct task_struct *task;
+	u32 result;
+	int status;
+};
+
+static void sync_completion(struct nvme_dev *dev, void *ctx,
+						struct nvme_completion *cqe)
+{
+	struct sync_cmd_info *cmdinfo = ctx;
+	cmdinfo->result = le32_to_cpup(&cqe->result);
+	cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
+	wake_up_process(cmdinfo->task);
+}
+
+/*
+ * Returns 0 on success.  If the result is negative, it's a Linux error code;
+ * if the result is positive, it's an NVM Express status code
+ */
+static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
+			struct nvme_command *cmd, u32 *result, unsigned timeout)
+{
+	int cmdid;
+	struct sync_cmd_info cmdinfo;
+
+	cmdinfo.task = current;
+	cmdinfo.status = -EINTR;
+
+	cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion,
+								timeout);
+	if (cmdid < 0)
+		return cmdid;
+	cmd->common.command_id = cmdid;
+
+	set_current_state(TASK_KILLABLE);
+	nvme_submit_cmd(nvmeq, cmd);
+	schedule();
+
+	if (cmdinfo.status == -EINTR) {
+		nvme_abort_command(nvmeq, cmdid);
+		return -EINTR;
+	}
+
+	if (result)
+		*result = cmdinfo.result;
+
+	return cmdinfo.status;
+}
+
+static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
+								u32 *result)
+{
+	return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
+}
+
+static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
+{
+	int status;
+	struct nvme_command c;
+
+	memset(&c, 0, sizeof(c));
+	c.delete_queue.opcode = opcode;
+	c.delete_queue.qid = cpu_to_le16(id);
+
+	status = nvme_submit_admin_cmd(dev, &c, NULL);
+	if (status)
+		return -EIO;
+	return 0;
+}
+
+static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
+						struct nvme_queue *nvmeq)
+{
+	int status;
+	struct nvme_command c;
+	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;
+
+	memset(&c, 0, sizeof(c));
+	c.create_cq.opcode = nvme_admin_create_cq;
+	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
+	c.create_cq.cqid = cpu_to_le16(qid);
+	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
+	c.create_cq.cq_flags = cpu_to_le16(flags);
+	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);
+
+	status = nvme_submit_admin_cmd(dev, &c, NULL);
+	if (status)
+		return -EIO;
+	return 0;
+}
+
+static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
+						struct nvme_queue *nvmeq)
+{
+	int status;
+	struct nvme_command c;
+	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;
+
+	memset(&c, 0, sizeof(c));
+	c.create_sq.opcode = nvme_admin_create_sq;
+	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
+	c.create_sq.sqid = cpu_to_le16(qid);
+	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
+	c.create_sq.sq_flags = cpu_to_le16(flags);
+	c.create_sq.cqid = cpu_to_le16(qid);
+
+	status = nvme_submit_admin_cmd(dev, &c, NULL);
+	if (status)
+		return -EIO;
+	return 0;
+}
+
+static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
+{
+	return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
+}
+
+static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
+{
+	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
+}
+
+static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
+							dma_addr_t dma_addr)
+{
+	struct nvme_command c;
+
+	memset(&c, 0, sizeof(c));
+	c.identify.opcode = nvme_admin_identify;
+	c.identify.nsid = cpu_to_le32(nsid);
+	c.identify.prp1 = cpu_to_le64(dma_addr);
+	c.identify.cns = cpu_to_le32(cns);
+
+	return nvme_submit_admin_cmd(dev, &c, NULL);
+}
+
+static int nvme_get_features(struct nvme_dev *dev, unsigned fid,
+				unsigned dword11, dma_addr_t dma_addr)
+{
+	struct nvme_command c;
+
+	memset(&c, 0, sizeof(c));
+	c.features.opcode = nvme_admin_get_features;
+	c.features.prp1 = cpu_to_le64(dma_addr);
+	c.features.fid = cpu_to_le32(fid);
+	c.features.dword11 = cpu_to_le32(dword11);
+
+	return nvme_submit_admin_cmd(dev, &c, NULL);
+}
+
+static int nvme_set_features(struct nvme_dev *dev, unsigned fid,
+			unsigned dword11, dma_addr_t dma_addr, u32 *result)
+{
+	struct nvme_command c;
+
+	memset(&c, 0, sizeof(c));
+	c.features.opcode = nvme_admin_set_features;
+	c.features.prp1 = cpu_to_le64(dma_addr);
+	c.features.fid = cpu_to_le32(fid);
+	c.features.dword11 = cpu_to_le32(dword11);
+
+	return nvme_submit_admin_cmd(dev, &c, result);
+}
+
+static void nvme_free_queue(struct nvme_dev *dev, int qid)
+{
+	struct nvme_queue *nvmeq = dev->queues[qid];
+	int vector = dev->entry[nvmeq->cq_vector].vector;
+
+	irq_set_affinity_hint(vector, NULL);
+	free_irq(vector, nvmeq);
+
+	/* Don't tell the adapter to delete the admin queue */
+	if (qid) {
+		adapter_delete_sq(dev, qid);
+		adapter_delete_cq(dev, qid);
+	}
+
+	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
+				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
+	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
+					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
+	kfree(nvmeq);
+}
+
+static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
+							int depth, int vector)
+{
+	struct device *dmadev = &dev->pci_dev->dev;
+	unsigned extra = (depth / 8) + (depth * sizeof(struct nvme_cmd_info));
+	struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
+	if (!nvmeq)
+		return NULL;
+
+	nvmeq->cqes = dma_alloc_coherent(dmadev, CQ_SIZE(depth),
+					&nvmeq->cq_dma_addr, GFP_KERNEL);
+	if (!nvmeq->cqes)
+		goto free_nvmeq;
+	memset((void *)nvmeq->cqes, 0, CQ_SIZE(depth));
+
+	nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth),
+					&nvmeq->sq_dma_addr, GFP_KERNEL);
+	if (!nvmeq->sq_cmds)
+		goto free_cqdma;
+
+	nvmeq->q_dmadev = dmadev;
+	nvmeq->dev = dev;
+	spin_lock_init(&nvmeq->q_lock);
+	nvmeq->cq_head = 0;
+	nvmeq->cq_phase = 1;
+	init_waitqueue_head(&nvmeq->sq_full);
+	init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
+	bio_list_init(&nvmeq->sq_cong);
+	nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)];
+	nvmeq->q_depth = depth;
+	nvmeq->cq_vector = vector;
+
+	return nvmeq;
+
+ free_cqdma:
+	dma_free_coherent(dmadev, CQ_SIZE(nvmeq->q_depth), (void *)nvmeq->cqes,
+							nvmeq->cq_dma_addr);
+ free_nvmeq:
+	kfree(nvmeq);
+	return NULL;
+}
+
+static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
+							const char *name)
+{
+	if (use_threaded_interrupts)
+		return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
+					nvme_irq_check, nvme_irq,
+					IRQF_DISABLED | IRQF_SHARED,
+					name, nvmeq);
+	return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
+				IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
+}
+
+static __devinit struct nvme_queue *nvme_create_queue(struct nvme_dev *dev,
+					int qid, int cq_size, int vector)
+{
+	int result;
+	struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector);
+
+	if (!nvmeq)
+		return ERR_PTR(-ENOMEM);
+
+	result = adapter_alloc_cq(dev, qid, nvmeq);
+	if (result < 0)
+		goto free_nvmeq;
+
+	result = adapter_alloc_sq(dev, qid, nvmeq);
+	if (result < 0)
+		goto release_cq;
+
+	result = queue_request_irq(dev, nvmeq, "nvme");
+	if (result < 0)
+		goto release_sq;
+
+	return nvmeq;
+
+ release_sq:
+	adapter_delete_sq(dev, qid);
+ release_cq:
+	adapter_delete_cq(dev, qid);
+ free_nvmeq:
+	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
+				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
+	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
+					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
+	kfree(nvmeq);
+	return ERR_PTR(result);
+}
+
+static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev)
+{
+	int result;
+	u32 aqa;
+	u64 cap;
+	unsigned long timeout;
+	struct nvme_queue *nvmeq;
+
+	dev->dbs = ((void __iomem *)dev->bar) + 4096;
+
+	nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
+	if (!nvmeq)
+		return -ENOMEM;
+
+	aqa = nvmeq->q_depth - 1;
+	aqa |= aqa << 16;
+
+	dev->ctrl_config = NVME_CC_ENABLE | NVME_CC_CSS_NVM;
+	dev->ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
+	dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
+	dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
+
+	writel(0, &dev->bar->cc);
+	writel(aqa, &dev->bar->aqa);
+	writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
+	writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
+	writel(dev->ctrl_config, &dev->bar->cc);
+
+	cap = readq(&dev->bar->cap);
+	timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
+	dev->db_stride = NVME_CAP_STRIDE(cap);
+
+	while (!(readl(&dev->bar->csts) & NVME_CSTS_RDY)) {
+		msleep(100);
+		if (fatal_signal_pending(current))
+			return -EINTR;
+		if (time_after(jiffies, timeout)) {
+			dev_err(&dev->pci_dev->dev,
+				"Device not ready; aborting initialisation\n");
+			return -ENODEV;
+		}
+	}
+
+	result = queue_request_irq(dev, nvmeq, "nvme admin");
+	dev->queues[0] = nvmeq;
+	return result;
+}
+
+static struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
+				unsigned long addr, unsigned length)
+{
+	int i, err, count, nents, offset;
+	struct scatterlist *sg;
+	struct page **pages;
+	struct nvme_iod *iod;
+
+	if (addr & 3)
+		return ERR_PTR(-EINVAL);
+	if (!length)
+		return ERR_PTR(-EINVAL);
+
+	offset = offset_in_page(addr);
+	count = DIV_ROUND_UP(offset + length, PAGE_SIZE);
+	pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);
+
+	err = get_user_pages_fast(addr, count, 1, pages);
+	if (err < count) {
+		count = err;
+		err = -EFAULT;
+		goto put_pages;
+	}
+
+	iod = nvme_alloc_iod(count, length, GFP_KERNEL);
+	sg = iod->sg;
+	sg_init_table(sg, count);
+	for (i = 0; i < count; i++) {
+		sg_set_page(&sg[i], pages[i],
+				min_t(int, length, PAGE_SIZE - offset), offset);
+		length -= (PAGE_SIZE - offset);
+		offset = 0;
+	}
+	sg_mark_end(&sg[i - 1]);
+	iod->nents = count;
+
+	err = -ENOMEM;
+	nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
+				write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+	if (!nents)
+		goto free_iod;
+
+	kfree(pages);
+	return iod;
+
+ free_iod:
+	kfree(iod);
+ put_pages:
+	for (i = 0; i < count; i++)
+		put_page(pages[i]);
+	kfree(pages);
+	return ERR_PTR(err);
+}
+
+static void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
+			struct nvme_iod *iod)
+{
+	int i;
+
+	dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
+				write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+	for (i = 0; i < iod->nents; i++)
+		put_page(sg_page(&iod->sg[i]));
+}
+
+static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
+{
+	struct nvme_dev *dev = ns->dev;
+	struct nvme_queue *nvmeq;
+	struct nvme_user_io io;
+	struct nvme_command c;
+	unsigned length;
+	int status;
+	struct nvme_iod *iod;
+
+	if (copy_from_user(&io, uio, sizeof(io)))
+		return -EFAULT;
+	length = (io.nblocks + 1) << ns->lba_shift;
+
+	switch (io.opcode) {
+	case nvme_cmd_write:
+	case nvme_cmd_read:
+	case nvme_cmd_compare:
+		iod = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (IS_ERR(iod))
+		return PTR_ERR(iod);
+
+	memset(&c, 0, sizeof(c));
+	c.rw.opcode = io.opcode;
+	c.rw.flags = io.flags;
+	c.rw.nsid = cpu_to_le32(ns->ns_id);
+	c.rw.slba = cpu_to_le64(io.slba);
+	c.rw.length = cpu_to_le16(io.nblocks);
+	c.rw.control = cpu_to_le16(io.control);
+	c.rw.dsmgmt = cpu_to_le16(io.dsmgmt);
+	c.rw.reftag = io.reftag;
+	c.rw.apptag = io.apptag;
+	c.rw.appmask = io.appmask;
+	/* XXX: metadata */
+	length = nvme_setup_prps(dev, &c.common, iod, length, GFP_KERNEL);
+
+	nvmeq = get_nvmeq(dev);
+	/*
+	 * Since nvme_submit_sync_cmd sleeps, we can't keep preemption
+	 * disabled.  We may be preempted at any point, and be rescheduled
+	 * to a different CPU.  That will cause cacheline bouncing, but no
+	 * additional races since q_lock already protects against other CPUs.
+	 */
+	put_nvmeq(nvmeq);
+	if (length != (io.nblocks + 1) << ns->lba_shift)
+		status = -ENOMEM;
+	else
+		status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
+
+	nvme_unmap_user_pages(dev, io.opcode & 1, iod);
+	nvme_free_iod(dev, iod);
+	return status;
+}
+
+static int nvme_user_admin_cmd(struct nvme_ns *ns,
+					struct nvme_admin_cmd __user *ucmd)
+{
+	struct nvme_dev *dev = ns->dev;
+	struct nvme_admin_cmd cmd;
+	struct nvme_command c;
+	int status, length;
+	struct nvme_iod *iod;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
+		return -EFAULT;
+
+	memset(&c, 0, sizeof(c));
+	c.common.opcode = cmd.opcode;
+	c.common.flags = cmd.flags;
+	c.common.nsid = cpu_to_le32(cmd.nsid);
+	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
+	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
+	c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
+	c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
+	c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
+	c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
+	c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
+	c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);
+
+	length = cmd.data_len;
+	if (cmd.data_len) {
+		iod = nvme_map_user_pages(dev, cmd.opcode & 1, cmd.addr,
+								length);
+		if (IS_ERR(iod))
+			return PTR_ERR(iod);
+		length = nvme_setup_prps(dev, &c.common, iod, length,
+								GFP_KERNEL);
+	}
+
+	if (length != cmd.data_len)
+		status = -ENOMEM;
+	else
+		status = nvme_submit_admin_cmd(dev, &c, NULL);
+
+	if (cmd.data_len) {
+		nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
+		nvme_free_iod(dev, iod);
+	}
+	return status;
+}
+
+static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
+							unsigned long arg)
+{
+	struct nvme_ns *ns = bdev->bd_disk->private_data;
+
+	switch (cmd) {
+	case NVME_IOCTL_ID:
+		return ns->ns_id;
+	case NVME_IOCTL_ADMIN_CMD:
+		return nvme_user_admin_cmd(ns, (void __user *)arg);
+	case NVME_IOCTL_SUBMIT_IO:
+		return nvme_submit_io(ns, (void __user *)arg);
+	default:
+		return -ENOTTY;
+	}
+}
+
+static const struct block_device_operations nvme_fops = {
+	.owner		= THIS_MODULE,
+	.ioctl		= nvme_ioctl,
+	.compat_ioctl	= nvme_ioctl,
+};
+
+static void nvme_timeout_ios(struct nvme_queue *nvmeq)
+{
+	int depth = nvmeq->q_depth - 1;
+	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+	unsigned long now = jiffies;
+	int cmdid;
+
+	for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
+		void *ctx;
+		nvme_completion_fn fn;
+		static struct nvme_completion cqe = { .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1, };
+
+		if (!time_after(now, info[cmdid].timeout))
+			continue;
+		dev_warn(nvmeq->q_dmadev, "Timing out I/O %d\n", cmdid);
+		ctx = cancel_cmdid(nvmeq, cmdid, &fn);
+		fn(nvmeq->dev, ctx, &cqe);
+	}
+}
+
+static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
+{
+	while (bio_list_peek(&nvmeq->sq_cong)) {
+		struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
+		struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
+		if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
+			bio_list_add_head(&nvmeq->sq_cong, bio);
+			break;
+		}
+		if (bio_list_empty(&nvmeq->sq_cong))
+			remove_wait_queue(&nvmeq->sq_full,
+							&nvmeq->sq_cong_wait);
+	}
+}
+
+static int nvme_kthread(void *data)
+{
+	struct nvme_dev *dev;
+
+	while (!kthread_should_stop()) {
+		__set_current_state(TASK_RUNNING);
+		spin_lock(&dev_list_lock);
+		list_for_each_entry(dev, &dev_list, node) {
+			int i;
+			for (i = 0; i < dev->queue_count; i++) {
+				struct nvme_queue *nvmeq = dev->queues[i];
+				if (!nvmeq)
+					continue;
+				spin_lock_irq(&nvmeq->q_lock);
+				if (nvme_process_cq(nvmeq))
+					printk("process_cq did something\n");
+				nvme_timeout_ios(nvmeq);
+				nvme_resubmit_bios(nvmeq);
+				spin_unlock_irq(&nvmeq->q_lock);
+			}
+		}
+		spin_unlock(&dev_list_lock);
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(HZ);
+	}
+	return 0;
+}
+
+static DEFINE_IDA(nvme_index_ida);
+
+static int nvme_get_ns_idx(void)
+{
+	int index, error;
+
+	do {
+		if (!ida_pre_get(&nvme_index_ida, GFP_KERNEL))
+			return -1;
+
+		spin_lock(&dev_list_lock);
+		error = ida_get_new(&nvme_index_ida, &index);
+		spin_unlock(&dev_list_lock);
+	} while (error == -EAGAIN);
+
+	if (error)
+		index = -1;
+	return index;
+}
+
+static void nvme_put_ns_idx(int index)
+{
+	spin_lock(&dev_list_lock);
+	ida_remove(&nvme_index_ida, index);
+	spin_unlock(&dev_list_lock);
+}
+
+static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
+			struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
+{
+	struct nvme_ns *ns;
+	struct gendisk *disk;
+	int lbaf;
+
+	if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
+		return NULL;
+
+	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
+	if (!ns)
+		return NULL;
+	ns->queue = blk_alloc_queue(GFP_KERNEL);
+	if (!ns->queue)
+		goto out_free_ns;
+	ns->queue->queue_flags = QUEUE_FLAG_DEFAULT;
+	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
+	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
+/*	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue); */
+	blk_queue_make_request(ns->queue, nvme_make_request);
+	ns->dev = dev;
+	ns->queue->queuedata = ns;
+
+	disk = alloc_disk(NVME_MINORS);
+	if (!disk)
+		goto out_free_queue;
+	ns->ns_id = nsid;
+	ns->disk = disk;
+	lbaf = id->flbas & 0xf;
+	ns->lba_shift = id->lbaf[lbaf].ds;
+
+	disk->major = nvme_major;
+	disk->minors = NVME_MINORS;
+	disk->first_minor = NVME_MINORS * nvme_get_ns_idx();
+	disk->fops = &nvme_fops;
+	disk->private_data = ns;
+	disk->queue = ns->queue;
+	disk->driverfs_dev = &dev->pci_dev->dev;
+	sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
+	set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
+
+	return ns;
+
+ out_free_queue:
+	blk_cleanup_queue(ns->queue);
+ out_free_ns:
+	kfree(ns);
+	return NULL;
+}
+
+static void nvme_ns_free(struct nvme_ns *ns)
+{
+	int index = ns->disk->first_minor / NVME_MINORS;
+	put_disk(ns->disk);
+	nvme_put_ns_idx(index);
+	blk_cleanup_queue(ns->queue);
+	kfree(ns);
+}
+
+static int set_queue_count(struct nvme_dev *dev, int count)
+{
+	int status;
+	u32 result;
+	u32 q_count = (count - 1) | ((count - 1) << 16);
+
+	status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0,
+								&result);
+	if (status)
+		return -EIO;
+	return min(result & 0xffff, result >> 16) + 1;
+}
+
+static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
+{
+	int result, cpu, i, nr_io_queues, db_bar_size;
+
+	nr_io_queues = num_online_cpus();
+	result = set_queue_count(dev, nr_io_queues);
+	if (result < 0)
+		return result;
+	if (result < nr_io_queues)
+		nr_io_queues = result;
+
+	/* Deregister the admin queue's interrupt */
+	free_irq(dev->entry[0].vector, dev->queues[0]);
+
+	db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
+	if (db_bar_size > 8192) {
+		iounmap(dev->bar);
+		dev->bar = ioremap(pci_resource_start(dev->pci_dev, 0),
+								db_bar_size);
+		dev->dbs = ((void __iomem *)dev->bar) + 4096;
+		dev->queues[0]->q_db = dev->dbs;
+	}
+
+	for (i = 0; i < nr_io_queues; i++)
+		dev->entry[i].entry = i;
+	for (;;) {
+		result = pci_enable_msix(dev->pci_dev, dev->entry,
+								nr_io_queues);
+		if (result == 0) {
+			break;
+		} else if (result > 0) {
+			nr_io_queues = result;
+			continue;
+		} else {
+			nr_io_queues = 1;
+			break;
+		}
+	}
+
+	result = queue_request_irq(dev, dev->queues[0], "nvme admin");
+	/* XXX: handle failure here */
+
+	cpu = cpumask_first(cpu_online_mask);
+	for (i = 0; i < nr_io_queues; i++) {
+		irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
+		cpu = cpumask_next(cpu, cpu_online_mask);
+	}
+
+	for (i = 0; i < nr_io_queues; i++) {
+		dev->queues[i + 1] = nvme_create_queue(dev, i + 1,
+							NVME_Q_DEPTH, i);
+		if (IS_ERR(dev->queues[i + 1]))
+			return PTR_ERR(dev->queues[i + 1]);
+		dev->queue_count++;
+	}
+
+	for (; i < num_possible_cpus(); i++) {
+		int target = i % rounddown_pow_of_two(dev->queue_count - 1);
+		dev->queues[i + 1] = dev->queues[target + 1];
+	}
+
+	return 0;
+}
+
+static void nvme_free_queues(struct nvme_dev *dev)
+{
+	int i;
+
+	for (i = dev->queue_count - 1; i >= 0; i--)
+		nvme_free_queue(dev, i);
+}
+
+static int __devinit nvme_dev_add(struct nvme_dev *dev)
+{
+	int res, nn, i;
+	struct nvme_ns *ns, *next;
+	struct nvme_id_ctrl *ctrl;
+	struct nvme_id_ns *id_ns;
+	void *mem;
+	dma_addr_t dma_addr;
+
+	res = nvme_setup_io_queues(dev);
+	if (res)
+		return res;
+
+	mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
+								GFP_KERNEL);
+
+	res = nvme_identify(dev, 0, 1, dma_addr);
+	if (res) {
+		res = -EIO;
+		goto out_free;
+	}
+
+	ctrl = mem;
+	nn = le32_to_cpup(&ctrl->nn);
+	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
+	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
+	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
+
+	id_ns = mem;
+	for (i = 1; i <= nn; i++) {
+		res = nvme_identify(dev, i, 0, dma_addr);
+		if (res)
+			continue;
+
+		if (id_ns->ncap == 0)
+			continue;
+
+		res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i,
+							dma_addr + 4096);
+		if (res)
+			continue;
+
+		ns = nvme_alloc_ns(dev, i, mem, mem + 4096);
+		if (ns)
+			list_add_tail(&ns->list, &dev->namespaces);
+	}
+	list_for_each_entry(ns, &dev->namespaces, list)
+		add_disk(ns->disk);
+
+	goto out;
+
+ out_free:
+	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
+		list_del(&ns->list);
+		nvme_ns_free(ns);
+	}
+
+ out:
+	dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr);
+	return res;
+}
+
+static int nvme_dev_remove(struct nvme_dev *dev)
+{
+	struct nvme_ns *ns, *next;
+
+	spin_lock(&dev_list_lock);
+	list_del(&dev->node);
+	spin_unlock(&dev_list_lock);
+
+	/* TODO: wait all I/O finished or cancel them */
+
+	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
+		list_del(&ns->list);
+		del_gendisk(ns->disk);
+		nvme_ns_free(ns);
+	}
+
+	nvme_free_queues(dev);
+
+	return 0;
+}
+
+static int nvme_setup_prp_pools(struct nvme_dev *dev)
+{
+	struct device *dmadev = &dev->pci_dev->dev;
+	dev->prp_page_pool = dma_pool_create("prp list page", dmadev,
+						PAGE_SIZE, PAGE_SIZE, 0);
+	if (!dev->prp_page_pool)
+		return -ENOMEM;
+
+	/* Optimisation for I/Os between 4k and 128k */
+	dev->prp_small_pool = dma_pool_create("prp list 256", dmadev,
+						256, 256, 0);
+	if (!dev->prp_small_pool) {
+		dma_pool_destroy(dev->prp_page_pool);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static void nvme_release_prp_pools(struct nvme_dev *dev)
+{
+	dma_pool_destroy(dev->prp_page_pool);
+	dma_pool_destroy(dev->prp_small_pool);
+}
+
+/* XXX: Use an ida or something to let remove / add work correctly */
+static void nvme_set_instance(struct nvme_dev *dev)
+{
+	static int instance;
+	dev->instance = instance++;
+}
+
+static void nvme_release_instance(struct nvme_dev *dev)
+{
+}
+
+static int __devinit nvme_probe(struct pci_dev *pdev,
+						const struct pci_device_id *id)
+{
+	int bars, result = -ENOMEM;
+	struct nvme_dev *dev;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+	dev->entry = kcalloc(num_possible_cpus(), sizeof(*dev->entry),
+								GFP_KERNEL);
+	if (!dev->entry)
+		goto free;
+	dev->queues = kcalloc(num_possible_cpus() + 1, sizeof(void *),
+								GFP_KERNEL);
+	if (!dev->queues)
+		goto free;
+
+	if (pci_enable_device_mem(pdev))
+		goto free;
+	pci_set_master(pdev);
+	bars = pci_select_bars(pdev, IORESOURCE_MEM);
+	if (pci_request_selected_regions(pdev, bars, "nvme"))
+		goto disable;
+
+	INIT_LIST_HEAD(&dev->namespaces);
+	dev->pci_dev = pdev;
+	pci_set_drvdata(pdev, dev);
+	dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+	dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+	nvme_set_instance(dev);
+	dev->entry[0].vector = pdev->irq;
+
+	result = nvme_setup_prp_pools(dev);
+	if (result)
+		goto disable_msix;
+
+	dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
+	if (!dev->bar) {
+		result = -ENOMEM;
+		goto disable_msix;
+	}
+
+	result = nvme_configure_admin_queue(dev);
+	if (result)
+		goto unmap;
+	dev->queue_count++;
+
+	spin_lock(&dev_list_lock);
+	list_add(&dev->node, &dev_list);
+	spin_unlock(&dev_list_lock);
+
+	result = nvme_dev_add(dev);
+	if (result)
+		goto delete;
+
+	return 0;
+
+ delete:
+	spin_lock(&dev_list_lock);
+	list_del(&dev->node);
+	spin_unlock(&dev_list_lock);
+
+	nvme_free_queues(dev);
+ unmap:
+	iounmap(dev->bar);
+ disable_msix:
+	pci_disable_msix(pdev);
+	nvme_release_instance(dev);
+	nvme_release_prp_pools(dev);
+ disable:
+	pci_disable_device(pdev);
+	pci_release_regions(pdev);
+ free:
+	kfree(dev->queues);
+	kfree(dev->entry);
+	kfree(dev);
+	return result;
+}
+
+static void __devexit nvme_remove(struct pci_dev *pdev)
+{
+	struct nvme_dev *dev = pci_get_drvdata(pdev);
+	nvme_dev_remove(dev);
+	pci_disable_msix(pdev);
+	iounmap(dev->bar);
+	nvme_release_instance(dev);
+	nvme_release_prp_pools(dev);
+	pci_disable_device(pdev);
+	pci_release_regions(pdev);
+	kfree(dev->queues);
+	kfree(dev->entry);
+	kfree(dev);
+}
+
+/* These functions are yet to be implemented */
+#define nvme_error_detected NULL
+#define nvme_dump_registers NULL
+#define nvme_link_reset NULL
+#define nvme_slot_reset NULL
+#define nvme_error_resume NULL
+#define nvme_suspend NULL
+#define nvme_resume NULL
+
+static struct pci_error_handlers nvme_err_handler = {
+	.error_detected	= nvme_error_detected,
+	.mmio_enabled	= nvme_dump_registers,
+	.link_reset	= nvme_link_reset,
+	.slot_reset	= nvme_slot_reset,
+	.resume		= nvme_error_resume,
+};
+
+/* Move to pci_ids.h later */
+#define PCI_CLASS_STORAGE_EXPRESS	0x010802
+
+static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = {
+	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, nvme_id_table);
+
+static struct pci_driver nvme_driver = {
+	.name		= "nvme",
+	.id_table	= nvme_id_table,
+	.probe		= nvme_probe,
+	.remove		= __devexit_p(nvme_remove),
+	.suspend	= nvme_suspend,
+	.resume		= nvme_resume,
+	.err_handler	= &nvme_err_handler,
+};
+
+static int __init nvme_init(void)
+{
+	int result = -EBUSY;
+
+	nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
+	if (IS_ERR(nvme_thread))
+		return PTR_ERR(nvme_thread);
+
+	nvme_major = register_blkdev(nvme_major, "nvme");
+	if (nvme_major <= 0)
+		goto kill_kthread;
+
+	result = pci_register_driver(&nvme_driver);
+	if (result)
+		goto unregister_blkdev;
+	return 0;
+
+ unregister_blkdev:
+	unregister_blkdev(nvme_major, "nvme");
+ kill_kthread:
+	kthread_stop(nvme_thread);
+	return result;
+}
+
+static void __exit nvme_exit(void)
+{
+	pci_unregister_driver(&nvme_driver);
+	unregister_blkdev(nvme_major, "nvme");
+	kthread_stop(nvme_thread);
+}
+
+MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.8");
+module_init(nvme_init);
+module_exit(nvme_exit);
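
For illustration only, not part of the patch: a minimal user-space sketch that exercises the NVME_IOCTL_ADMIN_CMD path handled by nvme_user_admin_cmd() above. The device node name follows the "nvme%dn%d" pattern used in nvme_alloc_ns(); the Identify opcode (0x06), the CNS value and the model-number offset are taken from the NVMe specification rather than from this diff, and the <linux/nvme.h> include assumes the UAPI header that accompanies this driver is installed.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/nvme.h>		/* struct nvme_admin_cmd, NVME_IOCTL_ADMIN_CMD */

int main(void)
{
	static unsigned char id[4096];		/* Identify Controller data buffer */
	struct nvme_admin_cmd cmd;
	int fd = open("/dev/nvme0n1", O_RDONLY);	/* "nvme%dn%d" as in nvme_alloc_ns() */

	if (fd < 0)
		return 1;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode   = 0x06;			/* Identify (assumed, per NVMe spec) */
	cmd.nsid     = 0;			/* controller-level identify */
	cmd.addr     = (unsigned long)id;	/* mapped by nvme_map_user_pages() */
	cmd.data_len = sizeof(id);		/* nvme_identify() transfers 4096 bytes */
	cmd.cdw10    = 1;			/* CNS=1: Identify Controller */

	if (ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd) < 0)
		return 1;

	printf("model: %.40s\n", (char *)id + 24);	/* MN field, bytes 24..63 (assumed offset) */
	return 0;
}

A nonzero return from the ioctl can also be an NVMe status code propagated by nvme_submit_admin_cmd(); the sketch only checks for outright failure.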
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 6a8771f..32362cf 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -846,6 +846,15 @@
 
 	do {
 		rc = __tpm_pcr_read(chip, 0, digest);
+		if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) {
+			dev_info(chip->dev,
+				 "TPM is disabled/deactivated (0x%X)\n", rc);
+			/* TPM is disabled and/or deactivated; driver can
+			 * proceed and TPM does handle commands for
+			 * suspend/resume correctly
+			 */
+			return 0;
+		}
 		if (rc != TPM_WARN_DOING_SELFTEST)
 			return rc;
 		msleep(delay_msec);
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 8c1df30..0105471 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -39,6 +39,9 @@
 };
 
 #define TPM_WARN_DOING_SELFTEST 0x802
+#define TPM_ERR_DEACTIVATED     0x6
+#define TPM_ERR_DISABLED        0x7
+
 #define TPM_HEADER_SIZE		10
 extern ssize_t tpm_show_pubek(struct device *, struct device_attribute *attr,
 				char *);
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 37c4bd1..d0c4118 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -87,6 +87,7 @@
 
 config GPIO_IT8761E
 	tristate "IT8761E GPIO support"
+	depends on X86  # unconditional access to IO space.
 	help
 	  Say yes here to support GPIO functionality of IT8761E super I/O chip.
 
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index 461958f..03d6dd5 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -248,7 +248,7 @@
 static int ioh_irq_type(struct irq_data *d, unsigned int type)
 {
 	u32 im;
-	u32 *im_reg;
+	void __iomem *im_reg;
 	u32 ien;
 	u32 im_pos;
 	int ch;
@@ -412,7 +412,7 @@
 	int i, j;
 	struct ioh_gpio *chip;
 	void __iomem *base;
-	void __iomem *chip_save;
+	void *chip_save;
 	int irq_base;
 
 	ret = pci_enable_device(pdev);
@@ -428,7 +428,7 @@
 	}
 
 	base = pci_iomap(pdev, 1, 0);
-	if (base == 0) {
+	if (!base) {
 		dev_err(&pdev->dev, "%s : pci_iomap failed", __func__);
 		ret = -ENOMEM;
 		goto err_iomap;
@@ -521,7 +521,7 @@
 	int err;
 	int i;
 	struct ioh_gpio *chip = pci_get_drvdata(pdev);
-	void __iomem *chip_save;
+	void *chip_save;
 
 	chip_save = chip;
 
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index f060329..68fa55e 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -231,7 +231,7 @@
 static int pch_irq_type(struct irq_data *d, unsigned int type)
 {
 	u32 im;
-	u32 *im_reg;
+	u32 __iomem *im_reg;
 	u32 ien;
 	u32 im_pos;
 	int ch;
@@ -376,7 +376,7 @@
 	}
 
 	chip->base = pci_iomap(pdev, 1, 0);
-	if (chip->base == 0) {
+	if (!chip->base) {
 		dev_err(&pdev->dev, "%s : pci_iomap FAILED", __func__);
 		ret = -ENOMEM;
 		goto err_iomap;
diff --git a/drivers/gpio/gpio-tps65910.c b/drivers/gpio/gpio-tps65910.c
index b9c1c29..91f45b9 100644
--- a/drivers/gpio/gpio-tps65910.c
+++ b/drivers/gpio/gpio-tps65910.c
@@ -52,7 +52,7 @@
 	struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
 
 	/* Set the initial value */
-	tps65910_gpio_set(gc, 0, value);
+	tps65910_gpio_set(gc, offset, value);
 
 	return tps65910_set_bits(tps65910, TPS65910_GPIO0 + offset,
 						GPIO_CFG_MASK);
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index e770bd1..5d5330f 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -20,6 +20,7 @@
  */
 
 #include <drm/drmP.h>
+#include <linux/shmem_fs.h>
 #include "psb_drv.h"
 
 
@@ -203,9 +204,7 @@
 	gt->npage = pages;
 
 	for (i = 0; i < pages; i++) {
-		/* FIXME: needs updating as per mail from Hugh Dickins */
-		p = read_cache_page_gfp(mapping, i,
-					__GFP_COLD | GFP_KERNEL);
+		p = shmem_read_mapping_page(mapping, i);
 		if (IS_ERR(p))
 			goto err;
 		gt->pages[i] = p;
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index cbe7a2f..3101dd5 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -682,19 +682,19 @@
 	  will be called xilinx_i2c.
 
 config I2C_EG20T
-	tristate "Intel EG20T PCH / OKI SEMICONDUCTOR IOH(ML7213/ML7223)"
+	tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) I2C"
 	depends on PCI
 	help
 	  This driver is for PCH(Platform controller Hub) I2C of EG20T which
 	  is an IOH(Input/Output Hub) for x86 embedded processor.
 	  This driver can access PCH I2C bus device.
 
-	  This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
-	  Output Hub), ML7213 and ML7223.
-	  ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is
-	  for MP(Media Phone) use.
-	  ML7213/ML7223 is companion chip for Intel Atom E6xx series.
-	  ML7213/ML7223 is completely compatible for Intel EG20T PCH.
+	  This driver also can be used for LAPIS Semiconductor IOH(Input/
+	  Output Hub), ML7213, ML7223 and ML7831.
+	  ML7213 IOH is for IVI(In-Vehicle Infotainment) use, ML7223 IOH is
+	  for MP(Media Phone) use and ML7831 IOH is for general purpose use.
+	  ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series.
+	  ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH.
 
 comment "External I2C/SMBus adapter drivers"
 
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index 3ef3557..ca88776 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
+ * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -136,7 +136,8 @@
 /*
 Set the number of I2C instance max
 Intel EG20T PCH :		1ch
-OKI SEMICONDUCTOR ML7213 IOH :	2ch
+LAPIS Semiconductor ML7213 IOH :	2ch
+LAPIS Semiconductor ML7831 IOH :	1ch
 */
 #define PCH_I2C_MAX_DEV			2
 
@@ -180,15 +181,17 @@
 static wait_queue_head_t pch_event;
 static DEFINE_MUTEX(pch_mutex);
 
-/* Definition for ML7213 by OKI SEMICONDUCTOR */
+/* Definition for ML7213 by LAPIS Semiconductor */
 #define PCI_VENDOR_ID_ROHM		0x10DB
 #define PCI_DEVICE_ID_ML7213_I2C	0x802D
 #define PCI_DEVICE_ID_ML7223_I2C	0x8010
+#define PCI_DEVICE_ID_ML7831_I2C	0x8817
 
 static DEFINE_PCI_DEVICE_TABLE(pch_pcidev_id) = {
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_I2C),   1, },
 	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_I2C), 2, },
 	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_I2C), 1, },
+	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_I2C), 1, },
 	{0,}
 };
 
@@ -243,7 +246,7 @@
 	if (pch_clk > PCH_MAX_CLK)
 		pch_clk = 62500;
 
-	pch_i2cbc = (pch_clk + (pch_i2c_speed * 4)) / pch_i2c_speed * 8;
+	pch_i2cbc = (pch_clk + (pch_i2c_speed * 4)) / (pch_i2c_speed * 8);
 	/* Set transfer speed in I2CBC */
 	iowrite32(pch_i2cbc, p + PCH_I2CBC);
 
@@ -918,7 +921,9 @@
 		pch_adap->dev.parent = &pdev->dev;
 
 		pch_i2c_init(&adap_info->pch_data[i]);
-		ret = i2c_add_adapter(pch_adap);
+
+		pch_adap->nr = i;
+		ret = i2c_add_numbered_adapter(pch_adap);
 		if (ret) {
 			pch_pci_err(pdev, "i2c_add_adapter[ch:%d] FAILED\n", i);
 			goto err_add_adapter;
@@ -1058,8 +1063,8 @@
 }
 module_exit(pch_pci_exit);
 
-MODULE_DESCRIPTION("Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH I2C Driver");
+MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semico ML7213/ML7223/ML7831 IOH I2C");
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Tomoya MORINAGA. <tomoya-linux@dsn.okisemi.com>");
+MODULE_AUTHOR("Tomoya MORINAGA. <tomoya-linux@dsn.lapis-semi.com>");
 module_param(pch_i2c_speed, int, (S_IRUSR | S_IWUSR));
 module_param(pch_clk, int, (S_IRUSR | S_IWUSR));
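
A worked check of the precedence fix in the I2CBC divider above, assuming the units implied by the surrounding code (pch_clk capped at 62500 and a bus speed of 100, both in kHz): the old expression grouped as ((62500 + 400) / 100) * 8 = 5032 because division and multiplication associate left to right, whereas the corrected (62500 + 400) / (100 * 8) = 78, i.e. roughly pch_clk / (8 * pch_i2c_speed) with rounding, which matches the apparent intent of the formula.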
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index fa23faa..f713eac 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -37,6 +37,9 @@
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_i2c.h>
+#include <linux/of_device.h>
 #include <linux/slab.h>
 #include <linux/i2c-omap.h>
 #include <linux/pm_runtime.h>
@@ -182,7 +185,9 @@
 	u32			latency;	/* maximum mpu wkup latency */
 	void			(*set_mpu_wkup_lat)(struct device *dev,
 						    long latency);
-	u32			speed;		/* Speed of bus in Khz */
+	u32			speed;		/* Speed of bus in kHz */
+	u32			dtrev;		/* extra revision from DT */
+	u32			flags;
 	u16			cmd_err;
 	u8			*buf;
 	u8			*regs;
@@ -235,7 +240,7 @@
 	[OMAP_I2C_BUF_REG] = 0x94,
 	[OMAP_I2C_CNT_REG] = 0x98,
 	[OMAP_I2C_DATA_REG] = 0x9c,
-	[OMAP_I2C_SYSC_REG] = 0x20,
+	[OMAP_I2C_SYSC_REG] = 0x10,
 	[OMAP_I2C_CON_REG] = 0xa4,
 	[OMAP_I2C_OA_REG] = 0xa8,
 	[OMAP_I2C_SA_REG] = 0xac,
@@ -266,11 +271,7 @@
 
 static void omap_i2c_unidle(struct omap_i2c_dev *dev)
 {
-	struct omap_i2c_bus_platform_data *pdata;
-
-	pdata = dev->dev->platform_data;
-
-	if (pdata->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) {
+	if (dev->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) {
 		omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
 		omap_i2c_write_reg(dev, OMAP_I2C_PSC_REG, dev->pscstate);
 		omap_i2c_write_reg(dev, OMAP_I2C_SCLL_REG, dev->scllstate);
@@ -291,13 +292,10 @@
 
 static void omap_i2c_idle(struct omap_i2c_dev *dev)
 {
-	struct omap_i2c_bus_platform_data *pdata;
 	u16 iv;
 
-	pdata = dev->dev->platform_data;
-
 	dev->iestate = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG);
-	if (pdata->rev == OMAP_I2C_IP_VERSION_2)
+	if (dev->dtrev == OMAP_I2C_IP_VERSION_2)
 		omap_i2c_write_reg(dev, OMAP_I2C_IP_V2_IRQENABLE_CLR, 1);
 	else
 		omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, 0);
@@ -320,9 +318,6 @@
 	unsigned long timeout;
 	unsigned long internal_clk = 0;
 	struct clk *fclk;
-	struct omap_i2c_bus_platform_data *pdata;
-
-	pdata = dev->dev->platform_data;
 
 	if (dev->rev >= OMAP_I2C_OMAP1_REV_2) {
 		/* Disable I2C controller before soft reset */
@@ -373,7 +368,7 @@
 	}
 	omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
 
-	if (pdata->flags & OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK) {
+	if (dev->flags & OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK) {
 		/*
 		 * The I2C functional clock is the armxor_ck, so there's
 		 * no need to get "armxor_ck" separately.  Now, if OMAP2420
@@ -397,7 +392,7 @@
 			psc = fclk_rate / 12000000;
 	}
 
-	if (!(pdata->flags & OMAP_I2C_FLAG_SIMPLE_CLOCK)) {
+	if (!(dev->flags & OMAP_I2C_FLAG_SIMPLE_CLOCK)) {
 
 		/*
 		 * HSI2C controller internal clk rate should be 19.2 Mhz for
@@ -406,7 +401,7 @@
 		 * The filter is iclk (fclk for HS) period.
 		 */
 		if (dev->speed > 400 ||
-			       pdata->flags & OMAP_I2C_FLAG_FORCE_19200_INT_CLK)
+			       dev->flags & OMAP_I2C_FLAG_FORCE_19200_INT_CLK)
 			internal_clk = 19200;
 		else if (dev->speed > 100)
 			internal_clk = 9600;
@@ -475,7 +470,7 @@
 
 	dev->errata = 0;
 
-	if (pdata->flags & OMAP_I2C_FLAG_APPLY_ERRATA_I207)
+	if (dev->flags & OMAP_I2C_FLAG_APPLY_ERRATA_I207)
 		dev->errata |= I2C_OMAP_ERRATA_I207;
 
 	/* Enable interrupts */
@@ -484,7 +479,7 @@
 			OMAP_I2C_IE_AL)  | ((dev->fifo_size) ?
 				(OMAP_I2C_IE_RDR | OMAP_I2C_IE_XDR) : 0);
 	omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, dev->iestate);
-	if (pdata->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) {
+	if (dev->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) {
 		dev->pscstate = psc;
 		dev->scllstate = scll;
 		dev->sclhstate = sclh;
@@ -804,9 +799,6 @@
 	u16 bits;
 	u16 stat, w;
 	int err, count = 0;
-	struct omap_i2c_bus_platform_data *pdata;
-
-	pdata = dev->dev->platform_data;
 
 	if (pm_runtime_suspended(dev->dev))
 		return IRQ_NONE;
@@ -830,11 +822,9 @@
 				~(OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR |
 				OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR));
 
-		if (stat & OMAP_I2C_STAT_NACK) {
+		if (stat & OMAP_I2C_STAT_NACK)
 			err |= OMAP_I2C_STAT_NACK;
-			omap_i2c_write_reg(dev, OMAP_I2C_CON_REG,
-					   OMAP_I2C_CON_STP);
-		}
+
 		if (stat & OMAP_I2C_STAT_AL) {
 			dev_err(dev->dev, "Arbitration lost\n");
 			err |= OMAP_I2C_STAT_AL;
@@ -875,7 +865,7 @@
 					 * Data reg in 2430, omap3 and
 					 * omap4 is 8 bit wide
 					 */
-					if (pdata->flags &
+					if (dev->flags &
 						 OMAP_I2C_FLAG_16BIT_DATA_REG) {
 						if (dev->buf_len) {
 							*dev->buf++ = w >> 8;
@@ -918,7 +908,7 @@
 					 * Data reg in 2430, omap3 and
 					 * omap4 is 8 bit wide
 					 */
-					if (pdata->flags &
+					if (dev->flags &
 						 OMAP_I2C_FLAG_16BIT_DATA_REG) {
 						if (dev->buf_len) {
 							w |= *dev->buf++ << 8;
@@ -965,6 +955,32 @@
 	.functionality	= omap_i2c_func,
 };
 
+#ifdef CONFIG_OF
+static struct omap_i2c_bus_platform_data omap3_pdata = {
+	.rev = OMAP_I2C_IP_VERSION_1,
+	.flags = OMAP_I2C_FLAG_APPLY_ERRATA_I207 |
+		 OMAP_I2C_FLAG_RESET_REGS_POSTIDLE |
+		 OMAP_I2C_FLAG_BUS_SHIFT_2,
+};
+
+static struct omap_i2c_bus_platform_data omap4_pdata = {
+	.rev = OMAP_I2C_IP_VERSION_2,
+};
+
+static const struct of_device_id omap_i2c_of_match[] = {
+	{
+		.compatible = "ti,omap4-i2c",
+		.data = &omap4_pdata,
+	},
+	{
+		.compatible = "ti,omap3-i2c",
+		.data = &omap3_pdata,
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, omap_i2c_of_match);
+#endif
+
 static int __devinit
 omap_i2c_probe(struct platform_device *pdev)
 {
@@ -972,9 +988,10 @@
 	struct i2c_adapter	*adap;
 	struct resource		*mem, *irq, *ioarea;
 	struct omap_i2c_bus_platform_data *pdata = pdev->dev.platform_data;
+	struct device_node	*node = pdev->dev.of_node;
+	const struct of_device_id *match;
 	irq_handler_t isr;
 	int r;
-	u32 speed = 0;
 
 	/* NOTE: driver uses the static register mapping */
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1001,15 +1018,24 @@
 		goto err_release_region;
 	}
 
-	if (pdata != NULL) {
-		speed = pdata->clkrate;
+	match = of_match_device(omap_i2c_of_match, &pdev->dev);
+	if (match) {
+		u32 freq = 100000; /* default to 100000 Hz */
+
+		pdata = match->data;
+		dev->dtrev = pdata->rev;
+		dev->flags = pdata->flags;
+
+		of_property_read_u32(node, "clock-frequency", &freq);
+		/* convert DT freq value in Hz into kHz for speed */
+		dev->speed = freq / 1000;
+	} else if (pdata != NULL) {
+		dev->speed = pdata->clkrate;
+		dev->flags = pdata->flags;
 		dev->set_mpu_wkup_lat = pdata->set_mpu_wkup_lat;
-	} else {
-		speed = 100;	/* Default speed */
-		dev->set_mpu_wkup_lat = NULL;
+		dev->dtrev = pdata->rev;
 	}
 
-	dev->speed = speed;
 	dev->dev = &pdev->dev;
 	dev->irq = irq->start;
 	dev->base = ioremap(mem->start, resource_size(mem));
@@ -1020,9 +1046,9 @@
 
 	platform_set_drvdata(pdev, dev);
 
-	dev->reg_shift = (pdata->flags >> OMAP_I2C_FLAG_BUS_SHIFT__SHIFT) & 3;
+	dev->reg_shift = (dev->flags >> OMAP_I2C_FLAG_BUS_SHIFT__SHIFT) & 3;
 
-	if (pdata->rev == OMAP_I2C_IP_VERSION_2)
+	if (dev->dtrev == OMAP_I2C_IP_VERSION_2)
 		dev->regs = (u8 *)reg_map_ip_v2;
 	else
 		dev->regs = (u8 *)reg_map_ip_v1;
@@ -1035,7 +1061,7 @@
 	if (dev->rev <= OMAP_I2C_REV_ON_3430)
 		dev->errata |= I2C_OMAP3_1P153;
 
-	if (!(pdata->flags & OMAP_I2C_FLAG_NO_FIFO)) {
+	if (!(dev->flags & OMAP_I2C_FLAG_NO_FIFO)) {
 		u16 s;
 
 		/* Set up the fifo size - Get total size */
@@ -1058,7 +1084,7 @@
 		/* calculate wakeup latency constraint for MPU */
 		if (dev->set_mpu_wkup_lat != NULL)
 			dev->latency = (1000000 * dev->fifo_size) /
-				       (1000 * speed / 8);
+				       (1000 * dev->speed / 8);
 	}
 
 	/* reset ASAP, clearing any IRQs */
@@ -1074,7 +1100,7 @@
 	}
 
 	dev_info(dev->dev, "bus %d rev%d.%d.%d at %d kHz\n", pdev->id,
-		 pdata->rev, dev->rev >> 4, dev->rev & 0xf, dev->speed);
+		 dev->dtrev, dev->rev >> 4, dev->rev & 0xf, dev->speed);
 
 	pm_runtime_put(dev->dev);
 
@@ -1085,6 +1111,7 @@
 	strlcpy(adap->name, "OMAP I2C adapter", sizeof(adap->name));
 	adap->algo = &omap_i2c_algo;
 	adap->dev.parent = &pdev->dev;
+	adap->dev.of_node = pdev->dev.of_node;
 
 	/* i2c device drivers may be active on return from add_adapter() */
 	adap->nr = pdev->id;
@@ -1094,6 +1121,8 @@
 		goto err_free_irq;
 	}
 
+	of_i2c_register_devices(adap);
+
 	return 0;
 
 err_free_irq:
@@ -1166,6 +1195,7 @@
 		.name	= "omap_i2c",
 		.owner	= THIS_MODULE,
 		.pm	= OMAP_I2C_PM_OPS,
+		.of_match_table = of_match_ptr(omap_i2c_of_match),
 	},
 };
 
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 5d2f8e1..20bce51 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -197,7 +197,7 @@
 		.enter = &intel_idle },
 };
 
-static int get_driver_data(int cstate)
+static long get_driver_data(int cstate)
 {
 	int driver_data;
 	switch (cstate) {
@@ -232,6 +232,7 @@
  * @drv: cpuidle driver
  * @index: index of cpuidle state
  *
+ * Must be called under local_irq_disable().
  */
 static int intel_idle(struct cpuidle_device *dev,
 		struct cpuidle_driver *drv, int index)
@@ -247,8 +248,6 @@
 
 	cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
 
-	local_irq_disable();
-
 	/*
 	 * leave_mm() to avoid costly and often unnecessary wakeups
 	 * for flushing the user TLB's associated with the active mm.
@@ -348,7 +347,8 @@
 	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
 
 	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
-		!(ecx & CPUID5_ECX_INTERRUPT_BREAK))
+	    !(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
+	    !mwait_substates)
 			return -ENODEV;
 
 	pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates);
@@ -394,7 +394,7 @@
 	if (boot_cpu_has(X86_FEATURE_ARAT))	/* Always Reliable APIC Timer */
 		lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
 	else {
-		smp_call_function(__setup_broadcast_timer, (void *)true, 1);
+		on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
 		register_cpu_notifier(&setup_broadcast_notifier);
 	}
 
@@ -471,71 +471,67 @@
 	}
 
 	if (auto_demotion_disable_flags)
-		smp_call_function(auto_demotion_disable, NULL, 1);
+		on_each_cpu(auto_demotion_disable, NULL, 1);
 
 	return 0;
 }
 
 
 /*
- * intel_idle_cpuidle_devices_init()
+ * intel_idle_cpu_init()
  * allocate, initialize, register cpuidle_devices
+ * @cpu: cpu/core to initialize
  */
-static int intel_idle_cpuidle_devices_init(void)
+int intel_idle_cpu_init(int cpu)
 {
-	int i, cstate;
+	int cstate;
 	struct cpuidle_device *dev;
 
-	intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
-	if (intel_idle_cpuidle_devices == NULL)
-		return -ENOMEM;
+	dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu);
 
-	for_each_online_cpu(i) {
-		dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
+	dev->state_count = 1;
 
-		dev->state_count = 1;
+	for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) {
+		int num_substates;
 
-		for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) {
-			int num_substates;
+		if (cstate > max_cstate) {
+			printk(PREFIX "max_cstate %d reached\n",
+			       max_cstate);
+			break;
+		}
 
-			if (cstate > max_cstate) {
-				printk(PREFIX "max_cstate %d reached\n",
-					max_cstate);
-				break;
-			}
+		/* does the state exist in CPUID.MWAIT? */
+		num_substates = (mwait_substates >> ((cstate) * 4))
+			& MWAIT_SUBSTATE_MASK;
+		if (num_substates == 0)
+			continue;
+		/* is the state not enabled? */
+		if (cpuidle_state_table[cstate].enter == NULL)
+			continue;
 
-			/* does the state exist in CPUID.MWAIT? */
-			num_substates = (mwait_substates >> ((cstate) * 4))
-						& MWAIT_SUBSTATE_MASK;
-			if (num_substates == 0)
-				continue;
-			/* is the state not enabled? */
-			if (cpuidle_state_table[cstate].enter == NULL) {
-				continue;
-			}
-
-			dev->states_usage[dev->state_count].driver_data =
-				(void *)get_driver_data(cstate);
+		dev->states_usage[dev->state_count].driver_data =
+			(void *)get_driver_data(cstate);
 
 			dev->state_count += 1;
 		}
+	dev->cpu = cpu;
 
-		dev->cpu = i;
-		if (cpuidle_register_device(dev)) {
-			pr_debug(PREFIX "cpuidle_register_device %d failed!\n",
-				 i);
-			intel_idle_cpuidle_devices_uninit();
-			return -EIO;
-		}
+	if (cpuidle_register_device(dev)) {
+		pr_debug(PREFIX "cpuidle_register_device %d failed!\n", cpu);
+		intel_idle_cpuidle_devices_uninit();
+		return -EIO;
 	}
 
+	if (auto_demotion_disable_flags)
+		smp_call_function_single(cpu, auto_demotion_disable, NULL, 1);
+
 	return 0;
 }
 
 
 static int __init intel_idle_init(void)
 {
-	int retval;
+	int retval, i;
 
 	/* Do not load intel_idle at all for now if idle= is passed */
 	if (boot_option_idle_override != IDLE_NO_OVERRIDE)
@@ -553,10 +549,16 @@
 		return retval;
 	}
 
-	retval = intel_idle_cpuidle_devices_init();
-	if (retval) {
-		cpuidle_unregister_driver(&intel_idle_driver);
-		return retval;
+	intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
+	if (intel_idle_cpuidle_devices == NULL)
+		return -ENOMEM;
+
+	for_each_online_cpu(i) {
+		retval = intel_idle_cpu_init(i);
+		if (retval) {
+			cpuidle_unregister_driver(&intel_idle_driver);
+			return retval;
+		}
 	}
 
 	return 0;
@@ -568,7 +570,7 @@
 	cpuidle_unregister_driver(&intel_idle_driver);
 
 	if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) {
-		smp_call_function(__setup_broadcast_timer, (void *)false, 1);
+		on_each_cpu(__setup_broadcast_timer, (void *)false, 1);
 		unregister_cpu_notifier(&setup_broadcast_notifier);
 	}
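
One point worth spelling out about the smp_call_function() to on_each_cpu() conversions in this file (a reading of the change, not a quote from its changelog): smp_call_function() runs the callback on every online CPU except the caller, so per-CPU MSR setup such as __setup_broadcast_timer() and auto_demotion_disable() was never applied on the CPU performing module init; on_each_cpu() also runs the callback locally, with interrupts disabled. A minimal sketch of the pattern, using a placeholder callback:

#include <linux/smp.h>

/* Placeholder standing in for __setup_broadcast_timer()/auto_demotion_disable() */
static void apply_per_cpu_setup(void *info)
{
}

static void example_apply_on_all_cpus(void)
{
	/* Runs on every online CPU, including the calling one (locally,
	 * with IRQs disabled); smp_call_function() would skip the caller.
	 */
	on_each_cpu(apply_per_cpu_setup, NULL, 1);
}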
 
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 0f9a84c..eb0add3 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -55,6 +55,7 @@
 source "drivers/infiniband/ulp/ipoib/Kconfig"
 
 source "drivers/infiniband/ulp/srp/Kconfig"
+source "drivers/infiniband/ulp/srpt/Kconfig"
 
 source "drivers/infiniband/ulp/iser/Kconfig"
 
diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile
index 9cc7a47..a3b2d8e 100644
--- a/drivers/infiniband/Makefile
+++ b/drivers/infiniband/Makefile
@@ -10,4 +10,5 @@
 obj-$(CONFIG_INFINIBAND_NES)		+= hw/nes/
 obj-$(CONFIG_INFINIBAND_IPOIB)		+= ulp/ipoib/
 obj-$(CONFIG_INFINIBAND_SRP)		+= ulp/srp/
+obj-$(CONFIG_INFINIBAND_SRPT)		+= ulp/srpt/
 obj-$(CONFIG_INFINIBAND_ISER)		+= ulp/iser/
diff --git a/drivers/infiniband/ulp/srpt/Kconfig b/drivers/infiniband/ulp/srpt/Kconfig
new file mode 100644
index 0000000..31ee83d
--- /dev/null
+++ b/drivers/infiniband/ulp/srpt/Kconfig
@@ -0,0 +1,12 @@
+config INFINIBAND_SRPT
+	tristate "InfiniBand SCSI RDMA Protocol target support"
+	depends on INFINIBAND && TARGET_CORE
+	---help---
+
+	  Support for the SCSI RDMA Protocol (SRP) Target driver. The
+	  SRP protocol is a protocol that allows an initiator to access
+	  a block storage device on another host (target) over a network
+	  that supports the RDMA protocol. Currently the RDMA protocol is
+	  supported by InfiniBand and by iWarp network hardware. More
+	  information about the SRP protocol can be found on the website
+	  of the INCITS T10 technical committee (http://www.t10.org/).
diff --git a/drivers/infiniband/ulp/srpt/Makefile b/drivers/infiniband/ulp/srpt/Makefile
new file mode 100644
index 0000000..e3ee4bd
--- /dev/null
+++ b/drivers/infiniband/ulp/srpt/Makefile
@@ -0,0 +1,2 @@
+ccflags-y			:= -Idrivers/target
+obj-$(CONFIG_INFINIBAND_SRPT)	+= ib_srpt.o
diff --git a/drivers/infiniband/ulp/srpt/ib_dm_mad.h b/drivers/infiniband/ulp/srpt/ib_dm_mad.h
new file mode 100644
index 0000000..fb1de1f
--- /dev/null
+++ b/drivers/infiniband/ulp/srpt/ib_dm_mad.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2006 - 2009 Mellanox Technology Inc.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef IB_DM_MAD_H
+#define IB_DM_MAD_H
+
+#include <linux/types.h>
+
+#include <rdma/ib_mad.h>
+
+enum {
+	/*
+	 * See also section 13.4.7 Status Field, table 115 MAD Common Status
+	 * Field Bit Values and also section 16.3.1.1 Status Field in the
+	 * InfiniBand Architecture Specification.
+	 */
+	DM_MAD_STATUS_UNSUP_METHOD = 0x0008,
+	DM_MAD_STATUS_UNSUP_METHOD_ATTR = 0x000c,
+	DM_MAD_STATUS_INVALID_FIELD = 0x001c,
+	DM_MAD_STATUS_NO_IOC = 0x0100,
+
+	/*
+	 * See also the Device Management chapter, section 16.3.3 Attributes,
+	 * table 279 Device Management Attributes in the InfiniBand
+	 * Architecture Specification.
+	 */
+	DM_ATTR_CLASS_PORT_INFO = 0x01,
+	DM_ATTR_IOU_INFO = 0x10,
+	DM_ATTR_IOC_PROFILE = 0x11,
+	DM_ATTR_SVC_ENTRIES = 0x12
+};
+
+struct ib_dm_hdr {
+	u8 reserved[28];
+};
+
+/*
+ * Structure of management datagram sent by the SRP target implementation.
+ * Contains a management datagram header, reliable multi-packet transaction
+ * protocol (RMPP) header and ib_dm_hdr. Notes:
+ * - The SRP target implementation does not use RMPP or ib_dm_hdr when sending
+ *   management datagrams.
+ * - The header size must be exactly 64 bytes (IB_MGMT_DEVICE_HDR), since this
+ *   is the header size that is passed to ib_create_send_mad() in ib_srpt.c.
+ * - The maximum supported size for a management datagram when not using RMPP
+ *   is 256 bytes -- 64 bytes header and 192 (IB_MGMT_DEVICE_DATA) bytes data.
+ */
+struct ib_dm_mad {
+	struct ib_mad_hdr mad_hdr;
+	struct ib_rmpp_hdr rmpp_hdr;
+	struct ib_dm_hdr dm_hdr;
+	u8 data[IB_MGMT_DEVICE_DATA];
+};
+
+/*
+ * IOUnitInfo as defined in section 16.3.3.3 IOUnitInfo of the InfiniBand
+ * Architecture Specification.
+ */
+struct ib_dm_iou_info {
+	__be16 change_id;
+	u8 max_controllers;
+	u8 op_rom;
+	u8 controller_list[128];
+};
+
+/*
+ * IOControllerprofile as defined in section 16.3.3.4 IOControllerProfile of
+ * the InfiniBand Architecture Specification.
+ */
+struct ib_dm_ioc_profile {
+	__be64 guid;
+	__be32 vendor_id;
+	__be32 device_id;
+	__be16 device_version;
+	__be16 reserved1;
+	__be32 subsys_vendor_id;
+	__be32 subsys_device_id;
+	__be16 io_class;
+	__be16 io_subclass;
+	__be16 protocol;
+	__be16 protocol_version;
+	__be16 service_conn;
+	__be16 initiators_supported;
+	__be16 send_queue_depth;
+	u8 reserved2;
+	u8 rdma_read_depth;
+	__be32 send_size;
+	__be32 rdma_size;
+	u8 op_cap_mask;
+	u8 svc_cap_mask;
+	u8 num_svc_entries;
+	u8 reserved3[9];
+	u8 id_string[64];
+};
+
+struct ib_dm_svc_entry {
+	u8 name[40];
+	__be64 id;
+};
+
+/*
+ * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
+ * Specification. See also section B.7, table B.8 in the T10 SRP r16a document.
+ */
+struct ib_dm_svc_entries {
+	struct ib_dm_svc_entry service_entries[4];
+};
+
+#endif
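
The layout comment above struct ib_dm_mad depends on the three headers adding up to exactly IB_MGMT_DEVICE_HDR (64) bytes, since that is the header length ib_srpt.c passes to ib_create_send_mad(). A hedged compile-time sketch of that invariant, assuming the usual BUILD_BUG_ON()/offsetof() helpers and the 24-, 12- and 28-byte sizes of the constituent headers:

#include <linux/bug.h>
#include <linux/stddef.h>
#include <rdma/ib_mad.h>
#include "ib_dm_mad.h"

static inline void ib_dm_mad_layout_check(void)
{
	/* 24 (ib_mad_hdr) + 12 (ib_rmpp_hdr) + 28 (ib_dm_hdr) = 64 */
	BUILD_BUG_ON(offsetof(struct ib_dm_mad, data) != IB_MGMT_DEVICE_HDR);
	/* 64-byte header + 192 data bytes = the 256-byte non-RMPP maximum */
	BUILD_BUG_ON(sizeof(struct ib_dm_mad) !=
		     IB_MGMT_DEVICE_HDR + IB_MGMT_DEVICE_DATA);
}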
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
new file mode 100644
index 0000000..cd5d05e
--- /dev/null
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -0,0 +1,4073 @@
+/*
+ * Copyright (c) 2006 - 2009 Mellanox Technology Inc.  All rights reserved.
+ * Copyright (C) 2008 - 2011 Bart Van Assche <bvanassche@acm.org>.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/ctype.h>
+#include <linux/kthread.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/atomic.h>
+#include <scsi/scsi_tcq.h>
+#include <target/configfs_macros.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric_configfs.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_configfs.h>
+#include "ib_srpt.h"
+
+/* Name of this kernel module. */
+#define DRV_NAME		"ib_srpt"
+#define DRV_VERSION		"2.0.0"
+#define DRV_RELDATE		"2011-02-14"
+
+#define SRPT_ID_STRING	"Linux SRP target"
+
+#undef pr_fmt
+#define pr_fmt(fmt) DRV_NAME " " fmt
+
+MODULE_AUTHOR("Vu Pham and Bart Van Assche");
+MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol target "
+		   "v" DRV_VERSION " (" DRV_RELDATE ")");
+MODULE_LICENSE("Dual BSD/GPL");
+
+/*
+ * Global Variables
+ */
+
+static u64 srpt_service_guid;
+static spinlock_t srpt_dev_lock;       /* Protects srpt_dev_list. */
+static struct list_head srpt_dev_list; /* List of srpt_device structures. */
+
+static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE;
+module_param(srp_max_req_size, int, 0444);
+MODULE_PARM_DESC(srp_max_req_size,
+		 "Maximum size of SRP request messages in bytes.");
+
+static int srpt_srq_size = DEFAULT_SRPT_SRQ_SIZE;
+module_param(srpt_srq_size, int, 0444);
+MODULE_PARM_DESC(srpt_srq_size,
+		 "Shared receive queue (SRQ) size.");
+
+static int srpt_get_u64_x(char *buffer, struct kernel_param *kp)
+{
+	return sprintf(buffer, "0x%016llx", *(u64 *)kp->arg);
+}
+module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
+		  0444);
+MODULE_PARM_DESC(srpt_service_guid,
+		 "Using this value for ioc_guid, id_ext, and cm_listen_id"
+		 " instead of using the node_guid of the first HCA.");
+
+static struct ib_client srpt_client;
+static struct target_fabric_configfs *srpt_target;
+static void srpt_release_channel(struct srpt_rdma_ch *ch);
+static int srpt_queue_status(struct se_cmd *cmd);
+
+/**
+ * opposite_dma_dir() - Swap DMA_TO_DEVICE and DMA_FROM_DEVICE.
+ */
+static inline
+enum dma_data_direction opposite_dma_dir(enum dma_data_direction dir)
+{
+	switch (dir) {
+	case DMA_TO_DEVICE:	return DMA_FROM_DEVICE;
+	case DMA_FROM_DEVICE:	return DMA_TO_DEVICE;
+	default:		return dir;
+	}
+}
+
+/**
+ * srpt_sdev_name() - Return the name associated with the HCA.
+ *
+ * Examples are ib0, ib1, ...
+ */
+static inline const char *srpt_sdev_name(struct srpt_device *sdev)
+{
+	return sdev->device->name;
+}
+
+static enum rdma_ch_state srpt_get_ch_state(struct srpt_rdma_ch *ch)
+{
+	unsigned long flags;
+	enum rdma_ch_state state;
+
+	spin_lock_irqsave(&ch->spinlock, flags);
+	state = ch->state;
+	spin_unlock_irqrestore(&ch->spinlock, flags);
+	return state;
+}
+
+static enum rdma_ch_state
+srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new_state)
+{
+	unsigned long flags;
+	enum rdma_ch_state prev;
+
+	spin_lock_irqsave(&ch->spinlock, flags);
+	prev = ch->state;
+	ch->state = new_state;
+	spin_unlock_irqrestore(&ch->spinlock, flags);
+	return prev;
+}
+
+/**
+ * srpt_test_and_set_ch_state() - Test and set the channel state.
+ *
+ * Returns true if and only if the channel state has been set to the new state.
+ */
+static bool
+srpt_test_and_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state old,
+			   enum rdma_ch_state new)
+{
+	unsigned long flags;
+	enum rdma_ch_state prev;
+
+	spin_lock_irqsave(&ch->spinlock, flags);
+	prev = ch->state;
+	if (prev == old)
+		ch->state = new;
+	spin_unlock_irqrestore(&ch->spinlock, flags);
+	return prev == old;
+}
+
+/**
+ * srpt_event_handler() - Asynchronous IB event callback function.
+ *
+ * Callback function called by the InfiniBand core when an asynchronous IB
+ * event occurs. This callback may occur in interrupt context. See also
+ * section 11.5.2, Set Asynchronous Event Handler in the InfiniBand
+ * Architecture Specification.
+ */
+static void srpt_event_handler(struct ib_event_handler *handler,
+			       struct ib_event *event)
+{
+	struct srpt_device *sdev;
+	struct srpt_port *sport;
+
+	sdev = ib_get_client_data(event->device, &srpt_client);
+	if (!sdev || sdev->device != event->device)
+		return;
+
+	pr_debug("ASYNC event= %d on device= %s\n", event->event,
+		 srpt_sdev_name(sdev));
+
+	switch (event->event) {
+	case IB_EVENT_PORT_ERR:
+		if (event->element.port_num <= sdev->device->phys_port_cnt) {
+			sport = &sdev->port[event->element.port_num - 1];
+			sport->lid = 0;
+			sport->sm_lid = 0;
+		}
+		break;
+	case IB_EVENT_PORT_ACTIVE:
+	case IB_EVENT_LID_CHANGE:
+	case IB_EVENT_PKEY_CHANGE:
+	case IB_EVENT_SM_CHANGE:
+	case IB_EVENT_CLIENT_REREGISTER:
+		/* Refresh port data asynchronously. */
+		if (event->element.port_num <= sdev->device->phys_port_cnt) {
+			sport = &sdev->port[event->element.port_num - 1];
+			if (!sport->lid && !sport->sm_lid)
+				schedule_work(&sport->work);
+		}
+		break;
+	default:
+		printk(KERN_ERR "received unrecognized IB event %d\n",
+		       event->event);
+		break;
+	}
+}
+
+/**
+ * srpt_srq_event() - SRQ event callback function.
+ */
+static void srpt_srq_event(struct ib_event *event, void *ctx)
+{
+	printk(KERN_INFO "SRQ event %d\n", event->event);
+}
+
+/**
+ * srpt_qp_event() - QP event callback function.
+ */
+static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
+{
+	pr_debug("QP event %d on cm_id=%p sess_name=%s state=%d\n",
+		 event->event, ch->cm_id, ch->sess_name, srpt_get_ch_state(ch));
+
+	switch (event->event) {
+	case IB_EVENT_COMM_EST:
+		ib_cm_notify(ch->cm_id, event->event);
+		break;
+	case IB_EVENT_QP_LAST_WQE_REACHED:
+		if (srpt_test_and_set_ch_state(ch, CH_DRAINING,
+					       CH_RELEASING))
+			srpt_release_channel(ch);
+		else
+			pr_debug("%s: state %d - ignored LAST_WQE.\n",
+				 ch->sess_name, srpt_get_ch_state(ch));
+		break;
+	default:
+		printk(KERN_ERR "received unrecognized IB QP event %d\n",
+		       event->event);
+		break;
+	}
+}
+
+/**
+ * srpt_set_ioc() - Helper function for initializing an IOUnitInfo structure.
+ *
+ * @slot: one-based slot number.
+ * @value: four-bit value.
+ *
+ * Copies the lowest four bits of value in element slot of the array of four
+ * bit elements called c_list (controller list). The index slot is one-based.
+ */
+static void srpt_set_ioc(u8 *c_list, u32 slot, u8 value)
+{
+	u16 id;
+	u8 tmp;
+
+	id = (slot - 1) / 2;
+	if (slot & 0x1) {
+		tmp = c_list[id] & 0xf;
+		c_list[id] = (value << 4) | tmp;
+	} else {
+		tmp = c_list[id] & 0xf0;
+		c_list[id] = (value & 0xf) | tmp;
+	}
+}
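/*
 * Worked example for srpt_set_ioc() above (an illustration, not part of the
 * patch): for slot 1, id = (1 - 1) / 2 = 0 and the slot number is odd, so the
 * value lands in the high nibble of c_list[0]; slot 2 maps to the low nibble
 * of the same byte. srpt_get_iou() below therefore leaves c_list[0] == 0x10
 * and c_list[1..7] == 0x00 after marking slot 1 present and slots 2..16
 * empty, packing the sixteen advertised 4-bit controller entries into the
 * first eight bytes of controller_list[].
 */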
+
+/**
+ * srpt_get_class_port_info() - Copy ClassPortInfo to a management datagram.
+ *
+ * See also section 16.3.3.1 ClassPortInfo in the InfiniBand Architecture
+ * Specification.
+ */
+static void srpt_get_class_port_info(struct ib_dm_mad *mad)
+{
+	struct ib_class_port_info *cif;
+
+	cif = (struct ib_class_port_info *)mad->data;
+	memset(cif, 0, sizeof *cif);
+	cif->base_version = 1;
+	cif->class_version = 1;
+	cif->resp_time_value = 20;
+
+	mad->mad_hdr.status = 0;
+}
+
+/**
+ * srpt_get_iou() - Write IOUnitInfo to a management datagram.
+ *
+ * See also section 16.3.3.3 IOUnitInfo in the InfiniBand Architecture
+ * Specification. See also section B.7, table B.6 in the SRP r16a document.
+ */
+static void srpt_get_iou(struct ib_dm_mad *mad)
+{
+	struct ib_dm_iou_info *ioui;
+	u8 slot;
+	int i;
+
+	ioui = (struct ib_dm_iou_info *)mad->data;
+	ioui->change_id = __constant_cpu_to_be16(1);
+	ioui->max_controllers = 16;
+
+	/* set present for slot 1 and empty for the rest */
+	srpt_set_ioc(ioui->controller_list, 1, 1);
+	for (i = 1, slot = 2; i < 16; i++, slot++)
+		srpt_set_ioc(ioui->controller_list, slot, 0);
+
+	mad->mad_hdr.status = 0;
+}
+
+/**
+ * srpt_get_ioc() - Write IOControllerprofile to a management datagram.
+ *
+ * See also section 16.3.3.4 IOControllerProfile in the InfiniBand
+ * Architecture Specification. See also section B.7, table B.7 in the SRP
+ * r16a document.
+ */
+static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
+			 struct ib_dm_mad *mad)
+{
+	struct srpt_device *sdev = sport->sdev;
+	struct ib_dm_ioc_profile *iocp;
+
+	iocp = (struct ib_dm_ioc_profile *)mad->data;
+
+	if (!slot || slot > 16) {
+		mad->mad_hdr.status
+			= __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
+		return;
+	}
+
+	if (slot > 2) {
+		mad->mad_hdr.status
+			= __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC);
+		return;
+	}
+
+	memset(iocp, 0, sizeof *iocp);
+	strcpy(iocp->id_string, SRPT_ID_STRING);
+	iocp->guid = cpu_to_be64(srpt_service_guid);
+	iocp->vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
+	iocp->device_id = cpu_to_be32(sdev->dev_attr.vendor_part_id);
+	iocp->device_version = cpu_to_be16(sdev->dev_attr.hw_ver);
+	iocp->subsys_vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
+	iocp->subsys_device_id = 0x0;
+	iocp->io_class = __constant_cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
+	iocp->io_subclass = __constant_cpu_to_be16(SRP_IO_SUBCLASS);
+	iocp->protocol = __constant_cpu_to_be16(SRP_PROTOCOL);
+	iocp->protocol_version = __constant_cpu_to_be16(SRP_PROTOCOL_VERSION);
+	iocp->send_queue_depth = cpu_to_be16(sdev->srq_size);
+	iocp->rdma_read_depth = 4;
+	iocp->send_size = cpu_to_be32(srp_max_req_size);
+	iocp->rdma_size = cpu_to_be32(min(sport->port_attrib.srp_max_rdma_size,
+					  1U << 24));
+	iocp->num_svc_entries = 1;
+	iocp->op_cap_mask = SRP_SEND_TO_IOC | SRP_SEND_FROM_IOC |
+		SRP_RDMA_READ_FROM_IOC | SRP_RDMA_WRITE_FROM_IOC;
+
+	mad->mad_hdr.status = 0;
+}
+
+/**
+ * srpt_get_svc_entries() - Write ServiceEntries to a management datagram.
+ *
+ * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
+ * Specification. See also section B.7, table B.8 in the SRP r16a document.
+ */
+static void srpt_get_svc_entries(u64 ioc_guid,
+				 u16 slot, u8 hi, u8 lo, struct ib_dm_mad *mad)
+{
+	struct ib_dm_svc_entries *svc_entries;
+
+	WARN_ON(!ioc_guid);
+
+	if (!slot || slot > 16) {
+		mad->mad_hdr.status
+			= __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
+		return;
+	}
+
+	if (slot > 2 || lo > hi || hi > 1) {
+		mad->mad_hdr.status
+			= __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC);
+		return;
+	}
+
+	svc_entries = (struct ib_dm_svc_entries *)mad->data;
+	memset(svc_entries, 0, sizeof *svc_entries);
+	svc_entries->service_entries[0].id = cpu_to_be64(ioc_guid);
+	snprintf(svc_entries->service_entries[0].name,
+		 sizeof(svc_entries->service_entries[0].name),
+		 "%s%016llx",
+		 SRP_SERVICE_NAME_PREFIX,
+		 ioc_guid);
+
+	mad->mad_hdr.status = 0;
+}
+
+/**
+ * srpt_mgmt_method_get() - Process a received management datagram.
+ * @sp:      source port through which the MAD has been received.
+ * @rq_mad:  received MAD.
+ * @rsp_mad: response MAD.
+ */
+static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
+				 struct ib_dm_mad *rsp_mad)
+{
+	u16 attr_id;
+	u32 slot;
+	u8 hi, lo;
+
+	attr_id = be16_to_cpu(rq_mad->mad_hdr.attr_id);
+	switch (attr_id) {
+	case DM_ATTR_CLASS_PORT_INFO:
+		srpt_get_class_port_info(rsp_mad);
+		break;
+	case DM_ATTR_IOU_INFO:
+		srpt_get_iou(rsp_mad);
+		break;
+	case DM_ATTR_IOC_PROFILE:
+		slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
+		srpt_get_ioc(sp, slot, rsp_mad);
+		break;
+	case DM_ATTR_SVC_ENTRIES:
+		slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
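+		/* attr_mod layout: bits 31..16 slot, 15..8 hi, 7..0 lo. */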
+		hi = (u8) ((slot >> 8) & 0xff);
+		lo = (u8) (slot & 0xff);
+		slot = (u16) ((slot >> 16) & 0xffff);
+		srpt_get_svc_entries(srpt_service_guid,
+				     slot, hi, lo, rsp_mad);
+		break;
+	default:
+		rsp_mad->mad_hdr.status =
+		    __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
+		break;
+	}
+}
+
+/**
+ * srpt_mad_send_handler() - Post MAD-send callback function.
+ */
+static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
+				  struct ib_mad_send_wc *mad_wc)
+{
+	ib_destroy_ah(mad_wc->send_buf->ah);
+	ib_free_send_mad(mad_wc->send_buf);
+}
+
+/**
+ * srpt_mad_recv_handler() - MAD reception callback function.
+ */
+static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
+				  struct ib_mad_recv_wc *mad_wc)
+{
+	struct srpt_port *sport = (struct srpt_port *)mad_agent->context;
+	struct ib_ah *ah;
+	struct ib_mad_send_buf *rsp;
+	struct ib_dm_mad *dm_mad;
+
+	if (!mad_wc || !mad_wc->recv_buf.mad)
+		return;
+
+	ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc,
+				  mad_wc->recv_buf.grh, mad_agent->port_num);
+	if (IS_ERR(ah))
+		goto err;
+
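+	/* The header/data split below must match struct ib_dm_mad. */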
+	BUILD_BUG_ON(offsetof(struct ib_dm_mad, data) != IB_MGMT_DEVICE_HDR);
+
+	rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp,
+				 mad_wc->wc->pkey_index, 0,
+				 IB_MGMT_DEVICE_HDR, IB_MGMT_DEVICE_DATA,
+				 GFP_KERNEL);
+	if (IS_ERR(rsp))
+		goto err_rsp;
+
+	rsp->ah = ah;
+
+	dm_mad = rsp->mad;
+	memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof *dm_mad);
+	dm_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
+	dm_mad->mad_hdr.status = 0;
+
+	switch (mad_wc->recv_buf.mad->mad_hdr.method) {
+	case IB_MGMT_METHOD_GET:
+		srpt_mgmt_method_get(sport, mad_wc->recv_buf.mad, dm_mad);
+		break;
+	case IB_MGMT_METHOD_SET:
+		dm_mad->mad_hdr.status =
+		    __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
+		break;
+	default:
+		dm_mad->mad_hdr.status =
+		    __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
+		break;
+	}
+
+	if (!ib_post_send_mad(rsp, NULL)) {
+		ib_free_recv_mad(mad_wc);
+		/* will destroy_ah & free_send_mad in send completion */
+		return;
+	}
+
+	ib_free_send_mad(rsp);
+
+err_rsp:
+	ib_destroy_ah(ah);
+err:
+	ib_free_recv_mad(mad_wc);
+}
+
+/**
+ * srpt_refresh_port() - Configure a HCA port.
+ *
+ * Enable InfiniBand management datagram processing, update the cached sm_lid,
+ * lid and gid values, and register a callback function for processing MADs
+ * on the specified port.
+ *
+ * Note: It is safe to call this function more than once for the same port.
+ */
+static int srpt_refresh_port(struct srpt_port *sport)
+{
+	struct ib_mad_reg_req reg_req;
+	struct ib_port_modify port_modify;
+	struct ib_port_attr port_attr;
+	int ret;
+
+	memset(&port_modify, 0, sizeof port_modify);
+	port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
+	port_modify.clr_port_cap_mask = 0;
+
+	ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
+	if (ret)
+		goto err_mod_port;
+
+	ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);
+	if (ret)
+		goto err_query_port;
+
+	sport->sm_lid = port_attr.sm_lid;
+	sport->lid = port_attr.lid;
+
+	ret = ib_query_gid(sport->sdev->device, sport->port, 0, &sport->gid);
+	if (ret)
+		goto err_query_port;
+
+	if (!sport->mad_agent) {
+		memset(&reg_req, 0, sizeof reg_req);
+		reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
+		reg_req.mgmt_class_version = IB_MGMT_BASE_VERSION;
+		set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
+		set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);
+
+		sport->mad_agent = ib_register_mad_agent(sport->sdev->device,
+							 sport->port,
+							 IB_QPT_GSI,
+							 &reg_req, 0,
+							 srpt_mad_send_handler,
+							 srpt_mad_recv_handler,
+							 sport);
+		if (IS_ERR(sport->mad_agent)) {
+			ret = PTR_ERR(sport->mad_agent);
+			sport->mad_agent = NULL;
+			goto err_query_port;
+		}
+	}
+
+	return 0;
+
+err_query_port:
+
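+	/* Clear the DeviceManagement capability bit that was set above. */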
+	port_modify.set_port_cap_mask = 0;
+	port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
+	ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
+
+err_mod_port:
+
+	return ret;
+}
+
+/**
+ * srpt_unregister_mad_agent() - Unregister MAD callback functions.
+ *
+ * Note: It is safe to call this function more than once for the same device.
+ */
+static void srpt_unregister_mad_agent(struct srpt_device *sdev)
+{
+	struct ib_port_modify port_modify = {
+		.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP,
+	};
+	struct srpt_port *sport;
+	int i;
+
+	for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
+		sport = &sdev->port[i - 1];
+		WARN_ON(sport->port != i);
+		if (ib_modify_port(sdev->device, i, 0, &port_modify) < 0)
+			printk(KERN_ERR "disabling MAD processing failed.\n");
+		if (sport->mad_agent) {
+			ib_unregister_mad_agent(sport->mad_agent);
+			sport->mad_agent = NULL;
+		}
+	}
+}
+
+/**
+ * srpt_alloc_ioctx() - Allocate an SRPT I/O context structure.
+ */
+static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev,
+					   int ioctx_size, int dma_size,
+					   enum dma_data_direction dir)
+{
+	struct srpt_ioctx *ioctx;
+
+	ioctx = kmalloc(ioctx_size, GFP_KERNEL);
+	if (!ioctx)
+		goto err;
+
+	ioctx->buf = kmalloc(dma_size, GFP_KERNEL);
+	if (!ioctx->buf)
+		goto err_free_ioctx;
+
+	ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf, dma_size, dir);
+	if (ib_dma_mapping_error(sdev->device, ioctx->dma))
+		goto err_free_buf;
+
+	return ioctx;
+
+err_free_buf:
+	kfree(ioctx->buf);
+err_free_ioctx:
+	kfree(ioctx);
+err:
+	return NULL;
+}
+
+/**
+ * srpt_free_ioctx() - Free an SRPT I/O context structure.
+ */
+static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
+			    int dma_size, enum dma_data_direction dir)
+{
+	if (!ioctx)
+		return;
+
+	ib_dma_unmap_single(sdev->device, ioctx->dma, dma_size, dir);
+	kfree(ioctx->buf);
+	kfree(ioctx);
+}
+
+/**
+ * srpt_alloc_ioctx_ring() - Allocate a ring of SRPT I/O context structures.
+ * @sdev:       Device to allocate the I/O context ring for.
+ * @ring_size:  Number of elements in the I/O context ring.
+ * @ioctx_size: I/O context size.
+ * @dma_size:   DMA buffer size.
+ * @dir:        DMA data direction.
+ */
+static struct srpt_ioctx **srpt_alloc_ioctx_ring(struct srpt_device *sdev,
+				int ring_size, int ioctx_size,
+				int dma_size, enum dma_data_direction dir)
+{
+	struct srpt_ioctx **ring;
+	int i;
+
+	WARN_ON(ioctx_size != sizeof(struct srpt_recv_ioctx)
+		&& ioctx_size != sizeof(struct srpt_send_ioctx));
+
+	ring = kmalloc(ring_size * sizeof(ring[0]), GFP_KERNEL);
+	if (!ring)
+		goto out;
+	for (i = 0; i < ring_size; ++i) {
+		ring[i] = srpt_alloc_ioctx(sdev, ioctx_size, dma_size, dir);
+		if (!ring[i])
+			goto err;
+		ring[i]->index = i;
+	}
+	goto out;
+
+err:
+	while (--i >= 0)
+		srpt_free_ioctx(sdev, ring[i], dma_size, dir);
+	kfree(ring);
+out:
+	return ring;
+}
+
+/**
+ * srpt_free_ioctx_ring() - Free the ring of SRPT I/O context structures.
+ */
+static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
+				 struct srpt_device *sdev, int ring_size,
+				 int dma_size, enum dma_data_direction dir)
+{
+	int i;
+
+	for (i = 0; i < ring_size; ++i)
+		srpt_free_ioctx(sdev, ioctx_ring[i], dma_size, dir);
+	kfree(ioctx_ring);
+}
+
+/**
+ * srpt_get_cmd_state() - Get the state of a SCSI command.
+ */
+static enum srpt_command_state srpt_get_cmd_state(struct srpt_send_ioctx *ioctx)
+{
+	enum srpt_command_state state;
+	unsigned long flags;
+
+	BUG_ON(!ioctx);
+
+	spin_lock_irqsave(&ioctx->spinlock, flags);
+	state = ioctx->state;
+	spin_unlock_irqrestore(&ioctx->spinlock, flags);
+	return state;
+}
+
+/**
+ * srpt_set_cmd_state() - Set the state of a SCSI command.
+ *
+ * Does not modify the state of aborted commands. Returns the previous command
+ * state.
+ */
+static enum srpt_command_state srpt_set_cmd_state(struct srpt_send_ioctx *ioctx,
+						  enum srpt_command_state new)
+{
+	enum srpt_command_state previous;
+	unsigned long flags;
+
+	BUG_ON(!ioctx);
+
+	spin_lock_irqsave(&ioctx->spinlock, flags);
+	previous = ioctx->state;
+	if (previous != SRPT_STATE_DONE)
+		ioctx->state = new;
+	spin_unlock_irqrestore(&ioctx->spinlock, flags);
+
+	return previous;
+}
+
+/**
+ * srpt_test_and_set_cmd_state() - Test and set the state of a command.
+ *
+ * Returns true if and only if the previous command state was equal to 'old'.
+ */
+static bool srpt_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx,
+					enum srpt_command_state old,
+					enum srpt_command_state new)
+{
+	enum srpt_command_state previous;
+	unsigned long flags;
+
+	WARN_ON(!ioctx);
+	WARN_ON(old == SRPT_STATE_DONE);
+	WARN_ON(new == SRPT_STATE_NEW);
+
+	spin_lock_irqsave(&ioctx->spinlock, flags);
+	previous = ioctx->state;
+	if (previous == old)
+		ioctx->state = new;
+	spin_unlock_irqrestore(&ioctx->spinlock, flags);
+	return previous == old;
+}
+
+/**
+ * srpt_post_recv() - Post an IB receive request.
+ */
+static int srpt_post_recv(struct srpt_device *sdev,
+			  struct srpt_recv_ioctx *ioctx)
+{
+	struct ib_sge list;
+	struct ib_recv_wr wr, *bad_wr;
+
+	BUG_ON(!sdev);
+	wr.wr_id = encode_wr_id(SRPT_RECV, ioctx->ioctx.index);
+
+	list.addr = ioctx->ioctx.dma;
+	list.length = srp_max_req_size;
+	list.lkey = sdev->mr->lkey;
+
+	wr.next = NULL;
+	wr.sg_list = &list;
+	wr.num_sge = 1;
+
+	return ib_post_srq_recv(sdev->srq, &wr, &bad_wr);
+}
+
+/**
+ * srpt_post_send() - Post an IB send request.
+ *
+ * Returns zero upon success and a non-zero value upon failure.
+ */
+static int srpt_post_send(struct srpt_rdma_ch *ch,
+			  struct srpt_send_ioctx *ioctx, int len)
+{
+	struct ib_sge list;
+	struct ib_send_wr wr, *bad_wr;
+	struct srpt_device *sdev = ch->sport->sdev;
+	int ret;
+
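+	/* The error path at 'out' undoes this increment. */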
+	atomic_inc(&ch->req_lim);
+
+	ret = -ENOMEM;
+	if (unlikely(atomic_dec_return(&ch->sq_wr_avail) < 0)) {
+		printk(KERN_WARNING "IB send queue full (needed 1)\n");
+		goto out;
+	}
+
+	ib_dma_sync_single_for_device(sdev->device, ioctx->ioctx.dma, len,
+				      DMA_TO_DEVICE);
+
+	list.addr = ioctx->ioctx.dma;
+	list.length = len;
+	list.lkey = sdev->mr->lkey;
+
+	wr.next = NULL;
+	wr.wr_id = encode_wr_id(SRPT_SEND, ioctx->ioctx.index);
+	wr.sg_list = &list;
+	wr.num_sge = 1;
+	wr.opcode = IB_WR_SEND;
+	wr.send_flags = IB_SEND_SIGNALED;
+
+	ret = ib_post_send(ch->qp, &wr, &bad_wr);
+
+out:
+	if (ret < 0) {
+		atomic_inc(&ch->sq_wr_avail);
+		atomic_dec(&ch->req_lim);
+	}
+	return ret;
+}
+
+/**
+ * srpt_get_desc_tbl() - Parse the data descriptors of an SRP_CMD request.
+ * @ioctx: Pointer to the I/O context associated with the request.
+ * @srp_cmd: Pointer to the SRP_CMD request data.
+ * @dir: Pointer to the variable to which the transfer direction will be
+ *   written.
+ * @data_len: Pointer to the variable to which the total data length of all
+ *   descriptors in the SRP_CMD request will be written.
+ *
+ * This function initializes ioctx->n_rbuf and ioctx->rbufs.
+ *
+ * Returns -EINVAL when the SRP_CMD request contains inconsistent descriptors;
+ * -ENOMEM when memory allocation fails and zero upon success.
+ */
+static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
+			     struct srp_cmd *srp_cmd,
+			     enum dma_data_direction *dir, u64 *data_len)
+{
+	struct srp_indirect_buf *idb;
+	struct srp_direct_buf *db;
+	unsigned add_cdb_offset;
+	int ret;
+
+	/*
+	 * The pointer computations below will only be compiled correctly
+	 * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check
+	 * whether srp_cmd::add_data has been declared as a byte pointer.
+	 */
+	BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0)
+		     && !__same_type(srp_cmd->add_data[0], (u8)0));
+
+	BUG_ON(!dir);
+	BUG_ON(!data_len);
+
+	ret = 0;
+	*data_len = 0;
+
+	/*
+	 * The lower four bits of the buffer format field contain the DATA-IN
+	 * buffer descriptor format, and the highest four bits contain the
+	 * DATA-OUT buffer descriptor format.
+	 */
+	*dir = DMA_NONE;
+	if (srp_cmd->buf_fmt & 0xf)
+		/* DATA-IN: transfer data from target to initiator (read). */
+		*dir = DMA_FROM_DEVICE;
+	else if (srp_cmd->buf_fmt >> 4)
+		/* DATA-OUT: transfer data from initiator to target (write). */
+		*dir = DMA_TO_DEVICE;
+
+	/*
+	 * According to the SRP spec, the lower two bits of the 'ADDITIONAL
+	 * CDB LENGTH' field are reserved and the size in bytes of this field
+	 * is four times the value specified in bits 3..7. Hence the "& ~3".
+	 */
+	add_cdb_offset = srp_cmd->add_cdb_len & ~3;
+	if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
+	    ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
+		ioctx->n_rbuf = 1;
+		ioctx->rbufs = &ioctx->single_rbuf;
+
+		db = (struct srp_direct_buf *)(srp_cmd->add_data
+					       + add_cdb_offset);
+		memcpy(ioctx->rbufs, db, sizeof *db);
+		*data_len = be32_to_cpu(db->len);
+	} else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
+		   ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
+		idb = (struct srp_indirect_buf *)(srp_cmd->add_data
+						  + add_cdb_offset);
+
+		ioctx->n_rbuf = be32_to_cpu(idb->table_desc.len) / sizeof *db;
+
+		if (ioctx->n_rbuf >
+		    (srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
+			printk(KERN_ERR "received unsupported SRP_CMD request"
+			       " type (%u out + %u in != %u / %zu)\n",
+			       srp_cmd->data_out_desc_cnt,
+			       srp_cmd->data_in_desc_cnt,
+			       be32_to_cpu(idb->table_desc.len),
+			       sizeof(*db));
+			ioctx->n_rbuf = 0;
+			ret = -EINVAL;
+			goto out;
+		}
+
+		if (ioctx->n_rbuf == 1)
+			ioctx->rbufs = &ioctx->single_rbuf;
+		else {
+			ioctx->rbufs =
+				kmalloc(ioctx->n_rbuf * sizeof *db, GFP_ATOMIC);
+			if (!ioctx->rbufs) {
+				ioctx->n_rbuf = 0;
+				ret = -ENOMEM;
+				goto out;
+			}
+		}
+
+		db = idb->desc_list;
+		memcpy(ioctx->rbufs, db, ioctx->n_rbuf * sizeof *db);
+		*data_len = be32_to_cpu(idb->len);
+	}
+out:
+	return ret;
+}
+
+/**
+ * srpt_init_ch_qp() - Initialize queue pair attributes.
+ *
+ * Initializes the attributes of queue pair 'qp' by allowing local write,
+ * remote read and remote write. Also transitions 'qp' to state IB_QPS_INIT.
+ */
+static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
+{
+	struct ib_qp_attr *attr;
+	int ret;
+
+	attr = kzalloc(sizeof *attr, GFP_KERNEL);
+	if (!attr)
+		return -ENOMEM;
+
+	attr->qp_state = IB_QPS_INIT;
+	attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
+	    IB_ACCESS_REMOTE_WRITE;
+	attr->port_num = ch->sport->port;
+	attr->pkey_index = 0;
+
+	ret = ib_modify_qp(qp, attr,
+			   IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PORT |
+			   IB_QP_PKEY_INDEX);
+
+	kfree(attr);
+	return ret;
+}
+
+/**
+ * srpt_ch_qp_rtr() - Change the state of a channel to 'ready to receive' (RTR).
+ * @ch: channel of the queue pair.
+ * @qp: queue pair to change the state of.
+ *
+ * Returns zero upon success and a negative value upon failure.
+ *
+ * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
+ * If this structure ever becomes larger, it might be necessary to allocate
+ * it dynamically instead of on the stack.
+ */
+static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp)
+{
+	struct ib_qp_attr qp_attr;
+	int attr_mask;
+	int ret;
+
+	qp_attr.qp_state = IB_QPS_RTR;
+	ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
+	if (ret)
+		goto out;
+
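+	/* Allow at most four incoming RDMA read/atomic operations. */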
+	qp_attr.max_dest_rd_atomic = 4;
+
+	ret = ib_modify_qp(qp, &qp_attr, attr_mask);
+
+out:
+	return ret;
+}
+
+/**
+ * srpt_ch_qp_rts() - Change the state of a channel to 'ready to send' (RTS).
+ * @ch: channel of the queue pair.
+ * @qp: queue pair to change the state of.
+ *
+ * Returns zero upon success and a negative value upon failure.
+ *
+ * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
+ * If this structure ever becomes larger, it might be necessary to allocate
+ * it dynamically instead of on the stack.
+ */
+static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp)
+{
+	struct ib_qp_attr qp_attr;
+	int attr_mask;
+	int ret;
+
+	qp_attr.qp_state = IB_QPS_RTS;
+	ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
+	if (ret)
+		goto out;
+
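+	/* Allow at most four outgoing RDMA read/atomic operations. */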
+	qp_attr.max_rd_atomic = 4;
+
+	ret = ib_modify_qp(qp, &qp_attr, attr_mask);
+
+out:
+	return ret;
+}
+
+/**
+ * srpt_ch_qp_err() - Set the channel queue pair state to 'error'.
+ */
+static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
+{
+	struct ib_qp_attr qp_attr;
+
+	qp_attr.qp_state = IB_QPS_ERR;
+	return ib_modify_qp(ch->qp, &qp_attr, IB_QP_STATE);
+}
+
+/**
+ * srpt_unmap_sg_to_ib_sge() - Unmap an IB SGE list.
+ */
+static void srpt_unmap_sg_to_ib_sge(struct srpt_rdma_ch *ch,
+				    struct srpt_send_ioctx *ioctx)
+{
+	struct scatterlist *sg;
+	enum dma_data_direction dir;
+
+	BUG_ON(!ch);
+	BUG_ON(!ioctx);
+	BUG_ON(ioctx->n_rdma && !ioctx->rdma_ius);
+
+	while (ioctx->n_rdma)
+		kfree(ioctx->rdma_ius[--ioctx->n_rdma].sge);
+
+	kfree(ioctx->rdma_ius);
+	ioctx->rdma_ius = NULL;
+
+	if (ioctx->mapped_sg_count) {
+		sg = ioctx->sg;
+		WARN_ON(!sg);
+		dir = ioctx->cmd.data_direction;
+		BUG_ON(dir == DMA_NONE);
+		ib_dma_unmap_sg(ch->sport->sdev->device, sg, ioctx->sg_cnt,
+				opposite_dma_dir(dir));
+		ioctx->mapped_sg_count = 0;
+	}
+}
+
+/**
+ * srpt_map_sg_to_ib_sge() - Map an SG list to an IB SGE list.
+ */
+static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
+				 struct srpt_send_ioctx *ioctx)
+{
+	struct se_cmd *cmd;
+	struct scatterlist *sg, *sg_orig;
+	int sg_cnt;
+	enum dma_data_direction dir;
+	struct rdma_iu *riu;
+	struct srp_direct_buf *db;
+	dma_addr_t dma_addr;
+	struct ib_sge *sge;
+	u64 raddr;
+	u32 rsize;
+	u32 tsize;
+	u32 dma_len;
+	int count, nrdma;
+	int i, j, k;
+
+	BUG_ON(!ch);
+	BUG_ON(!ioctx);
+	cmd = &ioctx->cmd;
+	dir = cmd->data_direction;
+	BUG_ON(dir == DMA_NONE);
+
+	transport_do_task_sg_chain(cmd);
+	ioctx->sg = sg = sg_orig = cmd->t_tasks_sg_chained;
+	ioctx->sg_cnt = sg_cnt = cmd->t_tasks_sg_chained_no;
+
+	count = ib_dma_map_sg(ch->sport->sdev->device, sg, sg_cnt,
+			      opposite_dma_dir(dir));
+	if (unlikely(!count))
+		return -EAGAIN;
+
+	ioctx->mapped_sg_count = count;
+
+	if (ioctx->rdma_ius && ioctx->n_rdma_ius)
+		nrdma = ioctx->n_rdma_ius;
+	else {
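+		/*
+		 * One rdma_iu per SRPT_DEF_SG_PER_WQE mapped SG entries plus
+		 * one per remote buffer descriptor.
+		 */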
+		nrdma = (count + SRPT_DEF_SG_PER_WQE - 1) / SRPT_DEF_SG_PER_WQE
+			+ ioctx->n_rbuf;
+
+		ioctx->rdma_ius = kzalloc(nrdma * sizeof *riu, GFP_KERNEL);
+		if (!ioctx->rdma_ius)
+			goto free_mem;
+
+		ioctx->n_rdma_ius = nrdma;
+	}
+
+	db = ioctx->rbufs;
+	tsize = cmd->data_length;
+	dma_len = sg_dma_len(&sg[0]);
+	riu = ioctx->rdma_ius;
+
+	/*
+	 * For each remote descriptor, calculate the number of ib_sge entries
+	 * needed. If that number does not exceed SRPT_DEF_SG_PER_WQE, a
+	 * single rdma_iu (one RDMA work request) per descriptor suffices;
+	 * otherwise extra rdma_iu entries are allocated to carry the
+	 * remaining ib_sge entries in additional RDMA work requests.
+	 */
+	for (i = 0, j = 0;
+	     j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
+		rsize = be32_to_cpu(db->len);
+		raddr = be64_to_cpu(db->va);
+		riu->raddr = raddr;
+		riu->rkey = be32_to_cpu(db->key);
+		riu->sge_cnt = 0;
+
+		/* calculate how many sge required for this remote_buf */
+		while (rsize > 0 && tsize > 0) {
+
+			if (rsize >= dma_len) {
+				tsize -= dma_len;
+				rsize -= dma_len;
+				raddr += dma_len;
+
+				if (tsize > 0) {
+					++j;
+					if (j < count) {
+						sg = sg_next(sg);
+						dma_len = sg_dma_len(sg);
+					}
+				}
+			} else {
+				tsize -= rsize;
+				dma_len -= rsize;
+				rsize = 0;
+			}
+
+			++riu->sge_cnt;
+
+			if (rsize > 0 && riu->sge_cnt == SRPT_DEF_SG_PER_WQE) {
+				++ioctx->n_rdma;
+				riu->sge =
+				    kmalloc(riu->sge_cnt * sizeof *riu->sge,
+					    GFP_KERNEL);
+				if (!riu->sge)
+					goto free_mem;
+
+				++riu;
+				riu->sge_cnt = 0;
+				riu->raddr = raddr;
+				riu->rkey = be32_to_cpu(db->key);
+			}
+		}
+
+		++ioctx->n_rdma;
+		riu->sge = kmalloc(riu->sge_cnt * sizeof *riu->sge,
+				   GFP_KERNEL);
+		if (!riu->sge)
+			goto free_mem;
+	}
+
+	db = ioctx->rbufs;
+	tsize = cmd->data_length;
+	riu = ioctx->rdma_ius;
+	sg = sg_orig;
+	dma_len = sg_dma_len(&sg[0]);
+	dma_addr = sg_dma_address(&sg[0]);
+
+	/* The second loop maps SG DMA addresses to rdma_iu->ib_sge entries. */
+	for (i = 0, j = 0;
+	     j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
+		rsize = be32_to_cpu(db->len);
+		sge = riu->sge;
+		k = 0;
+
+		while (rsize > 0 && tsize > 0) {
+			sge->addr = dma_addr;
+			sge->lkey = ch->sport->sdev->mr->lkey;
+
+			if (rsize >= dma_len) {
+				sge->length =
+					(tsize < dma_len) ? tsize : dma_len;
+				tsize -= dma_len;
+				rsize -= dma_len;
+
+				if (tsize > 0) {
+					++j;
+					if (j < count) {
+						sg = sg_next(sg);
+						dma_len = sg_dma_len(sg);
+						dma_addr = sg_dma_address(sg);
+					}
+				}
+			} else {
+				sge->length = (tsize < rsize) ? tsize : rsize;
+				tsize -= rsize;
+				dma_len -= rsize;
+				dma_addr += rsize;
+				rsize = 0;
+			}
+
+			++k;
+			if (k == riu->sge_cnt && rsize > 0 && tsize > 0) {
+				++riu;
+				sge = riu->sge;
+				k = 0;
+			} else if (rsize > 0 && tsize > 0)
+				++sge;
+		}
+	}
+
+	return 0;
+
+free_mem:
+	srpt_unmap_sg_to_ib_sge(ch, ioctx);
+
+	return -ENOMEM;
+}
+
+/**
+ * srpt_get_send_ioctx() - Obtain an I/O context for sending to the initiator.
+ */
+static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
+{
+	struct srpt_send_ioctx *ioctx;
+	unsigned long flags;
+
+	BUG_ON(!ch);
+
+	ioctx = NULL;
+	spin_lock_irqsave(&ch->spinlock, flags);
+	if (!list_empty(&ch->free_list)) {
+		ioctx = list_first_entry(&ch->free_list,
+					 struct srpt_send_ioctx, free_list);
+		list_del(&ioctx->free_list);
+	}
+	spin_unlock_irqrestore(&ch->spinlock, flags);
+
+	if (!ioctx)
+		return ioctx;
+
+	BUG_ON(ioctx->ch != ch);
+	kref_init(&ioctx->kref);
+	spin_lock_init(&ioctx->spinlock);
+	ioctx->state = SRPT_STATE_NEW;
+	ioctx->n_rbuf = 0;
+	ioctx->rbufs = NULL;
+	ioctx->n_rdma = 0;
+	ioctx->n_rdma_ius = 0;
+	ioctx->rdma_ius = NULL;
+	ioctx->mapped_sg_count = 0;
+	init_completion(&ioctx->tx_done);
+	ioctx->queue_status_only = false;
+	/*
+	 * transport_init_se_cmd() does not initialize all fields, so do it
+	 * here.
+	 */
+	memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
+	memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));
+
+	return ioctx;
+}
+
+/**
+ * srpt_put_send_ioctx() - Free up resources.
+ */
+static void srpt_put_send_ioctx(struct srpt_send_ioctx *ioctx)
+{
+	struct srpt_rdma_ch *ch;
+	unsigned long flags;
+
+	BUG_ON(!ioctx);
+	ch = ioctx->ch;
+	BUG_ON(!ch);
+
+	WARN_ON(srpt_get_cmd_state(ioctx) != SRPT_STATE_DONE);
+
+	srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
+	transport_generic_free_cmd(&ioctx->cmd, 0);
+
+	if (ioctx->n_rbuf > 1) {
+		kfree(ioctx->rbufs);
+		ioctx->rbufs = NULL;
+		ioctx->n_rbuf = 0;
+	}
+
+	spin_lock_irqsave(&ch->spinlock, flags);
+	list_add(&ioctx->free_list, &ch->free_list);
+	spin_unlock_irqrestore(&ch->spinlock, flags);
+}
+
+static void srpt_put_send_ioctx_kref(struct kref *kref)
+{
+	srpt_put_send_ioctx(container_of(kref, struct srpt_send_ioctx, kref));
+}
+
+/**
+ * srpt_abort_cmd() - Abort a SCSI command.
+ * @ioctx:   I/O context associated with the SCSI command.
+ */
+static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
+{
+	enum srpt_command_state state;
+	unsigned long flags;
+
+	BUG_ON(!ioctx);
+
+	/*
+	 * If the command is in a state where the target core is waiting for
+	 * the ib_srpt driver, change the state to the next state. Changing
+	 * the state of the command from SRPT_STATE_NEED_DATA to
+	 * SRPT_STATE_DATA_IN ensures that srpt_xmit_response() will call this
+	 * function a second time.
+	 */
+
+	spin_lock_irqsave(&ioctx->spinlock, flags);
+	state = ioctx->state;
+	switch (state) {
+	case SRPT_STATE_NEED_DATA:
+		ioctx->state = SRPT_STATE_DATA_IN;
+		break;
+	case SRPT_STATE_DATA_IN:
+	case SRPT_STATE_CMD_RSP_SENT:
+	case SRPT_STATE_MGMT_RSP_SENT:
+		ioctx->state = SRPT_STATE_DONE;
+		break;
+	default:
+		break;
+	}
+	spin_unlock_irqrestore(&ioctx->spinlock, flags);
+
+	if (state == SRPT_STATE_DONE)
+		goto out;
+
+	pr_debug("Aborting cmd with state %d and tag %lld\n", state,
+		 ioctx->tag);
+
+	switch (state) {
+	case SRPT_STATE_NEW:
+	case SRPT_STATE_DATA_IN:
+	case SRPT_STATE_MGMT:
+		/*
+		 * Do nothing - defer abort processing until
+		 * srpt_queue_response() is invoked.
+		 */
+		WARN_ON(!transport_check_aborted_status(&ioctx->cmd, false));
+		break;
+	case SRPT_STATE_NEED_DATA:
+		/* DMA_TO_DEVICE (write) - RDMA read error. */
+		atomic_set(&ioctx->cmd.transport_lun_stop, 1);
+		transport_generic_handle_data(&ioctx->cmd);
+		break;
+	case SRPT_STATE_CMD_RSP_SENT:
+		/*
+		 * SRP_RSP sending failed or the SRP_RSP send completion has
+		 * not been received in time.
+		 */
+		srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
+		atomic_set(&ioctx->cmd.transport_lun_stop, 1);
+		kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+		break;
+	case SRPT_STATE_MGMT_RSP_SENT:
+		srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
+		kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+		break;
+	default:
+		WARN_ON("ERROR: unexpected command state");
+		break;
+	}
+
+out:
+	return state;
+}
+
+/**
+ * srpt_handle_send_err_comp() - Process an IB_WC_SEND error completion.
+ */
+static void srpt_handle_send_err_comp(struct srpt_rdma_ch *ch, u64 wr_id)
+{
+	struct srpt_send_ioctx *ioctx;
+	enum srpt_command_state state;
+	struct se_cmd *cmd;
+	u32 index;
+
+	atomic_inc(&ch->sq_wr_avail);
+
+	index = idx_from_wr_id(wr_id);
+	ioctx = ch->ioctx_ring[index];
+	state = srpt_get_cmd_state(ioctx);
+	cmd = &ioctx->cmd;
+
+	WARN_ON(state != SRPT_STATE_CMD_RSP_SENT
+		&& state != SRPT_STATE_MGMT_RSP_SENT
+		&& state != SRPT_STATE_NEED_DATA
+		&& state != SRPT_STATE_DONE);
+
+	/* If SRP_RSP sending failed, undo the ch->req_lim change. */
+	if (state == SRPT_STATE_CMD_RSP_SENT
+	    || state == SRPT_STATE_MGMT_RSP_SENT)
+		atomic_dec(&ch->req_lim);
+
+	srpt_abort_cmd(ioctx);
+}
+
+/**
+ * srpt_handle_send_comp() - Process an IB send completion notification.
+ */
+static void srpt_handle_send_comp(struct srpt_rdma_ch *ch,
+				  struct srpt_send_ioctx *ioctx)
+{
+	enum srpt_command_state state;
+
+	atomic_inc(&ch->sq_wr_avail);
+
+	state = srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
+
+	if (WARN_ON(state != SRPT_STATE_CMD_RSP_SENT
+		    && state != SRPT_STATE_MGMT_RSP_SENT
+		    && state != SRPT_STATE_DONE))
+		pr_debug("state = %d\n", state);
+
+	if (state != SRPT_STATE_DONE)
+		kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+	else
+		printk(KERN_ERR "IB completion has been received too late for"
+		       " wr_id = %u.\n", ioctx->ioctx.index);
+}
+
+/**
+ * srpt_handle_rdma_comp() - Process an IB RDMA completion notification.
+ *
+ * Note: transport_generic_handle_data() is asynchronous so unmapping the
+ * data that has been transferred via IB RDMA must be postponed until the
+ * check_stop_free() callback.
+ */
+static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch,
+				  struct srpt_send_ioctx *ioctx,
+				  enum srpt_opcode opcode)
+{
+	WARN_ON(ioctx->n_rdma <= 0);
+	atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
+
+	if (opcode == SRPT_RDMA_READ_LAST) {
+		if (srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA,
+						SRPT_STATE_DATA_IN))
+			transport_generic_handle_data(&ioctx->cmd);
+		else
+			printk(KERN_ERR "%s[%d]: wrong state = %d\n", __func__,
+			       __LINE__, srpt_get_cmd_state(ioctx));
+	} else if (opcode == SRPT_RDMA_ABORT) {
+		ioctx->rdma_aborted = true;
+	} else {
+		WARN(true, "unexpected opcode %d\n", opcode);
+	}
+}
+
+/**
+ * srpt_handle_rdma_err_comp() - Process an IB RDMA error completion.
+ */
+static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch,
+				      struct srpt_send_ioctx *ioctx,
+				      enum srpt_opcode opcode)
+{
+	struct se_cmd *cmd;
+	enum srpt_command_state state;
+
+	cmd = &ioctx->cmd;
+	state = srpt_get_cmd_state(ioctx);
+	switch (opcode) {
+	case SRPT_RDMA_READ_LAST:
+		if (ioctx->n_rdma <= 0) {
+			printk(KERN_ERR "Received invalid RDMA read"
+			       " error completion with idx %d\n",
+			       ioctx->ioctx.index);
+			break;
+		}
+		atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
+		if (state == SRPT_STATE_NEED_DATA)
+			srpt_abort_cmd(ioctx);
+		else
+			printk(KERN_ERR "%s[%d]: wrong state = %d\n",
+			       __func__, __LINE__, state);
+		break;
+	case SRPT_RDMA_WRITE_LAST:
+		atomic_set(&ioctx->cmd.transport_lun_stop, 1);
+		break;
+	default:
+		printk(KERN_ERR "%s[%d]: opcode = %u\n", __func__,
+		       __LINE__, opcode);
+		break;
+	}
+}
+
+/**
+ * srpt_build_cmd_rsp() - Build an SRP_RSP response.
+ * @ch: RDMA channel through which the request has been received.
+ * @ioctx: I/O context associated with the SRP_CMD request. The response will
+ *   be built in the buffer ioctx->buf points at and hence this function will
+ *   overwrite the request data.
+ * @tag: tag of the request for which this response is being generated.
+ * @status: value for the STATUS field of the SRP_RSP information unit.
+ *
+ * Returns the size in bytes of the SRP_RSP response.
+ *
+ * An SRP_RSP response contains a SCSI status or service response. See also
+ * section 6.9 in the SRP r16a document for the format of an SRP_RSP
+ * response. See also SPC-2 for more information about sense data.
+ */
+static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
+			      struct srpt_send_ioctx *ioctx, u64 tag,
+			      int status)
+{
+	struct srp_rsp *srp_rsp;
+	const u8 *sense_data;
+	int sense_data_len, max_sense_len;
+
+	/*
+	 * The lowest bit of all SAM-3 status codes is zero (see also
+	 * paragraph 5.3 in SAM-3).
+	 */
+	WARN_ON(status & 1);
+
+	srp_rsp = ioctx->ioctx.buf;
+	BUG_ON(!srp_rsp);
+
+	sense_data = ioctx->sense_data;
+	sense_data_len = ioctx->cmd.scsi_sense_length;
+	WARN_ON(sense_data_len > sizeof(ioctx->sense_data));
+
+	memset(srp_rsp, 0, sizeof *srp_rsp);
+	srp_rsp->opcode = SRP_RSP;
+	srp_rsp->req_lim_delta =
+		__constant_cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
+	srp_rsp->tag = tag;
+	srp_rsp->status = status;
+
+	if (sense_data_len) {
+		BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp));
+		max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp);
+		if (sense_data_len > max_sense_len) {
+			printk(KERN_WARNING "truncated sense data from %d to %d"
+			       " bytes\n", sense_data_len, max_sense_len);
+			sense_data_len = max_sense_len;
+		}
+
+		srp_rsp->flags |= SRP_RSP_FLAG_SNSVALID;
+		srp_rsp->sense_data_len = cpu_to_be32(sense_data_len);
+		memcpy(srp_rsp + 1, sense_data, sense_data_len);
+	}
+
+	return sizeof(*srp_rsp) + sense_data_len;
+}
+
+/**
+ * srpt_build_tskmgmt_rsp() - Build a task management response.
+ * @ch:       RDMA channel through which the request has been received.
+ * @ioctx:    I/O context in which the SRP_RSP response will be built.
+ * @rsp_code: RSP_CODE that will be stored in the response.
+ * @tag:      Tag of the request for which this response is being generated.
+ *
+ * Returns the size in bytes of the SRP_RSP response.
+ *
+ * An SRP_RSP response contains a SCSI status or service response. See also
+ * section 6.9 in the SRP r16a document for the format of an SRP_RSP
+ * response.
+ */
+static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
+				  struct srpt_send_ioctx *ioctx,
+				  u8 rsp_code, u64 tag)
+{
+	struct srp_rsp *srp_rsp;
+	int resp_data_len;
+	int resp_len;
+
+	resp_data_len = (rsp_code == SRP_TSK_MGMT_SUCCESS) ? 0 : 4;
+	resp_len = sizeof(*srp_rsp) + resp_data_len;
+
+	srp_rsp = ioctx->ioctx.buf;
+	BUG_ON(!srp_rsp);
+	memset(srp_rsp, 0, sizeof *srp_rsp);
+
+	srp_rsp->opcode = SRP_RSP;
+	srp_rsp->req_lim_delta = __constant_cpu_to_be32(1
+				    + atomic_xchg(&ch->req_lim_delta, 0));
+	srp_rsp->tag = tag;
+
+	if (rsp_code != SRP_TSK_MGMT_SUCCESS) {
+		srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
+		srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
+		srp_rsp->data[3] = rsp_code;
+	}
+
+	return resp_len;
+}
+
+#define NO_SUCH_LUN ((uint64_t)-1LL)
+
+/*
+ * SCSI LUN addressing method. See also SAM-2 and the section about
+ * eight byte LUNs.
+ */
+enum scsi_lun_addr_method {
+	SCSI_LUN_ADDR_METHOD_PERIPHERAL   = 0,
+	SCSI_LUN_ADDR_METHOD_FLAT         = 1,
+	SCSI_LUN_ADDR_METHOD_LUN          = 2,
+	SCSI_LUN_ADDR_METHOD_EXTENDED_LUN = 3,
+};
+
+/*
+ * srpt_unpack_lun() - Convert from network LUN to linear LUN.
+ *
+ * Convert a 2-byte, 4-byte, 6-byte or 8-byte LUN structure in network byte
+ * order (big endian) to a linear LUN. Supports three LUN addressing methods:
+ * peripheral, flat and logical unit. See also SAM-2, section 4.9.4 (page 40).
+ */
+static uint64_t srpt_unpack_lun(const uint8_t *lun, int len)
+{
+	uint64_t res = NO_SUCH_LUN;
+	int addressing_method;
+
+	if (unlikely(len < 2)) {
+		printk(KERN_ERR "Illegal LUN length %d, expected 2 bytes or "
+		       "more", len);
+		goto out;
+	}
+
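+	/* Check that all bytes past the first LUN level are zero. */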
+	switch (len) {
+	case 8:
+		if ((*((__be64 *)lun) &
+		     __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
+			goto out_err;
+		break;
+	case 4:
+		if (*((__be16 *)&lun[2]) != 0)
+			goto out_err;
+		break;
+	case 6:
+		if (*((__be32 *)&lun[2]) != 0)
+			goto out_err;
+		break;
+	case 2:
+		break;
+	default:
+		goto out_err;
+	}
+
+	addressing_method = (*lun) >> 6; /* highest two bits of byte 0 */
+	switch (addressing_method) {
+	case SCSI_LUN_ADDR_METHOD_PERIPHERAL:
+	case SCSI_LUN_ADDR_METHOD_FLAT:
+	case SCSI_LUN_ADDR_METHOD_LUN:
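+		/* Use the lower 14 bits of the first LUN level. */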
+		res = *(lun + 1) | (((*lun) & 0x3f) << 8);
+		break;
+
+	case SCSI_LUN_ADDR_METHOD_EXTENDED_LUN:
+	default:
+		printk(KERN_ERR "Unimplemented LUN addressing method %u",
+		       addressing_method);
+		break;
+	}
+
+out:
+	return res;
+
+out_err:
+	printk(KERN_ERR "Support for multi-level LUNs has not yet been"
+	       " implemented");
+	goto out;
+}
+
+static int srpt_check_stop_free(struct se_cmd *cmd)
+{
+	struct srpt_send_ioctx *ioctx;
+
+	ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
+	return kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+}
+
+/**
+ * srpt_handle_cmd() - Process SRP_CMD.
+ */
+static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
+			   struct srpt_recv_ioctx *recv_ioctx,
+			   struct srpt_send_ioctx *send_ioctx)
+{
+	struct se_cmd *cmd;
+	struct srp_cmd *srp_cmd;
+	uint64_t unpacked_lun;
+	u64 data_len;
+	enum dma_data_direction dir;
+	int ret;
+
+	BUG_ON(!send_ioctx);
+
+	srp_cmd = recv_ioctx->ioctx.buf;
+	kref_get(&send_ioctx->kref);
+	cmd = &send_ioctx->cmd;
+	send_ioctx->tag = srp_cmd->tag;
+
+	switch (srp_cmd->task_attr) {
+	case SRP_CMD_SIMPLE_Q:
+		cmd->sam_task_attr = MSG_SIMPLE_TAG;
+		break;
+	case SRP_CMD_ORDERED_Q:
+	default:
+		cmd->sam_task_attr = MSG_ORDERED_TAG;
+		break;
+	case SRP_CMD_HEAD_OF_Q:
+		cmd->sam_task_attr = MSG_HEAD_TAG;
+		break;
+	case SRP_CMD_ACA:
+		cmd->sam_task_attr = MSG_ACA_TAG;
+		break;
+	}
+
+	ret = srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len);
+	if (ret) {
+		printk(KERN_ERR "0x%llx: parsing SRP descriptor table failed.\n",
+		       srp_cmd->tag);
+		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+		goto send_sense;
+	}
+
+	cmd->data_length = data_len;
+	cmd->data_direction = dir;
+	unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_cmd->lun,
+				       sizeof(srp_cmd->lun));
+	if (transport_lookup_cmd_lun(cmd, unpacked_lun) < 0)
+		goto send_sense;
+	ret = transport_generic_allocate_tasks(cmd, srp_cmd->cdb);
+	if (cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
+		srpt_queue_status(cmd);
+	else if (cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION)
+		goto send_sense;
+	else
+		WARN_ON_ONCE(ret);
+
+	transport_handle_cdb_direct(cmd);
+	return 0;
+
+send_sense:
+	transport_send_check_condition_and_sense(cmd, cmd->scsi_sense_reason,
+						 0);
+	return -1;
+}
+
+/**
+ * srpt_rx_mgmt_fn_tag() - Process a task management function by tag.
+ * @ioctx: I/O context of the SRP task management request.
+ * @tag:   Tag of the SRP command the task management request refers to.
+ *
+ * Returns zero if the target core will process the task management
+ * request asynchronously.
+ *
+ * Note: It is assumed that the initiator serializes tag-based task management
+ * requests.
+ */
+static int srpt_rx_mgmt_fn_tag(struct srpt_send_ioctx *ioctx, u64 tag)
+{
+	struct srpt_device *sdev;
+	struct srpt_rdma_ch *ch;
+	struct srpt_send_ioctx *target;
+	int ret, i;
+
+	ret = -EINVAL;
+	ch = ioctx->ch;
+	BUG_ON(!ch);
+	BUG_ON(!ch->sport);
+	sdev = ch->sport->sdev;
+	BUG_ON(!sdev);
+	spin_lock_irq(&sdev->spinlock);
+	for (i = 0; i < ch->rq_size; ++i) {
+		target = ch->ioctx_ring[i];
+		if (target->cmd.se_lun == ioctx->cmd.se_lun &&
+		    target->tag == tag &&
+		    srpt_get_cmd_state(target) != SRPT_STATE_DONE) {
+			ret = 0;
+			/* now let the target core abort &target->cmd; */
+			break;
+		}
+	}
+	spin_unlock_irq(&sdev->spinlock);
+	return ret;
+}
+
+static int srp_tmr_to_tcm(int fn)
+{
+	switch (fn) {
+	case SRP_TSK_ABORT_TASK:
+		return TMR_ABORT_TASK;
+	case SRP_TSK_ABORT_TASK_SET:
+		return TMR_ABORT_TASK_SET;
+	case SRP_TSK_CLEAR_TASK_SET:
+		return TMR_CLEAR_TASK_SET;
+	case SRP_TSK_LUN_RESET:
+		return TMR_LUN_RESET;
+	case SRP_TSK_CLEAR_ACA:
+		return TMR_CLEAR_ACA;
+	default:
+		return -1;
+	}
+}
+
+/**
+ * srpt_handle_tsk_mgmt() - Process an SRP_TSK_MGMT information unit.
+ *
+ * For more information about SRP_TSK_MGMT information units, see also section
+ * 6.7 in the SRP r16a document.
+ */
+static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
+				 struct srpt_recv_ioctx *recv_ioctx,
+				 struct srpt_send_ioctx *send_ioctx)
+{
+	struct srp_tsk_mgmt *srp_tsk;
+	struct se_cmd *cmd;
+	uint64_t unpacked_lun;
+	int tcm_tmr;
+	int res;
+
+	BUG_ON(!send_ioctx);
+
+	srp_tsk = recv_ioctx->ioctx.buf;
+	cmd = &send_ioctx->cmd;
+
+	pr_debug("recv tsk_mgmt fn %d for task_tag %lld and cmd tag %lld"
+		 " cm_id %p sess %p\n", srp_tsk->tsk_mgmt_func,
+		 srp_tsk->task_tag, srp_tsk->tag, ch->cm_id, ch->sess);
+
+	srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
+	send_ioctx->tag = srp_tsk->tag;
+	tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
+	if (tcm_tmr < 0) {
+		send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+		send_ioctx->cmd.se_tmr_req->response =
+			TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
+		goto process_tmr;
+	}
+	cmd->se_tmr_req = core_tmr_alloc_req(cmd, NULL, tcm_tmr, GFP_KERNEL);
+	if (!cmd->se_tmr_req) {
+		send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+		send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
+		goto process_tmr;
+	}
+
+	unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_tsk->lun,
+				       sizeof(srp_tsk->lun));
+	res = transport_lookup_tmr_lun(&send_ioctx->cmd, unpacked_lun);
+	if (res) {
+		pr_debug("rejecting TMR for LUN %lld\n", unpacked_lun);
+		send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+		send_ioctx->cmd.se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
+		goto process_tmr;
+	}
+
+	if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK)
+		srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag);
+
+process_tmr:
+	kref_get(&send_ioctx->kref);
+	if (!(send_ioctx->cmd.se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
+		transport_generic_handle_tmr(&send_ioctx->cmd);
+	else
+		transport_send_check_condition_and_sense(cmd,
+						cmd->scsi_sense_reason, 0);
+
+}
+
+/**
+ * srpt_handle_new_iu() - Process a newly received information unit.
+ * @ch:         RDMA channel through which the information unit has been
+ *              received.
+ * @recv_ioctx: Receive I/O context associated with the information unit.
+ * @send_ioctx: Send I/O context to use, or NULL if a new one has to be
+ *              allocated.
+ */
+static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
+			       struct srpt_recv_ioctx *recv_ioctx,
+			       struct srpt_send_ioctx *send_ioctx)
+{
+	struct srp_cmd *srp_cmd;
+	enum rdma_ch_state ch_state;
+
+	BUG_ON(!ch);
+	BUG_ON(!recv_ioctx);
+
+	ib_dma_sync_single_for_cpu(ch->sport->sdev->device,
+				   recv_ioctx->ioctx.dma, srp_max_req_size,
+				   DMA_FROM_DEVICE);
+
+	ch_state = srpt_get_ch_state(ch);
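+	/* Queue commands that arrive before the channel is live. */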
+	if (unlikely(ch_state == CH_CONNECTING)) {
+		list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
+		goto out;
+	}
+
+	if (unlikely(ch_state != CH_LIVE))
+		goto out;
+
+	srp_cmd = recv_ioctx->ioctx.buf;
+	if (srp_cmd->opcode == SRP_CMD || srp_cmd->opcode == SRP_TSK_MGMT) {
+		if (!send_ioctx)
+			send_ioctx = srpt_get_send_ioctx(ch);
+		if (unlikely(!send_ioctx)) {
+			list_add_tail(&recv_ioctx->wait_list,
+				      &ch->cmd_wait_list);
+			goto out;
+		}
+	}
+
+	transport_init_se_cmd(&send_ioctx->cmd, &srpt_target->tf_ops, ch->sess,
+			      0, DMA_NONE, MSG_SIMPLE_TAG,
+			      send_ioctx->sense_data);
+
+	switch (srp_cmd->opcode) {
+	case SRP_CMD:
+		srpt_handle_cmd(ch, recv_ioctx, send_ioctx);
+		break;
+	case SRP_TSK_MGMT:
+		srpt_handle_tsk_mgmt(ch, recv_ioctx, send_ioctx);
+		break;
+	case SRP_I_LOGOUT:
+		printk(KERN_ERR "Not yet implemented: SRP_I_LOGOUT\n");
+		break;
+	case SRP_CRED_RSP:
+		pr_debug("received SRP_CRED_RSP\n");
+		break;
+	case SRP_AER_RSP:
+		pr_debug("received SRP_AER_RSP\n");
+		break;
+	case SRP_RSP:
+		printk(KERN_ERR "Received SRP_RSP\n");
+		break;
+	default:
+		printk(KERN_ERR "received IU with unknown opcode 0x%x\n",
+		       srp_cmd->opcode);
+		break;
+	}
+
+	srpt_post_recv(ch->sport->sdev, recv_ioctx);
+out:
+	return;
+}
+
+static void srpt_process_rcv_completion(struct ib_cq *cq,
+					struct srpt_rdma_ch *ch,
+					struct ib_wc *wc)
+{
+	struct srpt_device *sdev = ch->sport->sdev;
+	struct srpt_recv_ioctx *ioctx;
+	u32 index;
+
+	index = idx_from_wr_id(wc->wr_id);
+	if (wc->status == IB_WC_SUCCESS) {
+		int req_lim;
+
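+		/* Each received IU consumes one request limit credit. */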
+		req_lim = atomic_dec_return(&ch->req_lim);
+		if (unlikely(req_lim < 0))
+			printk(KERN_ERR "req_lim = %d < 0\n", req_lim);
+		ioctx = sdev->ioctx_ring[index];
+		srpt_handle_new_iu(ch, ioctx, NULL);
+	} else {
+		printk(KERN_INFO "receiving failed for idx %u with status %d\n",
+		       index, wc->status);
+	}
+}
+
+/**
+ * srpt_process_send_completion() - Process an IB send completion.
+ *
+ * Note: Although this has not yet been observed during tests, at least in
+ * theory it is possible that the srpt_get_send_ioctx() call invoked by
+ * srpt_handle_new_iu() fails. This is possible because the req_lim_delta
+ * value in each response is set to one, and it is possible that this response
+ * makes the initiator send a new request before the send completion for that
+ * response has been processed. This could e.g. happen if the call to
+ * srpt_put_send_ioctx() is delayed because of a higher priority interrupt or
+ * if IB retransmission causes generation of the send completion to be
+ * delayed. Incoming information units for which srpt_get_send_ioctx() fails
+ * are queued on cmd_wait_list. The code below processes these delayed
+ * requests one at a time.
+ */
+static void srpt_process_send_completion(struct ib_cq *cq,
+					 struct srpt_rdma_ch *ch,
+					 struct ib_wc *wc)
+{
+	struct srpt_send_ioctx *send_ioctx;
+	uint32_t index;
+	enum srpt_opcode opcode;
+
+	index = idx_from_wr_id(wc->wr_id);
+	opcode = opcode_from_wr_id(wc->wr_id);
+	send_ioctx = ch->ioctx_ring[index];
+	if (wc->status == IB_WC_SUCCESS) {
+		if (opcode == SRPT_SEND)
+			srpt_handle_send_comp(ch, send_ioctx);
+		else {
+			WARN_ON(opcode != SRPT_RDMA_ABORT &&
+				wc->opcode != IB_WC_RDMA_READ);
+			srpt_handle_rdma_comp(ch, send_ioctx, opcode);
+		}
+	} else {
+		if (opcode == SRPT_SEND) {
+			printk(KERN_INFO "sending response for idx %u failed"
+			       " with status %d\n", index, wc->status);
+			srpt_handle_send_err_comp(ch, wc->wr_id);
+		} else if (opcode != SRPT_RDMA_MID) {
+			printk(KERN_INFO "RDMA t %d for idx %u failed with"
+				" status %d", opcode, index, wc->status);
+			srpt_handle_rdma_err_comp(ch, send_ioctx, opcode);
+		}
+	}
+
+	while (unlikely(opcode == SRPT_SEND
+			&& !list_empty(&ch->cmd_wait_list)
+			&& srpt_get_ch_state(ch) == CH_LIVE
+			&& (send_ioctx = srpt_get_send_ioctx(ch)) != NULL)) {
+		struct srpt_recv_ioctx *recv_ioctx;
+
+		recv_ioctx = list_first_entry(&ch->cmd_wait_list,
+					      struct srpt_recv_ioctx,
+					      wait_list);
+		list_del(&recv_ioctx->wait_list);
+		srpt_handle_new_iu(ch, recv_ioctx, send_ioctx);
+	}
+}
+
+static void srpt_process_completion(struct ib_cq *cq, struct srpt_rdma_ch *ch)
+{
+	struct ib_wc *const wc = ch->wc;
+	int i, n;
+
+	WARN_ON(cq != ch->cq);
+
+	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+	while ((n = ib_poll_cq(cq, ARRAY_SIZE(ch->wc), wc)) > 0) {
+		for (i = 0; i < n; i++) {
+			if (opcode_from_wr_id(wc[i].wr_id) == SRPT_RECV)
+				srpt_process_rcv_completion(cq, ch, &wc[i]);
+			else
+				srpt_process_send_completion(cq, ch, &wc[i]);
+		}
+	}
+}
+
+/**
+ * srpt_completion() - IB completion queue callback function.
+ *
+ * Notes:
+ * - It is guaranteed that a completion handler will never be invoked
+ *   concurrently on two different CPUs for the same completion queue. See also
+ *   Documentation/infiniband/core_locking.txt and the implementation of
+ *   handle_edge_irq() in kernel/irq/chip.c.
+ * - When threaded IRQs are enabled, completion handlers are invoked in thread
+ *   context instead of interrupt context.
+ */
+static void srpt_completion(struct ib_cq *cq, void *ctx)
+{
+	struct srpt_rdma_ch *ch = ctx;
+
+	wake_up_interruptible(&ch->wait_queue);
+}
+
+static int srpt_compl_thread(void *arg)
+{
+	struct srpt_rdma_ch *ch;
+
+	/* Hibernation / freezing of the SRPT kernel thread is not supported. */
+	current->flags |= PF_NOFREEZE;
+
+	ch = arg;
+	BUG_ON(!ch);
+	printk(KERN_INFO "Session %s: kernel thread %s (PID %d) started\n",
+	       ch->sess_name, ch->thread->comm, current->pid);
+	while (!kthread_should_stop()) {
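+		/*
+		 * srpt_process_completion() runs as a side effect of
+		 * evaluating the wait condition below.
+		 */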
+		wait_event_interruptible(ch->wait_queue,
+			(srpt_process_completion(ch->cq, ch),
+			 kthread_should_stop()));
+	}
+	printk(KERN_INFO "Session %s: kernel thread %s (PID %d) stopped\n",
+	       ch->sess_name, ch->thread->comm, current->pid);
+	return 0;
+}
+
+/**
+ * srpt_create_ch_ib() - Create receive and send completion queues.
+ */
+static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
+{
+	struct ib_qp_init_attr *qp_init;
+	struct srpt_port *sport = ch->sport;
+	struct srpt_device *sdev = sport->sdev;
+	u32 srp_sq_size = sport->port_attrib.srp_sq_size;
+	int ret;
+
+	WARN_ON(ch->rq_size < 1);
+
+	ret = -ENOMEM;
+	qp_init = kzalloc(sizeof *qp_init, GFP_KERNEL);
+	if (!qp_init)
+		goto out;
+
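+	/* A single CQ handles both receive and send completions. */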
+	ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch,
+			      ch->rq_size + srp_sq_size, 0);
+	if (IS_ERR(ch->cq)) {
+		ret = PTR_ERR(ch->cq);
+		printk(KERN_ERR "failed to create CQ cqe= %d ret= %d\n",
+		       ch->rq_size + srp_sq_size, ret);
+		goto out;
+	}
+
+	qp_init->qp_context = (void *)ch;
+	qp_init->event_handler
+		= (void(*)(struct ib_event *, void*))srpt_qp_event;
+	qp_init->send_cq = ch->cq;
+	qp_init->recv_cq = ch->cq;
+	qp_init->srq = sdev->srq;
+	qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
+	qp_init->qp_type = IB_QPT_RC;
+	qp_init->cap.max_send_wr = srp_sq_size;
+	qp_init->cap.max_send_sge = SRPT_DEF_SG_PER_WQE;
+
+	ch->qp = ib_create_qp(sdev->pd, qp_init);
+	if (IS_ERR(ch->qp)) {
+		ret = PTR_ERR(ch->qp);
+		printk(KERN_ERR "failed to create_qp ret= %d\n", ret);
+		goto err_destroy_cq;
+	}
+
+	atomic_set(&ch->sq_wr_avail, qp_init->cap.max_send_wr);
+
+	pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
+		 __func__, ch->cq->cqe, qp_init->cap.max_send_sge,
+		 qp_init->cap.max_send_wr, ch->cm_id);
+
+	ret = srpt_init_ch_qp(ch, ch->qp);
+	if (ret)
+		goto err_destroy_qp;
+
+	init_waitqueue_head(&ch->wait_queue);
+
+	pr_debug("creating thread for session %s\n", ch->sess_name);
+
+	ch->thread = kthread_run(srpt_compl_thread, ch, "ib_srpt_compl");
+	if (IS_ERR(ch->thread)) {
+		printk(KERN_ERR "failed to create kernel thread %ld\n",
+		       PTR_ERR(ch->thread));
+		ch->thread = NULL;
+		goto err_destroy_qp;
+	}
+
+out:
+	kfree(qp_init);
+	return ret;
+
+err_destroy_qp:
+	ib_destroy_qp(ch->qp);
+err_destroy_cq:
+	ib_destroy_cq(ch->cq);
+	goto out;
+}
+
+static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
+{
+	if (ch->thread)
+		kthread_stop(ch->thread);
+
+	ib_destroy_qp(ch->qp);
+	ib_destroy_cq(ch->cq);
+}
+
+/**
+ * __srpt_close_ch() - Close an RDMA channel by setting the QP error state.
+ *
+ * Reset the QP and make sure all resources associated with the channel will
+ * be deallocated at an appropriate time.
+ *
+ * Note: The caller must hold ch->sport->sdev->spinlock.
+ */
+static void __srpt_close_ch(struct srpt_rdma_ch *ch)
+{
+	struct srpt_device *sdev;
+	enum rdma_ch_state prev_state;
+	unsigned long flags;
+
+	sdev = ch->sport->sdev;
+
+	spin_lock_irqsave(&ch->spinlock, flags);
+	prev_state = ch->state;
+	switch (prev_state) {
+	case CH_CONNECTING:
+	case CH_LIVE:
+		ch->state = CH_DISCONNECTING;
+		break;
+	default:
+		break;
+	}
+	spin_unlock_irqrestore(&ch->spinlock, flags);
+
+	switch (prev_state) {
+	case CH_CONNECTING:
+		ib_send_cm_rej(ch->cm_id, IB_CM_REJ_NO_RESOURCES, NULL, 0,
+			       NULL, 0);
+		/* fall through */
+	case CH_LIVE:
+		if (ib_send_cm_dreq(ch->cm_id, NULL, 0) < 0)
+			printk(KERN_ERR "sending CM DREQ failed.\n");
+		break;
+	case CH_DISCONNECTING:
+		break;
+	case CH_DRAINING:
+	case CH_RELEASING:
+		break;
+	}
+}
+
+/**
+ * srpt_close_ch() - Close an RDMA channel.
+ */
+static void srpt_close_ch(struct srpt_rdma_ch *ch)
+{
+	struct srpt_device *sdev;
+
+	sdev = ch->sport->sdev;
+	spin_lock_irq(&sdev->spinlock);
+	__srpt_close_ch(ch);
+	spin_unlock_irq(&sdev->spinlock);
+}
+
+/**
+ * srpt_drain_channel() - Drain a channel by resetting the IB queue pair.
+ * @cm_id: Pointer to the CM ID of the channel to be drained.
+ *
+ * Note: Must be called from inside srpt_cm_handler to avoid a race between
+ * accessing sdev->spinlock and the call to kfree(sdev) in srpt_remove_one()
+ * (the caller of srpt_cm_handler holds the cm_id spinlock; srpt_remove_one()
+ * waits until all target sessions for the associated IB device have been
+ * unregistered and target session registration involves a call to
+ * ib_destroy_cm_id(), which locks the cm_id spinlock and hence waits until
+ * this function has finished).
+ */
+static void srpt_drain_channel(struct ib_cm_id *cm_id)
+{
+	struct srpt_device *sdev;
+	struct srpt_rdma_ch *ch;
+	int ret;
+	bool do_reset = false;
+
+	WARN_ON_ONCE(irqs_disabled());
+
+	sdev = cm_id->context;
+	BUG_ON(!sdev);
+	spin_lock_irq(&sdev->spinlock);
+	list_for_each_entry(ch, &sdev->rch_list, list) {
+		if (ch->cm_id == cm_id) {
+			do_reset = srpt_test_and_set_ch_state(ch,
+					CH_CONNECTING, CH_DRAINING) ||
+				   srpt_test_and_set_ch_state(ch,
+					CH_LIVE, CH_DRAINING) ||
+				   srpt_test_and_set_ch_state(ch,
+					CH_DISCONNECTING, CH_DRAINING);
+			break;
+		}
+	}
+	spin_unlock_irq(&sdev->spinlock);
+
+	if (do_reset) {
+		ret = srpt_ch_qp_err(ch);
+		if (ret < 0)
+			printk(KERN_ERR "Setting queue pair in error state"
+			       " failed: %d\n", ret);
+	}
+}
+
+/**
+ * srpt_find_channel() - Look up an RDMA channel.
+ * @cm_id: Pointer to the CM ID of the channel to be looked up.
+ *
+ * Return NULL if no matching RDMA channel has been found.
+ */
+static struct srpt_rdma_ch *srpt_find_channel(struct srpt_device *sdev,
+					      struct ib_cm_id *cm_id)
+{
+	struct srpt_rdma_ch *ch;
+	bool found;
+
+	WARN_ON_ONCE(irqs_disabled());
+	BUG_ON(!sdev);
+
+	found = false;
+	spin_lock_irq(&sdev->spinlock);
+	list_for_each_entry(ch, &sdev->rch_list, list) {
+		if (ch->cm_id == cm_id) {
+			found = true;
+			break;
+		}
+	}
+	spin_unlock_irq(&sdev->spinlock);
+
+	return found ? ch : NULL;
+}
+
+/**
+ * srpt_release_channel() - Release channel resources.
+ *
+ * Schedules the actual release because:
+ * - Calling ib_destroy_cm_id() from inside an IB CM callback would trigger
+ *   a deadlock.
+ * - It is not safe to call TCM transport_* functions from interrupt context.
+ */
+static void srpt_release_channel(struct srpt_rdma_ch *ch)
+{
+	schedule_work(&ch->release_work);
+}
+
+static void srpt_release_channel_work(struct work_struct *w)
+{
+	struct srpt_rdma_ch *ch;
+	struct srpt_device *sdev;
+
+	ch = container_of(w, struct srpt_rdma_ch, release_work);
+	pr_debug("ch = %p; ch->sess = %p; release_done = %p\n", ch, ch->sess,
+		 ch->release_done);
+
+	sdev = ch->sport->sdev;
+	BUG_ON(!sdev);
+
+	transport_deregister_session_configfs(ch->sess);
+	transport_deregister_session(ch->sess);
+	ch->sess = NULL;
+
+	srpt_destroy_ch_ib(ch);
+
+	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
+			     ch->sport->sdev, ch->rq_size,
+			     ch->rsp_size, DMA_TO_DEVICE);
+
+	spin_lock_irq(&sdev->spinlock);
+	list_del(&ch->list);
+	spin_unlock_irq(&sdev->spinlock);
+
+	ib_destroy_cm_id(ch->cm_id);
+
+	if (ch->release_done)
+		complete(ch->release_done);
+
+	wake_up(&sdev->ch_releaseQ);
+
+	kfree(ch);
+}
+
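+/*
+ * Look up a node ACL by its 128-bit initiator port ID. The caller must hold
+ * sport->port_acl_lock.
+ */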
+static struct srpt_node_acl *__srpt_lookup_acl(struct srpt_port *sport,
+					       u8 i_port_id[16])
+{
+	struct srpt_node_acl *nacl;
+
+	list_for_each_entry(nacl, &sport->port_acl_list, list)
+		if (memcmp(nacl->i_port_id, i_port_id,
+			   sizeof(nacl->i_port_id)) == 0)
+			return nacl;
+
+	return NULL;
+}
+
+static struct srpt_node_acl *srpt_lookup_acl(struct srpt_port *sport,
+					     u8 i_port_id[16])
+{
+	struct srpt_node_acl *nacl;
+
+	spin_lock_irq(&sport->port_acl_lock);
+	nacl = __srpt_lookup_acl(sport, i_port_id);
+	spin_unlock_irq(&sport->port_acl_lock);
+
+	return nacl;
+}
+
+/**
+ * srpt_cm_req_recv() - Process the event IB_CM_REQ_RECEIVED.
+ *
+ * Ownership of the cm_id is transferred to the target session if this
+ * function returns zero. Otherwise the caller remains the owner of cm_id.
+ */
+static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
+			    struct ib_cm_req_event_param *param,
+			    void *private_data)
+{
+	struct srpt_device *sdev = cm_id->context;
+	struct srpt_port *sport = &sdev->port[param->port - 1];
+	struct srp_login_req *req;
+	struct srp_login_rsp *rsp;
+	struct srp_login_rej *rej;
+	struct ib_cm_rep_param *rep_param;
+	struct srpt_rdma_ch *ch, *tmp_ch;
+	struct srpt_node_acl *nacl;
+	u32 it_iu_len;
+	int i;
+	int ret = 0;
+
+	WARN_ON_ONCE(irqs_disabled());
+
+	if (WARN_ON(!sdev || !private_data))
+		return -EINVAL;
+
+	req = (struct srp_login_req *)private_data;
+
+	it_iu_len = be32_to_cpu(req->req_it_iu_len);
+
+	printk(KERN_INFO "Received SRP_LOGIN_REQ with i_port_id 0x%llx:0x%llx,"
+	       " t_port_id 0x%llx:0x%llx and it_iu_len %d on port %d"
+	       " (guid=0x%llx:0x%llx)\n",
+	       be64_to_cpu(*(__be64 *)&req->initiator_port_id[0]),
+	       be64_to_cpu(*(__be64 *)&req->initiator_port_id[8]),
+	       be64_to_cpu(*(__be64 *)&req->target_port_id[0]),
+	       be64_to_cpu(*(__be64 *)&req->target_port_id[8]),
+	       it_iu_len,
+	       param->port,
+	       be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[0]),
+	       be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[8]));
+
+	rsp = kzalloc(sizeof *rsp, GFP_KERNEL);
+	rej = kzalloc(sizeof *rej, GFP_KERNEL);
+	rep_param = kzalloc(sizeof *rep_param, GFP_KERNEL);
+
+	if (!rsp || !rej || !rep_param) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	if (it_iu_len > srp_max_req_size || it_iu_len < 64) {
+		rej->reason = __constant_cpu_to_be32(
+				SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
+		ret = -EINVAL;
+		printk(KERN_ERR "rejected SRP_LOGIN_REQ because its"
+		       " length (%d bytes) is out of range (%d .. %d)\n",
+		       it_iu_len, 64, srp_max_req_size);
+		goto reject;
+	}
+
+	if (!sport->enabled) {
+		rej->reason = __constant_cpu_to_be32(
+			     SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+		ret = -EINVAL;
+		printk(KERN_ERR "rejected SRP_LOGIN_REQ because the target port"
+		       " has not yet been enabled\n");
+		goto reject;
+	}
+
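+	/*
+	 * If the initiator requested a single channel (SRP_MULTICHAN_SINGLE),
+	 * close any existing channel that matches the initiator and target
+	 * port IDs and report this via SRP_LOGIN_RSP_MULTICHAN_TERMINATED.
+	 */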
+	if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
+		rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN;
+
+		spin_lock_irq(&sdev->spinlock);
+
+		list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list) {
+			if (!memcmp(ch->i_port_id, req->initiator_port_id, 16)
+			    && !memcmp(ch->t_port_id, req->target_port_id, 16)
+			    && param->port == ch->sport->port
+			    && param->listen_id == ch->sport->sdev->cm_id
+			    && ch->cm_id) {
+				enum rdma_ch_state ch_state;
+
+				ch_state = srpt_get_ch_state(ch);
+				if (ch_state != CH_CONNECTING
+				    && ch_state != CH_LIVE)
+					continue;
+
+				/* found an existing channel */
+				pr_debug("Found existing channel %s"
+					 " cm_id= %p state= %d\n",
+					 ch->sess_name, ch->cm_id, ch_state);
+
+				__srpt_close_ch(ch);
+
+				rsp->rsp_flags =
+					SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
+			}
+		}
+
+		spin_unlock_irq(&sdev->spinlock);
+
+	} else
+		rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
+
+	if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid)
+	    || *(__be64 *)(req->target_port_id + 8) !=
+	       cpu_to_be64(srpt_service_guid)) {
+		rej->reason = __constant_cpu_to_be32(
+				SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
+		ret = -ENOMEM;
+		printk(KERN_ERR "rejected SRP_LOGIN_REQ because it"
+		       " has an invalid target port identifier.\n");
+		goto reject;
+	}
+
+	ch = kzalloc(sizeof *ch, GFP_KERNEL);
+	if (!ch) {
+		rej->reason = __constant_cpu_to_be32(
+					SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+		printk(KERN_ERR "rejected SRP_LOGIN_REQ because no memory.\n");
+		ret = -ENOMEM;
+		goto reject;
+	}
+
+	INIT_WORK(&ch->release_work, srpt_release_channel_work);
+	memcpy(ch->i_port_id, req->initiator_port_id, 16);
+	memcpy(ch->t_port_id, req->target_port_id, 16);
+	ch->sport = &sdev->port[param->port - 1];
+	ch->cm_id = cm_id;
+	/*
+	 * Avoid QUEUE_FULL conditions by limiting the number of buffers used
+	 * for the SRP protocol to the command queue size.
+	 */
+	ch->rq_size = SRPT_RQ_SIZE;
+	spin_lock_init(&ch->spinlock);
+	ch->state = CH_CONNECTING;
+	INIT_LIST_HEAD(&ch->cmd_wait_list);
+	ch->rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
+
+	ch->ioctx_ring = (struct srpt_send_ioctx **)
+		srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
+				      sizeof(*ch->ioctx_ring[0]),
+				      ch->rsp_size, DMA_TO_DEVICE);
+	if (!ch->ioctx_ring)
+		goto free_ch;
+
+	INIT_LIST_HEAD(&ch->free_list);
+	for (i = 0; i < ch->rq_size; i++) {
+		ch->ioctx_ring[i]->ch = ch;
+		list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list);
+	}
+
+	ret = srpt_create_ch_ib(ch);
+	if (ret) {
+		rej->reason = __constant_cpu_to_be32(
+				SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+		printk(KERN_ERR "rejected SRP_LOGIN_REQ because creating"
+		       " a new RDMA channel failed.\n");
+		goto free_ring;
+	}
+
+	ret = srpt_ch_qp_rtr(ch, ch->qp);
+	if (ret) {
+		rej->reason = __constant_cpu_to_be32(
+				SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+		printk(KERN_ERR "rejected SRP_LOGIN_REQ because enabling"
+		       " RTR failed (error code = %d)\n", ret);
+		goto destroy_ib;
+	}
+	/*
+	 * Use the initiator port identifier as the session name.
+	 */
+	snprintf(ch->sess_name, sizeof(ch->sess_name), "0x%016llx%016llx",
+			be64_to_cpu(*(__be64 *)ch->i_port_id),
+			be64_to_cpu(*(__be64 *)(ch->i_port_id + 8)));
+
+	pr_debug("registering session %s\n", ch->sess_name);
+
+	nacl = srpt_lookup_acl(sport, ch->i_port_id);
+	if (!nacl) {
+		printk(KERN_INFO "Rejected login because no ACL has been"
+		       " configured yet for initiator %s.\n", ch->sess_name);
+		rej->reason = __constant_cpu_to_be32(
+				SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
+		goto destroy_ib;
+	}
+
+	ch->sess = transport_init_session();
+	if (!ch->sess) {
+		rej->reason = __constant_cpu_to_be32(
+				SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+		pr_debug("Failed to create session\n");
+		goto deregister_session;
+	}
+	ch->sess->se_node_acl = &nacl->nacl;
+	transport_register_session(&sport->port_tpg_1, &nacl->nacl, ch->sess, ch);
+
+	pr_debug("Establish connection sess=%p name=%s cm_id=%p\n", ch->sess,
+		 ch->sess_name, ch->cm_id);
+
+	/* create srp_login_response */
+	rsp->opcode = SRP_LOGIN_RSP;
+	rsp->tag = req->tag;
+	rsp->max_it_iu_len = req->req_it_iu_len;
+	rsp->max_ti_iu_len = req->req_it_iu_len;
+	ch->max_ti_iu_len = it_iu_len;
+	rsp->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT
+					      | SRP_BUF_FORMAT_INDIRECT);
+	rsp->req_lim_delta = cpu_to_be32(ch->rq_size);
+	atomic_set(&ch->req_lim, ch->rq_size);
+	atomic_set(&ch->req_lim_delta, 0);
+
+	/* create cm reply */
+	rep_param->qp_num = ch->qp->qp_num;
+	rep_param->private_data = (void *)rsp;
+	rep_param->private_data_len = sizeof *rsp;
+	rep_param->rnr_retry_count = 7;
+	rep_param->flow_control = 1;
+	rep_param->failover_accepted = 0;
+	rep_param->srq = 1;
+	rep_param->responder_resources = 4;
+	rep_param->initiator_depth = 4;
+
+	ret = ib_send_cm_rep(cm_id, rep_param);
+	if (ret) {
+		printk(KERN_ERR "sending SRP_LOGIN_REQ response failed"
+		       " (error code = %d)\n", ret);
+		goto release_channel;
+	}
+
+	spin_lock_irq(&sdev->spinlock);
+	list_add_tail(&ch->list, &sdev->rch_list);
+	spin_unlock_irq(&sdev->spinlock);
+
+	goto out;
+
+release_channel:
+	srpt_set_ch_state(ch, CH_RELEASING);
+	transport_deregister_session_configfs(ch->sess);
+
+deregister_session:
+	transport_deregister_session(ch->sess);
+	ch->sess = NULL;
+
+destroy_ib:
+	srpt_destroy_ch_ib(ch);
+
+free_ring:
+	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
+			     ch->sport->sdev, ch->rq_size,
+			     ch->rsp_size, DMA_TO_DEVICE);
+free_ch:
+	kfree(ch);
+
+reject:
+	rej->opcode = SRP_LOGIN_REJ;
+	rej->tag = req->tag;
+	rej->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT
+					      | SRP_BUF_FORMAT_INDIRECT);
+
+	ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
+			     (void *)rej, sizeof *rej);
+
+out:
+	kfree(rep_param);
+	kfree(rsp);
+	kfree(rej);
+
+	return ret;
+}
+
+static void srpt_cm_rej_recv(struct ib_cm_id *cm_id)
+{
+	printk(KERN_INFO "Received IB REJ for cm_id %p.\n", cm_id);
+	srpt_drain_channel(cm_id);
+}
+
+/**
+ * srpt_cm_rtu_recv() - Process an IB_CM_RTU_RECEIVED or USER_ESTABLISHED event.
+ *
+ * An IB_CM_RTU_RECEIVED message indicates that the connection is established
+ * and that the recipient may begin transmitting (RTU = ready to use).
+ */
+static void srpt_cm_rtu_recv(struct ib_cm_id *cm_id)
+{
+	struct srpt_rdma_ch *ch;
+	int ret;
+
+	ch = srpt_find_channel(cm_id->context, cm_id);
+	BUG_ON(!ch);
+
+	if (srpt_test_and_set_ch_state(ch, CH_CONNECTING, CH_LIVE)) {
+		struct srpt_recv_ioctx *ioctx, *ioctx_tmp;
+
+		ret = srpt_ch_qp_rts(ch, ch->qp);
+
+		list_for_each_entry_safe(ioctx, ioctx_tmp, &ch->cmd_wait_list,
+					 wait_list) {
+			list_del(&ioctx->wait_list);
+			srpt_handle_new_iu(ch, ioctx, NULL);
+		}
+		if (ret)
+			srpt_close_ch(ch);
+	}
+}
+
+static void srpt_cm_timewait_exit(struct ib_cm_id *cm_id)
+{
+	printk(KERN_INFO "Received IB TimeWait exit for cm_id %p.\n", cm_id);
+	srpt_drain_channel(cm_id);
+}
+
+static void srpt_cm_rep_error(struct ib_cm_id *cm_id)
+{
+	printk(KERN_INFO "Received IB REP error for cm_id %p.\n", cm_id);
+	srpt_drain_channel(cm_id);
+}
+
+/**
+ * srpt_cm_dreq_recv() - Process reception of a DREQ message.
+ */
+static void srpt_cm_dreq_recv(struct ib_cm_id *cm_id)
+{
+	struct srpt_rdma_ch *ch;
+	unsigned long flags;
+	bool send_drep = false;
+
+	ch = srpt_find_channel(cm_id->context, cm_id);
+	BUG_ON(!ch);
+
+	pr_debug("cm_id= %p ch->state= %d\n", cm_id, srpt_get_ch_state(ch));
+
+	spin_lock_irqsave(&ch->spinlock, flags);
+	switch (ch->state) {
+	case CH_CONNECTING:
+	case CH_LIVE:
+		send_drep = true;
+		ch->state = CH_DISCONNECTING;
+		break;
+	case CH_DISCONNECTING:
+	case CH_DRAINING:
+	case CH_RELEASING:
+		WARN(true, "unexpected channel state %d\n", ch->state);
+		break;
+	}
+	spin_unlock_irqrestore(&ch->spinlock, flags);
+
+	if (send_drep) {
+		if (ib_send_cm_drep(ch->cm_id, NULL, 0) < 0)
+			printk(KERN_ERR "Sending IB DREP failed.\n");
+		printk(KERN_INFO "Received DREQ and sent DREP for session %s.\n",
+		       ch->sess_name);
+	}
+}
+
+/**
+ * srpt_cm_drep_recv() - Process reception of a DREP message.
+ */
+static void srpt_cm_drep_recv(struct ib_cm_id *cm_id)
+{
+	printk(KERN_INFO "Received InfiniBand DREP message for cm_id %p.\n",
+	       cm_id);
+	srpt_drain_channel(cm_id);
+}
+
+/**
+ * srpt_cm_handler() - IB connection manager callback function.
+ *
+ * A non-zero return value will cause the caller to destroy the CM ID.
+ *
+ * Note: srpt_cm_handler() must only return a non-zero value when transferring
+ * ownership of the cm_id to a channel by srpt_cm_req_recv() failed. Returning
+ * a non-zero value in any other case will trigger a race with the
+ * ib_destroy_cm_id() call in srpt_release_channel().
+ */
+static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
+{
+	int ret;
+
+	ret = 0;
+	switch (event->event) {
+	case IB_CM_REQ_RECEIVED:
+		ret = srpt_cm_req_recv(cm_id, &event->param.req_rcvd,
+				       event->private_data);
+		break;
+	case IB_CM_REJ_RECEIVED:
+		srpt_cm_rej_recv(cm_id);
+		break;
+	case IB_CM_RTU_RECEIVED:
+	case IB_CM_USER_ESTABLISHED:
+		srpt_cm_rtu_recv(cm_id);
+		break;
+	case IB_CM_DREQ_RECEIVED:
+		srpt_cm_dreq_recv(cm_id);
+		break;
+	case IB_CM_DREP_RECEIVED:
+		srpt_cm_drep_recv(cm_id);
+		break;
+	case IB_CM_TIMEWAIT_EXIT:
+		srpt_cm_timewait_exit(cm_id);
+		break;
+	case IB_CM_REP_ERROR:
+		srpt_cm_rep_error(cm_id);
+		break;
+	case IB_CM_DREQ_ERROR:
+		printk(KERN_INFO "Received IB DREQ ERROR event.\n");
+		break;
+	case IB_CM_MRA_RECEIVED:
+		printk(KERN_INFO "Received IB MRA event\n");
+		break;
+	default:
+		printk(KERN_ERR "received unrecognized IB CM event %d\n",
+		       event->event);
+		break;
+	}
+
+	return ret;
+}
+
+/**
+ * srpt_perform_rdmas() - Perform IB RDMA.
+ *
+ * Returns zero upon success or a negative number upon failure.
+ */
+static int srpt_perform_rdmas(struct srpt_rdma_ch *ch,
+			      struct srpt_send_ioctx *ioctx)
+{
+	struct ib_send_wr wr;
+	struct ib_send_wr *bad_wr;
+	struct rdma_iu *riu;
+	int i;
+	int ret;
+	int sq_wr_avail;
+	enum dma_data_direction dir;
+	const int n_rdma = ioctx->n_rdma;
+
+	dir = ioctx->cmd.data_direction;
+	if (dir == DMA_TO_DEVICE) {
+		/* write */
+		ret = -ENOMEM;
+		sq_wr_avail = atomic_sub_return(n_rdma, &ch->sq_wr_avail);
+		if (sq_wr_avail < 0) {
+			printk(KERN_WARNING "IB send queue full (needed %d)\n",
+			       n_rdma);
+			goto out;
+		}
+	}
+
+	ioctx->rdma_aborted = false;
+	ret = 0;
+	riu = ioctx->rdma_ius;
+	memset(&wr, 0, sizeof wr);
+
+	for (i = 0; i < n_rdma; ++i, ++riu) {
+		if (dir == DMA_FROM_DEVICE) {
+			wr.opcode = IB_WR_RDMA_WRITE;
+			wr.wr_id = encode_wr_id(i == n_rdma - 1 ?
+						SRPT_RDMA_WRITE_LAST :
+						SRPT_RDMA_MID,
+						ioctx->ioctx.index);
+		} else {
+			wr.opcode = IB_WR_RDMA_READ;
+			wr.wr_id = encode_wr_id(i == n_rdma - 1 ?
+						SRPT_RDMA_READ_LAST :
+						SRPT_RDMA_MID,
+						ioctx->ioctx.index);
+		}
+		wr.next = NULL;
+		wr.wr.rdma.remote_addr = riu->raddr;
+		wr.wr.rdma.rkey = riu->rkey;
+		wr.num_sge = riu->sge_cnt;
+		wr.sg_list = riu->sge;
+
+		/* only get completion event for the last rdma write */
+		if (i == (n_rdma - 1) && dir == DMA_TO_DEVICE)
+			wr.send_flags = IB_SEND_SIGNALED;
+
+		ret = ib_post_send(ch->qp, &wr, &bad_wr);
+		if (ret)
+			break;
+	}
+
+	if (ret)
+		printk(KERN_ERR "%s[%d]: ib_post_send() returned %d for %d/%d",
+				 __func__, __LINE__, ret, i, n_rdma);
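+	/*
+	 * If posting failed after at least one work request had already been
+	 * posted, post a zero-length signaled abort work request and wait
+	 * until it has completed or the channel is being released.
+	 */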
+	if (ret && i > 0) {
+		wr.num_sge = 0;
+		wr.wr_id = encode_wr_id(SRPT_RDMA_ABORT, ioctx->ioctx.index);
+		wr.send_flags = IB_SEND_SIGNALED;
+		while (ch->state == CH_LIVE &&
+			ib_post_send(ch->qp, &wr, &bad_wr) != 0) {
+			printk(KERN_INFO "Trying to abort failed RDMA transfer [%d]",
+				ioctx->ioctx.index);
+			msleep(1000);
+		}
+		while (ch->state != CH_RELEASING && !ioctx->rdma_aborted) {
+			printk(KERN_INFO "Waiting until RDMA abort finished [%d]",
+				ioctx->ioctx.index);
+			msleep(1000);
+		}
+	}
+out:
+	if (unlikely(dir == DMA_TO_DEVICE && ret < 0))
+		atomic_add(n_rdma, &ch->sq_wr_avail);
+	return ret;
+}
+
+/**
+ * srpt_xfer_data() - Start data transfer from initiator to target.
+ */
+static int srpt_xfer_data(struct srpt_rdma_ch *ch,
+			  struct srpt_send_ioctx *ioctx)
+{
+	int ret;
+
+	ret = srpt_map_sg_to_ib_sge(ch, ioctx);
+	if (ret) {
+		printk(KERN_ERR "%s[%d] ret=%d\n", __func__, __LINE__, ret);
+		goto out;
+	}
+
+	ret = srpt_perform_rdmas(ch, ioctx);
+	if (ret) {
+		if (ret == -EAGAIN || ret == -ENOMEM)
+			printk(KERN_INFO "%s[%d] queue full -- ret=%d\n",
+				   __func__, __LINE__, ret);
+		else
+			printk(KERN_ERR "%s[%d] fatal error -- ret=%d\n",
+			       __func__, __LINE__, ret);
+		goto out_unmap;
+	}
+
+out:
+	return ret;
+out_unmap:
+	srpt_unmap_sg_to_ib_sge(ch, ioctx);
+	goto out;
+}
+
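+/* Report whether a previously queued command is still waiting for its data. */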
+static int srpt_write_pending_status(struct se_cmd *se_cmd)
+{
+	struct srpt_send_ioctx *ioctx;
+
+	ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
+	return srpt_get_cmd_state(ioctx) == SRPT_STATE_NEED_DATA;
+}
+
+/*
+ * srpt_write_pending() - Start data transfer from initiator to target (write).
+ */
+static int srpt_write_pending(struct se_cmd *se_cmd)
+{
+	struct srpt_rdma_ch *ch;
+	struct srpt_send_ioctx *ioctx;
+	enum srpt_command_state new_state;
+	enum rdma_ch_state ch_state;
+	int ret;
+
+	ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
+
+	new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA);
+	WARN_ON(new_state == SRPT_STATE_DONE);
+
+	ch = ioctx->ch;
+	BUG_ON(!ch);
+
+	ch_state = srpt_get_ch_state(ch);
+	switch (ch_state) {
+	case CH_CONNECTING:
+		WARN(true, "unexpected channel state %d\n", ch_state);
+		ret = -EINVAL;
+		goto out;
+	case CH_LIVE:
+		break;
+	case CH_DISCONNECTING:
+	case CH_DRAINING:
+	case CH_RELEASING:
+		pr_debug("cmd with tag %lld: channel disconnecting\n",
+			 ioctx->tag);
+		srpt_set_cmd_state(ioctx, SRPT_STATE_DATA_IN);
+		ret = -EINVAL;
+		goto out;
+	}
+	ret = srpt_xfer_data(ch, ioctx);
+
+out:
+	return ret;
+}
+
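+/*
+ * Translate a TCM task management status into the corresponding SRP task
+ * management response code.
+ */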
+static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status)
+{
+	switch (tcm_mgmt_status) {
+	case TMR_FUNCTION_COMPLETE:
+		return SRP_TSK_MGMT_SUCCESS;
+	case TMR_FUNCTION_REJECTED:
+		return SRP_TSK_MGMT_FUNC_NOT_SUPP;
+	}
+	return SRP_TSK_MGMT_FAILED;
+}
+
+/**
+ * srpt_queue_response() - Transmits the response to a SCSI command.
+ *
+ * Callback function called by the TCM core. Must not block since it can be
+ * invoked on the context of the IB completion handler.
+ */
+static int srpt_queue_response(struct se_cmd *cmd)
+{
+	struct srpt_rdma_ch *ch;
+	struct srpt_send_ioctx *ioctx;
+	enum srpt_command_state state;
+	unsigned long flags;
+	int ret;
+	enum dma_data_direction dir;
+	int resp_len;
+	u8 srp_tm_status;
+
+	ret = 0;
+
+	ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
+	ch = ioctx->ch;
+	BUG_ON(!ch);
+
+	spin_lock_irqsave(&ioctx->spinlock, flags);
+	state = ioctx->state;
+	switch (state) {
+	case SRPT_STATE_NEW:
+	case SRPT_STATE_DATA_IN:
+		ioctx->state = SRPT_STATE_CMD_RSP_SENT;
+		break;
+	case SRPT_STATE_MGMT:
+		ioctx->state = SRPT_STATE_MGMT_RSP_SENT;
+		break;
+	default:
+		WARN(true, "ch %p; cmd %d: unexpected command state %d\n",
+			ch, ioctx->ioctx.index, ioctx->state);
+		break;
+	}
+	spin_unlock_irqrestore(&ioctx->spinlock, flags);
+
+	if (unlikely(transport_check_aborted_status(&ioctx->cmd, false)
+		     || WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))) {
+		atomic_inc(&ch->req_lim_delta);
+		srpt_abort_cmd(ioctx);
+		goto out;
+	}
+
+	dir = ioctx->cmd.data_direction;
+
+	/* For read commands, transfer the data to the initiator. */
+	if (dir == DMA_FROM_DEVICE && ioctx->cmd.data_length &&
+	    !ioctx->queue_status_only) {
+		ret = srpt_xfer_data(ch, ioctx);
+		if (ret) {
+			printk(KERN_ERR "xfer_data failed for tag %llu\n",
+			       ioctx->tag);
+			goto out;
+		}
+	}
+
+	if (state != SRPT_STATE_MGMT)
+		resp_len = srpt_build_cmd_rsp(ch, ioctx, ioctx->tag,
+					      cmd->scsi_status);
+	else {
+		srp_tm_status
+			= tcm_to_srp_tsk_mgmt_status(cmd->se_tmr_req->response);
+		resp_len = srpt_build_tskmgmt_rsp(ch, ioctx, srp_tm_status,
+						 ioctx->tag);
+	}
+	ret = srpt_post_send(ch, ioctx, resp_len);
+	if (ret) {
+		printk(KERN_ERR "sending cmd response failed for tag %llu\n",
+		       ioctx->tag);
+		srpt_unmap_sg_to_ib_sge(ch, ioctx);
+		srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
+		kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+	}
+
+out:
+	return ret;
+}
+
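+/*
+ * Queue a SCSI status response without transferring data: set the
+ * queue_status_only flag and reuse the srpt_queue_response() path.
+ */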
+static int srpt_queue_status(struct se_cmd *cmd)
+{
+	struct srpt_send_ioctx *ioctx;
+
+	ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
+	BUG_ON(ioctx->sense_data != cmd->sense_buffer);
+	if (cmd->se_cmd_flags &
+	    (SCF_TRANSPORT_TASK_SENSE | SCF_EMULATED_TASK_SENSE))
+		WARN_ON(cmd->scsi_status != SAM_STAT_CHECK_CONDITION);
+	ioctx->queue_status_only = true;
+	return srpt_queue_response(cmd);
+}
+
+static void srpt_refresh_port_work(struct work_struct *work)
+{
+	struct srpt_port *sport = container_of(work, struct srpt_port, work);
+
+	srpt_refresh_port(sport);
+}
+
+static int srpt_ch_list_empty(struct srpt_device *sdev)
+{
+	int res;
+
+	spin_lock_irq(&sdev->spinlock);
+	res = list_empty(&sdev->rch_list);
+	spin_unlock_irq(&sdev->spinlock);
+
+	return res;
+}
+
+/**
+ * srpt_release_sdev() - Free the channel resources associated with a target.
+ */
+static int srpt_release_sdev(struct srpt_device *sdev)
+{
+	struct srpt_rdma_ch *ch, *tmp_ch;
+	int res;
+
+	WARN_ON_ONCE(irqs_disabled());
+
+	BUG_ON(!sdev);
+
+	spin_lock_irq(&sdev->spinlock);
+	list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list)
+		__srpt_close_ch(ch);
+	spin_unlock_irq(&sdev->spinlock);
+
+	res = wait_event_interruptible(sdev->ch_releaseQ,
+				       srpt_ch_list_empty(sdev));
+	if (res)
+		printk(KERN_ERR "%s: interrupted.\n", __func__);
+
+	return 0;
+}
+
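+/* Look up an SRPT port by port GUID; the caller must hold srpt_dev_lock. */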
+static struct srpt_port *__srpt_lookup_port(const char *name)
+{
+	struct ib_device *dev;
+	struct srpt_device *sdev;
+	struct srpt_port *sport;
+	int i;
+
+	list_for_each_entry(sdev, &srpt_dev_list, list) {
+		dev = sdev->device;
+		if (!dev)
+			continue;
+
+		for (i = 0; i < dev->phys_port_cnt; i++) {
+			sport = &sdev->port[i];
+
+			if (!strcmp(sport->port_guid, name))
+				return sport;
+		}
+	}
+
+	return NULL;
+}
+
+static struct srpt_port *srpt_lookup_port(const char *name)
+{
+	struct srpt_port *sport;
+
+	spin_lock(&srpt_dev_lock);
+	sport = __srpt_lookup_port(name);
+	spin_unlock(&srpt_dev_lock);
+
+	return sport;
+}
+
+/**
+ * srpt_add_one() - Infiniband device addition callback function.
+ */
+static void srpt_add_one(struct ib_device *device)
+{
+	struct srpt_device *sdev;
+	struct srpt_port *sport;
+	struct ib_srq_init_attr srq_attr;
+	int i;
+
+	pr_debug("device = %p, device->dma_ops = %p\n", device,
+		 device->dma_ops);
+
+	sdev = kzalloc(sizeof *sdev, GFP_KERNEL);
+	if (!sdev)
+		goto err;
+
+	sdev->device = device;
+	INIT_LIST_HEAD(&sdev->rch_list);
+	init_waitqueue_head(&sdev->ch_releaseQ);
+	spin_lock_init(&sdev->spinlock);
+
+	if (ib_query_device(device, &sdev->dev_attr))
+		goto free_dev;
+
+	sdev->pd = ib_alloc_pd(device);
+	if (IS_ERR(sdev->pd))
+		goto free_dev;
+
+	sdev->mr = ib_get_dma_mr(sdev->pd, IB_ACCESS_LOCAL_WRITE);
+	if (IS_ERR(sdev->mr))
+		goto err_pd;
+
+	sdev->srq_size = min(srpt_srq_size, sdev->dev_attr.max_srq_wr);
+
+	srq_attr.event_handler = srpt_srq_event;
+	srq_attr.srq_context = (void *)sdev;
+	srq_attr.attr.max_wr = sdev->srq_size;
+	srq_attr.attr.max_sge = 1;
+	srq_attr.attr.srq_limit = 0;
+
+	sdev->srq = ib_create_srq(sdev->pd, &srq_attr);
+	if (IS_ERR(sdev->srq))
+		goto err_mr;
+
+	pr_debug("%s: create SRQ #wr= %d max_allow=%d dev= %s\n",
+		 __func__, sdev->srq_size, sdev->dev_attr.max_srq_wr,
+		 device->name);
+
+	if (!srpt_service_guid)
+		srpt_service_guid = be64_to_cpu(device->node_guid);
+
+	sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev);
+	if (IS_ERR(sdev->cm_id))
+		goto err_srq;
+
+	/* print out target login information */
+	pr_debug("Target login info: id_ext=%016llx,ioc_guid=%016llx,"
+		 "pkey=ffff,service_id=%016llx\n", srpt_service_guid,
+		 srpt_service_guid, srpt_service_guid);
+
+	/*
+	 * We do not have a consistent service_id (i.e. also id_ext of
+	 * target_id) to identify this target. We currently use the GUID of
+	 * the first HCA in the system as service_id; therefore, the target_id
+	 * will change if this HCA fails and is replaced by a different HCA.
+	 */
+	if (ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid), 0, NULL))
+		goto err_cm;
+
+	INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
+			      srpt_event_handler);
+	if (ib_register_event_handler(&sdev->event_handler))
+		goto err_cm;
+
+	sdev->ioctx_ring = (struct srpt_recv_ioctx **)
+		srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
+				      sizeof(*sdev->ioctx_ring[0]),
+				      srp_max_req_size, DMA_FROM_DEVICE);
+	if (!sdev->ioctx_ring)
+		goto err_event;
+
+	for (i = 0; i < sdev->srq_size; ++i)
+		srpt_post_recv(sdev, sdev->ioctx_ring[i]);
+
+	WARN_ON(sdev->device->phys_port_cnt
+		> sizeof(sdev->port)/sizeof(sdev->port[0]));
+
+	for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
+		sport = &sdev->port[i - 1];
+		sport->sdev = sdev;
+		sport->port = i;
+		sport->port_attrib.srp_max_rdma_size = DEFAULT_MAX_RDMA_SIZE;
+		sport->port_attrib.srp_max_rsp_size = DEFAULT_MAX_RSP_SIZE;
+		sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE;
+		INIT_WORK(&sport->work, srpt_refresh_port_work);
+		INIT_LIST_HEAD(&sport->port_acl_list);
+		spin_lock_init(&sport->port_acl_lock);
+
+		if (srpt_refresh_port(sport)) {
+			printk(KERN_ERR "MAD registration failed for %s-%d.\n",
+			       srpt_sdev_name(sdev), i);
+			goto err_ring;
+		}
+		snprintf(sport->port_guid, sizeof(sport->port_guid),
+			"0x%016llx%016llx",
+			be64_to_cpu(sport->gid.global.subnet_prefix),
+			be64_to_cpu(sport->gid.global.interface_id));
+	}
+
+	spin_lock(&srpt_dev_lock);
+	list_add_tail(&sdev->list, &srpt_dev_list);
+	spin_unlock(&srpt_dev_lock);
+
+out:
+	ib_set_client_data(device, &srpt_client, sdev);
+	pr_debug("added %s.\n", device->name);
+	return;
+
+err_ring:
+	srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
+			     sdev->srq_size, srp_max_req_size,
+			     DMA_FROM_DEVICE);
+err_event:
+	ib_unregister_event_handler(&sdev->event_handler);
+err_cm:
+	ib_destroy_cm_id(sdev->cm_id);
+err_srq:
+	ib_destroy_srq(sdev->srq);
+err_mr:
+	ib_dereg_mr(sdev->mr);
+err_pd:
+	ib_dealloc_pd(sdev->pd);
+free_dev:
+	kfree(sdev);
+err:
+	sdev = NULL;
+	printk(KERN_INFO "%s(%s) failed.\n", __func__, device->name);
+	goto out;
+}
+
+/**
+ * srpt_remove_one() - InfiniBand device removal callback function.
+ */
+static void srpt_remove_one(struct ib_device *device)
+{
+	struct srpt_device *sdev;
+	int i;
+
+	sdev = ib_get_client_data(device, &srpt_client);
+	if (!sdev) {
+		printk(KERN_INFO "%s(%s): nothing to do.\n", __func__,
+		       device->name);
+		return;
+	}
+
+	srpt_unregister_mad_agent(sdev);
+
+	ib_unregister_event_handler(&sdev->event_handler);
+
+	/* Cancel any work queued by the just unregistered IB event handler. */
+	for (i = 0; i < sdev->device->phys_port_cnt; i++)
+		cancel_work_sync(&sdev->port[i].work);
+
+	ib_destroy_cm_id(sdev->cm_id);
+
+	/*
+	 * Unregistering a target must happen after destroying sdev->cm_id
+	 * such that no new SRP_LOGIN_REQ information units can arrive while
+	 * destroying the target.
+	 */
+	spin_lock(&srpt_dev_lock);
+	list_del(&sdev->list);
+	spin_unlock(&srpt_dev_lock);
+	srpt_release_sdev(sdev);
+
+	ib_destroy_srq(sdev->srq);
+	ib_dereg_mr(sdev->mr);
+	ib_dealloc_pd(sdev->pd);
+
+	srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
+			     sdev->srq_size, srp_max_req_size, DMA_FROM_DEVICE);
+	sdev->ioctx_ring = NULL;
+	kfree(sdev);
+}
+
+static struct ib_client srpt_client = {
+	.name = DRV_NAME,
+	.add = srpt_add_one,
+	.remove = srpt_remove_one
+};
+
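+/* Trivial predicates used to fill in the TCM tpg_check_*() callbacks. */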
+static int srpt_check_true(struct se_portal_group *se_tpg)
+{
+	return 1;
+}
+
+static int srpt_check_false(struct se_portal_group *se_tpg)
+{
+	return 0;
+}
+
+static char *srpt_get_fabric_name(void)
+{
+	return "srpt";
+}
+
+static u8 srpt_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+{
+	return SCSI_TRANSPORTID_PROTOCOLID_SRP;
+}
+
+static char *srpt_get_fabric_wwn(struct se_portal_group *tpg)
+{
+	struct srpt_port *sport = container_of(tpg, struct srpt_port, port_tpg_1);
+
+	return sport->port_guid;
+}
+
+static u16 srpt_get_tag(struct se_portal_group *tpg)
+{
+	return 1;
+}
+
+static u32 srpt_get_default_depth(struct se_portal_group *se_tpg)
+{
+	return 1;
+}
+
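+/*
+ * Build an SPC RDMA TransportID (protocol identifier SRP plus the 128-bit
+ * initiator port ID) in the buffer supplied by the TCM core.
+ */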
+static u32 srpt_get_pr_transport_id(struct se_portal_group *se_tpg,
+				    struct se_node_acl *se_nacl,
+				    struct t10_pr_registration *pr_reg,
+				    int *format_code, unsigned char *buf)
+{
+	struct srpt_node_acl *nacl;
+	struct spc_rdma_transport_id *tr_id;
+
+	nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
+	tr_id = (void *)buf;
+	tr_id->protocol_identifier = SCSI_TRANSPORTID_PROTOCOLID_SRP;
+	memcpy(tr_id->i_port_id, nacl->i_port_id, sizeof(tr_id->i_port_id));
+	return sizeof(*tr_id);
+}
+
+static u32 srpt_get_pr_transport_id_len(struct se_portal_group *se_tpg,
+					struct se_node_acl *se_nacl,
+					struct t10_pr_registration *pr_reg,
+					int *format_code)
+{
+	*format_code = 0;
+	return sizeof(struct spc_rdma_transport_id);
+}
+
+static char *srpt_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
+					    const char *buf, u32 *out_tid_len,
+					    char **port_nexus_ptr)
+{
+	struct spc_rdma_transport_id *tr_id;
+
+	*port_nexus_ptr = NULL;
+	*out_tid_len = sizeof(struct spc_rdma_transport_id);
+	tr_id = (void *)buf;
+	return (char *)tr_id->i_port_id;
+}
+
+static struct se_node_acl *srpt_alloc_fabric_acl(struct se_portal_group *se_tpg)
+{
+	struct srpt_node_acl *nacl;
+
+	nacl = kzalloc(sizeof(struct srpt_node_acl), GFP_KERNEL);
+	if (!nacl) {
+		printk(KERN_ERR "Unable to allocate struct srpt_node_acl\n");
+		return NULL;
+	}
+
+	return &nacl->nacl;
+}
+
+static void srpt_release_fabric_acl(struct se_portal_group *se_tpg,
+				    struct se_node_acl *se_nacl)
+{
+	struct srpt_node_acl *nacl;
+
+	nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
+	kfree(nacl);
+}
+
+static u32 srpt_tpg_get_inst_index(struct se_portal_group *se_tpg)
+{
+	return 1;
+}
+
+static void srpt_release_cmd(struct se_cmd *se_cmd)
+{
+}
+
+/**
+ * srpt_shutdown_session() - Whether or not a session may be shut down.
+ */
+static int srpt_shutdown_session(struct se_session *se_sess)
+{
+	return true;
+}
+
+/**
+ * srpt_close_session() - Forcibly close a session.
+ *
+ * Callback function invoked by the TCM core to clean up sessions associated
+ * with a node ACL when the user invokes
+ * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
+ */
+static void srpt_close_session(struct se_session *se_sess)
+{
+	DECLARE_COMPLETION_ONSTACK(release_done);
+	struct srpt_rdma_ch *ch;
+	struct srpt_device *sdev;
+	int res;
+
+	ch = se_sess->fabric_sess_ptr;
+	WARN_ON(ch->sess != se_sess);
+
+	pr_debug("ch %p state %d\n", ch, srpt_get_ch_state(ch));
+
+	sdev = ch->sport->sdev;
+	spin_lock_irq(&sdev->spinlock);
+	BUG_ON(ch->release_done);
+	ch->release_done = &release_done;
+	__srpt_close_ch(ch);
+	spin_unlock_irq(&sdev->spinlock);
+
+	res = wait_for_completion_timeout(&release_done, 60 * HZ);
+	WARN_ON(res <= 0);
+}
+
+/**
+ * To do: Find out whether stop_session() has a meaning for transports
+ * other than iSCSI.
+ */
+static void srpt_stop_session(struct se_session *se_sess, int sess_sleep,
+			      int conn_sleep)
+{
+}
+
+static void srpt_reset_nexus(struct se_session *sess)
+{
+	printk(KERN_ERR "This is the SRP protocol, not iSCSI\n");
+}
+
+static int srpt_sess_logged_in(struct se_session *se_sess)
+{
+	return true;
+}
+
+/**
+ * srpt_sess_get_index() - Return the value of scsiAttIntrPortIndex (SCSI-MIB).
+ *
+ * A quote from RFC 4455 (SCSI-MIB) about this MIB object:
+ * This object represents an arbitrary integer used to uniquely identify a
+ * particular attached remote initiator port to a particular SCSI target port
+ * within a particular SCSI target device within a particular SCSI instance.
+ */
+static u32 srpt_sess_get_index(struct se_session *se_sess)
+{
+	return 0;
+}
+
+static void srpt_set_default_node_attrs(struct se_node_acl *nacl)
+{
+}
+
+static u32 srpt_get_task_tag(struct se_cmd *se_cmd)
+{
+	struct srpt_send_ioctx *ioctx;
+
+	ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
+	return ioctx->tag;
+}
+
+/* Note: only used from inside debug printk's by the TCM core. */
+static int srpt_get_tcm_cmd_state(struct se_cmd *se_cmd)
+{
+	struct srpt_send_ioctx *ioctx;
+
+	ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
+	return srpt_get_cmd_state(ioctx);
+}
+
+static u16 srpt_set_fabric_sense_len(struct se_cmd *cmd, u32 sense_length)
+{
+	return 0;
+}
+
+static u16 srpt_get_fabric_sense_len(void)
+{
+	return 0;
+}
+
+static int srpt_is_state_remove(struct se_cmd *se_cmd)
+{
+	return 0;
+}
+
+/**
+ * srpt_parse_i_port_id() - Parse an initiator port ID.
+ * @name: ASCII representation of a 128-bit initiator port ID.
+ * @i_port_id: Binary 128-bit port ID.
+ */
+static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
+{
+	const char *p;
+	unsigned len, count, leading_zero_bytes;
+	int ret, rc;
+
+	p = name;
+	if (strnicmp(p, "0x", 2) == 0)
+		p += 2;
+	ret = -EINVAL;
+	len = strlen(p);
+	if (len % 2)
+		goto out;
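+	/*
+	 * Zero-pad port IDs shorter than 16 bytes on the left so that the
+	 * result is always exactly 16 bytes long.
+	 */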
+	count = min(len / 2, 16U);
+	leading_zero_bytes = 16 - count;
+	memset(i_port_id, 0, leading_zero_bytes);
+	rc = hex2bin(i_port_id + leading_zero_bytes, p, count);
+	if (rc < 0)
+		pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", rc);
+	ret = 0;
+out:
+	return ret;
+}
+
+/*
+ * configfs callback function invoked for
+ * mkdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
+ */
+static struct se_node_acl *srpt_make_nodeacl(struct se_portal_group *tpg,
+					     struct config_group *group,
+					     const char *name)
+{
+	struct srpt_port *sport = container_of(tpg, struct srpt_port, port_tpg_1);
+	struct se_node_acl *se_nacl, *se_nacl_new;
+	struct srpt_node_acl *nacl;
+	int ret = 0;
+	u32 nexus_depth = 1;
+	u8 i_port_id[16];
+
+	if (srpt_parse_i_port_id(i_port_id, name) < 0) {
+		printk(KERN_ERR "invalid initiator port ID %s\n", name);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	se_nacl_new = srpt_alloc_fabric_acl(tpg);
+	if (!se_nacl_new) {
+		ret = -ENOMEM;
+		goto err;
+	}
+	/*
+	 * nacl_new may be released by core_tpg_add_initiator_node_acl()
+	 * when converting a node ACL from demo mode to explicit.
+	 */
+	se_nacl = core_tpg_add_initiator_node_acl(tpg, se_nacl_new, name,
+						  nexus_depth);
+	if (IS_ERR(se_nacl)) {
+		ret = PTR_ERR(se_nacl);
+		goto err;
+	}
+	/* Locate our struct srpt_node_acl and set sdev and i_port_id. */
+	nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
+	memcpy(&nacl->i_port_id[0], &i_port_id[0], 16);
+	nacl->sport = sport;
+
+	spin_lock_irq(&sport->port_acl_lock);
+	list_add_tail(&nacl->list, &sport->port_acl_list);
+	spin_unlock_irq(&sport->port_acl_lock);
+
+	return se_nacl;
+err:
+	return ERR_PTR(ret);
+}
+
+/*
+ * configfs callback function invoked for
+ * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
+ */
+static void srpt_drop_nodeacl(struct se_node_acl *se_nacl)
+{
+	struct srpt_node_acl *nacl;
+	struct srpt_device *sdev;
+	struct srpt_port *sport;
+
+	nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
+	sport = nacl->sport;
+	sdev = sport->sdev;
+	spin_lock_irq(&sport->port_acl_lock);
+	list_del(&nacl->list);
+	spin_unlock_irq(&sport->port_acl_lock);
+	core_tpg_del_initiator_node_acl(&sport->port_tpg_1, se_nacl, 1);
+	srpt_release_fabric_acl(NULL, se_nacl);
+}
+
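+/*
+ * The three configfs attributes below (srp_max_rdma_size, srp_max_rsp_size
+ * and srp_sq_size) are validated against their lower and upper bounds before
+ * being stored in sport->port_attrib.
+ */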
+static ssize_t srpt_tpg_attrib_show_srp_max_rdma_size(
+	struct se_portal_group *se_tpg,
+	char *page)
+{
+	struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
+
+	return sprintf(page, "%u\n", sport->port_attrib.srp_max_rdma_size);
+}
+
+static ssize_t srpt_tpg_attrib_store_srp_max_rdma_size(
+	struct se_portal_group *se_tpg,
+	const char *page,
+	size_t count)
+{
+	struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
+	unsigned long val;
+	int ret;
+
+	ret = strict_strtoul(page, 0, &val);
+	if (ret < 0) {
+		pr_err("strict_strtoul() failed with ret: %d\n", ret);
+		return -EINVAL;
+	}
+	if (val > MAX_SRPT_RDMA_SIZE) {
+		pr_err("val: %lu exceeds MAX_SRPT_RDMA_SIZE: %d\n", val,
+			MAX_SRPT_RDMA_SIZE);
+		return -EINVAL;
+	}
+	if (val < DEFAULT_MAX_RDMA_SIZE) {
+		pr_err("val: %lu smaller than DEFAULT_MAX_RDMA_SIZE: %d\n",
+			val, DEFAULT_MAX_RDMA_SIZE);
+		return -EINVAL;
+	}
+	sport->port_attrib.srp_max_rdma_size = val;
+
+	return count;
+}
+
+TF_TPG_ATTRIB_ATTR(srpt, srp_max_rdma_size, S_IRUGO | S_IWUSR);
+
+static ssize_t srpt_tpg_attrib_show_srp_max_rsp_size(
+	struct se_portal_group *se_tpg,
+	char *page)
+{
+	struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
+
+	return sprintf(page, "%u\n", sport->port_attrib.srp_max_rsp_size);
+}
+
+static ssize_t srpt_tpg_attrib_store_srp_max_rsp_size(
+	struct se_portal_group *se_tpg,
+	const char *page,
+	size_t count)
+{
+	struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
+	unsigned long val;
+	int ret;
+
+	ret = strict_strtoul(page, 0, &val);
+	if (ret < 0) {
+		pr_err("strict_strtoul() failed with ret: %d\n", ret);
+		return -EINVAL;
+	}
+	if (val > MAX_SRPT_RSP_SIZE) {
+		pr_err("val: %lu exceeds MAX_SRPT_RSP_SIZE: %d\n", val,
+			MAX_SRPT_RSP_SIZE);
+		return -EINVAL;
+	}
+	if (val < MIN_MAX_RSP_SIZE) {
+		pr_err("val: %lu smaller than MIN_MAX_RSP_SIZE: %d\n", val,
+			MIN_MAX_RSP_SIZE);
+		return -EINVAL;
+	}
+	sport->port_attrib.srp_max_rsp_size = val;
+
+	return count;
+}
+
+TF_TPG_ATTRIB_ATTR(srpt, srp_max_rsp_size, S_IRUGO | S_IWUSR);
+
+static ssize_t srpt_tpg_attrib_show_srp_sq_size(
+	struct se_portal_group *se_tpg,
+	char *page)
+{
+	struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
+
+	return sprintf(page, "%u\n", sport->port_attrib.srp_sq_size);
+}
+
+static ssize_t srpt_tpg_attrib_store_srp_sq_size(
+	struct se_portal_group *se_tpg,
+	const char *page,
+	size_t count)
+{
+	struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
+	unsigned long val;
+	int ret;
+
+	ret = strict_strtoul(page, 0, &val);
+	if (ret < 0) {
+		pr_err("strict_strtoul() failed with ret: %d\n", ret);
+		return -EINVAL;
+	}
+	if (val > MAX_SRPT_SRQ_SIZE) {
+		pr_err("val: %lu exceeds MAX_SRPT_SRQ_SIZE: %d\n", val,
+			MAX_SRPT_SRQ_SIZE);
+		return -EINVAL;
+	}
+	if (val < MIN_SRPT_SRQ_SIZE) {
+		pr_err("val: %lu smaller than MIN_SRPT_SRQ_SIZE: %d\n", val,
+			MIN_SRPT_SRQ_SIZE);
+		return -EINVAL;
+	}
+	sport->port_attrib.srp_sq_size = val;
+
+	return count;
+}
+
+TF_TPG_ATTRIB_ATTR(srpt, srp_sq_size, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *srpt_tpg_attrib_attrs[] = {
+	&srpt_tpg_attrib_srp_max_rdma_size.attr,
+	&srpt_tpg_attrib_srp_max_rsp_size.attr,
+	&srpt_tpg_attrib_srp_sq_size.attr,
+	NULL,
+};
+
+static ssize_t srpt_tpg_show_enable(
+	struct se_portal_group *se_tpg,
+	char *page)
+{
+	struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
+
+	return snprintf(page, PAGE_SIZE, "%d\n", (sport->enabled) ? 1: 0);
+}
+
+static ssize_t srpt_tpg_store_enable(
+	struct se_portal_group *se_tpg,
+	const char *page,
+	size_t count)
+{
+	struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
+	unsigned long tmp;
+	int ret;
+
+	ret = strict_strtoul(page, 0, &tmp);
+	if (ret < 0) {
+		printk(KERN_ERR "Unable to extract srpt_tpg_store_enable\n");
+		return -EINVAL;
+	}
+
+	if ((tmp != 0) && (tmp != 1)) {
+		printk(KERN_ERR "Illegal value for srpt_tpg_store_enable: %lu\n", tmp);
+		return -EINVAL;
+	}
+	if (tmp == 1)
+		sport->enabled = true;
+	else
+		sport->enabled = false;
+
+	return count;
+}
+
+TF_TPG_BASE_ATTR(srpt, enable, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *srpt_tpg_attrs[] = {
+	&srpt_tpg_enable.attr,
+	NULL,
+};
+
+/**
+ * configfs callback invoked for
+ * mkdir /sys/kernel/config/target/$driver/$port/$tpg
+ */
+static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
+					     struct config_group *group,
+					     const char *name)
+{
+	struct srpt_port *sport = container_of(wwn, struct srpt_port, port_wwn);
+	int res;
+
+	/* Initialize sport->port_wwn and sport->port_tpg_1 */
+	res = core_tpg_register(&srpt_target->tf_ops, &sport->port_wwn,
+			&sport->port_tpg_1, sport, TRANSPORT_TPG_TYPE_NORMAL);
+	if (res)
+		return ERR_PTR(res);
+
+	return &sport->port_tpg_1;
+}
+
+/**
+ * configfs callback invoked for
+ * rmdir /sys/kernel/config/target/$driver/$port/$tpg
+ */
+static void srpt_drop_tpg(struct se_portal_group *tpg)
+{
+	struct srpt_port *sport = container_of(tpg,
+				struct srpt_port, port_tpg_1);
+
+	sport->enabled = false;
+	core_tpg_deregister(&sport->port_tpg_1);
+}
+
+/**
+ * configfs callback invoked for
+ * mkdir /sys/kernel/config/target/$driver/$port
+ */
+static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf,
+				      struct config_group *group,
+				      const char *name)
+{
+	struct srpt_port *sport;
+	int ret;
+
+	sport = srpt_lookup_port(name);
+	pr_debug("make_tport(%s)\n", name);
+	ret = -EINVAL;
+	if (!sport)
+		goto err;
+
+	return &sport->port_wwn;
+
+err:
+	return ERR_PTR(ret);
+}
+
+/**
+ * configfs callback invoked for
+ * rmdir /sys/kernel/config/target/$driver/$port
+ */
+static void srpt_drop_tport(struct se_wwn *wwn)
+{
+	struct srpt_port *sport = container_of(wwn, struct srpt_port, port_wwn);
+
+	pr_debug("drop_tport(%s)\n", config_item_name(&sport->port_wwn.wwn_group.cg_item));
+}
+
+static ssize_t srpt_wwn_show_attr_version(struct target_fabric_configfs *tf,
+					      char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION);
+}
+
+TF_WWN_ATTR_RO(srpt, version);
+
+static struct configfs_attribute *srpt_wwn_attrs[] = {
+	&srpt_wwn_version.attr,
+	NULL,
+};
+
+static struct target_core_fabric_ops srpt_template = {
+	.get_fabric_name		= srpt_get_fabric_name,
+	.get_fabric_proto_ident		= srpt_get_fabric_proto_ident,
+	.tpg_get_wwn			= srpt_get_fabric_wwn,
+	.tpg_get_tag			= srpt_get_tag,
+	.tpg_get_default_depth		= srpt_get_default_depth,
+	.tpg_get_pr_transport_id	= srpt_get_pr_transport_id,
+	.tpg_get_pr_transport_id_len	= srpt_get_pr_transport_id_len,
+	.tpg_parse_pr_out_transport_id	= srpt_parse_pr_out_transport_id,
+	.tpg_check_demo_mode		= srpt_check_false,
+	.tpg_check_demo_mode_cache	= srpt_check_true,
+	.tpg_check_demo_mode_write_protect = srpt_check_true,
+	.tpg_check_prod_mode_write_protect = srpt_check_false,
+	.tpg_alloc_fabric_acl		= srpt_alloc_fabric_acl,
+	.tpg_release_fabric_acl		= srpt_release_fabric_acl,
+	.tpg_get_inst_index		= srpt_tpg_get_inst_index,
+	.release_cmd			= srpt_release_cmd,
+	.check_stop_free		= srpt_check_stop_free,
+	.shutdown_session		= srpt_shutdown_session,
+	.close_session			= srpt_close_session,
+	.stop_session			= srpt_stop_session,
+	.fall_back_to_erl0		= srpt_reset_nexus,
+	.sess_logged_in			= srpt_sess_logged_in,
+	.sess_get_index			= srpt_sess_get_index,
+	.sess_get_initiator_sid		= NULL,
+	.write_pending			= srpt_write_pending,
+	.write_pending_status		= srpt_write_pending_status,
+	.set_default_node_attributes	= srpt_set_default_node_attrs,
+	.get_task_tag			= srpt_get_task_tag,
+	.get_cmd_state			= srpt_get_tcm_cmd_state,
+	.queue_data_in			= srpt_queue_response,
+	.queue_status			= srpt_queue_status,
+	.queue_tm_rsp			= srpt_queue_response,
+	.get_fabric_sense_len		= srpt_get_fabric_sense_len,
+	.set_fabric_sense_len		= srpt_set_fabric_sense_len,
+	.is_state_remove		= srpt_is_state_remove,
+	/*
+	 * Setup function pointers for generic logic in
+	 * target_core_fabric_configfs.c
+	 */
+	.fabric_make_wwn		= srpt_make_tport,
+	.fabric_drop_wwn		= srpt_drop_tport,
+	.fabric_make_tpg		= srpt_make_tpg,
+	.fabric_drop_tpg		= srpt_drop_tpg,
+	.fabric_post_link		= NULL,
+	.fabric_pre_unlink		= NULL,
+	.fabric_make_np			= NULL,
+	.fabric_drop_np			= NULL,
+	.fabric_make_nodeacl		= srpt_make_nodeacl,
+	.fabric_drop_nodeacl		= srpt_drop_nodeacl,
+};
+
+/**
+ * srpt_init_module() - Kernel module initialization.
+ *
+ * Note: Since ib_register_client() registers callback functions, and since at
+ * least one of these callback functions (srpt_add_one()) calls target core
+ * functions, this driver must be registered with the target core before
+ * ib_register_client() is called.
+ */
+static int __init srpt_init_module(void)
+{
+	int ret;
+
+	ret = -EINVAL;
+	if (srp_max_req_size < MIN_MAX_REQ_SIZE) {
+		printk(KERN_ERR "invalid value %d for kernel module parameter"
+		       " srp_max_req_size -- must be at least %d.\n",
+		       srp_max_req_size, MIN_MAX_REQ_SIZE);
+		goto out;
+	}
+
+	if (srpt_srq_size < MIN_SRPT_SRQ_SIZE
+	    || srpt_srq_size > MAX_SRPT_SRQ_SIZE) {
+		printk(KERN_ERR "invalid value %d for kernel module parameter"
+		       " srpt_srq_size -- must be in the range [%d..%d].\n",
+		       srpt_srq_size, MIN_SRPT_SRQ_SIZE, MAX_SRPT_SRQ_SIZE);
+		goto out;
+	}
+
+	spin_lock_init(&srpt_dev_lock);
+	INIT_LIST_HEAD(&srpt_dev_list);
+
+	ret = -ENODEV;
+	srpt_target = target_fabric_configfs_init(THIS_MODULE, "srpt");
+	if (!srpt_target) {
+		printk(KERN_ERR "couldn't register\n");
+		goto out;
+	}
+
+	srpt_target->tf_ops = srpt_template;
+
+	/* Enable SG chaining */
+	srpt_target->tf_ops.task_sg_chaining = true;
+
+	/*
+	 * Set up default attribute lists.
+	 */
+	srpt_target->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = srpt_wwn_attrs;
+	srpt_target->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = srpt_tpg_attrs;
+	srpt_target->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = srpt_tpg_attrib_attrs;
+	srpt_target->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
+	srpt_target->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
+	srpt_target->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
+	srpt_target->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+	srpt_target->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+	srpt_target->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+
+	ret = target_fabric_configfs_register(srpt_target);
+	if (ret < 0) {
+		printk(KERN_ERR "couldn't register\n");
+		goto out_free_target;
+	}
+
+	ret = ib_register_client(&srpt_client);
+	if (ret) {
+		printk(KERN_ERR "couldn't register IB client\n");
+		goto out_unregister_target;
+	}
+
+	return 0;
+
+out_unregister_target:
+	target_fabric_configfs_deregister(srpt_target);
+	srpt_target = NULL;
+out_free_target:
+	if (srpt_target)
+		target_fabric_configfs_free(srpt_target);
+out:
+	return ret;
+}
+
+static void __exit srpt_cleanup_module(void)
+{
+	ib_unregister_client(&srpt_client);
+	target_fabric_configfs_deregister(srpt_target);
+	srpt_target = NULL;
+}
+
+module_init(srpt_init_module);
+module_exit(srpt_cleanup_module);
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
new file mode 100644
index 0000000..b4b4bbc
--- /dev/null
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -0,0 +1,444 @@
+/*
+ * Copyright (c) 2006 - 2009 Mellanox Technology Inc.  All rights reserved.
+ * Copyright (C) 2009 - 2010 Bart Van Assche <bvanassche@acm.org>.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef IB_SRPT_H
+#define IB_SRPT_H
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_sa.h>
+#include <rdma/ib_cm.h>
+
+#include <scsi/srp.h>
+
+#include "ib_dm_mad.h"
+
+/*
+ * The prefix the ServiceName field must start with in the device management
+ * ServiceEntries attribute pair. See also the SRP specification.
+ */
+#define SRP_SERVICE_NAME_PREFIX		"SRP.T10:"
+
+enum {
+	/*
+	 * SRP IOControllerProfile attributes for SRP target ports that have
+	 * not been defined in <scsi/srp.h>. Source: section B.7, table B.7
+	 * in the SRP specification.
+	 */
+	SRP_PROTOCOL = 0x0108,
+	SRP_PROTOCOL_VERSION = 0x0001,
+	SRP_IO_SUBCLASS = 0x609e,
+	SRP_SEND_TO_IOC = 0x01,
+	SRP_SEND_FROM_IOC = 0x02,
+	SRP_RDMA_READ_FROM_IOC = 0x08,
+	SRP_RDMA_WRITE_FROM_IOC = 0x20,
+
+	/*
+	 * srp_login_cmd.req_flags bitmasks. See also table 9 in the SRP
+	 * specification.
+	 */
+	SRP_MTCH_ACTION = 0x03, /* MULTI-CHANNEL ACTION */
+	SRP_LOSOLNT = 0x10, /* logout solicited notification */
+	SRP_CRSOLNT = 0x20, /* credit request solicited notification */
+	SRP_AESOLNT = 0x40, /* asynchronous event solicited notification */
+
+	/*
+	 * srp_cmd.sol_nt / srp_tsk_mgmt.sol_not bitmasks. See also tables
+	 * 18 and 20 in the SRP specification.
+	 */
+	SRP_SCSOLNT = 0x02, /* SCSOLNT = successful solicited notification */
+	SRP_UCSOLNT = 0x04, /* UCSOLNT = unsuccessful solicited notification */
+
+	/*
+	 * srp_rsp.sol_not / srp_t_logout.sol_not bitmasks. See also tables
+	 * 16 and 22 in the SRP specification.
+	 */
+	SRP_SOLNT = 0x01, /* SOLNT = solicited notification */
+
+	/* See also table 24 in the SRP specification. */
+	SRP_TSK_MGMT_SUCCESS = 0x00,
+	SRP_TSK_MGMT_FUNC_NOT_SUPP = 0x04,
+	SRP_TSK_MGMT_FAILED = 0x05,
+
+	/* See also table 21 in the SRP specification. */
+	SRP_CMD_SIMPLE_Q = 0x0,
+	SRP_CMD_HEAD_OF_Q = 0x1,
+	SRP_CMD_ORDERED_Q = 0x2,
+	SRP_CMD_ACA = 0x4,
+
+	SRP_LOGIN_RSP_MULTICHAN_NO_CHAN = 0x0,
+	SRP_LOGIN_RSP_MULTICHAN_TERMINATED = 0x1,
+	SRP_LOGIN_RSP_MULTICHAN_MAINTAINED = 0x2,
+
+	SRPT_DEF_SG_TABLESIZE = 128,
+	SRPT_DEF_SG_PER_WQE = 16,
+
+	MIN_SRPT_SQ_SIZE = 16,
+	DEF_SRPT_SQ_SIZE = 4096,
+	SRPT_RQ_SIZE = 128,
+	MIN_SRPT_SRQ_SIZE = 4,
+	DEFAULT_SRPT_SRQ_SIZE = 4095,
+	MAX_SRPT_SRQ_SIZE = 65535,
+	MAX_SRPT_RDMA_SIZE = 1U << 24,
+	MAX_SRPT_RSP_SIZE = 1024,
+
+	MIN_MAX_REQ_SIZE = 996,
+	DEFAULT_MAX_REQ_SIZE
+		= sizeof(struct srp_cmd)/*48*/
+		+ sizeof(struct srp_indirect_buf)/*20*/
+		+ 128 * sizeof(struct srp_direct_buf)/*16*/,
+
+	MIN_MAX_RSP_SIZE = sizeof(struct srp_rsp)/*36*/ + 4,
+	DEFAULT_MAX_RSP_SIZE = 256, /* leaves 220 bytes for sense data */
+
+	DEFAULT_MAX_RDMA_SIZE = 65536,
+};
+
+enum srpt_opcode {
+	SRPT_RECV,
+	SRPT_SEND,
+	SRPT_RDMA_MID,
+	SRPT_RDMA_ABORT,
+	SRPT_RDMA_READ_LAST,
+	SRPT_RDMA_WRITE_LAST,
+};
+
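+/*
+ * A 64-bit IB work request ID is built from an srpt_opcode in the upper 32
+ * bits and the I/O context index in the lower 32 bits.
+ */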
+static inline u64 encode_wr_id(u8 opcode, u32 idx)
+{
+	return ((u64)opcode << 32) | idx;
+}
+static inline enum srpt_opcode opcode_from_wr_id(u64 wr_id)
+{
+	return wr_id >> 32;
+}
+static inline u32 idx_from_wr_id(u64 wr_id)
+{
+	return (u32)wr_id;
+}
+
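+/*
+ * struct rdma_iu - Information needed to post a single RDMA work request:
+ * the remote address and rkey plus the local scatter/gather list.
+ */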
+struct rdma_iu {
+	u64		raddr;
+	u32		rkey;
+	struct ib_sge	*sge;
+	u32		sge_cnt;
+	int		mem_id;
+};
+
+/**
+ * enum srpt_command_state - SCSI command state managed by SRPT.
+ * @SRPT_STATE_NEW:           New command arrived and is being processed.
+ * @SRPT_STATE_NEED_DATA:     Processing a write or bidir command and waiting
+ *                            for data arrival.
+ * @SRPT_STATE_DATA_IN:       Data for the write or bidir command arrived and is
+ *                            being processed.
+ * @SRPT_STATE_CMD_RSP_SENT:  SRP_RSP for SRP_CMD has been sent.
+ * @SRPT_STATE_MGMT:          Processing a SCSI task management command.
+ * @SRPT_STATE_MGMT_RSP_SENT: SRP_RSP for SRP_TSK_MGMT has been sent.
+ * @SRPT_STATE_DONE:          Command processing finished successfully, command
+ *                            processing has been aborted or command processing
+ *                            failed.
+ */
+enum srpt_command_state {
+	SRPT_STATE_NEW		 = 0,
+	SRPT_STATE_NEED_DATA	 = 1,
+	SRPT_STATE_DATA_IN	 = 2,
+	SRPT_STATE_CMD_RSP_SENT	 = 3,
+	SRPT_STATE_MGMT		 = 4,
+	SRPT_STATE_MGMT_RSP_SENT = 5,
+	SRPT_STATE_DONE		 = 6,
+};
+
+/**
+ * struct srpt_ioctx - Shared SRPT I/O context information.
+ * @buf:   Pointer to the buffer.
+ * @dma:   DMA address of the buffer.
+ * @index: Index of the I/O context in its ioctx_ring array.
+ */
+struct srpt_ioctx {
+	void			*buf;
+	dma_addr_t		dma;
+	uint32_t		index;
+};
+
+/**
+ * struct srpt_recv_ioctx - SRPT receive I/O context.
+ * @ioctx:     See above.
+ * @wait_list: Node for insertion in srpt_rdma_ch.cmd_wait_list.
+ */
+struct srpt_recv_ioctx {
+	struct srpt_ioctx	ioctx;
+	struct list_head	wait_list;
+};
+
+/**
+ * struct srpt_send_ioctx - SRPT send I/O context.
+ * @ioctx:       See above.
+ * @ch:          Channel pointer.
+ * @free_list:   Node in srpt_rdma_ch.free_list.
+ * @n_rbuf:      Number of data buffers in the received SRP command.
+ * @rbufs:       Pointer to SRP data buffer array.
+ * @single_rbuf: SRP data buffer if the command has only a single buffer.
+ * @sg:          Pointer to sg-list associated with this I/O context.
+ * @sg_cnt:      SG-list size.
+ * @mapped_sg_count: ib_dma_map_sg() return value.
+ * @n_rdma_ius:  Number of elements in the rdma_ius array.
+ * @rdma_ius:    Array with information about the RDMA mapping.
+ * @tag:         Tag of the received SRP information unit.
+ * @spinlock:    Protects 'state'.
+ * @state:       I/O context state.
+ * @rdma_aborted: If initiating a multipart RDMA transfer failed, whether
+ * 		 the already initiated transfers have finished.
+ * @cmd:         Target core command data structure.
+ * @sense_data:  SCSI sense data.
+ */
+struct srpt_send_ioctx {
+	struct srpt_ioctx	ioctx;
+	struct srpt_rdma_ch	*ch;
+	struct kref		 kref;
+	struct rdma_iu		*rdma_ius;
+	struct srp_direct_buf	*rbufs;
+	struct srp_direct_buf	single_rbuf;
+	struct scatterlist	*sg;
+	struct list_head	free_list;
+	spinlock_t		spinlock;
+	enum srpt_command_state	state;
+	bool			rdma_aborted;
+	struct se_cmd		cmd;
+	struct completion	tx_done;
+	u64			tag;
+	int			sg_cnt;
+	int			mapped_sg_count;
+	u16			n_rdma_ius;
+	u8			n_rdma;
+	u8			n_rbuf;
+	bool			queue_status_only;
+	u8			sense_data[SCSI_SENSE_BUFFERSIZE];
+};
+
+/**
+ * enum rdma_ch_state - SRP channel state.
+ * @CH_CONNECTING:	 QP is in RTR state; waiting for RTU.
+ * @CH_LIVE:		 QP is in RTS state.
+ * @CH_DISCONNECTING:    A DREQ has either been received or sent; the
+ *                       disconnect handshake is in progress.
+ * @CH_DRAINING:	 QP is in ERR state; waiting for last WQE event.
+ * @CH_RELEASING:	 Last WQE event has been received; releasing resources.
+ */
+enum rdma_ch_state {
+	CH_CONNECTING,
+	CH_LIVE,
+	CH_DISCONNECTING,
+	CH_DRAINING,
+	CH_RELEASING
+};
+
+/**
+ * struct srpt_rdma_ch - RDMA channel.
+ * @wait_queue:    Allows the kernel thread to wait for more work.
+ * @thread:        Kernel thread that processes the IB queues associated with
+ *                 the channel.
+ * @cm_id:         IB CM ID associated with the channel.
+ * @qp:            IB queue pair used for communicating over this channel.
+ * @cq:            IB completion queue for this channel.
+ * @rq_size:       IB receive queue size.
+ * @rsp_size:      IB response message size in bytes.
+ * @sq_wr_avail:   number of work requests available in the send queue.
+ * @sport:         pointer to the information of the HCA port used by this
+ *                 channel.
+ * @i_port_id:     128-bit initiator port identifier copied from SRP_LOGIN_REQ.
+ * @t_port_id:     128-bit target port identifier copied from SRP_LOGIN_REQ.
+ * @max_ti_iu_len: maximum target-to-initiator information unit length.
+ * @req_lim:       request limit: maximum number of requests that may be sent
+ *                 by the initiator without having received a response.
+ * @req_lim_delta: Number of credits not yet sent back to the initiator.
+ * @spinlock:      Protects free_list and state.
+ * @free_list:     Head of list with free send I/O contexts.
+ * @state:         channel state. See also enum rdma_ch_state.
+ * @ioctx_ring:    Send ring.
+ * @wc:            IB work completion array for srpt_process_completion().
+ * @list:          Node for insertion in the srpt_device.rch_list list.
+ * @cmd_wait_list: List of SCSI commands that arrived before the RTU event. This
+ *                 list contains struct srpt_ioctx elements and is protected
+ *                 against concurrent modification by the cm_id spinlock.
+ * @sess:          Session information associated with this SRP channel.
+ * @sess_name:     Session name.
+ * @release_work:  Allows scheduling of srpt_release_channel().
+ * @release_done:  Enables waiting for srpt_release_channel() completion.
+ */
+struct srpt_rdma_ch {
+	wait_queue_head_t	wait_queue;
+	struct task_struct	*thread;
+	struct ib_cm_id		*cm_id;
+	struct ib_qp		*qp;
+	struct ib_cq		*cq;
+	int			rq_size;
+	u32			rsp_size;
+	atomic_t		sq_wr_avail;
+	struct srpt_port	*sport;
+	u8			i_port_id[16];
+	u8			t_port_id[16];
+	int			max_ti_iu_len;
+	atomic_t		req_lim;
+	atomic_t		req_lim_delta;
+	spinlock_t		spinlock;
+	struct list_head	free_list;
+	enum rdma_ch_state	state;
+	struct srpt_send_ioctx	**ioctx_ring;
+	struct ib_wc		wc[16];
+	struct list_head	list;
+	struct list_head	cmd_wait_list;
+	struct se_session	*sess;
+	u8			sess_name[36];
+	struct work_struct	release_work;
+	struct completion	*release_done;
+};
+
+/**
+ * struct srpt_port_attrib - Attributes for SRPT port.
+ * @srp_max_rdma_size: Maximum size of SRP RDMA transfers for new connections.
+ * @srp_max_rsp_size: Maximum size of SRP response messages in bytes.
+ * @srp_sq_size: Shared receive queue (SRQ) size.
+ */
+struct srpt_port_attrib {
+	u32			srp_max_rdma_size;
+	u32			srp_max_rsp_size;
+	u32			srp_sq_size;
+};
+
+/**
+ * struct srpt_port - Information associated by SRPT with a single IB port.
+ * @sdev:      backpointer to the HCA information.
+ * @mad_agent: per-port management datagram processing information.
+ * @enabled:   Whether or not this target port is enabled.
+ * @port_guid: ASCII representation of Port GUID
+ * @port:      one-based port number.
+ * @sm_lid:    cached value of the port's sm_lid.
+ * @lid:       cached value of the port's lid.
+ * @gid:       cached value of the port's gid.
+ * @port_acl_lock: Spinlock that protects port_acl_list.
+ * @work:      work structure for refreshing the aforementioned cached values.
+ * @port_tpg_1: Data for target portal group 1.
+ * @port_wwn:  Target core WWN data.
+ * @port_acl_list: Head of the list with all node ACLs for this port.
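+ * @port_attrib: Port attributes (see struct srpt_port_attrib).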
+ */
+struct srpt_port {
+	struct srpt_device	*sdev;
+	struct ib_mad_agent	*mad_agent;
+	bool			enabled;
+	u8			port_guid[64];
+	u8			port;
+	u16			sm_lid;
+	u16			lid;
+	union ib_gid		gid;
+	spinlock_t		port_acl_lock;
+	struct work_struct	work;
+	struct se_portal_group	port_tpg_1;
+	struct se_wwn		port_wwn;
+	struct list_head	port_acl_list;
+	struct srpt_port_attrib port_attrib;
+};
+
+/**
+ * struct srpt_device - Information associated by SRPT with a single HCA.
+ * @device:        Backpointer to the struct ib_device managed by the IB core.
+ * @pd:            IB protection domain.
+ * @mr:            L_Key (local key) with write access to all local memory.
+ * @srq:           Per-HCA SRQ (shared receive queue).
+ * @cm_id:         Connection identifier.
+ * @dev_attr:      Attributes of the InfiniBand device as obtained during the
+ *                 ib_client.add() callback.
+ * @srq_size:      SRQ size.
+ * @ioctx_ring:    Receive I/O context ring used by the per-HCA SRQ.
+ * @rch_list:      Per-device channel list -- see also srpt_rdma_ch.list.
+ * @ch_releaseQ:   Enables waiting for removal from rch_list.
+ * @spinlock:      Protects rch_list and tpg.
+ * @port:          Information about the ports owned by this HCA.
+ * @event_handler: Per-HCA asynchronous IB event handler.
+ * @list:          Node in srpt_dev_list.
+ */
+struct srpt_device {
+	struct ib_device	*device;
+	struct ib_pd		*pd;
+	struct ib_mr		*mr;
+	struct ib_srq		*srq;
+	struct ib_cm_id		*cm_id;
+	struct ib_device_attr	dev_attr;
+	int			srq_size;
+	struct srpt_recv_ioctx	**ioctx_ring;
+	struct list_head	rch_list;
+	wait_queue_head_t	ch_releaseQ;
+	spinlock_t		spinlock;
+	struct srpt_port	port[2];
+	struct ib_event_handler	event_handler;
+	struct list_head	list;
+};
+
+/**
+ * struct srpt_node_acl - Per-initiator ACL data (managed via configfs).
+ * @i_port_id: 128-bit SRP initiator port ID.
+ * @sport:     port information.
+ * @nacl:      Target core node ACL information.
+ * @list:      Element of the per-HCA ACL list.
+ */
+struct srpt_node_acl {
+	u8			i_port_id[16];
+	struct srpt_port	*sport;
+	struct se_node_acl	nacl;
+	struct list_head	list;
+};
+
+/*
+ * SRP-related SCSI persistent reservation definitions.
+ *
+ * See also SPC4r28, section 7.6.1 (Protocol specific parameters introduction).
+ * See also SPC4r28, section 7.6.4.5 (TransportID for initiator ports using
+ * SCSI over an RDMA interface).
+ */
+
+enum {
+	SCSI_TRANSPORTID_PROTOCOLID_SRP	= 4,
+};
+
+struct spc_rdma_transport_id {
+	uint8_t protocol_identifier;
+	uint8_t reserved[7];
+	uint8_t i_port_id[16];
+};
+
+#endif				/* IB_SRPT_H */
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index c957c34..9ca28fc 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -403,6 +403,13 @@
 	  This option enables support for on-chip LED drivers on
 	  MAXIM MAX8997 PMIC.
 
+config LEDS_OT200
+	tristate "LED support for the Bachmann OT200"
+	depends on LEDS_CLASS && HAS_IOMEM
+	help
+	  This option enables support for the LEDs on the Bachmann OT200.
+	  Say Y to enable LEDs on the Bachmann OT200.
+
 config LEDS_TRIGGERS
 	bool "LED Trigger support"
 	depends on LEDS_CLASS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index b8a9723..1fc6875 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -28,6 +28,7 @@
 obj-$(CONFIG_LEDS_TCA6507)		+= leds-tca6507.o
 obj-$(CONFIG_LEDS_CLEVO_MAIL)		+= leds-clevo-mail.o
 obj-$(CONFIG_LEDS_HP6XX)		+= leds-hp6xx.o
+obj-$(CONFIG_LEDS_OT200)		+= leds-ot200.o
 obj-$(CONFIG_LEDS_FSG)			+= leds-fsg.o
 obj-$(CONFIG_LEDS_PCA955X)		+= leds-pca955x.o
 obj-$(CONFIG_LEDS_DA903X)		+= leds-da903x.o
diff --git a/drivers/leds/leds-ot200.c b/drivers/leds/leds-ot200.c
new file mode 100644
index 0000000..c464682
--- /dev/null
+++ b/drivers/leds/leds-ot200.c
@@ -0,0 +1,171 @@
+/*
+ * Bachmann ot200 leds driver.
+ *
+ * Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ *         Christian Gmeiner <christian.gmeiner@gmail.com>
+ *
+ * License: GPL as published by the FSF.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/leds.h>
+#include <linux/io.h>
+#include <linux/module.h>
+
+
+struct ot200_led {
+	struct led_classdev cdev;
+	const char *name;
+	unsigned long port;
+	u8 mask;
+};
+
+/*
+ * The device has three leds on the back panel (led_err, led_init and led_run)
+ * and can handle up to seven leds on the front panel.
+ */
+
+static struct ot200_led leds[] = {
+	{
+		.name = "led_run",
+		.port = 0x5a,
+		.mask = BIT(0),
+	},
+	{
+		.name = "led_init",
+		.port = 0x5a,
+		.mask = BIT(1),
+	},
+	{
+		.name = "led_err",
+		.port = 0x5a,
+		.mask = BIT(2),
+	},
+	{
+		.name = "led_1",
+		.port = 0x49,
+		.mask = BIT(7),
+	},
+	{
+		.name = "led_2",
+		.port = 0x49,
+		.mask = BIT(6),
+	},
+	{
+		.name = "led_3",
+		.port = 0x49,
+		.mask = BIT(5),
+	},
+	{
+		.name = "led_4",
+		.port = 0x49,
+		.mask = BIT(4),
+	},
+	{
+		.name = "led_5",
+		.port = 0x49,
+		.mask = BIT(3),
+	},
+	{
+		.name = "led_6",
+		.port = 0x49,
+		.mask = BIT(2),
+	},
+	{
+		.name = "led_7",
+		.port = 0x49,
+		.mask = BIT(1),
+	}
+};
+
+static DEFINE_SPINLOCK(value_lock);
+
+/*
+ * we need to store the current led states, as it is not
+ * possible to read the current led state via inb().
+ */
+static u8 leds_back;
+static u8 leds_front;
+
+static void ot200_led_brightness_set(struct led_classdev *led_cdev,
+		enum led_brightness value)
+{
+	struct ot200_led *led = container_of(led_cdev, struct ot200_led, cdev);
+	u8 *val;
+	unsigned long flags;
+
+	spin_lock_irqsave(&value_lock, flags);
+
+	if (led->port == 0x49)
+		val = &leds_front;
+	else if (led->port == 0x5a)
+		val = &leds_back;
+	else
+		BUG();
+
+	if (value == LED_OFF)
+		*val &= ~led->mask;
+	else
+		*val |= led->mask;
+
+	outb(*val, led->port);
+	spin_unlock_irqrestore(&value_lock, flags);
+}
+
+static int __devinit ot200_led_probe(struct platform_device *pdev)
+{
+	int i;
+	int ret;
+
+	for (i = 0; i < ARRAY_SIZE(leds); i++) {
+
+		leds[i].cdev.name = leds[i].name;
+		leds[i].cdev.brightness_set = ot200_led_brightness_set;
+
+		ret = led_classdev_register(&pdev->dev, &leds[i].cdev);
+		if (ret < 0)
+			goto err;
+	}
+
+	leds_front = 0;		/* turn off all front leds */
+	leds_back = BIT(1);	/* turn on init led */
+	outb(leds_front, 0x49);
+	outb(leds_back, 0x5a);
+
+	return 0;
+
+err:
+	for (i = i - 1; i >= 0; i--)
+		led_classdev_unregister(&leds[i].cdev);
+
+	return ret;
+}
+
+static int __devexit ot200_led_remove(struct platform_device *pdev)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(leds); i++)
+		led_classdev_unregister(&leds[i].cdev);
+
+	return 0;
+}
+
+static struct platform_driver ot200_led_driver = {
+	.probe		= ot200_led_probe,
+	.remove		= __devexit_p(ot200_led_remove),
+	.driver		= {
+		.name	= "leds-ot200",
+		.owner	= THIS_MODULE,
+	},
+};
+
+module_platform_driver(ot200_led_driver);
+
+MODULE_AUTHOR("Sebastian A. Siewior <bigeasy@linutronix.de>");
+MODULE_DESCRIPTION("ot200 LED driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:leds-ot200");
diff --git a/drivers/media/common/tuners/tuner-xc2028.c b/drivers/media/common/tuners/tuner-xc2028.c
index 2755599..b5ee3eb 100644
--- a/drivers/media/common/tuners/tuner-xc2028.c
+++ b/drivers/media/common/tuners/tuner-xc2028.c
@@ -24,6 +24,21 @@
 #include <linux/dvb/frontend.h>
 #include "dvb_frontend.h"
 
+/* Registers (Write-only) */
+#define XREG_INIT         0x00
+#define XREG_RF_FREQ      0x02
+#define XREG_POWER_DOWN   0x08
+
+/* Registers (Read-only) */
+#define XREG_FREQ_ERROR   0x01
+#define XREG_LOCK         0x02
+#define XREG_VERSION      0x04
+#define XREG_PRODUCT_ID   0x08
+#define XREG_HSYNC_FREQ   0x10
+#define XREG_FRAME_LINES  0x20
+#define XREG_SNR          0x40
+
+#define XREG_ADC_ENV      0x0100
 
 static int debug;
 module_param(debug, int, 0644);
@@ -885,7 +900,7 @@
 	mutex_lock(&priv->lock);
 
 	/* Sync Lock Indicator */
-	rc = xc2028_get_reg(priv, 0x0002, &frq_lock);
+	rc = xc2028_get_reg(priv, XREG_LOCK, &frq_lock);
 	if (rc < 0)
 		goto ret;
 
@@ -894,7 +909,7 @@
 		signal = 1 << 11;
 
 	/* Get SNR of the video signal */
-	rc = xc2028_get_reg(priv, 0x0040, &signal);
+	rc = xc2028_get_reg(priv, XREG_SNR, &signal);
 	if (rc < 0)
 		goto ret;
 
@@ -1019,9 +1034,9 @@
 
 	/* CMD= Set frequency */
 	if (priv->firm_version < 0x0202)
-		rc = send_seq(priv, {0x00, 0x02, 0x00, 0x00});
+		rc = send_seq(priv, {0x00, XREG_RF_FREQ, 0x00, 0x00});
 	else
-		rc = send_seq(priv, {0x80, 0x02, 0x00, 0x00});
+		rc = send_seq(priv, {0x80, XREG_RF_FREQ, 0x00, 0x00});
 	if (rc < 0)
 		goto ret;
 
@@ -1201,9 +1216,9 @@
 	mutex_lock(&priv->lock);
 
 	if (priv->firm_version < 0x0202)
-		rc = send_seq(priv, {0x00, 0x08, 0x00, 0x00});
+		rc = send_seq(priv, {0x00, XREG_POWER_DOWN, 0x00, 0x00});
 	else
-		rc = send_seq(priv, {0x80, 0x08, 0x00, 0x00});
+		rc = send_seq(priv, {0x80, XREG_POWER_DOWN, 0x00, 0x00});
 
 	priv->cur_fw.type = 0;	/* need firmware reload */
 
diff --git a/drivers/media/common/tuners/xc4000.c b/drivers/media/common/tuners/xc4000.c
index d218c1d..6839711 100644
--- a/drivers/media/common/tuners/xc4000.c
+++ b/drivers/media/common/tuners/xc4000.c
@@ -154,6 +154,8 @@
 #define XREG_SNR          0x06
 #define XREG_VERSION      0x07
 #define XREG_PRODUCT_ID   0x08
+#define XREG_SIGNAL_LEVEL 0x0A
+#define XREG_NOISE_LEVEL  0x0B
 
 /*
    Basic firmware description. This will remain with
@@ -486,6 +488,16 @@
 	return xc4000_readreg(priv, XREG_QUALITY, quality);
 }
 
+static int xc_get_signal_level(struct xc4000_priv *priv, u16 *signal)
+{
+	return xc4000_readreg(priv, XREG_SIGNAL_LEVEL, signal);
+}
+
+static int xc_get_noise_level(struct xc4000_priv *priv, u16 *noise)
+{
+	return xc4000_readreg(priv, XREG_NOISE_LEVEL, noise);
+}
+
 static u16 xc_wait_for_lock(struct xc4000_priv *priv)
 {
 	u16	lock_state = 0;
@@ -1089,6 +1101,8 @@
 	u32	hsync_freq_hz = 0;
 	u16	frame_lines;
 	u16	quality;
+	u16	signal = 0;
+	u16	noise = 0;
 	u8	hw_majorversion = 0, hw_minorversion = 0;
 	u8	fw_majorversion = 0, fw_minorversion = 0;
 
@@ -1119,6 +1133,12 @@
 
 	xc_get_quality(priv, &quality);
 	dprintk(1, "*** Quality (0:<8dB, 7:>56dB) = %d\n", quality);
+
+	xc_get_signal_level(priv, &signal);
+	dprintk(1, "*** Signal level = -%ddB (%d)\n", signal >> 8, signal);
+
+	xc_get_noise_level(priv, &noise);
+	dprintk(1, "*** Noise level = %ddB (%d)\n", noise >> 8, noise);
 }
 
 static int xc4000_set_params(struct dvb_frontend *fe)
@@ -1432,6 +1452,71 @@
 	return ret;
 }
 
+static int xc4000_get_signal(struct dvb_frontend *fe, u16 *strength)
+{
+	struct xc4000_priv *priv = fe->tuner_priv;
+	u16 value = 0;
+	int rc;
+
+	mutex_lock(&priv->lock);
+	rc = xc4000_readreg(priv, XREG_SIGNAL_LEVEL, &value);
+	mutex_unlock(&priv->lock);
+
+	if (rc < 0)
+		goto ret;
+
+	/* Information from real testing of the DVB-T and radio parts:
+	   the coefficient for one dB is 0xff.
+	 */
+	tuner_dbg("Signal strength: -%ddB (%05d)\n", value >> 8, value);
+
+	/* all known digital modes */
+	if ((priv->video_standard == XC4000_DTV6) ||
+	    (priv->video_standard == XC4000_DTV7) ||
+	    (priv->video_standard == XC4000_DTV7_8) ||
+	    (priv->video_standard == XC4000_DTV8))
+		goto digital;
+
+	/* In analog mode the NOISE LEVEL is what matters; the signal
+	   level depends only on the gain of the antenna and amplifiers
+	   and says nothing about the real quality of reception.
+	 */
+	mutex_lock(&priv->lock);
+	rc = xc4000_readreg(priv, XREG_NOISE_LEVEL, &value);
+	mutex_unlock(&priv->lock);
+
+	tuner_dbg("Noise level: %ddB (%05d)\n", value >> 8, value);
+
+	/* highest noise level: 32dB */
+	if (value >= 0x2000) {
+		value = 0;
+	} else {
+		value = ~value << 3;
+	}
+
+	goto ret;
+
+	/* In digital mode the SIGNAL LEVEL is what matters; the real
+	   noise level is stored in the demodulator registers.
+	 */
+digital:
+	/* best signal: -50dB */
+	if (value <= 0x3200) {
+		value = 0xffff;
+	/* minimum: -114dB - should be 0x7200 but real zero is 0x713A */
+	} else if (value >= 0x713A) {
+		value = 0;
+	} else {
+		value = ~(value - 0x3200) << 2;
+	}
+
+ret:
+	*strength = value;
+
+	return rc;
+}
+
 static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq)
 {
 	struct xc4000_priv *priv = fe->tuner_priv;
@@ -1559,6 +1644,7 @@
 	.set_params	   = xc4000_set_params,
 	.set_analog_params = xc4000_set_analog_params,
 	.get_frequency	   = xc4000_get_frequency,
+	.get_rf_strength   = xc4000_get_signal,
 	.get_bandwidth	   = xc4000_get_bandwidth,
 	.get_status	   = xc4000_get_status
 };
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index b15db4f..fbbe545 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -904,8 +904,11 @@
 {
 	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
 	int i;
+	u32 delsys;
 
+	delsys = c->delivery_system;
 	memset(c, 0, sizeof(struct dtv_frontend_properties));
+	c->delivery_system = delsys;
 
 	c->state = DTV_CLEAR;
 
@@ -1009,25 +1012,6 @@
 	_DTV_CMD(DTV_ISDBT_LAYERC_SEGMENT_COUNT, 1, 0),
 	_DTV_CMD(DTV_ISDBT_LAYERC_TIME_INTERLEAVING, 1, 0),
 
-	_DTV_CMD(DTV_ISDBT_PARTIAL_RECEPTION, 0, 0),
-	_DTV_CMD(DTV_ISDBT_SOUND_BROADCASTING, 0, 0),
-	_DTV_CMD(DTV_ISDBT_SB_SUBCHANNEL_ID, 0, 0),
-	_DTV_CMD(DTV_ISDBT_SB_SEGMENT_IDX, 0, 0),
-	_DTV_CMD(DTV_ISDBT_SB_SEGMENT_COUNT, 0, 0),
-	_DTV_CMD(DTV_ISDBT_LAYER_ENABLED, 0, 0),
-	_DTV_CMD(DTV_ISDBT_LAYERA_FEC, 0, 0),
-	_DTV_CMD(DTV_ISDBT_LAYERA_MODULATION, 0, 0),
-	_DTV_CMD(DTV_ISDBT_LAYERA_SEGMENT_COUNT, 0, 0),
-	_DTV_CMD(DTV_ISDBT_LAYERA_TIME_INTERLEAVING, 0, 0),
-	_DTV_CMD(DTV_ISDBT_LAYERB_FEC, 0, 0),
-	_DTV_CMD(DTV_ISDBT_LAYERB_MODULATION, 0, 0),
-	_DTV_CMD(DTV_ISDBT_LAYERB_SEGMENT_COUNT, 0, 0),
-	_DTV_CMD(DTV_ISDBT_LAYERB_TIME_INTERLEAVING, 0, 0),
-	_DTV_CMD(DTV_ISDBT_LAYERC_FEC, 0, 0),
-	_DTV_CMD(DTV_ISDBT_LAYERC_MODULATION, 0, 0),
-	_DTV_CMD(DTV_ISDBT_LAYERC_SEGMENT_COUNT, 0, 0),
-	_DTV_CMD(DTV_ISDBT_LAYERC_TIME_INTERLEAVING, 0, 0),
-
 	_DTV_CMD(DTV_ISDBS_TS_ID, 1, 0),
 	_DTV_CMD(DTV_DVBT2_PLP_ID, 1, 0),
 
@@ -1413,6 +1397,15 @@
 	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
 	enum dvbv3_emulation_type type;
 
+	/*
+	 * It was reported that some old DVBv5 applications were
+	 * filling delivery_system with SYS_UNDEFINED. If this happens,
+	 * assume that the application wants to use the first supported
+	 * delivery system.
+	 */
+	if (c->delivery_system == SYS_UNDEFINED)
+	        c->delivery_system = fe->ops.delsys[0];
+
 	if (desired_system == SYS_UNDEFINED) {
 		/*
 		 * A DVBv3 call doesn't know what's the desired system.
@@ -1732,6 +1725,7 @@
 {
 	struct dvb_device *dvbdev = file->private_data;
 	struct dvb_frontend *fe = dvbdev->priv;
+	struct dvb_frontend_private *fepriv = fe->frontend_priv;
 	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
 	int err = 0;
 
@@ -1798,9 +1792,14 @@
 
 		/*
 		 * Fills the cache out struct with the cache contents, plus
-		 * the data retrieved from get_frontend.
+		 * the data retrieved from get_frontend, if the frontend
+		 * is not idle. Otherwise, returns the cached content
 		 */
-		dtv_get_frontend(fe, NULL);
+		if (fepriv->state != FESTATE_IDLE) {
+			err = dtv_get_frontend(fe, NULL);
+			if (err < 0)
+				goto out;
+		}
 		for (i = 0; i < tvps->num; i++) {
 			err = dtv_property_process_get(fe, c, tvp + i, file);
 			if (err < 0)
diff --git a/drivers/media/dvb/dvb-usb/anysee.c b/drivers/media/dvb/dvb-usb/anysee.c
index d661929..1455e26 100644
--- a/drivers/media/dvb/dvb-usb/anysee.c
+++ b/drivers/media/dvb/dvb-usb/anysee.c
@@ -877,24 +877,18 @@
 	case ANYSEE_HW_508T2C: /* 20 */
 		/* E7 T2C */
 
+		if (state->fe_id)
+			break;
+
 		/* enable DVB-T/T2/C demod on IOE[5] */
 		ret = anysee_wr_reg_mask(adap->dev, REG_IOE, (1 << 5), 0x20);
 		if (ret)
 			goto error;
 
-		if (state->fe_id == 0)  {
-			/* DVB-T/T2 */
-			adap->fe_adap[state->fe_id].fe =
-				dvb_attach(cxd2820r_attach,
-				&anysee_cxd2820r_config,
-				&adap->dev->i2c_adap, NULL);
-		} else {
-			/* DVB-C */
-			adap->fe_adap[state->fe_id].fe =
-				dvb_attach(cxd2820r_attach,
-				&anysee_cxd2820r_config,
-				&adap->dev->i2c_adap, adap->fe_adap[0].fe);
-		}
+		/* attach demod */
+		adap->fe_adap[state->fe_id].fe = dvb_attach(cxd2820r_attach,
+				&anysee_cxd2820r_config, &adap->dev->i2c_adap,
+				NULL);
 
 		state->has_ci = true;
 
diff --git a/drivers/media/dvb/dvb-usb/dib0700.h b/drivers/media/dvb/dvb-usb/dib0700.h
index 9bd6d51..7de125c 100644
--- a/drivers/media/dvb/dvb-usb/dib0700.h
+++ b/drivers/media/dvb/dvb-usb/dib0700.h
@@ -48,6 +48,8 @@
 	u8 disable_streaming_master_mode;
 	u32 fw_version;
 	u32 nb_packet_buffer_size;
+	int (*read_status)(struct dvb_frontend *, fe_status_t *);
+	int (*sleep)(struct dvb_frontend* fe);
 	u8 buf[255];
 };
 
diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
index 2069994..070e82a 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_core.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
@@ -834,6 +834,7 @@
 
 module_usb_driver(dib0700_driver);
 
+MODULE_FIRMWARE("dvb-usb-dib0700-1.20.fw");
 MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
 MODULE_DESCRIPTION("Driver for devices based on DiBcom DiB0700 - USB bridge");
 MODULE_VERSION("1.0");
diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
index 81ef4b4..f9e966a 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
@@ -3066,19 +3066,25 @@
 	}
 };
 
+static void stk7070pd_init(struct dvb_usb_device *dev)
+{
+	dib0700_set_gpio(dev, GPIO6, GPIO_OUT, 1);
+	msleep(10);
+	dib0700_set_gpio(dev, GPIO9, GPIO_OUT, 1);
+	dib0700_set_gpio(dev, GPIO4, GPIO_OUT, 1);
+	dib0700_set_gpio(dev, GPIO7, GPIO_OUT, 1);
+	dib0700_set_gpio(dev, GPIO10, GPIO_OUT, 0);
+
+	dib0700_ctrl_clock(dev, 72, 1);
+
+	msleep(10);
+	dib0700_set_gpio(dev, GPIO10, GPIO_OUT, 1);
+}
+
 static int stk7070pd_frontend_attach0(struct dvb_usb_adapter *adap)
 {
-	dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1);
-	msleep(10);
-	dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1);
-	dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1);
-	dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1);
-	dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0);
+	stk7070pd_init(adap->dev);
 
-	dib0700_ctrl_clock(adap->dev, 72, 1);
-
-	msleep(10);
-	dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1);
 	msleep(10);
 	dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1);
 
@@ -3099,6 +3105,77 @@
 	return adap->fe_adap[0].fe == NULL ? -ENODEV : 0;
 }
 
+static int novatd_read_status_override(struct dvb_frontend *fe,
+		fe_status_t *stat)
+{
+	struct dvb_usb_adapter *adap = fe->dvb->priv;
+	struct dvb_usb_device *dev = adap->dev;
+	struct dib0700_state *state = dev->priv;
+	int ret;
+
+	ret = state->read_status(fe, stat);
+
+	if (!ret)
+		dib0700_set_gpio(dev, adap->id == 0 ? GPIO1 : GPIO0, GPIO_OUT,
+				!!(*stat & FE_HAS_LOCK));
+
+	return ret;
+}
+
+static int novatd_sleep_override(struct dvb_frontend* fe)
+{
+	struct dvb_usb_adapter *adap = fe->dvb->priv;
+	struct dvb_usb_device *dev = adap->dev;
+	struct dib0700_state *state = dev->priv;
+
+	/* turn off LED */
+	dib0700_set_gpio(dev, adap->id == 0 ? GPIO1 : GPIO0, GPIO_OUT, 0);
+
+	return state->sleep(fe);
+}
+
+/**
+ * novatd_frontend_attach - Nova-TD specific attach
+ *
+ * Nova-TD has GPIO0, 1 and 2 for LEDs. So do not fiddle with them except for
+ * information purposes.
+ */
+static int novatd_frontend_attach(struct dvb_usb_adapter *adap)
+{
+	struct dvb_usb_device *dev = adap->dev;
+	struct dib0700_state *st = dev->priv;
+
+	if (adap->id == 0) {
+		stk7070pd_init(dev);
+
+		/* turn the power LED on, the other two off (just in case) */
+		dib0700_set_gpio(dev, GPIO0, GPIO_OUT, 0);
+		dib0700_set_gpio(dev, GPIO1, GPIO_OUT, 0);
+		dib0700_set_gpio(dev, GPIO2, GPIO_OUT, 1);
+
+		if (dib7000p_i2c_enumeration(&dev->i2c_adap, 2, 18,
+					     stk7070pd_dib7000p_config) != 0) {
+			err("%s: dib7000p_i2c_enumeration failed.  Cannot continue\n",
+			    __func__);
+			return -ENODEV;
+		}
+	}
+
+	adap->fe_adap[0].fe = dvb_attach(dib7000p_attach, &dev->i2c_adap,
+			adap->id == 0 ? 0x80 : 0x82,
+			&stk7070pd_dib7000p_config[adap->id]);
+
+	if (adap->fe_adap[0].fe == NULL)
+		return -ENODEV;
+
+	st->read_status = adap->fe_adap[0].fe->ops.read_status;
+	adap->fe_adap[0].fe->ops.read_status = novatd_read_status_override;
+	st->sleep = adap->fe_adap[0].fe->ops.sleep;
+	adap->fe_adap[0].fe->ops.sleep = novatd_sleep_override;
+
+	return 0;
+}
+
 /* S5H1411 */
 static struct s5h1411_config pinnacle_801e_config = {
 	.output_mode   = S5H1411_PARALLEL_OUTPUT,
@@ -3870,6 +3947,57 @@
 				.pid_filter_count = 32,
 				.pid_filter       = stk70x0p_pid_filter,
 				.pid_filter_ctrl  = stk70x0p_pid_filter_ctrl,
+				.frontend_attach  = novatd_frontend_attach,
+				.tuner_attach     = dib7070p_tuner_attach,
+
+				DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
+			}},
+				.size_of_priv     = sizeof(struct dib0700_adapter_state),
+			}, {
+			.num_frontends = 1,
+			.fe = {{
+				.caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+				.pid_filter_count = 32,
+				.pid_filter       = stk70x0p_pid_filter,
+				.pid_filter_ctrl  = stk70x0p_pid_filter_ctrl,
+				.frontend_attach  = novatd_frontend_attach,
+				.tuner_attach     = dib7070p_tuner_attach,
+
+				DIB0700_DEFAULT_STREAMING_CONFIG(0x03),
+			}},
+				.size_of_priv     = sizeof(struct dib0700_adapter_state),
+			}
+		},
+
+		.num_device_descs = 1,
+		.devices = {
+			{   "Hauppauge Nova-TD Stick (52009)",
+				{ &dib0700_usb_id_table[35], NULL },
+				{ NULL },
+			},
+		},
+
+		.rc.core = {
+			.rc_interval      = DEFAULT_RC_INTERVAL,
+			.rc_codes         = RC_MAP_DIB0700_RC5_TABLE,
+			.module_name	  = "dib0700",
+			.rc_query         = dib0700_rc_query_old_firmware,
+			.allowed_protos   = RC_TYPE_RC5 |
+					    RC_TYPE_RC6 |
+					    RC_TYPE_NEC,
+			.change_protocol = dib0700_change_protocol,
+		},
+	}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
+
+		.num_adapters = 2,
+		.adapter = {
+			{
+			.num_frontends = 1,
+			.fe = {{
+				.caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+				.pid_filter_count = 32,
+				.pid_filter       = stk70x0p_pid_filter,
+				.pid_filter_ctrl  = stk70x0p_pid_filter_ctrl,
 				.frontend_attach  = stk7070pd_frontend_attach0,
 				.tuner_attach     = dib7070p_tuner_attach,
 
@@ -3892,7 +4020,7 @@
 			}
 		},
 
-		.num_device_descs = 6,
+		.num_device_descs = 5,
 		.devices = {
 			{   "DiBcom STK7070PD reference design",
 				{ &dib0700_usb_id_table[17], NULL },
@@ -3902,10 +4030,6 @@
 				{ &dib0700_usb_id_table[18], NULL },
 				{ NULL },
 			},
-			{   "Hauppauge Nova-TD Stick (52009)",
-				{ &dib0700_usb_id_table[35], NULL },
-				{ NULL },
-			},
 			{   "Hauppauge Nova-TD-500 (84xxx)",
 				{ &dib0700_usb_id_table[36], NULL },
 				{ NULL },
diff --git a/drivers/media/dvb/frontends/cxd2820r_core.c b/drivers/media/dvb/frontends/cxd2820r_core.c
index 93e1b12..caae7f7 100644
--- a/drivers/media/dvb/frontends/cxd2820r_core.c
+++ b/drivers/media/dvb/frontends/cxd2820r_core.c
@@ -309,9 +309,14 @@
 
 static int cxd2820r_get_frontend(struct dvb_frontend *fe)
 {
+	struct cxd2820r_priv *priv = fe->demodulator_priv;
 	int ret;
 
 	dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);
+
+	if (priv->delivery_system == SYS_UNDEFINED)
+		return 0;
+
 	switch (fe->dtv_property_cache.delivery_system) {
 	case SYS_DVBT:
 		ret = cxd2820r_get_frontend_t(fe);
@@ -476,10 +481,10 @@
 	dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);
 
 	/* switch between DVB-T and DVB-T2 when tune fails */
-	if (priv->last_tune_failed && (priv->delivery_system != SYS_DVBC_ANNEX_A)) {
+	if (priv->last_tune_failed) {
 		if (priv->delivery_system == SYS_DVBT)
 			c->delivery_system = SYS_DVBT2;
-		else
+		else if (priv->delivery_system == SYS_DVBT2)
 			c->delivery_system = SYS_DVBT;
 	}
 
@@ -492,6 +497,7 @@
 	/* frontend lock wait loop count */
 	switch (priv->delivery_system) {
 	case SYS_DVBT:
+	case SYS_DVBC_ANNEX_A:
 		i = 20;
 		break;
 	case SYS_DVBT2:
diff --git a/drivers/media/dvb/frontends/ds3000.c b/drivers/media/dvb/frontends/ds3000.c
index 9387770..af65d01 100644
--- a/drivers/media/dvb/frontends/ds3000.c
+++ b/drivers/media/dvb/frontends/ds3000.c
@@ -1195,7 +1195,7 @@
 
 	for (i = 0; i < 30 ; i++) {
 		ds3000_read_status(fe, &status);
-		if (status && FE_HAS_LOCK)
+		if (status & FE_HAS_LOCK)
 			break;
 
 		msleep(10);
diff --git a/drivers/media/dvb/frontends/mb86a20s.c b/drivers/media/dvb/frontends/mb86a20s.c
index 7fa3e47..fade566 100644
--- a/drivers/media/dvb/frontends/mb86a20s.c
+++ b/drivers/media/dvb/frontends/mb86a20s.c
@@ -402,7 +402,7 @@
 		[2] = 0x8e,	/* Layer C */
 	};
 
-	if (layer > ARRAY_SIZE(reg))
+	if (layer >= ARRAY_SIZE(reg))
 		return -EINVAL;
 	rc = mb86a20s_writereg(state, 0x6d, reg[layer]);
 	if (rc < 0)
@@ -435,7 +435,7 @@
 		[2] = 0x8f,	/* Layer C */
 	};
 
-	if (layer > ARRAY_SIZE(reg))
+	if (layer >= ARRAY_SIZE(reg))
 		return -EINVAL;
 	rc = mb86a20s_writereg(state, 0x6d, reg[layer]);
 	if (rc < 0)
@@ -470,7 +470,7 @@
 		[2] = 0x90,	/* Layer C */
 	};
 
-	if (layer > ARRAY_SIZE(reg))
+	if (layer >= ARRAY_SIZE(reg))
 		return -EINVAL;
 	rc = mb86a20s_writereg(state, 0x6d, reg[layer]);
 	if (rc < 0)
@@ -494,7 +494,7 @@
 		[2] = 0x91,	/* Layer C */
 	};
 
-	if (layer > ARRAY_SIZE(reg))
+	if (layer >= ARRAY_SIZE(reg))
 		return -EINVAL;
 	rc = mb86a20s_writereg(state, 0x6d, reg[layer]);
 	if (rc < 0)
diff --git a/drivers/media/dvb/frontends/tda18271c2dd.c b/drivers/media/dvb/frontends/tda18271c2dd.c
index 86da3d8..ad7c72e 100644
--- a/drivers/media/dvb/frontends/tda18271c2dd.c
+++ b/drivers/media/dvb/frontends/tda18271c2dd.c
@@ -29,7 +29,6 @@
 #include <linux/delay.h>
 #include <linux/firmware.h>
 #include <linux/i2c.h>
-#include <linux/version.h>
 #include <asm/div64.h>
 
 #include "dvb_frontend.h"
diff --git a/drivers/media/video/as3645a.c b/drivers/media/video/as3645a.c
index ec859a5..f241702 100644
--- a/drivers/media/video/as3645a.c
+++ b/drivers/media/video/as3645a.c
@@ -29,6 +29,7 @@
 #include <linux/i2c.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
+#include <linux/slab.h>
 
 #include <media/as3645a.h>
 #include <media/v4l2-ctrls.h>
diff --git a/drivers/media/video/cx18/cx18-fileops.c b/drivers/media/video/cx18/cx18-fileops.c
index 14cb961..4bfd865 100644
--- a/drivers/media/video/cx18/cx18-fileops.c
+++ b/drivers/media/video/cx18/cx18-fileops.c
@@ -751,20 +751,10 @@
 
 	CX18_DEBUG_IOCTL("close() of %s\n", s->name);
 
-	v4l2_fh_del(fh);
-	v4l2_fh_exit(fh);
-
-	/* Easy case first: this stream was never claimed by us */
-	if (s->id != id->open_id) {
-		kfree(id);
-		return 0;
-	}
-
-	/* 'Unclaim' this stream */
-
-	/* Stop radio */
 	mutex_lock(&cx->serialize_lock);
-	if (id->type == CX18_ENC_STREAM_TYPE_RAD) {
+	/* Stop radio */
+	if (id->type == CX18_ENC_STREAM_TYPE_RAD &&
+			v4l2_fh_is_singular_file(filp)) {
 		/* Closing radio device, return to TV mode */
 		cx18_mute(cx);
 		/* Mark that the radio is no longer in use */
@@ -781,10 +771,14 @@
 		}
 		/* Done! Unmute and continue. */
 		cx18_unmute(cx);
-		cx18_release_stream(s);
-	} else {
-		cx18_stop_capture(id, 0);
 	}
+
+	v4l2_fh_del(fh);
+	v4l2_fh_exit(fh);
+
+	/* 'Unclaim' this stream */
+	if (s->id == id->open_id)
+		cx18_stop_capture(id, 0);
 	kfree(id);
 	mutex_unlock(&cx->serialize_lock);
 	return 0;
@@ -810,21 +804,15 @@
 
 	item->open_id = cx->open_id++;
 	filp->private_data = &item->fh;
+	v4l2_fh_add(&item->fh);
 
-	if (item->type == CX18_ENC_STREAM_TYPE_RAD) {
-		/* Try to claim this stream */
-		if (cx18_claim_stream(item, item->type)) {
-			/* No, it's already in use */
-			v4l2_fh_exit(&item->fh);
-			kfree(item);
-			return -EBUSY;
-		}
-
+	if (item->type == CX18_ENC_STREAM_TYPE_RAD &&
+			v4l2_fh_is_singular_file(filp)) {
 		if (!test_bit(CX18_F_I_RADIO_USER, &cx->i_flags)) {
 			if (atomic_read(&cx->ana_capturing) > 0) {
 				/* switching to radio while capture is
 				   in progress is not polite */
-				cx18_release_stream(s);
+				v4l2_fh_del(&item->fh);
 				v4l2_fh_exit(&item->fh);
 				kfree(item);
 				return -EBUSY;
@@ -842,7 +830,6 @@
 		/* Done! Unmute and continue. */
 		cx18_unmute(cx);
 	}
-	v4l2_fh_add(&item->fh);
 	return 0;
 }
 
diff --git a/drivers/media/video/cx231xx/cx231xx-cards.c b/drivers/media/video/cx231xx/cx231xx-cards.c
index 919ed77..875a7ce 100644
--- a/drivers/media/video/cx231xx/cx231xx-cards.c
+++ b/drivers/media/video/cx231xx/cx231xx-cards.c
@@ -1052,7 +1052,7 @@
 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 	if (dev == NULL) {
 		cx231xx_err(DRIVER_NAME ": out of memory!\n");
-		clear_bit(dev->devno, &cx231xx_devused);
+		clear_bit(nr, &cx231xx_devused);
 		return -ENOMEM;
 	}
 
diff --git a/drivers/media/video/cx23885/cx23885-cards.c b/drivers/media/video/cx23885/cx23885-cards.c
index 3c01be9..19b5499 100644
--- a/drivers/media/video/cx23885/cx23885-cards.c
+++ b/drivers/media/video/cx23885/cx23885-cards.c
@@ -213,8 +213,8 @@
 		.portc		= CX23885_MPEG_DVB,
 		.tuner_type	= TUNER_XC4000,
 		.tuner_addr	= 0x61,
-		.radio_type	= TUNER_XC4000,
-		.radio_addr	= 0x61,
+		.radio_type	= UNSET,
+		.radio_addr	= ADDR_UNSET,
 		.input		= {{
 			.type	= CX23885_VMUX_TELEVISION,
 			.vmux	= CX25840_VIN2_CH1 |
diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c
index af8a225..6835eb1 100644
--- a/drivers/media/video/cx23885/cx23885-dvb.c
+++ b/drivers/media/video/cx23885/cx23885-dvb.c
@@ -943,6 +943,11 @@
 
 			fe = dvb_attach(xc4000_attach, fe0->dvb.frontend,
 					&dev->i2c_bus[1].i2c_adap, &cfg);
+			if (!fe) {
+				printk(KERN_ERR "%s/2: xc4000 attach failed\n",
+				       dev->name);
+				goto frontend_detach;
+			}
 		}
 		break;
 	case CX23885_BOARD_TBS_6920:
diff --git a/drivers/media/video/cx23885/cx23885-video.c b/drivers/media/video/cx23885/cx23885-video.c
index 4bbf9bb9..c654bdc 100644
--- a/drivers/media/video/cx23885/cx23885-video.c
+++ b/drivers/media/video/cx23885/cx23885-video.c
@@ -1550,7 +1550,6 @@
 	struct v4l2_control ctrl;
 	struct videobuf_dvb_frontend *vfe;
 	struct dvb_frontend *fe;
-	int err = 0;
 
 	struct analog_parameters params = {
 		.mode      = V4L2_TUNER_ANALOG_TV,
@@ -1572,8 +1571,10 @@
 		params.frequency, f->tuner, params.std);
 
 	vfe = videobuf_dvb_get_frontend(&dev->ts2.frontends, 1);
-	if (!vfe)
-		err = -EINVAL;
+	if (!vfe) {
+		mutex_unlock(&dev->lock);
+		return -EINVAL;
+	}
 
 	fe = vfe->dvb.frontend;
 
diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c
index 62c7ad0..cbd5d11 100644
--- a/drivers/media/video/cx88/cx88-cards.c
+++ b/drivers/media/video/cx88/cx88-cards.c
@@ -1573,8 +1573,8 @@
 		.name           = "Pinnacle Hybrid PCTV",
 		.tuner_type     = TUNER_XC2028,
 		.tuner_addr     = 0x61,
-		.radio_type     = TUNER_XC2028,
-		.radio_addr     = 0x61,
+		.radio_type     = UNSET,
+		.radio_addr     = ADDR_UNSET,
 		.input          = { {
 			.type   = CX88_VMUX_TELEVISION,
 			.vmux   = 0,
@@ -1611,8 +1611,8 @@
 		.name           = "Leadtek TV2000 XP Global",
 		.tuner_type     = TUNER_XC2028,
 		.tuner_addr     = 0x61,
-		.radio_type     = TUNER_XC2028,
-		.radio_addr     = 0x61,
+		.radio_type     = UNSET,
+		.radio_addr     = ADDR_UNSET,
 		.input          = { {
 			.type   = CX88_VMUX_TELEVISION,
 			.vmux   = 0,
@@ -2115,8 +2115,8 @@
 		.name           = "Terratec Cinergy HT PCI MKII",
 		.tuner_type     = TUNER_XC2028,
 		.tuner_addr     = 0x61,
-		.radio_type     = TUNER_XC2028,
-		.radio_addr     = 0x61,
+		.radio_type     = UNSET,
+		.radio_addr     = ADDR_UNSET,
 		.input          = { {
 			.type   = CX88_VMUX_TELEVISION,
 			.vmux   = 0,
@@ -2154,9 +2154,9 @@
 	[CX88_BOARD_WINFAST_DTV1800H] = {
 		.name           = "Leadtek WinFast DTV1800 Hybrid",
 		.tuner_type     = TUNER_XC2028,
-		.radio_type     = TUNER_XC2028,
+		.radio_type     = UNSET,
 		.tuner_addr     = 0x61,
-		.radio_addr     = 0x61,
+		.radio_addr     = ADDR_UNSET,
 		/*
 		 * GPIO setting
 		 *
@@ -2195,9 +2195,9 @@
 	[CX88_BOARD_WINFAST_DTV1800H_XC4000] = {
 		.name		= "Leadtek WinFast DTV1800 H (XC4000)",
 		.tuner_type	= TUNER_XC4000,
-		.radio_type	= TUNER_XC4000,
+		.radio_type	= UNSET,
 		.tuner_addr	= 0x61,
-		.radio_addr	= 0x61,
+		.radio_addr	= ADDR_UNSET,
 		/*
 		 * GPIO setting
 		 *
@@ -2236,9 +2236,9 @@
 	[CX88_BOARD_WINFAST_DTV2000H_PLUS] = {
 		.name		= "Leadtek WinFast DTV2000 H PLUS",
 		.tuner_type	= TUNER_XC4000,
-		.radio_type	= TUNER_XC4000,
+		.radio_type	= UNSET,
 		.tuner_addr	= 0x61,
-		.radio_addr	= 0x61,
+		.radio_addr	= ADDR_UNSET,
 		/*
 		 * GPIO
 		 *   2: 1: mute audio
diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
index 544af91..3949b7d 100644
--- a/drivers/media/video/ivtv/ivtv-driver.c
+++ b/drivers/media/video/ivtv/ivtv-driver.c
@@ -731,9 +731,6 @@
 
 	init_kthread_work(&itv->irq_work, ivtv_irq_work_handler);
 
-	/* start counting open_id at 1 */
-	itv->open_id = 1;
-
 	/* Initial settings */
 	itv->cxhdl.port = CX2341X_PORT_MEMORY;
 	itv->cxhdl.capabilities = CX2341X_CAP_HAS_SLICED_VBI;
diff --git a/drivers/media/video/ivtv/ivtv-driver.h b/drivers/media/video/ivtv/ivtv-driver.h
index 8f9cc17..06f3d78 100644
--- a/drivers/media/video/ivtv/ivtv-driver.h
+++ b/drivers/media/video/ivtv/ivtv-driver.h
@@ -332,7 +332,7 @@
 	const char *name;		/* name of the stream */
 	int type;			/* stream type */
 
-	u32 id;
+	struct v4l2_fh *fh;		/* pointer to the streaming filehandle */
 	spinlock_t qlock; 		/* locks access to the queues */
 	unsigned long s_flags;		/* status flags, see above */
 	int dma;			/* can be PCI_DMA_TODEVICE, PCI_DMA_FROMDEVICE or PCI_DMA_NONE */
@@ -379,7 +379,6 @@
 
 struct ivtv_open_id {
 	struct v4l2_fh fh;
-	u32 open_id;                    /* unique ID for this file descriptor */
 	int type;                       /* stream type */
 	int yuv_frames;                 /* 1: started OUT_UDMA_YUV output mode */
 	struct ivtv *itv;
diff --git a/drivers/media/video/ivtv/ivtv-fileops.c b/drivers/media/video/ivtv/ivtv-fileops.c
index 38f0522..2cd6c89 100644
--- a/drivers/media/video/ivtv/ivtv-fileops.c
+++ b/drivers/media/video/ivtv/ivtv-fileops.c
@@ -50,16 +50,16 @@
 
 	if (test_and_set_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
 		/* someone already claimed this stream */
-		if (s->id == id->open_id) {
+		if (s->fh == &id->fh) {
 			/* yes, this file descriptor did. So that's OK. */
 			return 0;
 		}
-		if (s->id == -1 && (type == IVTV_DEC_STREAM_TYPE_VBI ||
+		if (s->fh == NULL && (type == IVTV_DEC_STREAM_TYPE_VBI ||
 					 type == IVTV_ENC_STREAM_TYPE_VBI)) {
 			/* VBI is handled already internally, now also assign
 			   the file descriptor to this stream for external
 			   reading of the stream. */
-			s->id = id->open_id;
+			s->fh = &id->fh;
 			IVTV_DEBUG_INFO("Start Read VBI\n");
 			return 0;
 		}
@@ -67,7 +67,7 @@
 		IVTV_DEBUG_INFO("Stream %d is busy\n", type);
 		return -EBUSY;
 	}
-	s->id = id->open_id;
+	s->fh = &id->fh;
 	if (type == IVTV_DEC_STREAM_TYPE_VBI) {
 		/* Enable reinsertion interrupt */
 		ivtv_clear_irq_mask(itv, IVTV_IRQ_DEC_VBI_RE_INSERT);
@@ -104,7 +104,7 @@
 	struct ivtv *itv = s->itv;
 	struct ivtv_stream *s_vbi;
 
-	s->id = -1;
+	s->fh = NULL;
 	if ((s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type == IVTV_ENC_STREAM_TYPE_VBI) &&
 		test_bit(IVTV_F_S_INTERNAL_USE, &s->s_flags)) {
 		/* this stream is still in use internally */
@@ -136,7 +136,7 @@
 		/* was already cleared */
 		return;
 	}
-	if (s_vbi->id != -1) {
+	if (s_vbi->fh) {
 		/* VBI stream still claimed by a file descriptor */
 		return;
 	}
@@ -268,11 +268,13 @@
 		}
 
 		/* wait for more data to arrive */
+		mutex_unlock(&itv->serialize_lock);
 		prepare_to_wait(&s->waitq, &wait, TASK_INTERRUPTIBLE);
 		/* New buffers might have become available before we were added to the waitqueue */
 		if (!s->q_full.buffers)
 			schedule();
 		finish_wait(&s->waitq, &wait);
+		mutex_lock(&itv->serialize_lock);
 		if (signal_pending(current)) {
 			/* return if a signal was received */
 			IVTV_DEBUG_INFO("User stopped %s\n", s->name);
@@ -357,7 +359,7 @@
 	size_t tot_written = 0;
 	int single_frame = 0;
 
-	if (atomic_read(&itv->capturing) == 0 && s->id == -1) {
+	if (atomic_read(&itv->capturing) == 0 && s->fh == NULL) {
 		/* shouldn't happen */
 		IVTV_DEBUG_WARN("Stream %s not initialized before read\n", s->name);
 		return -EIO;
@@ -507,9 +509,7 @@
 
 	IVTV_DEBUG_HI_FILE("read %zd bytes from %s\n", count, s->name);
 
-	mutex_lock(&itv->serialize_lock);
 	rc = ivtv_start_capture(id);
-	mutex_unlock(&itv->serialize_lock);
 	if (rc)
 		return rc;
 	return ivtv_read_pos(s, buf, count, pos, filp->f_flags & O_NONBLOCK);
@@ -584,9 +584,7 @@
 	set_bit(IVTV_F_S_APPL_IO, &s->s_flags);
 
 	/* Start decoder (returns 0 if already started) */
-	mutex_lock(&itv->serialize_lock);
 	rc = ivtv_start_decoding(id, itv->speed);
-	mutex_unlock(&itv->serialize_lock);
 	if (rc) {
 		IVTV_DEBUG_WARN("Failed start decode stream %s\n", s->name);
 
@@ -627,11 +625,13 @@
 			break;
 		if (filp->f_flags & O_NONBLOCK)
 			return -EAGAIN;
+		mutex_unlock(&itv->serialize_lock);
 		prepare_to_wait(&s->waitq, &wait, TASK_INTERRUPTIBLE);
 		/* New buffers might have become free before we were added to the waitqueue */
 		if (!s->q_free.buffers)
 			schedule();
 		finish_wait(&s->waitq, &wait);
+		mutex_lock(&itv->serialize_lock);
 		if (signal_pending(current)) {
 			IVTV_DEBUG_INFO("User stopped %s\n", s->name);
 			return -EINTR;
@@ -686,12 +686,14 @@
 			if (mode == OUT_YUV)
 				ivtv_yuv_setup_stream_frame(itv);
 
+			mutex_unlock(&itv->serialize_lock);
 			prepare_to_wait(&itv->dma_waitq, &wait, TASK_INTERRUPTIBLE);
 			while (!(got_sig = signal_pending(current)) &&
 					test_bit(IVTV_F_S_DMA_PENDING, &s->s_flags)) {
 				schedule();
 			}
 			finish_wait(&itv->dma_waitq, &wait);
+			mutex_lock(&itv->serialize_lock);
 			if (got_sig) {
 				IVTV_DEBUG_INFO("User interrupted %s\n", s->name);
 				return -EINTR;
@@ -756,9 +758,7 @@
 	if (!eof && !test_bit(IVTV_F_S_STREAMING, &s->s_flags)) {
 		int rc;
 
-		mutex_lock(&itv->serialize_lock);
 		rc = ivtv_start_capture(id);
-		mutex_unlock(&itv->serialize_lock);
 		if (rc) {
 			IVTV_DEBUG_INFO("Could not start capture for %s (%d)\n",
 					s->name, rc);
@@ -808,7 +808,7 @@
 		     id->type == IVTV_ENC_STREAM_TYPE_VBI) &&
 		    test_bit(IVTV_F_S_INTERNAL_USE, &s->s_flags)) {
 			/* Also used internally, don't stop capturing */
-			s->id = -1;
+			s->fh = NULL;
 		}
 		else {
 			ivtv_stop_v4l2_encode_stream(s, gop_end);
@@ -861,20 +861,9 @@
 
 	IVTV_DEBUG_FILE("close %s\n", s->name);
 
-	v4l2_fh_del(fh);
-	v4l2_fh_exit(fh);
-
-	/* Easy case first: this stream was never claimed by us */
-	if (s->id != id->open_id) {
-		kfree(id);
-		return 0;
-	}
-
-	/* 'Unclaim' this stream */
-
 	/* Stop radio */
-	mutex_lock(&itv->serialize_lock);
-	if (id->type == IVTV_ENC_STREAM_TYPE_RAD) {
+	if (id->type == IVTV_ENC_STREAM_TYPE_RAD &&
+			v4l2_fh_is_singular_file(filp)) {
 		/* Closing radio device, return to TV mode */
 		ivtv_mute(itv);
 		/* Mark that the radio is no longer in use */
@@ -890,13 +879,25 @@
 		if (atomic_read(&itv->capturing) > 0) {
 			/* Undo video mute */
 			ivtv_vapi(itv, CX2341X_ENC_MUTE_VIDEO, 1,
-				v4l2_ctrl_g_ctrl(itv->cxhdl.video_mute) |
-				(v4l2_ctrl_g_ctrl(itv->cxhdl.video_mute_yuv) << 8));
+					v4l2_ctrl_g_ctrl(itv->cxhdl.video_mute) |
+					(v4l2_ctrl_g_ctrl(itv->cxhdl.video_mute_yuv) << 8));
 		}
 		/* Done! Unmute and continue. */
 		ivtv_unmute(itv);
-		ivtv_release_stream(s);
-	} else if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) {
+	}
+
+	v4l2_fh_del(fh);
+	v4l2_fh_exit(fh);
+
+	/* Easy case first: this stream was never claimed by us */
+	if (s->fh != &id->fh) {
+		kfree(id);
+		return 0;
+	}
+
+	/* 'Unclaim' this stream */
+
+	if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) {
 		struct ivtv_stream *s_vout = &itv->streams[IVTV_DEC_STREAM_TYPE_VOUT];
 
 		ivtv_stop_decoding(id, VIDEO_CMD_STOP_TO_BLACK | VIDEO_CMD_STOP_IMMEDIATELY, 0);
@@ -911,21 +912,25 @@
 		ivtv_stop_capture(id, 0);
 	}
 	kfree(id);
-	mutex_unlock(&itv->serialize_lock);
 	return 0;
 }
 
-static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp)
+int ivtv_v4l2_open(struct file *filp)
 {
-#ifdef CONFIG_VIDEO_ADV_DEBUG
 	struct video_device *vdev = video_devdata(filp);
-#endif
+	struct ivtv_stream *s = video_get_drvdata(vdev);
 	struct ivtv *itv = s->itv;
 	struct ivtv_open_id *item;
 	int res = 0;
 
 	IVTV_DEBUG_FILE("open %s\n", s->name);
 
+	if (ivtv_init_on_first_open(itv)) {
+		IVTV_ERR("Failed to initialize on device %s\n",
+			 video_device_node_name(vdev));
+		return -ENXIO;
+	}
+
 #ifdef CONFIG_VIDEO_ADV_DEBUG
 	/* Unless ivtv_fw_debug is set, error out if firmware dead. */
 	if (ivtv_fw_debug) {
@@ -966,31 +971,19 @@
 		return -ENOMEM;
 	}
 	v4l2_fh_init(&item->fh, s->vdev);
-	if (res < 0) {
-		v4l2_fh_exit(&item->fh);
-		kfree(item);
-		return res;
-	}
 	item->itv = itv;
 	item->type = s->type;
 
-	item->open_id = itv->open_id++;
 	filp->private_data = &item->fh;
+	v4l2_fh_add(&item->fh);
 
-	if (item->type == IVTV_ENC_STREAM_TYPE_RAD) {
-		/* Try to claim this stream */
-		if (ivtv_claim_stream(item, item->type)) {
-			/* No, it's already in use */
-			v4l2_fh_exit(&item->fh);
-			kfree(item);
-			return -EBUSY;
-		}
-
+	if (item->type == IVTV_ENC_STREAM_TYPE_RAD &&
+			v4l2_fh_is_singular_file(filp)) {
 		if (!test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags)) {
 			if (atomic_read(&itv->capturing) > 0) {
 				/* switching to radio while capture is
 				   in progress is not polite */
-				ivtv_release_stream(s);
+				v4l2_fh_del(&item->fh);
 				v4l2_fh_exit(&item->fh);
 				kfree(item);
 				return -EBUSY;
@@ -1022,32 +1015,9 @@
 				1080 * ((itv->yuv_info.v4l2_src_h + 31) & ~31);
 		itv->yuv_info.stream_size = 0;
 	}
-	v4l2_fh_add(&item->fh);
 	return 0;
 }
 
-int ivtv_v4l2_open(struct file *filp)
-{
-	int res;
-	struct ivtv *itv = NULL;
-	struct ivtv_stream *s = NULL;
-	struct video_device *vdev = video_devdata(filp);
-
-	s = video_get_drvdata(vdev);
-	itv = s->itv;
-
-	mutex_lock(&itv->serialize_lock);
-	if (ivtv_init_on_first_open(itv)) {
-		IVTV_ERR("Failed to initialize on device %s\n",
-			 video_device_node_name(vdev));
-		mutex_unlock(&itv->serialize_lock);
-		return -ENXIO;
-	}
-	res = ivtv_serialized_open(s, filp);
-	mutex_unlock(&itv->serialize_lock);
-	return res;
-}
-
 void ivtv_mute(struct ivtv *itv)
 {
 	if (atomic_read(&itv->capturing))
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c
index ecafa69..c4bc481 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.c
+++ b/drivers/media/video/ivtv/ivtv-ioctl.c
@@ -179,6 +179,7 @@
 		ivtv_vapi(itv, CX2341X_DEC_PAUSE_PLAYBACK, 1, 0);
 
 		/* Wait for any DMA to finish */
+		mutex_unlock(&itv->serialize_lock);
 		prepare_to_wait(&itv->dma_waitq, &wait, TASK_INTERRUPTIBLE);
 		while (test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
 			got_sig = signal_pending(current);
@@ -188,6 +189,7 @@
 			schedule();
 		}
 		finish_wait(&itv->dma_waitq, &wait);
+		mutex_lock(&itv->serialize_lock);
 		if (got_sig)
 			return -EINTR;
 
@@ -1107,6 +1109,7 @@
 	 * happens within the first 100 lines of the top field.
 	 * Make 4 attempts to sync to the decoder before giving up.
 	 */
+	mutex_unlock(&itv->serialize_lock);
 	for (f = 0; f < 4; f++) {
 		prepare_to_wait(&itv->vsync_waitq, &wait,
 				TASK_UNINTERRUPTIBLE);
@@ -1115,6 +1118,7 @@
 		schedule_timeout(msecs_to_jiffies(25));
 	}
 	finish_wait(&itv->vsync_waitq, &wait);
+	mutex_lock(&itv->serialize_lock);
 
 	if (f == 4)
 		IVTV_WARN("Mode change failed to sync to decoder\n");
@@ -1842,8 +1846,7 @@
 	return 0;
 }
 
-static long ivtv_serialized_ioctl(struct ivtv *itv, struct file *filp,
-		unsigned int cmd, unsigned long arg)
+long ivtv_v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	struct video_device *vfd = video_devdata(filp);
 	long ret;
@@ -1855,21 +1858,6 @@
 	return ret;
 }
 
-long ivtv_v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
-	struct ivtv_open_id *id = fh2id(filp->private_data);
-	struct ivtv *itv = id->itv;
-	long res;
-
-	/* DQEVENT can block, so this should not run with the serialize lock */
-	if (cmd == VIDIOC_DQEVENT)
-		return ivtv_serialized_ioctl(itv, filp, cmd, arg);
-	mutex_lock(&itv->serialize_lock);
-	res = ivtv_serialized_ioctl(itv, filp, cmd, arg);
-	mutex_unlock(&itv->serialize_lock);
-	return res;
-}
-
 static const struct v4l2_ioctl_ops ivtv_ioctl_ops = {
 	.vidioc_querycap    		    = ivtv_querycap,
 	.vidioc_s_audio     		    = ivtv_s_audio,
diff --git a/drivers/media/video/ivtv/ivtv-irq.c b/drivers/media/video/ivtv/ivtv-irq.c
index 9c29e96..1b3b957 100644
--- a/drivers/media/video/ivtv/ivtv-irq.c
+++ b/drivers/media/video/ivtv/ivtv-irq.c
@@ -288,13 +288,13 @@
 			ivtv_process_vbi_data(itv, buf, 0, s->type);
 			s->q_dma.bytesused += buf->bytesused;
 		}
-		if (s->id == -1) {
+		if (s->fh == NULL) {
 			ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
 			return;
 		}
 	}
 	ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused);
-	if (s->id != -1)
+	if (s->fh)
 		wake_up(&s->waitq);
 }
 
diff --git a/drivers/media/video/ivtv/ivtv-streams.c b/drivers/media/video/ivtv/ivtv-streams.c
index e7794dc..c6e28b4 100644
--- a/drivers/media/video/ivtv/ivtv-streams.c
+++ b/drivers/media/video/ivtv/ivtv-streams.c
@@ -159,7 +159,6 @@
 		s->buffers = (itv->options.kilobytes[type] * 1024 + s->buf_size - 1) / s->buf_size;
 	spin_lock_init(&s->qlock);
 	init_waitqueue_head(&s->waitq);
-	s->id = -1;
 	s->sg_handle = IVTV_DMA_UNMAPPED;
 	ivtv_queue_init(&s->q_free);
 	ivtv_queue_init(&s->q_full);
@@ -214,6 +213,7 @@
 	s->vdev->fops = ivtv_stream_info[type].fops;
 	s->vdev->release = video_device_release;
 	s->vdev->tvnorms = V4L2_STD_ALL;
+	s->vdev->lock = &itv->serialize_lock;
 	set_bit(V4L2_FL_USE_FH_PRIO, &s->vdev->flags);
 	ivtv_set_funcs(s->vdev);
 	return 0;
diff --git a/drivers/media/video/ivtv/ivtv-yuv.c b/drivers/media/video/ivtv/ivtv-yuv.c
index dcbab6a..2ad65eb 100644
--- a/drivers/media/video/ivtv/ivtv-yuv.c
+++ b/drivers/media/video/ivtv/ivtv-yuv.c
@@ -1149,23 +1149,37 @@
 {
 	struct yuv_playback_info *yi = &itv->yuv_info;
 	struct ivtv_dma_frame dma_args;
+	int res;
 
 	ivtv_yuv_setup_stream_frame(itv);
 
 	/* We only need to supply source addresses for this */
 	dma_args.y_source = src;
 	dma_args.uv_source = src + 720 * ((yi->v4l2_src_h + 31) & ~31);
-	return ivtv_yuv_udma_frame(itv, &dma_args);
+	/* Wait for frame DMA. Note that serialize_lock is held, so unlock
+	   it while we are waiting and lock it again afterwards to allow
+	   other processes to access the driver. */
+	mutex_unlock(&itv->serialize_lock);
+	res = ivtv_yuv_udma_frame(itv, &dma_args);
+	mutex_lock(&itv->serialize_lock);
+	return res;
 }
 
 /* IVTV_IOC_DMA_FRAME ioctl handler */
 int ivtv_yuv_prep_frame(struct ivtv *itv, struct ivtv_dma_frame *args)
 {
-/*	IVTV_DEBUG_INFO("yuv_prep_frame\n"); */
+	int res;
 
+/*	IVTV_DEBUG_INFO("yuv_prep_frame\n"); */
 	ivtv_yuv_next_free(itv);
 	ivtv_yuv_setup_frame(itv, args);
-	return ivtv_yuv_udma_frame(itv, args);
+	/* Wait for frame DMA. Note that serialize_lock is held, so unlock
+	   it while we are waiting and lock it again afterwards to allow
+	   other processes to access the driver. */
+	mutex_unlock(&itv->serialize_lock);
+	res = ivtv_yuv_udma_frame(itv, args);
+	mutex_lock(&itv->serialize_lock);
+	return res;
 }
 
 void ivtv_yuv_close(struct ivtv *itv)
@@ -1174,7 +1188,9 @@
 	int h_filter, v_filter_1, v_filter_2;
 
 	IVTV_DEBUG_YUV("ivtv_yuv_close\n");
+	mutex_unlock(&itv->serialize_lock);
 	ivtv_waitq(&itv->vsync_waitq);
+	mutex_lock(&itv->serialize_lock);
 
 	yi->running = 0;
 	atomic_set(&yi->next_dma_frame, -1);
diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
index a277f95..1fb7d5b 100644
--- a/drivers/media/video/omap/omap_vout.c
+++ b/drivers/media/video/omap/omap_vout.c
@@ -1042,7 +1042,8 @@
 	strlcpy(cap->driver, VOUT_NAME, sizeof(cap->driver));
 	strlcpy(cap->card, vout->vfd->name, sizeof(cap->card));
 	cap->bus_info[0] = '\0';
-	cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT;
+	cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT |
+		V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
 
 	return 0;
 }
@@ -1825,7 +1826,9 @@
 	ovid = &vout->vid_info;
 	ovl = ovid->overlays[0];
 
-	a->flags = 0x0;
+	/* The video overlay must stay within the framebuffer and can't be
+	   positioned independently. */
+	a->flags = V4L2_FBUF_FLAG_OVERLAY;
 	a->capability = V4L2_FBUF_CAP_LOCAL_ALPHA | V4L2_FBUF_CAP_CHROMAKEY
 		| V4L2_FBUF_CAP_SRC_CHROMAKEY;
 
diff --git a/drivers/media/video/pwc/pwc-ctrl.c b/drivers/media/video/pwc/pwc-ctrl.c
index 905d41d..1f506fd 100644
--- a/drivers/media/video/pwc/pwc-ctrl.c
+++ b/drivers/media/video/pwc/pwc-ctrl.c
@@ -104,47 +104,16 @@
 
 /****************************************************************************/
 
-static int _send_control_msg(struct pwc_device *pdev,
-	u8 request, u16 value, int index, void *buf, int buflen)
-{
-	int rc;
-	void *kbuf = NULL;
-
-	if (buflen) {
-		kbuf = kmemdup(buf, buflen, GFP_KERNEL); /* not allowed on stack */
-		if (kbuf == NULL)
-			return -ENOMEM;
-	}
-
-	rc = usb_control_msg(pdev->udev, usb_sndctrlpipe(pdev->udev, 0),
-		request,
-		USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
-		value,
-		index,
-		kbuf, buflen, USB_CTRL_SET_TIMEOUT);
-
-	kfree(kbuf);
-	return rc;
-}
-
 static int recv_control_msg(struct pwc_device *pdev,
-	u8 request, u16 value, void *buf, int buflen)
+	u8 request, u16 value, int recv_count)
 {
 	int rc;
-	void *kbuf = kmalloc(buflen, GFP_KERNEL); /* not allowed on stack */
-
-	if (kbuf == NULL)
-		return -ENOMEM;
 
 	rc = usb_control_msg(pdev->udev, usb_rcvctrlpipe(pdev->udev, 0),
 		request,
 		USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
-		value,
-		pdev->vcinterface,
-		kbuf, buflen, USB_CTRL_GET_TIMEOUT);
-	memcpy(buf, kbuf, buflen);
-	kfree(kbuf);
-
+		value, pdev->vcinterface,
+		pdev->ctrl_buf, recv_count, USB_CTRL_GET_TIMEOUT);
 	if (rc < 0)
 		PWC_ERROR("recv_control_msg error %d req %02x val %04x\n",
 			  rc, request, value);
@@ -152,27 +121,39 @@
 }
 
 static inline int send_video_command(struct pwc_device *pdev,
-	int index, void *buf, int buflen)
+	int index, const unsigned char *buf, int buflen)
 {
-	return _send_control_msg(pdev,
-		SET_EP_STREAM_CTL,
-		VIDEO_OUTPUT_CONTROL_FORMATTER,
-		index,
-		buf, buflen);
+	int rc;
+
+	memcpy(pdev->ctrl_buf, buf, buflen);
+
+	rc = usb_control_msg(pdev->udev, usb_sndctrlpipe(pdev->udev, 0),
+			SET_EP_STREAM_CTL,
+			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+			VIDEO_OUTPUT_CONTROL_FORMATTER, index,
+			pdev->ctrl_buf, buflen, USB_CTRL_SET_TIMEOUT);
+	if (rc >= 0)
+		memcpy(pdev->cmd_buf, buf, buflen);
+	else
+		PWC_ERROR("send_video_command error %d\n", rc);
+
+	return rc;
 }
 
 int send_control_msg(struct pwc_device *pdev,
 	u8 request, u16 value, void *buf, int buflen)
 {
-	return _send_control_msg(pdev,
-		request, value, pdev->vcinterface, buf, buflen);
+	return usb_control_msg(pdev->udev, usb_sndctrlpipe(pdev->udev, 0),
+			request,
+			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+			value, pdev->vcinterface,
+			buf, buflen, USB_CTRL_SET_TIMEOUT);
 }
 
-static int set_video_mode_Nala(struct pwc_device *pdev, int size, int frames,
-			       int *compression)
+static int set_video_mode_Nala(struct pwc_device *pdev, int size, int pixfmt,
+			       int frames, int *compression, int send_to_cam)
 {
-	unsigned char buf[3];
-	int ret, fps;
+	int fps, ret = 0;
 	struct Nala_table_entry *pEntry;
 	int frames2frames[31] =
 	{ /* closest match of framerate */
@@ -194,30 +175,29 @@
 	  7              /* 30    */
 	};
 
-	if (size < 0 || size > PSZ_CIF || frames < 4 || frames > 25)
+	if (size < 0 || size > PSZ_CIF)
 		return -EINVAL;
+	if (frames < 4)
+		frames = 4;
+	else if (frames > 25)
+		frames = 25;
 	frames = frames2frames[frames];
 	fps = frames2table[frames];
 	pEntry = &Nala_table[size][fps];
 	if (pEntry->alternate == 0)
 		return -EINVAL;
 
-	memcpy(buf, pEntry->mode, 3);
-	ret = send_video_command(pdev, pdev->vendpoint, buf, 3);
-	if (ret < 0) {
-		PWC_DEBUG_MODULE("Failed to send video command... %d\n", ret);
+	if (send_to_cam)
+		ret = send_video_command(pdev, pdev->vendpoint,
+					 pEntry->mode, 3);
+	if (ret < 0)
 		return ret;
-	}
-	if (pEntry->compressed && pdev->pixfmt == V4L2_PIX_FMT_YUV420) {
-		ret = pwc_dec1_init(pdev, pdev->type, pdev->release, buf);
-		if (ret < 0)
-			return ret;
-	}
 
-	pdev->cmd_len = 3;
-	memcpy(pdev->cmd_buf, buf, 3);
+	if (pEntry->compressed && pixfmt == V4L2_PIX_FMT_YUV420)
+		pwc_dec1_init(pdev, pEntry->mode);
 
 	/* Set various parameters */
+	pdev->pixfmt = pixfmt;
 	pdev->vframes = frames;
 	pdev->valternate = pEntry->alternate;
 	pdev->width  = pwc_image_sizes[size][0];
@@ -243,18 +223,20 @@
 }
 
 
-static int set_video_mode_Timon(struct pwc_device *pdev, int size, int frames,
-	int *compression)
+static int set_video_mode_Timon(struct pwc_device *pdev, int size, int pixfmt,
+				int frames, int *compression, int send_to_cam)
 {
-	unsigned char buf[13];
 	const struct Timon_table_entry *pChoose;
-	int ret, fps;
+	int fps, ret = 0;
 
-	if (size >= PSZ_MAX || frames < 5 || frames > 30 ||
-	    *compression < 0 || *compression > 3)
+	if (size >= PSZ_MAX || *compression < 0 || *compression > 3)
 		return -EINVAL;
-	if (size == PSZ_VGA && frames > 15)
-		return -EINVAL;
+	if (frames < 5)
+		frames = 5;
+	else if (size == PSZ_VGA && frames > 15)
+		frames = 15;
+	else if (frames > 30)
+		frames = 30;
 	fps = (frames / 5) - 1;
 
 	/* Find a supported framerate with progressively higher compression */
@@ -268,22 +250,18 @@
 	if (pChoose == NULL || pChoose->alternate == 0)
 		return -ENOENT; /* Not supported. */
 
-	memcpy(buf, pChoose->mode, 13);
-	ret = send_video_command(pdev, pdev->vendpoint, buf, 13);
+	if (send_to_cam)
+		ret = send_video_command(pdev, pdev->vendpoint,
+					 pChoose->mode, 13);
 	if (ret < 0)
 		return ret;
 
-	if (pChoose->bandlength > 0 && pdev->pixfmt == V4L2_PIX_FMT_YUV420) {
-		ret = pwc_dec23_init(pdev, pdev->type, buf);
-		if (ret < 0)
-			return ret;
-	}
-
-	pdev->cmd_len = 13;
-	memcpy(pdev->cmd_buf, buf, 13);
+	if (pChoose->bandlength > 0 && pixfmt == V4L2_PIX_FMT_YUV420)
+		pwc_dec23_init(pdev, pChoose->mode);
 
 	/* Set various parameters */
-	pdev->vframes = frames;
+	pdev->pixfmt = pixfmt;
+	pdev->vframes = (fps + 1) * 5;
 	pdev->valternate = pChoose->alternate;
 	pdev->width  = pwc_image_sizes[size][0];
 	pdev->height = pwc_image_sizes[size][1];
@@ -296,18 +274,20 @@
 }
 
 
-static int set_video_mode_Kiara(struct pwc_device *pdev, int size, int frames,
-	int *compression)
+static int set_video_mode_Kiara(struct pwc_device *pdev, int size, int pixfmt,
+				int frames, int *compression, int send_to_cam)
 {
 	const struct Kiara_table_entry *pChoose = NULL;
-	int fps, ret;
-	unsigned char buf[12];
+	int fps, ret = 0;
 
-	if (size >= PSZ_MAX || frames < 5 || frames > 30 ||
-	    *compression < 0 || *compression > 3)
+	if (size >= PSZ_MAX || *compression < 0 || *compression > 3)
 		return -EINVAL;
-	if (size == PSZ_VGA && frames > 15)
-		return -EINVAL;
+	if (frames < 5)
+		frames = 5;
+	else if (size == PSZ_VGA && frames > 15)
+		frames = 15;
+	else if (frames > 30)
+		frames = 30;
 	fps = (frames / 5) - 1;
 
 	/* Find a supported framerate with progressively higher compression */
@@ -320,26 +300,18 @@
 	if (pChoose == NULL || pChoose->alternate == 0)
 		return -ENOENT; /* Not supported. */
 
-	PWC_TRACE("Using alternate setting %d.\n", pChoose->alternate);
-
-	/* usb_control_msg won't take staticly allocated arrays as argument?? */
-	memcpy(buf, pChoose->mode, 12);
-
 	/* Firmware bug: video endpoint is 5, but commands are sent to endpoint 4 */
-	ret = send_video_command(pdev, 4 /* pdev->vendpoint */, buf, 12);
+	if (send_to_cam)
+		ret = send_video_command(pdev, 4, pChoose->mode, 12);
 	if (ret < 0)
 		return ret;
 
-	if (pChoose->bandlength > 0 && pdev->pixfmt == V4L2_PIX_FMT_YUV420) {
-		ret = pwc_dec23_init(pdev, pdev->type, buf);
-		if (ret < 0)
-			return ret;
-	}
+	if (pChoose->bandlength > 0 && pixfmt == V4L2_PIX_FMT_YUV420)
+		pwc_dec23_init(pdev, pChoose->mode);
 
-	pdev->cmd_len = 12;
-	memcpy(pdev->cmd_buf, buf, 12);
 	/* All set and go */
-	pdev->vframes = frames;
+	pdev->pixfmt = pixfmt;
+	pdev->vframes = (fps + 1) * 5;
 	pdev->valternate = pChoose->alternate;
 	pdev->width  = pwc_image_sizes[size][0];
 	pdev->height = pwc_image_sizes[size][1];
@@ -354,22 +326,24 @@
 }
 
 int pwc_set_video_mode(struct pwc_device *pdev, int width, int height,
-	int frames, int *compression)
+	int pixfmt, int frames, int *compression, int send_to_cam)
 {
 	int ret, size;
 
-	PWC_DEBUG_FLOW("set_video_mode(%dx%d @ %d, pixfmt %08x).\n", width, height, frames, pdev->pixfmt);
+	PWC_DEBUG_FLOW("set_video_mode(%dx%d @ %d, pixfmt %08x).\n",
+		       width, height, frames, pixfmt);
 	size = pwc_get_size(pdev, width, height);
 	PWC_TRACE("decode_size = %d.\n", size);
 
 	if (DEVICE_USE_CODEC1(pdev->type)) {
-		ret = set_video_mode_Nala(pdev, size, frames, compression);
-
+		ret = set_video_mode_Nala(pdev, size, pixfmt, frames,
+					  compression, send_to_cam);
 	} else if (DEVICE_USE_CODEC3(pdev->type)) {
-		ret = set_video_mode_Kiara(pdev, size, frames, compression);
-
+		ret = set_video_mode_Kiara(pdev, size, pixfmt, frames,
+					   compression, send_to_cam);
 	} else {
-		ret = set_video_mode_Timon(pdev, size, frames, compression);
+		ret = set_video_mode_Timon(pdev, size, pixfmt, frames,
+					   compression, send_to_cam);
 	}
 	if (ret < 0) {
 		PWC_ERROR("Failed to set video mode %s@%d fps; return code = %d\n", size2name[size], frames, ret);
@@ -436,13 +410,12 @@
 int pwc_get_u8_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data)
 {
 	int ret;
-	u8 buf;
 
-	ret = recv_control_msg(pdev, request, value, &buf, sizeof(buf));
+	ret = recv_control_msg(pdev, request, value, 1);
 	if (ret < 0)
 		return ret;
 
-	*data = buf;
+	*data = pdev->ctrl_buf[0];
 	return 0;
 }
 
@@ -450,7 +423,8 @@
 {
 	int ret;
 
-	ret = send_control_msg(pdev, request, value, &data, sizeof(data));
+	pdev->ctrl_buf[0] = data;
+	ret = send_control_msg(pdev, request, value, pdev->ctrl_buf, 1);
 	if (ret < 0)
 		return ret;
 
@@ -460,37 +434,34 @@
 int pwc_get_s8_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data)
 {
 	int ret;
-	s8 buf;
 
-	ret = recv_control_msg(pdev, request, value, &buf, sizeof(buf));
+	ret = recv_control_msg(pdev, request, value, 1);
 	if (ret < 0)
 		return ret;
 
-	*data = buf;
+	*data = ((s8 *)pdev->ctrl_buf)[0];
 	return 0;
 }
 
 int pwc_get_u16_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data)
 {
 	int ret;
-	u8 buf[2];
 
-	ret = recv_control_msg(pdev, request, value, buf, sizeof(buf));
+	ret = recv_control_msg(pdev, request, value, 2);
 	if (ret < 0)
 		return ret;
 
-	*data = (buf[1] << 8) | buf[0];
+	*data = (pdev->ctrl_buf[1] << 8) | pdev->ctrl_buf[0];
 	return 0;
 }
 
 int pwc_set_u16_ctrl(struct pwc_device *pdev, u8 request, u16 value, u16 data)
 {
 	int ret;
-	u8 buf[2];
 
-	buf[0] = data & 0xff;
-	buf[1] = data >> 8;
-	ret = send_control_msg(pdev, request, value, buf, sizeof(buf));
+	pdev->ctrl_buf[0] = data & 0xff;
+	pdev->ctrl_buf[1] = data >> 8;
+	ret = send_control_msg(pdev, request, value, pdev->ctrl_buf, 2);
 	if (ret < 0)
 		return ret;
 
@@ -511,7 +482,6 @@
 /* POWER */
 void pwc_camera_power(struct pwc_device *pdev, int power)
 {
-	char buf;
 	int r;
 
 	if (!pdev->power_save)
@@ -521,13 +491,11 @@
 		return;	/* Not supported by Nala or Timon < release 6 */
 
 	if (power)
-		buf = 0x00; /* active */
+		pdev->ctrl_buf[0] = 0x00; /* active */
 	else
-		buf = 0xFF; /* power save */
-	r = send_control_msg(pdev,
-		SET_STATUS_CTL, SET_POWER_SAVE_MODE_FORMATTER,
-		&buf, sizeof(buf));
-
+		pdev->ctrl_buf[0] = 0xFF; /* power save */
+	r = send_control_msg(pdev, SET_STATUS_CTL,
+		SET_POWER_SAVE_MODE_FORMATTER, pdev->ctrl_buf, 1);
 	if (r < 0)
 		PWC_ERROR("Failed to power %s camera (%d)\n",
 			  power ? "on" : "off", r);
@@ -535,7 +503,6 @@
 
 int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value)
 {
-	unsigned char buf[2];
 	int r;
 
 	if (pdev->type < 730)
@@ -551,11 +518,11 @@
 	if (off_value > 0xff)
 		off_value = 0xff;
 
-	buf[0] = on_value;
-	buf[1] = off_value;
+	pdev->ctrl_buf[0] = on_value;
+	pdev->ctrl_buf[1] = off_value;
 
 	r = send_control_msg(pdev,
-		SET_STATUS_CTL, LED_FORMATTER, &buf, sizeof(buf));
+		SET_STATUS_CTL, LED_FORMATTER, pdev->ctrl_buf, 2);
 	if (r < 0)
 		PWC_ERROR("Failed to set LED on/off time (%d)\n", r);
 
@@ -565,7 +532,6 @@
 #ifdef CONFIG_USB_PWC_DEBUG
 int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor)
 {
-	unsigned char buf;
 	int ret = -1, request;
 
 	if (pdev->type < 675)
@@ -575,14 +541,13 @@
 	else
 		request = SENSOR_TYPE_FORMATTER2;
 
-	ret = recv_control_msg(pdev,
-		GET_STATUS_CTL, request, &buf, sizeof(buf));
+	ret = recv_control_msg(pdev, GET_STATUS_CTL, request, 1);
 	if (ret < 0)
 		return ret;
 	if (pdev->type < 675)
-		*sensor = buf | 0x100;
+		*sensor = pdev->ctrl_buf[0] | 0x100;
 	else
-		*sensor = buf;
+		*sensor = pdev->ctrl_buf[0];
 	return 0;
 }
 #endif
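
The pwc-ctrl.c hunks above drop the per-call stack buffers in favour of a single kmalloc()ed pdev->ctrl_buf. A plausible motivation, not spelled out in the diff itself, is that buffers handed to USB control transfers must be DMA-capable, which on-stack storage is not guaranteed to be. A minimal sketch of the resulting pattern, modelled on the pwc_set_u8_ctrl() hunk and assuming the driver's existing send_control_msg() helper (the function name pwc_set_u8_sketch is made up for illustration):

static int pwc_set_u8_sketch(struct pwc_device *pdev, u8 request,
			     u16 value, u8 data)
{
	/* Stage the value in the DMA-safe control buffer... */
	pdev->ctrl_buf[0] = data;
	/* ...and hand that buffer, never a stack variable, to the USB layer. */
	return send_control_msg(pdev, request, value, pdev->ctrl_buf, 1);
}
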
diff --git a/drivers/media/video/pwc/pwc-dec1.c b/drivers/media/video/pwc/pwc-dec1.c
index be0e02c..e899036 100644
--- a/drivers/media/video/pwc/pwc-dec1.c
+++ b/drivers/media/video/pwc/pwc-dec1.c
@@ -22,19 +22,11 @@
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
-#include "pwc-dec1.h"
+#include "pwc.h"
 
-int pwc_dec1_init(struct pwc_device *pwc, int type, int release, void *buffer)
+void pwc_dec1_init(struct pwc_device *pdev, const unsigned char *cmd)
 {
-	struct pwc_dec1_private *pdec;
+	struct pwc_dec1_private *pdec = &pdev->dec1;
 
-	if (pwc->decompress_data == NULL) {
-		pdec = kmalloc(sizeof(struct pwc_dec1_private), GFP_KERNEL);
-		if (pdec == NULL)
-			return -ENOMEM;
-		pwc->decompress_data = pdec;
-	}
-	pdec = pwc->decompress_data;
-
-	return 0;
+	pdec->version = pdev->release;
 }
diff --git a/drivers/media/video/pwc/pwc-dec1.h b/drivers/media/video/pwc/pwc-dec1.h
index a57d860..c565ef8 100644
--- a/drivers/media/video/pwc/pwc-dec1.h
+++ b/drivers/media/video/pwc/pwc-dec1.h
@@ -25,13 +25,15 @@
 #ifndef PWC_DEC1_H
 #define PWC_DEC1_H
 
-#include "pwc.h"
+#include <linux/mutex.h>
+
+struct pwc_device;
 
 struct pwc_dec1_private
 {
 	int version;
 };
 
-int pwc_dec1_init(struct pwc_device *pwc, int type, int release, void *buffer);
+void pwc_dec1_init(struct pwc_device *pdev, const unsigned char *cmd);
 
 #endif
diff --git a/drivers/media/video/pwc/pwc-dec23.c b/drivers/media/video/pwc/pwc-dec23.c
index 2c67091..3792fed 100644
--- a/drivers/media/video/pwc/pwc-dec23.c
+++ b/drivers/media/video/pwc/pwc-dec23.c
@@ -294,22 +294,17 @@
 
 
 /* If the type or the command change, we rebuild the lookup table */
-int pwc_dec23_init(struct pwc_device *pwc, int type, unsigned char *cmd)
+void pwc_dec23_init(struct pwc_device *pdev, const unsigned char *cmd)
 {
 	int flags, version, shift, i;
-	struct pwc_dec23_private *pdec;
-
-	if (pwc->decompress_data == NULL) {
-		pdec = kmalloc(sizeof(struct pwc_dec23_private), GFP_KERNEL);
-		if (pdec == NULL)
-			return -ENOMEM;
-		pwc->decompress_data = pdec;
-	}
-	pdec = pwc->decompress_data;
+	struct pwc_dec23_private *pdec = &pdev->dec23;
 
 	mutex_init(&pdec->lock);
 
-	if (DEVICE_USE_CODEC3(type)) {
+	if (pdec->last_cmd_valid && pdec->last_cmd == cmd[2])
+		return;
+
+	if (DEVICE_USE_CODEC3(pdev->type)) {
 		flags = cmd[2] & 0x18;
 		if (flags == 8)
 			pdec->nbits = 7;	/* More bits, mean more bits to encode the stream, but better quality */
@@ -356,7 +351,8 @@
 		pwc_crop_table[MAX_OUTER_CROP_VALUE+256+i] = 255;
 #endif
 
-	return 0;
+	pdec->last_cmd = cmd[2];
+	pdec->last_cmd_valid = 1;
 }
 
 /*
@@ -659,12 +655,12 @@
  * src: raw data
  * dst: image output
  */
-void pwc_dec23_decompress(const struct pwc_device *pwc,
+void pwc_dec23_decompress(struct pwc_device *pdev,
 			  const void *src,
 			  void *dst)
 {
 	int bandlines_left, bytes_per_block;
-	struct pwc_dec23_private *pdec = pwc->decompress_data;
+	struct pwc_dec23_private *pdec = &pdev->dec23;
 
 	/* YUV420P image format */
 	unsigned char *pout_planar_y;
@@ -674,23 +670,22 @@
 
 	mutex_lock(&pdec->lock);
 
-	bandlines_left = pwc->height / 4;
-	bytes_per_block = pwc->width * 4;
-	plane_size = pwc->height * pwc->width;
+	bandlines_left = pdev->height / 4;
+	bytes_per_block = pdev->width * 4;
+	plane_size = pdev->height * pdev->width;
 
 	pout_planar_y = dst;
 	pout_planar_u = dst + plane_size;
 	pout_planar_v = dst + plane_size + plane_size / 4;
 
 	while (bandlines_left--) {
-		DecompressBand23(pwc->decompress_data,
-				 src,
+		DecompressBand23(pdec, src,
 				 pout_planar_y, pout_planar_u, pout_planar_v,
-				 pwc->width, pwc->width);
-		src += pwc->vbandlength;
+				 pdev->width, pdev->width);
+		src += pdev->vbandlength;
 		pout_planar_y += bytes_per_block;
-		pout_planar_u += pwc->width;
-		pout_planar_v += pwc->width;
+		pout_planar_u += pdev->width;
+		pout_planar_v += pdev->width;
 	}
 	mutex_unlock(&pdec->lock);
 }
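
The pwc_dec23_init() change above also adds a small cache: the lookup tables are only rebuilt when the relevant compression command byte (cmd[2]) differs from the one seen on the previous call. A stripped-down, plain-C sketch of that rebuild-on-change pattern (names are illustrative, not the driver's):

struct dec_state {
	unsigned char last_cmd;
	unsigned char last_cmd_valid;
	/* ...lookup tables... */
};

static void dec_init(struct dec_state *s, const unsigned char *cmd)
{
	if (s->last_cmd_valid && s->last_cmd == cmd[2])
		return;			/* tables already match this mode */

	/* expensive table construction would happen here */

	s->last_cmd = cmd[2];
	s->last_cmd_valid = 1;
}
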
diff --git a/drivers/media/video/pwc/pwc-dec23.h b/drivers/media/video/pwc/pwc-dec23.h
index d64a3c2..c655b1c 100644
--- a/drivers/media/video/pwc/pwc-dec23.h
+++ b/drivers/media/video/pwc/pwc-dec23.h
@@ -25,17 +25,20 @@
 #ifndef PWC_DEC23_H
 #define PWC_DEC23_H
 
-#include "pwc.h"
+struct pwc_device;
 
 struct pwc_dec23_private
 {
 	struct mutex lock;
 
+	unsigned char last_cmd, last_cmd_valid;
+
   unsigned int scalebits;
   unsigned int nbitsmask, nbits; /* Number of bits of a color in the compressed stream */
 
   unsigned int reservoir;
   unsigned int nbits_in_reservoir;
+
   const unsigned char *stream;
   int temp_colors[16];
 
@@ -51,8 +54,8 @@
 
 };
 
-int pwc_dec23_init(struct pwc_device *pwc, int type, unsigned char *cmd);
-void pwc_dec23_decompress(const struct pwc_device *pwc,
+void pwc_dec23_init(struct pwc_device *pdev, const unsigned char *cmd);
+void pwc_dec23_decompress(struct pwc_device *pdev,
 			  const void *src,
 			  void *dst);
 #endif
diff --git a/drivers/media/video/pwc/pwc-if.c b/drivers/media/video/pwc/pwc-if.c
index 943d37a..122fbd0 100644
--- a/drivers/media/video/pwc/pwc-if.c
+++ b/drivers/media/video/pwc/pwc-if.c
@@ -128,18 +128,11 @@
 #define MAX_DEV_HINTS	20
 #define MAX_ISOC_ERRORS	20
 
-static int default_fps = 10;
 #ifdef CONFIG_USB_PWC_DEBUG
 	int pwc_trace = PWC_DEBUG_LEVEL;
 #endif
 static int power_save = -1;
-static int led_on = 100, led_off; /* defaults to LED that is on while in use */
-static struct {
-	int type;
-	char serial_number[30];
-	int device_node;
-	struct pwc_device *pdev;
-} device_hint[MAX_DEV_HINTS];
+static int leds[2] = { 100, 0 };
 
 /***/
 
@@ -386,8 +379,8 @@
 retry:
 	/* We first try with low compression and then retry with a higher
 	   compression setting if there is not enough bandwidth. */
-	ret = pwc_set_video_mode(pdev, pdev->width, pdev->height,
-				 pdev->vframes, &compression);
+	ret = pwc_set_video_mode(pdev, pdev->width, pdev->height, pdev->pixfmt,
+				 pdev->vframes, &compression, 1);
 
 	/* Get the current alternate interface, adjust packet size */
 	intf = usb_ifnum_to_if(udev, 0);
@@ -597,23 +590,9 @@
 static void pwc_video_release(struct v4l2_device *v)
 {
 	struct pwc_device *pdev = container_of(v, struct pwc_device, v4l2_dev);
-	int hint;
-
-	/* search device_hint[] table if we occupy a slot, by any chance */
-	for (hint = 0; hint < MAX_DEV_HINTS; hint++)
-		if (device_hint[hint].pdev == pdev)
-			device_hint[hint].pdev = NULL;
-
-	/* Free intermediate decompression buffer & tables */
-	if (pdev->decompress_data != NULL) {
-		PWC_DEBUG_MEMORY("Freeing decompression buffer at %p.\n",
-				 pdev->decompress_data);
-		kfree(pdev->decompress_data);
-		pdev->decompress_data = NULL;
-	}
 
 	v4l2_ctrl_handler_free(&pdev->ctrl_handler);
-
+	kfree(pdev->ctrl_buf);
 	kfree(pdev);
 }
 
@@ -758,7 +737,7 @@
 
 	/* Turn on camera and set LEDS on */
 	pwc_camera_power(pdev, 1);
-	pwc_set_leds(pdev, led_on, led_off);
+	pwc_set_leds(pdev, leds[0], leds[1]);
 
 	r = pwc_isoc_init(pdev);
 	if (r) {
@@ -813,10 +792,9 @@
 	struct usb_device *udev = interface_to_usbdev(intf);
 	struct pwc_device *pdev = NULL;
 	int vendor_id, product_id, type_id;
-	int hint, rc;
+	int rc;
 	int features = 0;
 	int compression = 0;
-	int video_nr = -1; /* default: use next available device */
 	int my_power_save = power_save;
 	char serial_number[30], *name;
 
@@ -1076,7 +1054,6 @@
 		return -ENOMEM;
 	}
 	pdev->type = type_id;
-	pdev->vframes = default_fps;
 	pdev->features = features;
 	pwc_construct(pdev); /* set min/max sizes correct */
 
@@ -1107,24 +1084,14 @@
 	pdev->release = le16_to_cpu(udev->descriptor.bcdDevice);
 	PWC_DEBUG_PROBE("Release: %04x\n", pdev->release);
 
-	/* Now search device_hint[] table for a match, so we can hint a node number. */
-	for (hint = 0; hint < MAX_DEV_HINTS; hint++) {
-		if (((device_hint[hint].type == -1) || (device_hint[hint].type == pdev->type)) &&
-		     (device_hint[hint].pdev == NULL)) {
-			/* so far, so good... try serial number */
-			if ((device_hint[hint].serial_number[0] == '*') || !strcmp(device_hint[hint].serial_number, serial_number)) {
-				/* match! */
-				video_nr = device_hint[hint].device_node;
-				PWC_DEBUG_PROBE("Found hint, will try to register as /dev/video%d\n", video_nr);
-				break;
-			}
-		}
+	/* Allocate USB command buffers */
+	pdev->ctrl_buf = kmalloc(sizeof(pdev->cmd_buf), GFP_KERNEL);
+	if (!pdev->ctrl_buf) {
+		PWC_ERROR("Oops, could not allocate memory for pwc_device.\n");
+		rc = -ENOMEM;
+		goto err_free_mem;
 	}
 
-	/* occupy slot */
-	if (hint < MAX_DEV_HINTS)
-		device_hint[hint].pdev = pdev;
-
 #ifdef CONFIG_USB_PWC_DEBUG
 	/* Query sensor type */
 	if (pwc_get_cmos_sensor(pdev, &rc) >= 0) {
@@ -1138,8 +1105,8 @@
 	pwc_set_leds(pdev, 0, 0);
 
 	/* Set up initial video mode */
-	rc = pwc_set_video_mode(pdev, MAX_WIDTH, MAX_HEIGHT, pdev->vframes,
-				&compression);
+	rc = pwc_set_video_mode(pdev, MAX_WIDTH, MAX_HEIGHT,
+				V4L2_PIX_FMT_YUV420, 30, &compression, 1);
 	if (rc)
 		goto err_free_mem;
 
@@ -1164,7 +1131,7 @@
 	pdev->v4l2_dev.ctrl_handler = &pdev->ctrl_handler;
 	pdev->vdev.v4l2_dev = &pdev->v4l2_dev;
 
-	rc = video_register_device(&pdev->vdev, VFL_TYPE_GRABBER, video_nr);
+	rc = video_register_device(&pdev->vdev, VFL_TYPE_GRABBER, -1);
 	if (rc < 0) {
 		PWC_ERROR("Failed to register as video device (%d).\n", rc);
 		goto err_unregister_v4l2_dev;
@@ -1207,8 +1174,7 @@
 err_free_controls:
 	v4l2_ctrl_handler_free(&pdev->ctrl_handler);
 err_free_mem:
-	if (hint < MAX_DEV_HINTS)
-		device_hint[hint].pdev = NULL;
+	kfree(pdev->ctrl_buf);
 	kfree(pdev);
 	return rc;
 }
@@ -1243,27 +1209,19 @@
  * Initialization code & module stuff
  */
 
-static int fps;
-static int leds[2] = { -1, -1 };
 static unsigned int leds_nargs;
-static char *dev_hint[MAX_DEV_HINTS];
-static unsigned int dev_hint_nargs;
 
-module_param(fps, int, 0444);
 #ifdef CONFIG_USB_PWC_DEBUG
 module_param_named(trace, pwc_trace, int, 0644);
 #endif
 module_param(power_save, int, 0644);
 module_param_array(leds, int, &leds_nargs, 0444);
-module_param_array(dev_hint, charp, &dev_hint_nargs, 0444);
 
-MODULE_PARM_DESC(fps, "Initial frames per second. Varies with model, useful range 5-30");
 #ifdef CONFIG_USB_PWC_DEBUG
 MODULE_PARM_DESC(trace, "For debugging purposes");
 #endif
 MODULE_PARM_DESC(power_save, "Turn power saving for new cameras on or off");
 MODULE_PARM_DESC(leds, "LED on,off time in milliseconds");
-MODULE_PARM_DESC(dev_hint, "Device node hints");
 
 MODULE_DESCRIPTION("Philips & OEM USB webcam driver");
 MODULE_AUTHOR("Luc Saillard <luc@saillard.org>");
@@ -1273,114 +1231,13 @@
 
 static int __init usb_pwc_init(void)
 {
-	int i;
-
-#ifdef CONFIG_USB_PWC_DEBUG
-	PWC_INFO("Philips webcam module version " PWC_VERSION " loaded.\n");
-	PWC_INFO("Supports Philips PCA645/646, PCVC675/680/690, PCVC720[40]/730/740/750 & PCVC830/840.\n");
-	PWC_INFO("Also supports the Askey VC010, various Logitech Quickcams, Samsung MPC-C10 and MPC-C30,\n");
-	PWC_INFO("the Creative WebCam 5 & Pro Ex, SOTEC Afina Eye and Visionite VCS-UC300 and VCS-UM100.\n");
-
-	if (pwc_trace >= 0) {
-		PWC_DEBUG_MODULE("Trace options: 0x%04x\n", pwc_trace);
-	}
-#endif
-
-	if (fps) {
-		if (fps < 4 || fps > 30) {
-			PWC_ERROR("Framerate out of bounds (4-30).\n");
-			return -EINVAL;
-		}
-		default_fps = fps;
-		PWC_DEBUG_MODULE("Default framerate set to %d.\n", default_fps);
-	}
-
-	if (leds[0] >= 0)
-		led_on = leds[0];
-	if (leds[1] >= 0)
-		led_off = leds[1];
-
-	/* Big device node whoopla. Basically, it allows you to assign a
-	   device node (/dev/videoX) to a camera, based on its type
-	   & serial number. The format is [type[.serialnumber]:]node.
-
-	   Any camera that isn't matched by these rules gets the next
-	   available free device node.
-	 */
-	for (i = 0; i < MAX_DEV_HINTS; i++) {
-		char *s, *colon, *dot;
-
-		/* This loop also initializes the array */
-		device_hint[i].pdev = NULL;
-		s = dev_hint[i];
-		if (s != NULL && *s != '\0') {
-			device_hint[i].type = -1; /* wildcard */
-			strcpy(device_hint[i].serial_number, "*");
-
-			/* parse string: chop at ':' & '/' */
-			colon = dot = s;
-			while (*colon != '\0' && *colon != ':')
-				colon++;
-			while (*dot != '\0' && *dot != '.')
-				dot++;
-			/* Few sanity checks */
-			if (*dot != '\0' && dot > colon) {
-				PWC_ERROR("Malformed camera hint: the colon must be after the dot.\n");
-				return -EINVAL;
-			}
-
-			if (*colon == '\0') {
-				/* No colon */
-				if (*dot != '\0') {
-					PWC_ERROR("Malformed camera hint: no colon + device node given.\n");
-					return -EINVAL;
-				}
-				else {
-					/* No type or serial number specified, just a number. */
-					device_hint[i].device_node =
-						simple_strtol(s, NULL, 10);
-				}
-			}
-			else {
-				/* There's a colon, so we have at least a type and a device node */
-				device_hint[i].type =
-					simple_strtol(s, NULL, 10);
-				device_hint[i].device_node =
-					simple_strtol(colon + 1, NULL, 10);
-				if (*dot != '\0') {
-					/* There's a serial number as well */
-					int k;
-
-					dot++;
-					k = 0;
-					while (*dot != ':' && k < 29) {
-						device_hint[i].serial_number[k++] = *dot;
-						dot++;
-					}
-					device_hint[i].serial_number[k] = '\0';
-				}
-			}
-			PWC_TRACE("device_hint[%d]:\n", i);
-			PWC_TRACE("  type    : %d\n", device_hint[i].type);
-			PWC_TRACE("  serial# : %s\n", device_hint[i].serial_number);
-			PWC_TRACE("  node    : %d\n", device_hint[i].device_node);
-		}
-		else
-			device_hint[i].type = 0; /* not filled */
-	} /* ..for MAX_DEV_HINTS */
-
-	PWC_DEBUG_PROBE("Registering driver at address 0x%p.\n", &pwc_driver);
 	return usb_register(&pwc_driver);
 }
 
 static void __exit usb_pwc_exit(void)
 {
-	PWC_DEBUG_MODULE("Deregistering driver.\n");
 	usb_deregister(&pwc_driver);
-	PWC_INFO("Philips webcam module removed.\n");
 }
 
 module_init(usb_pwc_init);
 module_exit(usb_pwc_exit);
-
-/* vim: set cino= formatoptions=croql cindent shiftwidth=8 tabstop=8: */
diff --git a/drivers/media/video/pwc/pwc-misc.c b/drivers/media/video/pwc/pwc-misc.c
index 23a55b5..9be5adf 100644
--- a/drivers/media/video/pwc/pwc-misc.c
+++ b/drivers/media/video/pwc/pwc-misc.c
@@ -90,5 +90,4 @@
 		pdev->frame_header_size = 0;
 		pdev->frame_trailer_size = 0;
 	}
-	pdev->pixfmt = V4L2_PIX_FMT_YUV420; /* default */
 }
diff --git a/drivers/media/video/pwc/pwc-v4l.c b/drivers/media/video/pwc/pwc-v4l.c
index 80e2584..f495eeb 100644
--- a/drivers/media/video/pwc/pwc-v4l.c
+++ b/drivers/media/video/pwc/pwc-v4l.c
@@ -493,16 +493,11 @@
 			(pixelformat>>24)&255);
 
 	ret = pwc_set_video_mode(pdev, f->fmt.pix.width, f->fmt.pix.height,
-				 pdev->vframes, &compression);
+				 pixelformat, 30, &compression, 0);
 
 	PWC_DEBUG_IOCTL("pwc_set_video_mode(), return=%d\n", ret);
 
-	if (ret == 0) {
-		pdev->pixfmt = pixelformat;
-		pwc_vidioc_fill_fmt(f, pdev->width, pdev->height,
-				    pdev->pixfmt);
-	}
-
+	pwc_vidioc_fill_fmt(f, pdev->width, pdev->height, pdev->pixfmt);
 leave:
 	mutex_unlock(&pdev->udevlock);
 	return ret;
@@ -777,33 +772,33 @@
 static int pwc_set_motor(struct pwc_device *pdev)
 {
 	int ret;
-	u8 buf[4];
 
-	buf[0] = 0;
+	pdev->ctrl_buf[0] = 0;
 	if (pdev->motor_pan_reset->is_new)
-		buf[0] |= 0x01;
+		pdev->ctrl_buf[0] |= 0x01;
 	if (pdev->motor_tilt_reset->is_new)
-		buf[0] |= 0x02;
+		pdev->ctrl_buf[0] |= 0x02;
 	if (pdev->motor_pan_reset->is_new || pdev->motor_tilt_reset->is_new) {
 		ret = send_control_msg(pdev, SET_MPT_CTL,
-				       PT_RESET_CONTROL_FORMATTER, buf, 1);
+				       PT_RESET_CONTROL_FORMATTER,
+				       pdev->ctrl_buf, 1);
 		if (ret < 0)
 			return ret;
 	}
 
-	memset(buf, 0, sizeof(buf));
+	memset(pdev->ctrl_buf, 0, 4);
 	if (pdev->motor_pan->is_new) {
-		buf[0] = pdev->motor_pan->val & 0xFF;
-		buf[1] = (pdev->motor_pan->val >> 8);
+		pdev->ctrl_buf[0] = pdev->motor_pan->val & 0xFF;
+		pdev->ctrl_buf[1] = (pdev->motor_pan->val >> 8);
 	}
 	if (pdev->motor_tilt->is_new) {
-		buf[2] = pdev->motor_tilt->val & 0xFF;
-		buf[3] = (pdev->motor_tilt->val >> 8);
+		pdev->ctrl_buf[2] = pdev->motor_tilt->val & 0xFF;
+		pdev->ctrl_buf[3] = (pdev->motor_tilt->val >> 8);
 	}
 	if (pdev->motor_pan->is_new || pdev->motor_tilt->is_new) {
 		ret = send_control_msg(pdev, SET_MPT_CTL,
 				       PT_RELATIVE_CONTROL_FORMATTER,
-				       buf, sizeof(buf));
+				       pdev->ctrl_buf, 4);
 		if (ret < 0)
 			return ret;
 	}
@@ -1094,6 +1089,63 @@
 	return 0;
 }
 
+static int pwc_g_parm(struct file *file, void *fh,
+		      struct v4l2_streamparm *parm)
+{
+	struct pwc_device *pdev = video_drvdata(file);
+
+	if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return -EINVAL;
+
+	memset(parm, 0, sizeof(*parm));
+
+	parm->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	parm->parm.capture.readbuffers = MIN_FRAMES;
+	parm->parm.capture.capability |= V4L2_CAP_TIMEPERFRAME;
+	parm->parm.capture.timeperframe.denominator = pdev->vframes;
+	parm->parm.capture.timeperframe.numerator = 1;
+
+	return 0;
+}
+
+static int pwc_s_parm(struct file *file, void *fh,
+		      struct v4l2_streamparm *parm)
+{
+	struct pwc_device *pdev = video_drvdata(file);
+	int compression = 0;
+	int ret, fps;
+
+	if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+	    parm->parm.capture.timeperframe.numerator == 0)
+		return -EINVAL;
+
+	if (pwc_test_n_set_capt_file(pdev, file))
+		return -EBUSY;
+
+	fps = parm->parm.capture.timeperframe.denominator /
+	      parm->parm.capture.timeperframe.numerator;
+
+	mutex_lock(&pdev->udevlock);
+	if (!pdev->udev) {
+		ret = -ENODEV;
+		goto leave;
+	}
+
+	if (pdev->iso_init) {
+		ret = -EBUSY;
+		goto leave;
+	}
+
+	ret = pwc_set_video_mode(pdev, pdev->width, pdev->height, pdev->pixfmt,
+				 fps, &compression, 0);
+
+	pwc_g_parm(file, fh, parm);
+
+leave:
+	mutex_unlock(&pdev->udevlock);
+	return ret;
+}
+
 static int pwc_log_status(struct file *file, void *priv)
 {
 	struct pwc_device *pdev = video_drvdata(file);
@@ -1120,4 +1172,6 @@
 	.vidioc_log_status		    = pwc_log_status,
 	.vidioc_enum_framesizes		    = pwc_enum_framesizes,
 	.vidioc_enum_frameintervals	    = pwc_enum_frameintervals,
+	.vidioc_g_parm			    = pwc_g_parm,
+	.vidioc_s_parm			    = pwc_s_parm,
 };
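
The new pwc_g_parm()/pwc_s_parm() handlers expose the frame rate through the standard VIDIOC_G_PARM/VIDIOC_S_PARM ioctls, replacing the fps module parameter that the pwc-if.c hunks remove. A self-contained user-space sketch of requesting 15 frames per second; the /dev/video0 path and the 15 fps value are assumptions for illustration only:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_streamparm parm;
	int fd = open("/dev/video0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&parm, 0, sizeof(parm));
	parm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	parm.parm.capture.timeperframe.numerator = 1;	/* 1/15 s per frame */
	parm.parm.capture.timeperframe.denominator = 15;

	if (ioctl(fd, VIDIOC_S_PARM, &parm) < 0)
		perror("VIDIOC_S_PARM");
	else
		printf("granted %u/%u s per frame\n",
		       parm.parm.capture.timeperframe.numerator,
		       parm.parm.capture.timeperframe.denominator);

	close(fd);
	return 0;
}
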
diff --git a/drivers/media/video/pwc/pwc.h b/drivers/media/video/pwc/pwc.h
index 47c518f..e4d4d71 100644
--- a/drivers/media/video/pwc/pwc.h
+++ b/drivers/media/video/pwc/pwc.h
@@ -44,6 +44,8 @@
 #ifdef CONFIG_USB_PWC_INPUT_EVDEV
 #include <linux/input.h>
 #endif
+#include "pwc-dec1.h"
+#include "pwc-dec23.h"
 
 /* Version block */
 #define PWC_VERSION	"10.0.15"
@@ -132,9 +134,6 @@
 #define DEVICE_USE_CODEC3(x) ((x)>=700)
 #define DEVICE_USE_CODEC23(x) ((x)>=675)
 
-/* from pwc-dec.h */
-#define PWCX_FLAG_PLANAR        0x0001
-
 /* Request types: video */
 #define SET_LUM_CTL			0x01
 #define GET_LUM_CTL			0x02
@@ -248,8 +247,8 @@
 	char vmirror;		/* for ToUCaM series */
 	char power_save;	/* Do powersaving for this cam */
 
-	int cmd_len;
 	unsigned char cmd_buf[13];
+	unsigned char *ctrl_buf;
 
 	struct urb *urbs[MAX_ISO_BUFS];
 	char iso_init;
@@ -272,7 +271,10 @@
 	int frame_total_size;	/* including header & trailer */
 	int drop_frames;
 
-	void *decompress_data;	/* private data for decompression engine */
+	union {	/* private data for decompression engine */
+		struct pwc_dec1_private dec1;
+		struct pwc_dec23_private dec23;
+	};
 
 	/*
 	 * We have an 'image' and a 'view', where 'image' is the fixed-size img
@@ -364,7 +366,7 @@
 /** Functions in pwc-ctrl.c */
 /* Request a certain video mode. Returns < 0 if not possible */
 extern int pwc_set_video_mode(struct pwc_device *pdev, int width, int height,
-			      int frames, int *compression);
+	int pixfmt, int frames, int *compression, int send_to_cam);
 extern unsigned int pwc_get_fps(struct pwc_device *pdev, unsigned int index, unsigned int size);
 extern int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value);
 extern int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor);
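
In pwc.h the separately allocated decompress_data pointer becomes an anonymous union embedded directly in struct pwc_device, which is what allows pwc_dec1_init() and pwc_dec23_init() to drop their allocation-failure paths and return void. A plain-C sketch of the layout idea (the structure names here are illustrative, not the driver's):

struct dec1_state  { int version; };
struct dec23_state { int nbits; /* ... */ };

struct cam_state {
	int type;
	union {			/* only one decoder is ever active per device */
		struct dec1_state  dec1;
		struct dec23_state dec23;
	};
};
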
diff --git a/drivers/media/video/s5p-fimc/fimc-capture.c b/drivers/media/video/s5p-fimc/fimc-capture.c
index 510cfab..a9e9653 100644
--- a/drivers/media/video/s5p-fimc/fimc-capture.c
+++ b/drivers/media/video/s5p-fimc/fimc-capture.c
@@ -693,7 +693,7 @@
 			mf->code = 0;
 			continue;
 		}
-		if (mf->width != tfmt->width || mf->width != tfmt->width) {
+		if (mf->width != tfmt->width || mf->height != tfmt->height) {
 			u32 fcc = ffmt->fourcc;
 			tfmt->width  = mf->width;
 			tfmt->height = mf->height;
@@ -702,7 +702,8 @@
 					       NULL, &fcc, FIMC_SD_PAD_SOURCE);
 			if (ffmt && ffmt->mbus_code)
 				mf->code = ffmt->mbus_code;
-			if (mf->width != tfmt->width || mf->width != tfmt->width)
+			if (mf->width != tfmt->width ||
+			    mf->height != tfmt->height)
 				continue;
 			tfmt->code = mf->code;
 		}
@@ -710,7 +711,7 @@
 			ret = v4l2_subdev_call(csis, pad, set_fmt, NULL, &sfmt);
 
 		if (mf->code == tfmt->code &&
-		    mf->width == tfmt->width && mf->width == tfmt->width)
+		    mf->width == tfmt->width && mf->height == tfmt->height)
 			break;
 	}
 
diff --git a/drivers/media/video/s5p-fimc/fimc-core.c b/drivers/media/video/s5p-fimc/fimc-core.c
index f5cbb8a..81bcbb9 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.c
+++ b/drivers/media/video/s5p-fimc/fimc-core.c
@@ -848,11 +848,11 @@
 	v4l2_ctrl_handler_init(&ctx->ctrl_handler, 4);
 
 	ctx->ctrl_rotate = v4l2_ctrl_new_std(&ctx->ctrl_handler, &fimc_ctrl_ops,
-				     V4L2_CID_HFLIP, 0, 1, 1, 0);
+					V4L2_CID_ROTATE, 0, 270, 90, 0);
 	ctx->ctrl_hflip = v4l2_ctrl_new_std(&ctx->ctrl_handler, &fimc_ctrl_ops,
-				    V4L2_CID_VFLIP, 0, 1, 1, 0);
+					V4L2_CID_HFLIP, 0, 1, 1, 0);
 	ctx->ctrl_vflip = v4l2_ctrl_new_std(&ctx->ctrl_handler, &fimc_ctrl_ops,
-				    V4L2_CID_ROTATE, 0, 270, 90, 0);
+					V4L2_CID_VFLIP, 0, 1, 1, 0);
 	if (variant->has_alpha)
 		ctx->ctrl_alpha = v4l2_ctrl_new_std(&ctx->ctrl_handler,
 				    &fimc_ctrl_ops, V4L2_CID_ALPHA_COMPONENT,
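
The fimc-core.c hunk untangles swapped control IDs: ctrl_rotate was being registered as HFLIP, ctrl_hflip as VFLIP and ctrl_vflip as ROTATE, so a user-space rotation request ended up driving the wrong control. With the fix, a plain VIDIOC_S_CTRL call behaves as expected; a short user-space sketch, assuming a FIMC video node at /dev/video1 (path and the 90-degree value are illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_control ctrl = {
		.id	= V4L2_CID_ROTATE,
		.value	= 90,		/* rotate the image by 90 degrees */
	};
	int fd = open("/dev/video1", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, VIDIOC_S_CTRL, &ctrl) < 0)
		perror("VIDIOC_S_CTRL");
	close(fd);
	return 0;
}
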
diff --git a/drivers/media/video/s5p-fimc/fimc-mdevice.c b/drivers/media/video/s5p-fimc/fimc-mdevice.c
index 615c862..8ea4ee1 100644
--- a/drivers/media/video/s5p-fimc/fimc-mdevice.c
+++ b/drivers/media/video/s5p-fimc/fimc-mdevice.c
@@ -21,7 +21,6 @@
 #include <linux/pm_runtime.h>
 #include <linux/types.h>
 #include <linux/slab.h>
-#include <linux/version.h>
 #include <media/v4l2-ctrls.h>
 #include <media/media-device.h>
 
diff --git a/drivers/media/video/s5p-g2d/g2d.c b/drivers/media/video/s5p-g2d/g2d.c
index c40b0dd..febaa67 100644
--- a/drivers/media/video/s5p-g2d/g2d.c
+++ b/drivers/media/video/s5p-g2d/g2d.c
@@ -184,6 +184,7 @@
 			ctx->rop = ROP4_INVERT;
 		else
 			ctx->rop = ROP4_COPY;
+		break;
 	default:
 		v4l2_err(&ctx->dev->v4l2_dev, "unknown control\n");
 		return -EINVAL;
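
The single added break matters because, without it, the case that sets ctx->rop fell straight through into the default branch: the value was updated and yet the control write still failed with -EINVAL and an "unknown control" message. A minimal plain-C illustration of that fall-through pitfall (entirely synthetic, not driver code):

int set_option(int id)
{
	int ret = 0;

	switch (id) {
	case 1:
		/* option applied successfully here */
		break;		/* without this, execution falls into default */
	default:
		ret = -1;	/* reported as "unknown option" */
	}
	return ret;
}
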
diff --git a/drivers/media/video/s5p-jpeg/jpeg-core.c b/drivers/media/video/s5p-jpeg/jpeg-core.c
index f841a3e..1105a87 100644
--- a/drivers/media/video/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/video/s5p-jpeg/jpeg-core.c
@@ -989,9 +989,10 @@
  * ============================================================================
  */
 
-static int s5p_jpeg_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
-				unsigned int *nplanes, unsigned int sizes[],
-				void *alloc_ctxs[])
+static int s5p_jpeg_queue_setup(struct vb2_queue *vq,
+			   const struct v4l2_format *fmt,
+			   unsigned int *nbuffers, unsigned int *nplanes,
+			   unsigned int sizes[], void *alloc_ctxs[])
 {
 	struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vq);
 	struct s5p_jpeg_q_data *q_data = NULL;
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc.c b/drivers/media/video/s5p-mfc/s5p_mfc.c
index e43e128..83fe461 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc.c
@@ -18,7 +18,6 @@
 #include <linux/platform_device.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
-#include <linux/version.h>
 #include <linux/videodev2.h>
 #include <linux/workqueue.h>
 #include <media/videobuf2-core.h>
@@ -475,7 +474,7 @@
 			ctx->mv_size = 0;
 		}
 		ctx->dpb_count = s5p_mfc_get_dpb_count();
-		if (ctx->img_width == 0 || ctx->img_width == 0)
+		if (ctx->img_width == 0 || ctx->img_height == 0)
 			ctx->state = MFCINST_ERROR;
 		else
 			ctx->state = MFCINST_HEAD_PARSED;
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_dec.c b/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
index 844a4d7..c25ec02 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
@@ -165,7 +165,7 @@
 		.maximum = 32,
 		.step = 1,
 		.default_value = 1,
-		.flags = V4L2_CTRL_FLAG_VOLATILE,
+		.is_volatile = 1,
 	},
 };
 
diff --git a/drivers/media/video/saa7164/saa7164-cards.c b/drivers/media/video/saa7164/saa7164-cards.c
index 971591d..5b72da5 100644
--- a/drivers/media/video/saa7164/saa7164-cards.c
+++ b/drivers/media/video/saa7164/saa7164-cards.c
@@ -269,8 +269,6 @@
 		.portb		= SAA7164_MPEG_DVB,
 		.portc		= SAA7164_MPEG_ENCODER,
 		.portd		= SAA7164_MPEG_ENCODER,
-		.portc		= SAA7164_MPEG_ENCODER,
-		.portd		= SAA7164_MPEG_ENCODER,
 		.porte		= SAA7164_MPEG_VBI,
 		.portf		= SAA7164_MPEG_VBI,
 		.chiprev	= SAA7164_CHIP_REV3,
@@ -333,8 +331,6 @@
 		.portd		= SAA7164_MPEG_ENCODER,
 		.porte		= SAA7164_MPEG_VBI,
 		.portf		= SAA7164_MPEG_VBI,
-		.porte		= SAA7164_MPEG_VBI,
-		.portf		= SAA7164_MPEG_VBI,
 		.chiprev	= SAA7164_CHIP_REV3,
 		.unit		= {{
 			.id		= 0x28,
diff --git a/drivers/media/video/tlg2300/pd-main.c b/drivers/media/video/tlg2300/pd-main.c
index 129f135..c096b3f 100644
--- a/drivers/media/video/tlg2300/pd-main.c
+++ b/drivers/media/video/tlg2300/pd-main.c
@@ -374,7 +374,7 @@
 }
 #endif
 
-static bool check_firmware(struct usb_device *udev, int *down_firmware)
+static int check_firmware(struct usb_device *udev, int *down_firmware)
 {
 	void *buf;
 	int ret;
@@ -398,7 +398,7 @@
 		*down_firmware = 1;
 		return firmware_download(udev);
 	}
-	return ret;
+	return 0;
 }
 
 static int poseidon_probe(struct usb_interface *interface,
diff --git a/drivers/media/video/v4l2-ctrls.c b/drivers/media/video/v4l2-ctrls.c
index da1f4c2..cccd42b 100644
--- a/drivers/media/video/v4l2-ctrls.c
+++ b/drivers/media/video/v4l2-ctrls.c
@@ -465,8 +465,8 @@
 	case V4L2_CID_CHROMA_GAIN:		return "Chroma Gain";
 	case V4L2_CID_ILLUMINATORS_1:		return "Illuminator 1";
 	case V4L2_CID_ILLUMINATORS_2:		return "Illuminator 2";
-	case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:	return "Minimum Number of Capture Buffers";
-	case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:	return "Minimum Number of Output Buffers";
+	case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:	return "Min Number of Capture Buffers";
+	case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:	return "Min Number of Output Buffers";
 	case V4L2_CID_ALPHA_COMPONENT:		return "Alpha Component";
 
 	/* MPEG controls */
@@ -506,25 +506,25 @@
 	case V4L2_CID_MPEG_VIDEO_MUTE_YUV:	return "Video Mute YUV";
 	case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE:	return "Decoder Slice Interface";
 	case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER:	return "MPEG4 Loop Filter Enable";
-	case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB:	return "The Number of Intra Refresh MBs";
+	case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB:	return "Number of Intra Refresh MBs";
 	case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE:		return "Frame Level Rate Control Enable";
 	case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE:			return "H264 MB Level Rate Control";
 	case V4L2_CID_MPEG_VIDEO_HEADER_MODE:			return "Sequence Header Mode";
-	case V4L2_CID_MPEG_VIDEO_MAX_REF_PIC:			return "The Max Number of Reference Picture";
+	case V4L2_CID_MPEG_VIDEO_MAX_REF_PIC:			return "Max Number of Reference Pics";
 	case V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP:		return "H263 I-Frame QP Value";
-	case V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP:		return "H263 P frame QP Value";
-	case V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP:		return "H263 B frame QP Value";
+	case V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP:		return "H263 P-Frame QP Value";
+	case V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP:		return "H263 B-Frame QP Value";
 	case V4L2_CID_MPEG_VIDEO_H263_MIN_QP:			return "H263 Minimum QP Value";
 	case V4L2_CID_MPEG_VIDEO_H263_MAX_QP:			return "H263 Maximum QP Value";
 	case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP:		return "H264 I-Frame QP Value";
-	case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP:		return "H264 P frame QP Value";
-	case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP:		return "H264 B frame QP Value";
+	case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP:		return "H264 P-Frame QP Value";
+	case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP:		return "H264 B-Frame QP Value";
 	case V4L2_CID_MPEG_VIDEO_H264_MAX_QP:			return "H264 Maximum QP Value";
 	case V4L2_CID_MPEG_VIDEO_H264_MIN_QP:			return "H264 Minimum QP Value";
 	case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM:		return "H264 8x8 Transform Enable";
 	case V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE:			return "H264 CPB Buffer Size";
-	case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:		return "H264 Entorpy Mode";
-	case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD:			return "H264 I Period";
+	case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:		return "H264 Entropy Mode";
+	case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD:			return "H264 I-Frame Period";
 	case V4L2_CID_MPEG_VIDEO_H264_LEVEL:			return "H264 Level";
 	case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA:	return "H264 Loop Filter Alpha Offset";
 	case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA:		return "H264 Loop Filter Beta Offset";
@@ -535,16 +535,16 @@
 	case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE:		return "Aspect Ratio VUI Enable";
 	case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC:		return "VUI Aspect Ratio IDC";
 	case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP:		return "MPEG4 I-Frame QP Value";
-	case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP:		return "MPEG4 P frame QP Value";
-	case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP:		return "MPEG4 B frame QP Value";
+	case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP:		return "MPEG4 P-Frame QP Value";
+	case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP:		return "MPEG4 B-Frame QP Value";
 	case V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP:			return "MPEG4 Minimum QP Value";
 	case V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP:			return "MPEG4 Maximum QP Value";
 	case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:			return "MPEG4 Level";
 	case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:			return "MPEG4 Profile";
 	case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL:			return "Quarter Pixel Search Enable";
-	case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES:		return "The Maximum Bytes Per Slice";
-	case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB:		return "The Number of MB in a Slice";
-	case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:		return "The Slice Partitioning Method";
+	case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES:		return "Maximum Bytes in a Slice";
+	case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB:		return "Number of MBs in a Slice";
+	case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:		return "Slice Partitioning Method";
 	case V4L2_CID_MPEG_VIDEO_VBV_SIZE:			return "VBV Buffer Size";
 
 	/* CAMERA controls */
@@ -580,7 +580,7 @@
 	case V4L2_CID_AUDIO_LIMITER_ENABLED:	return "Audio Limiter Feature Enabled";
 	case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME: return "Audio Limiter Release Time";
 	case V4L2_CID_AUDIO_LIMITER_DEVIATION:	return "Audio Limiter Deviation";
-	case V4L2_CID_AUDIO_COMPRESSION_ENABLED: return "Audio Compression Feature Enabled";
+	case V4L2_CID_AUDIO_COMPRESSION_ENABLED: return "Audio Compression Enabled";
 	case V4L2_CID_AUDIO_COMPRESSION_GAIN:	return "Audio Compression Gain";
 	case V4L2_CID_AUDIO_COMPRESSION_THRESHOLD: return "Audio Compression Threshold";
 	case V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME: return "Audio Compression Attack Time";
@@ -588,24 +588,24 @@
 	case V4L2_CID_PILOT_TONE_ENABLED:	return "Pilot Tone Feature Enabled";
 	case V4L2_CID_PILOT_TONE_DEVIATION:	return "Pilot Tone Deviation";
 	case V4L2_CID_PILOT_TONE_FREQUENCY:	return "Pilot Tone Frequency";
-	case V4L2_CID_TUNE_PREEMPHASIS:		return "Pre-emphasis settings";
+	case V4L2_CID_TUNE_PREEMPHASIS:		return "Pre-Emphasis";
 	case V4L2_CID_TUNE_POWER_LEVEL:		return "Tune Power Level";
 	case V4L2_CID_TUNE_ANTENNA_CAPACITOR:	return "Tune Antenna Capacitor";
 
 	/* Flash controls */
-	case V4L2_CID_FLASH_CLASS:		return "Flash controls";
-	case V4L2_CID_FLASH_LED_MODE:		return "LED mode";
-	case V4L2_CID_FLASH_STROBE_SOURCE:	return "Strobe source";
+	case V4L2_CID_FLASH_CLASS:		return "Flash Controls";
+	case V4L2_CID_FLASH_LED_MODE:		return "LED Mode";
+	case V4L2_CID_FLASH_STROBE_SOURCE:	return "Strobe Source";
 	case V4L2_CID_FLASH_STROBE:		return "Strobe";
-	case V4L2_CID_FLASH_STROBE_STOP:	return "Stop strobe";
-	case V4L2_CID_FLASH_STROBE_STATUS:	return "Strobe status";
-	case V4L2_CID_FLASH_TIMEOUT:		return "Strobe timeout";
-	case V4L2_CID_FLASH_INTENSITY:		return "Intensity, flash mode";
-	case V4L2_CID_FLASH_TORCH_INTENSITY:	return "Intensity, torch mode";
-	case V4L2_CID_FLASH_INDICATOR_INTENSITY: return "Intensity, indicator";
+	case V4L2_CID_FLASH_STROBE_STOP:	return "Stop Strobe";
+	case V4L2_CID_FLASH_STROBE_STATUS:	return "Strobe Status";
+	case V4L2_CID_FLASH_TIMEOUT:		return "Strobe Timeout";
+	case V4L2_CID_FLASH_INTENSITY:		return "Intensity, Flash Mode";
+	case V4L2_CID_FLASH_TORCH_INTENSITY:	return "Intensity, Torch Mode";
+	case V4L2_CID_FLASH_INDICATOR_INTENSITY: return "Intensity, Indicator";
 	case V4L2_CID_FLASH_FAULT:		return "Faults";
 	case V4L2_CID_FLASH_CHARGE:		return "Charge";
-	case V4L2_CID_FLASH_READY:		return "Ready to strobe";
+	case V4L2_CID_FLASH_READY:		return "Ready to Strobe";
 
 	default:
 		return NULL;
diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
index 77feeb6..3f62385 100644
--- a/drivers/media/video/v4l2-ioctl.c
+++ b/drivers/media/video/v4l2-ioctl.c
@@ -1871,6 +1871,7 @@
 	case VIDIOC_S_FREQUENCY:
 	{
 		struct v4l2_frequency *p = arg;
+		enum v4l2_tuner_type type;
 
 		if (!ops->vidioc_s_frequency)
 			break;
@@ -1878,9 +1879,14 @@
 			ret = ret_prio;
 			break;
 		}
+		type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+			V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
 		dbgarg(cmd, "tuner=%d, type=%d, frequency=%d\n",
 				p->tuner, p->type, p->frequency);
-		ret = ops->vidioc_s_frequency(file, fh, p);
+		if (p->type != type)
+			ret = -EINVAL;
+		else
+			ret = ops->vidioc_s_frequency(file, fh, p);
 		break;
 	}
 	case VIDIOC_G_SLICED_VBI_CAP:
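
With the v4l2-ioctl.c change above, VIDIOC_S_FREQUENCY is rejected in the core when the requested tuner type does not match the node: radio nodes only accept V4L2_TUNER_RADIO and video nodes only V4L2_TUNER_ANALOG_TV. User space therefore has to fill in the type field explicitly; a sketch, assuming an FM radio node at /dev/radio0 whose tuner reports V4L2_TUNER_CAP_LOW (62.5 Hz units), tuning to 100.0 MHz (path, units and frequency are illustrative assumptions):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_frequency freq;
	int fd = open("/dev/radio0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&freq, 0, sizeof(freq));
	freq.tuner = 0;
	freq.type = V4L2_TUNER_RADIO;	/* must now match the node type */
	freq.frequency = 1600000;	/* 100.0 MHz in 62.5 Hz units */

	if (ioctl(fd, VIDIOC_S_FREQUENCY, &freq) < 0)
		perror("VIDIOC_S_FREQUENCY");
	close(fd);
	return 0;
}
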
diff --git a/drivers/media/video/zoran/zoran_driver.c b/drivers/media/video/zoran/zoran_driver.c
index f6d2641..4c09ab7 100644
--- a/drivers/media/video/zoran/zoran_driver.c
+++ b/drivers/media/video/zoran/zoran_driver.c
@@ -1958,7 +1958,6 @@
 	mutex_unlock(&zr->resource_lock);
 	fb->fmt.colorspace = V4L2_COLORSPACE_SRGB;
 	fb->fmt.field = V4L2_FIELD_INTERLACED;
-	fb->flags = V4L2_FBUF_FLAG_OVERLAY;
 	fb->capability = V4L2_FBUF_CAP_LIST_CLIPPING;
 
 	return 0;
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 97fff78..af295bb 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -2802,7 +2802,7 @@
 
 /**
  * pci_intx_mask_supported - probe for INTx masking support
- * @pdev: the PCI device to operate on
+ * @dev: the PCI device to operate on
  *
 * Check if the device dev supports INTx masking via the config space
  * command word.
@@ -2884,7 +2884,7 @@
 
 /**
  * pci_check_and_mask_intx - mask INTx on pending interrupt
- * @pdev: the PCI device to operate on
+ * @dev: the PCI device to operate on
  *
  * Check if the device dev has its INTx line asserted, mask it and
 * return true in that case. False is returned if no interrupt was
@@ -2898,7 +2898,7 @@
 
 /**
 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
- * @pdev: the PCI device to operate on
+ * @dev: the PCI device to operate on
  *
  * Check if the device dev has its INTx line asserted, unmask it if not
  * and return true. False is returned and the mask remains active if
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index ca86f39..e9a83f8 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2731,6 +2731,8 @@
  * @dev: struct device for the regulator
  * @init_data: platform provided init data, passed through by driver
  * @driver_data: private regulator data
+ * @of_node: OpenFirmware node to parse for device tree bindings (may be
+ *           NULL).
  *
  * Called by regulator drivers to register a regulator.
  * Returns 0 on success.
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 06ea3bc..16570aa 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -830,16 +830,11 @@
 	tristate "Intel(R) C600 Series Chipset SAS Controller"
 	depends on PCI && SCSI
 	depends on X86
-	# (temporary): known alpha quality driver
-	depends on EXPERIMENTAL
 	select SCSI_SAS_LIBSAS
-	select SCSI_SAS_HOST_SMP
 	---help---
 	  This driver supports the 6Gb/s SAS capabilities of the storage
 	  control unit found in the Intel(R) C600 series chipset.
 
-	  The experimental tag will be removed after the driver exits alpha
-
 config SCSI_GENERIC_NCR5380
 	tristate "Generic NCR5380/53c400 SCSI PIO support"
 	depends on ISA && SCSI
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
index 78963be..cb07c62 100644
--- a/drivers/scsi/bfa/bfa_defs_svc.h
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -673,12 +673,7 @@
 	u32	tm_iocdowns;		/*  TM cleaned-up due to IOC down   */
 	u32	tm_cleanups;		/*  TM cleanup requests	*/
 	u32	tm_cleanup_comps;	/*  TM cleanup completions	*/
-	u32	lm_lun_across_sg;	/*  LM lun is across sg data buf */
-	u32	lm_lun_not_sup;		/*  LM lun not supported */
-	u32	lm_rpl_data_changed;	/*  LM report-lun data changed */
-	u32	lm_wire_residue_changed; /* LM report-lun rsp residue changed */
-	u32	lm_small_buf_addresidue; /* LM buf smaller than reported cnt */
-	u32	lm_lun_not_rdy;		/* LM lun not ready */
+	u32	rsvd[6];
 };
 
 /* Modify char* port_stt[] in bfal_port.c if a new state was added */
diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h
index 50b6a1c..8d0b88f 100644
--- a/drivers/scsi/bfa/bfa_fc.h
+++ b/drivers/scsi/bfa/bfa_fc.h
@@ -56,161 +56,6 @@
 
 #define SCSI_MAX_ALLOC_LEN      0xFF    /* maximum allocation length */
 
-#define SCSI_SENSE_CUR_ERR	0x70
-#define SCSI_SENSE_DEF_ERR	0x71
-
-/*
- * SCSI additional sense codes
- */
-#define SCSI_ASC_LUN_NOT_READY		0x04
-#define SCSI_ASC_LUN_NOT_SUPPORTED	0x25
-#define SCSI_ASC_TOCC			0x3F
-
-/*
- * SCSI additional sense code qualifiers
- */
-#define SCSI_ASCQ_MAN_INTR_REQ		0x03	/* manual intervention req */
-#define SCSI_ASCQ_RL_DATA_CHANGED	0x0E	/* report luns data changed */
-
-/*
- * Methods of reporting informational exceptions
- */
-#define SCSI_MP_IEC_UNIT_ATTN		0x2	/* generate unit attention */
-
-struct scsi_report_luns_data_s {
-	u32		lun_list_length;	/* length of LUN list length */
-	u32		reserved;
-	struct scsi_lun	lun[1];			/* first LUN in lun list */
-};
-
-struct scsi_inquiry_vendor_s {
-	u8	vendor_id[8];
-};
-
-struct scsi_inquiry_prodid_s {
-	u8	product_id[16];
-};
-
-struct scsi_inquiry_prodrev_s {
-	u8	product_rev[4];
-};
-
-struct scsi_inquiry_data_s {
-#ifdef __BIG_ENDIAN
-	u8		peripheral_qual:3;	/* peripheral qualifier */
-	u8		device_type:5;		/* peripheral device type */
-	u8		rmb:1;			/* removable medium bit */
-	u8		device_type_mod:7;	/* device type modifier */
-	u8		version;
-	u8		aenc:1;		/* async evt notification capability */
-	u8		trm_iop:1;	/* terminate I/O process */
-	u8		norm_aca:1;	/* normal ACA supported */
-	u8		hi_support:1;	/* SCSI-3: supports REPORT LUNS */
-	u8		rsp_data_format:4;
-	u8		additional_len;
-	u8		sccs:1;
-	u8		reserved1:7;
-	u8		reserved2:1;
-	u8		enc_serv:1;	/* enclosure service component */
-	u8		reserved3:1;
-	u8		multi_port:1;	/* multi-port device */
-	u8		m_chngr:1;	/* device in medium transport element */
-	u8		ack_req_q:1;	/* SIP specific bit */
-	u8		addr32:1;	/* SIP specific bit */
-	u8		addr16:1;	/* SIP specific bit */
-	u8		rel_adr:1;	/* relative address */
-	u8		w_bus32:1;
-	u8		w_bus16:1;
-	u8		synchronous:1;
-	u8		linked_commands:1;
-	u8		trans_dis:1;
-	u8		cmd_queue:1;	/* command queueing supported */
-	u8		soft_reset:1;	/* soft reset alternative (VS) */
-#else
-	u8		device_type:5;	/* peripheral device type */
-	u8		peripheral_qual:3; /* peripheral qualifier */
-	u8		device_type_mod:7; /* device type modifier */
-	u8		rmb:1;		/* removable medium bit */
-	u8		version;
-	u8		rsp_data_format:4;
-	u8		hi_support:1;	/* SCSI-3: supports REPORT LUNS */
-	u8		norm_aca:1;	/* normal ACA supported */
-	u8		terminate_iop:1;/* terminate I/O process */
-	u8		aenc:1;		/* async evt notification capability */
-	u8		additional_len;
-	u8		reserved1:7;
-	u8		sccs:1;
-	u8		addr16:1;	/* SIP specific bit */
-	u8		addr32:1;	/* SIP specific bit */
-	u8		ack_req_q:1;	/* SIP specific bit */
-	u8		m_chngr:1;	/* device in medium transport element */
-	u8		multi_port:1;	/* multi-port device */
-	u8		reserved3:1;	/* TBD - Vendor Specific */
-	u8		enc_serv:1;	/* enclosure service component */
-	u8		reserved2:1;
-	u8		soft_seset:1;	/* soft reset alternative (VS) */
-	u8		cmd_queue:1;	/* command queueing supported */
-	u8		trans_dis:1;
-	u8		linked_commands:1;
-	u8		synchronous:1;
-	u8		w_bus16:1;
-	u8		w_bus32:1;
-	u8		rel_adr:1;	/* relative address */
-#endif
-	struct scsi_inquiry_vendor_s	vendor_id;
-	struct scsi_inquiry_prodid_s	product_id;
-	struct scsi_inquiry_prodrev_s	product_rev;
-	u8		vendor_specific[20];
-	u8		reserved4[40];
-};
-
-/*
- *	SCSI sense data format
- */
-struct scsi_sense_s {
-#ifdef __BIG_ENDIAN
-	u8		valid:1;
-	u8		rsp_code:7;
-#else
-	u8		rsp_code:7;
-	u8		valid:1;
-#endif
-	u8		seg_num;
-#ifdef __BIG_ENDIAN
-	u8		file_mark:1;
-	u8		eom:1;		/* end of media */
-	u8		ili:1;		/* incorrect length indicator */
-	u8		reserved:1;
-	u8		sense_key:4;
-#else
-	u8		sense_key:4;
-	u8		reserved:1;
-	u8		ili:1;		/* incorrect length indicator */
-	u8		eom:1;		/* end of media */
-	u8		file_mark:1;
-#endif
-	u8		information[4];	/* device-type or cmd specific info */
-	u8		add_sense_length; /* additional sense length */
-	u8		command_info[4];/* command specific information */
-	u8		asc;		/* additional sense code */
-	u8		ascq;		/* additional sense code qualifier */
-	u8		fru_code;	/* field replaceable unit code */
-#ifdef __BIG_ENDIAN
-	u8		sksv:1;		/* sense key specific valid */
-	u8		c_d:1;		/* command/data bit */
-	u8		res1:2;
-	u8		bpv:1;		/* bit pointer valid */
-	u8		bpointer:3;	/* bit pointer */
-#else
-	u8		bpointer:3;	/* bit pointer */
-	u8		bpv:1;		/* bit pointer valid */
-	u8		res1:2;
-	u8		c_d:1;		/* command/data bit */
-	u8		sksv:1;		/* sense key specific valid */
-#endif
-	u8		fpointer[2];	/* field pointer */
-};
-
 /*
  * Fibre Channel Header Structure (FCHS) definition
  */
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index e07bd47..f0f80e2 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -24,8 +24,6 @@
  *  BFA ITNIM Related definitions
  */
 static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
-static bfa_boolean_t bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim);
-static bfa_boolean_t bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim);
 static void bfa_ioim_lm_init(struct bfa_s *bfa);
 
 #define BFA_ITNIM_FROM_TAG(_fcpim, _tag)                                \
@@ -60,14 +58,6 @@
 	}								\
 } while (0)
 
-#define bfa_ioim_rp_wwn(__ioim)						\
-	(((struct bfa_fcs_rport_s *)					\
-	 (__ioim)->itnim->rport->rport_drv)->pwwn)
-
-#define bfa_ioim_lp_wwn(__ioim)						\
-	((BFA_LPS_FROM_TAG(BFA_LPS_MOD((__ioim)->bfa),			\
-	(__ioim)->itnim->rport->rport_info.lp_tag))->pwwn)		\
-
 #define bfa_itnim_sler_cb(__itnim) do {					\
 	if ((__itnim)->bfa->fcs)					\
 		bfa_cb_itnim_sler((__itnim)->ditn);      \
@@ -77,13 +67,6 @@
 	}								\
 } while (0)
 
-enum bfa_ioim_lm_status {
-	BFA_IOIM_LM_PRESENT = 1,
-	BFA_IOIM_LM_LUN_NOT_SUP = 2,
-	BFA_IOIM_LM_RPL_DATA_CHANGED = 3,
-	BFA_IOIM_LM_LUN_NOT_RDY = 4,
-};
-
 enum bfa_ioim_lm_ua_status {
 	BFA_IOIM_LM_UA_RESET = 0,
 	BFA_IOIM_LM_UA_SET = 1,
@@ -145,9 +128,6 @@
 	BFA_IOIM_SM_TMDONE	= 16,	/*  IO cleanup from tskim */
 	BFA_IOIM_SM_HWFAIL	= 17,	/*  IOC h/w failure event */
 	BFA_IOIM_SM_IOTOV	= 18,	/*  ITN offline TOV */
-	BFA_IOIM_SM_LM_LUN_NOT_SUP = 19,/*  lunmask lun not supported */
-	BFA_IOIM_SM_LM_RPL_DC = 20,	/*  lunmask report-lun data changed */
-	BFA_IOIM_SM_LM_LUN_NOT_RDY = 21,/*  lunmask lun not ready */
 };
 
 
@@ -245,9 +225,6 @@
 static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
 static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
 static bfa_boolean_t    bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
-static void __bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete);
-static void __bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete);
-static void __bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete);
 
 /*
  * forward declaration of BFA IO state machine
@@ -445,12 +422,6 @@
 	bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
 	bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
 	bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
-	bfa_fcpim_add_iostats(lstats, rstats, lm_lun_across_sg);
-	bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_sup);
-	bfa_fcpim_add_iostats(lstats, rstats, lm_rpl_data_changed);
-	bfa_fcpim_add_iostats(lstats, rstats, lm_wire_residue_changed);
-	bfa_fcpim_add_iostats(lstats, rstats, lm_small_buf_addresidue);
-	bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_rdy);
 }
 
 bfa_status_t
@@ -1580,27 +1551,6 @@
 			__bfa_cb_ioim_abort, ioim);
 		break;
 
-	case BFA_IOIM_SM_LM_LUN_NOT_SUP:
-		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
-		bfa_ioim_move_to_comp_q(ioim);
-		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
-			__bfa_cb_ioim_lm_lun_not_sup, ioim);
-		break;
-
-	case BFA_IOIM_SM_LM_RPL_DC:
-		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
-		bfa_ioim_move_to_comp_q(ioim);
-		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
-				__bfa_cb_ioim_lm_rpl_dc, ioim);
-		break;
-
-	case BFA_IOIM_SM_LM_LUN_NOT_RDY:
-		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
-		bfa_ioim_move_to_comp_q(ioim);
-		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
-			__bfa_cb_ioim_lm_lun_not_rdy, ioim);
-		break;
-
 	default:
 		bfa_sm_fault(ioim->bfa, event);
 	}
@@ -2160,243 +2110,6 @@
 	}
 }
 
-/*
- * Validate LUN for LUN masking
- */
-static enum bfa_ioim_lm_status
-bfa_ioim_lm_check(struct bfa_ioim_s *ioim, struct bfa_lps_s *lps,
-		struct bfa_rport_s *rp, struct scsi_lun lun)
-{
-	u8 i;
-	struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
-	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
-	struct scsi_cdb_s *cdb = (struct scsi_cdb_s *)cmnd->cmnd;
-
-	if ((cdb->scsi_cdb[0] == REPORT_LUNS) &&
-	    (scsilun_to_int((struct scsi_lun *)&lun) == 0)) {
-		ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data;
-		return BFA_IOIM_LM_PRESENT;
-	}
-
-	for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
-
-		if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
-			continue;
-
-		if ((scsilun_to_int((struct scsi_lun *)&lun_list[i].lun) ==
-		    scsilun_to_int((struct scsi_lun *)&lun))
-		    && (rp->rport_tag == lun_list[i].rp_tag)
-		    && ((u8)ioim->itnim->rport->rport_info.lp_tag ==
-						lun_list[i].lp_tag)) {
-			bfa_trc(ioim->bfa, lun_list[i].rp_tag);
-			bfa_trc(ioim->bfa, lun_list[i].lp_tag);
-			bfa_trc(ioim->bfa, scsilun_to_int(
-				(struct scsi_lun *)&lun_list[i].lun));
-
-			if ((lun_list[i].ua == BFA_IOIM_LM_UA_SET) &&
-			    ((cdb->scsi_cdb[0] != INQUIRY) ||
-			    (cdb->scsi_cdb[0] != REPORT_LUNS))) {
-				lun_list[i].ua = BFA_IOIM_LM_UA_RESET;
-				return BFA_IOIM_LM_RPL_DATA_CHANGED;
-			}
-
-			if (cdb->scsi_cdb[0] == REPORT_LUNS)
-				ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data;
-
-			return BFA_IOIM_LM_PRESENT;
-		}
-	}
-
-	if ((cdb->scsi_cdb[0] == INQUIRY) &&
-	    (scsilun_to_int((struct scsi_lun *)&lun) == 0)) {
-		ioim->proc_rsp_data = bfa_ioim_lm_proc_inq_data;
-		return BFA_IOIM_LM_PRESENT;
-	}
-
-	if (cdb->scsi_cdb[0] == TEST_UNIT_READY)
-		return BFA_IOIM_LM_LUN_NOT_RDY;
-
-	return BFA_IOIM_LM_LUN_NOT_SUP;
-}
-
-static bfa_boolean_t
-bfa_ioim_lm_proc_rsp_data_dummy(struct bfa_ioim_s *ioim)
-{
-	return BFA_TRUE;
-}
-
-static void
-bfa_ioim_lm_fetch_lun(struct bfa_ioim_s *ioim, u8 *rl_data, int offset,
-		int buf_lun_cnt)
-{
-	struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
-	struct scsi_lun *lun_data = (struct scsi_lun *)(rl_data + offset);
-	struct scsi_lun lun;
-	int i, j;
-
-	bfa_trc(ioim->bfa, buf_lun_cnt);
-	for (j = 0; j < buf_lun_cnt; j++) {
-		lun = *((struct scsi_lun *)(lun_data + j));
-		for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
-			if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
-				continue;
-			if ((lun_list[i].rp_wwn == bfa_ioim_rp_wwn(ioim)) &&
-			    (lun_list[i].lp_wwn == bfa_ioim_lp_wwn(ioim)) &&
-			    (scsilun_to_int((struct scsi_lun *)&lun_list[i].lun)
-				== scsilun_to_int((struct scsi_lun *)&lun))) {
-				lun_list[i].state = BFA_IOIM_LUN_MASK_FETCHED;
-				break;
-			}
-		} /* next lun in mask DB */
-	} /* next lun in buf */
-}
-
-static int
-bfa_ioim_lm_update_lun_sg(struct bfa_ioim_s *ioim, u32 *pgdlen,
-		struct scsi_report_luns_data_s *rl)
-{
-	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
-	struct scatterlist *sg = scsi_sglist(cmnd);
-	struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
-	struct scsi_lun *prev_rl_data = NULL, *base_rl_data;
-	int i, j, sgeid, lun_fetched_cnt = 0, prev_sg_len = 0, base_count;
-	int lun_across_sg_bytes, bytes_from_next_buf;
-	u64	last_lun, temp_last_lun;
-
-	/* fetch luns from the first sg element */
-	bfa_ioim_lm_fetch_lun(ioim, (u8 *)(rl->lun), 0,
-			(sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1);
-
-	/* fetch luns from multiple sg elements */
-	scsi_for_each_sg(cmnd, sg, scsi_sg_count(cmnd), sgeid) {
-		if (sgeid == 0) {
-			prev_sg_len = sg_dma_len(sg);
-			prev_rl_data = (struct scsi_lun *)
-					phys_to_virt(sg_dma_address(sg));
-			continue;
-		}
-
-		/* if the buf is having more data */
-		lun_across_sg_bytes = prev_sg_len % sizeof(struct scsi_lun);
-		if (lun_across_sg_bytes) {
-			bfa_trc(ioim->bfa, lun_across_sg_bytes);
-			bfa_stats(ioim->itnim, lm_lun_across_sg);
-			bytes_from_next_buf = sizeof(struct scsi_lun) -
-					      lun_across_sg_bytes;
-
-			/* from next buf take higher bytes */
-			temp_last_lun = *((u64 *)
-					  phys_to_virt(sg_dma_address(sg)));
-			last_lun |= temp_last_lun >>
-				    (lun_across_sg_bytes * BITS_PER_BYTE);
-
-			/* from prev buf take higher bytes */
-			temp_last_lun = *((u64 *)(prev_rl_data +
-					  (prev_sg_len - lun_across_sg_bytes)));
-			temp_last_lun >>= bytes_from_next_buf * BITS_PER_BYTE;
-			last_lun = last_lun | (temp_last_lun <<
-				   (bytes_from_next_buf * BITS_PER_BYTE));
-
-			bfa_ioim_lm_fetch_lun(ioim, (u8 *)&last_lun, 0, 1);
-		} else
-			bytes_from_next_buf = 0;
-
-		*pgdlen += sg_dma_len(sg);
-		prev_sg_len = sg_dma_len(sg);
-		prev_rl_data = (struct scsi_lun *)
-				phys_to_virt(sg_dma_address(sg));
-		bfa_ioim_lm_fetch_lun(ioim, (u8 *)prev_rl_data,
-				bytes_from_next_buf,
-				sg_dma_len(sg) / sizeof(struct scsi_lun));
-	}
-
-	/* update the report luns data - based on fetched luns */
-	sg = scsi_sglist(cmnd);
-	base_rl_data = (struct scsi_lun *)rl->lun;
-	base_count = (sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1;
-	for (i = 0, j = 0; i < MAX_LUN_MASK_CFG; i++) {
-		if (lun_list[i].state == BFA_IOIM_LUN_MASK_FETCHED) {
-			base_rl_data[j] = lun_list[i].lun;
-			lun_list[i].state = BFA_IOIM_LUN_MASK_ACTIVE;
-			j++;
-			lun_fetched_cnt++;
-		}
-
-		if (j > base_count) {
-			j = 0;
-			sg = sg_next(sg);
-			base_rl_data = (struct scsi_lun *)
-					phys_to_virt(sg_dma_address(sg));
-			base_count = sg_dma_len(sg) / sizeof(struct scsi_lun);
-		}
-	}
-
-	bfa_trc(ioim->bfa, lun_fetched_cnt);
-	return lun_fetched_cnt;
-}
-
-static bfa_boolean_t
-bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim)
-{
-	struct scsi_inquiry_data_s *inq;
-	struct scatterlist *sg = scsi_sglist((struct scsi_cmnd *)ioim->dio);
-
-	ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
-	inq = (struct scsi_inquiry_data_s *)phys_to_virt(sg_dma_address(sg));
-
-	bfa_trc(ioim->bfa, inq->device_type);
-	inq->peripheral_qual = SCSI_INQ_PQ_NOT_CON;
-	return 0;
-}
-
-static bfa_boolean_t
-bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim)
-{
-	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
-	struct scatterlist *sg = scsi_sglist(cmnd);
-	struct bfi_ioim_rsp_s *m;
-	struct scsi_report_luns_data_s *rl = NULL;
-	int lun_count = 0, lun_fetched_cnt = 0;
-	u32 residue, pgdlen = 0;
-
-	ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
-	if (bfa_get_lun_mask_status(ioim->bfa) != BFA_LUNMASK_ENABLED)
-		return BFA_TRUE;
-
-	m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
-	if (m->scsi_status == SCSI_STATUS_CHECK_CONDITION)
-		return BFA_TRUE;
-
-	pgdlen = sg_dma_len(sg);
-	bfa_trc(ioim->bfa, pgdlen);
-	rl = (struct scsi_report_luns_data_s *)phys_to_virt(sg_dma_address(sg));
-	lun_count = cpu_to_be32(rl->lun_list_length) / sizeof(struct scsi_lun);
-	lun_fetched_cnt = bfa_ioim_lm_update_lun_sg(ioim, &pgdlen, rl);
-
-	if (lun_count == lun_fetched_cnt)
-		return BFA_TRUE;
-
-	bfa_trc(ioim->bfa, lun_count);
-	bfa_trc(ioim->bfa, lun_fetched_cnt);
-	bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length));
-
-	if (be32_to_cpu(rl->lun_list_length) <= pgdlen)
-		rl->lun_list_length = be32_to_cpu(lun_fetched_cnt) *
-				      sizeof(struct scsi_lun);
-	else
-		bfa_stats(ioim->itnim, lm_small_buf_addresidue);
-
-	bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length));
-	bfa_trc(ioim->bfa, be32_to_cpu(m->residue));
-
-	residue = be32_to_cpu(m->residue);
-	residue += (lun_count - lun_fetched_cnt) * sizeof(struct scsi_lun);
-	bfa_stats(ioim->itnim, lm_wire_residue_changed);
-	m->residue = be32_to_cpu(residue);
-	bfa_trc(ioim->bfa, ioim->nsges);
-	return BFA_FALSE;
-}
-
 static void
 __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
 {
@@ -2454,83 +2167,6 @@
 			  m->scsi_status, sns_len, snsinfo, residue);
 }
 
-static void
-__bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete)
-{
-	struct bfa_ioim_s *ioim = cbarg;
-	int sns_len = 0xD;
-	u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
-	struct scsi_sense_s *snsinfo;
-
-	if (!complete) {
-		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
-		return;
-	}
-
-	snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
-					ioim->fcpim->fcp, ioim->iotag);
-	snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
-	snsinfo->add_sense_length = 0xa;
-	snsinfo->asc = SCSI_ASC_LUN_NOT_SUPPORTED;
-	snsinfo->sense_key = ILLEGAL_REQUEST;
-	bfa_trc(ioim->bfa, residue);
-	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
-			SCSI_STATUS_CHECK_CONDITION, sns_len,
-			(u8 *)snsinfo, residue);
-}
-
-static void
-__bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete)
-{
-	struct bfa_ioim_s *ioim = cbarg;
-	int sns_len = 0xD;
-	u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
-	struct scsi_sense_s *snsinfo;
-
-	if (!complete) {
-		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
-		return;
-	}
-
-	snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
-						       ioim->iotag);
-	snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
-	snsinfo->sense_key = SCSI_MP_IEC_UNIT_ATTN;
-	snsinfo->asc = SCSI_ASC_TOCC;
-	snsinfo->add_sense_length = 0x6;
-	snsinfo->ascq = SCSI_ASCQ_RL_DATA_CHANGED;
-	bfa_trc(ioim->bfa, residue);
-	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
-			SCSI_STATUS_CHECK_CONDITION, sns_len,
-			(u8 *)snsinfo, residue);
-}
-
-static void
-__bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete)
-{
-	struct bfa_ioim_s *ioim = cbarg;
-	int sns_len = 0xD;
-	u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
-	struct scsi_sense_s *snsinfo;
-
-	if (!complete) {
-		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
-		return;
-	}
-
-	snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
-					ioim->fcpim->fcp, ioim->iotag);
-	snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
-	snsinfo->add_sense_length = 0xa;
-	snsinfo->sense_key = NOT_READY;
-	snsinfo->asc = SCSI_ASC_LUN_NOT_READY;
-	snsinfo->ascq = SCSI_ASCQ_MAN_INTR_REQ;
-	bfa_trc(ioim->bfa, residue);
-	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
-			SCSI_STATUS_CHECK_CONDITION, sns_len,
-			(u8 *)snsinfo, residue);
-}
-
 void
 bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn, wwn_t rp_wwn,
 			u16 rp_tag, u8 lp_tag)
@@ -2647,7 +2283,8 @@
 	if (port) {
 		*pwwn = port->port_cfg.pwwn;
 		rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
-		rp = rp_fcs->bfa_rport;
+		if (rp_fcs)
+			rp = rp_fcs->bfa_rport;
 	}
 
 	lunm_list = bfa_get_lun_mask_list(bfa);
@@ -2715,7 +2352,8 @@
 		if (port) {
 			*pwwn = port->port_cfg.pwwn;
 			rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
-			rp = rp_fcs->bfa_rport;
+			if (rp_fcs)
+				rp = rp_fcs->bfa_rport;
 		}
 	}
 
@@ -2757,7 +2395,6 @@
 		return;
 	}
 
-	ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
 	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
 			  0, 0, NULL, 0);
 }
@@ -2773,7 +2410,6 @@
 		return;
 	}
 
-	ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
 	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
 			  0, 0, NULL, 0);
 }
@@ -2788,7 +2424,6 @@
 		return;
 	}
 
-	ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
 	bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
 }
 
@@ -3132,7 +2767,6 @@
 		ioim->bfa     = fcpim->bfa;
 		ioim->fcpim   = fcpim;
 		ioim->iosp    = iosp;
-		ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
 		INIT_LIST_HEAD(&ioim->sgpg_q);
 		bfa_reqq_winit(&ioim->iosp->reqq_wait,
 				   bfa_ioim_qresume, ioim);
@@ -3170,7 +2804,6 @@
 			evt = BFA_IOIM_SM_DONE;
 		else
 			evt = BFA_IOIM_SM_COMP;
-		ioim->proc_rsp_data(ioim);
 		break;
 
 	case BFI_IOIM_STS_TIMEDOUT:
@@ -3206,7 +2839,6 @@
 		if (rsp->abort_tag != ioim->abort_tag) {
 			bfa_trc(ioim->bfa, rsp->abort_tag);
 			bfa_trc(ioim->bfa, ioim->abort_tag);
-			ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
 			return;
 		}
 
@@ -3225,7 +2857,6 @@
 		WARN_ON(1);
 	}
 
-	ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
 	bfa_sm_send_event(ioim, evt);
 }
 
@@ -3244,15 +2875,7 @@
 
 	bfa_ioim_cb_profile_comp(fcpim, ioim);
 
-	if (bfa_get_lun_mask_status(bfa) != BFA_LUNMASK_ENABLED)  {
-		bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
-		return;
-	}
-
-	if (ioim->proc_rsp_data(ioim) == BFA_TRUE)
-		bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
-	else
-		bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP);
+	bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
 }
 
 /*
@@ -3364,35 +2987,6 @@
 void
 bfa_ioim_start(struct bfa_ioim_s *ioim)
 {
-	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
-	struct bfa_lps_s	*lps;
-	enum bfa_ioim_lm_status status;
-	struct scsi_lun scsilun;
-
-	if (bfa_get_lun_mask_status(ioim->bfa) == BFA_LUNMASK_ENABLED) {
-		lps = BFA_IOIM_TO_LPS(ioim);
-		int_to_scsilun(cmnd->device->lun, &scsilun);
-		status = bfa_ioim_lm_check(ioim, lps,
-				ioim->itnim->rport, scsilun);
-		if (status == BFA_IOIM_LM_LUN_NOT_RDY) {
-			bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_RDY);
-			bfa_stats(ioim->itnim, lm_lun_not_rdy);
-			return;
-		}
-
-		if (status == BFA_IOIM_LM_LUN_NOT_SUP) {
-			bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_SUP);
-			bfa_stats(ioim->itnim, lm_lun_not_sup);
-			return;
-		}
-
-		if (status == BFA_IOIM_LM_RPL_DATA_CHANGED) {
-			bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_RPL_DC);
-			bfa_stats(ioim->itnim, lm_rpl_data_changed);
-			return;
-		}
-	}
-
 	bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
 
 	/*
diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
index 1080bcb..36f26da 100644
--- a/drivers/scsi/bfa/bfa_fcpim.h
+++ b/drivers/scsi/bfa/bfa_fcpim.h
@@ -110,7 +110,6 @@
 struct bfad_tskim_s;
 
 typedef void    (*bfa_fcpim_profile_t) (struct bfa_ioim_s *ioim);
-typedef bfa_boolean_t (*bfa_ioim_lm_proc_rsp_data_t) (struct bfa_ioim_s *ioim);
 
 struct bfa_fcpim_s {
 	struct bfa_s		*bfa;
@@ -124,7 +123,6 @@
 	u32			path_tov;
 	u16			q_depth;
 	u8			reqq;		/*  Request queue to be used */
-	u8			lun_masking_pending;
 	struct list_head	itnim_q;	/*  queue of active itnim */
 	struct list_head	ioim_resfree_q; /*  IOs waiting for f/w */
 	struct list_head	ioim_comp_q;	/*  IO global comp Q	*/
@@ -181,7 +179,6 @@
 	u8			reqq;		/*  Request queue for I/O */
 	u8			mode;		/*  IO is passthrough or not */
 	u64			start_time;	/*  IO's Profile start val */
-	bfa_ioim_lm_proc_rsp_data_t proc_rsp_data; /* RSP data adjust */
 };
 
 struct bfa_ioim_sp_s {
@@ -261,10 +258,6 @@
 	(__ioim)->iotag |= k << BFA_IOIM_RETRY_TAG_OFFSET;		\
 } while (0)
 
-#define BFA_IOIM_TO_LPS(__ioim)		\
-	BFA_LPS_FROM_TAG(BFA_LPS_MOD(__ioim->bfa),	\
-		__ioim->itnim->rport->rport_info.lp_tag)
-
 static inline bfa_boolean_t
 bfa_ioim_maxretry_reached(struct bfa_ioim_s *ioim)
 {
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h
index 95adb86..b52cbb6 100644
--- a/drivers/scsi/bfa/bfa_svc.h
+++ b/drivers/scsi/bfa/bfa_svc.h
@@ -582,11 +582,6 @@
 #define BFA_LP_TAG_INVALID	0xff
 void	bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
 void	bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
-bfa_boolean_t	bfa_rport_lunmask_active(struct bfa_rport_s *rp);
-wwn_t	bfa_rport_get_pwwn(struct bfa_s *bfa, struct bfa_rport_s *rp);
-struct bfa_rport_s *bfa_rport_get_by_wwn(struct bfa_s *bfa, u16 vf_id,
-					 wwn_t *lpwwn, wwn_t rpwwn);
-void *bfa_cb_get_rp_by_wwn(void *arg, u16 vf_id, wwn_t *lpwwn, wwn_t rpwwn);
 
 /*
  * bfa fcxp API functions
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 66fb725..404fd10 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -674,6 +674,7 @@
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 	bfa_fcs_vport_start(&vport->fcs_vport);
+	list_add_tail(&vport->list_entry, &bfad->vport_list);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
 	return BFA_STATUS_OK;
@@ -1404,6 +1405,7 @@
 	bfad->ref_count = 0;
 	bfad->pport.bfad = bfad;
 	INIT_LIST_HEAD(&bfad->pbc_vport_list);
+	INIT_LIST_HEAD(&bfad->vport_list);
 
 	/* Setup the debugfs node for this bfad */
 	if (bfa_debugfs_enable)
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 9d95844..1938fe0 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -491,7 +491,7 @@
 
 free_scsi_host:
 	bfad_scsi_host_free(bfad, im_port);
-
+	list_del(&vport->list_entry);
 	kfree(vport);
 
 	return 0;
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 06fc00c..530de2b 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -2394,6 +2394,21 @@
 	return 0;
 }
 
+/* Function to reset the LUN SCAN mode */
+static void
+bfad_iocmd_lunmask_reset_lunscan_mode(struct bfad_s *bfad, int lunmask_cfg)
+{
+	struct bfad_im_port_s *pport_im = bfad->pport.im_port;
+	struct bfad_vport_s *vport = NULL;
+
+	/* Set the scsi device LUN SCAN flags for base port */
+	bfad_reset_sdev_bflags(pport_im, lunmask_cfg);
+
+	/* Set the scsi device LUN SCAN flags for the vports */
+	list_for_each_entry(vport, &bfad->vport_list, list_entry)
+		bfad_reset_sdev_bflags(vport->drv_port.im_port, lunmask_cfg);
+}
+
 int
 bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
 {
@@ -2401,11 +2416,17 @@
 	unsigned long	flags;
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
-	if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE)
+	if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE) {
 		iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_TRUE);
-	else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE)
+		/* Set the LUN Scanning mode to be Sequential scan */
+		if (iocmd->status == BFA_STATUS_OK)
+			bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_TRUE);
+	} else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE) {
 		iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_FALSE);
-	else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
+		/* Set the LUN Scanning mode to default REPORT_LUNS scan */
+		if (iocmd->status == BFA_STATUS_OK)
+			bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_FALSE);
+	} else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
 		iocmd->status = bfa_fcpim_lunmask_clear(&bfad->bfa);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 	return 0;
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index 5e19a5f..dc5b9d9 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -43,6 +43,7 @@
 #include <scsi/scsi_transport_fc.h>
 #include <scsi/scsi_transport.h>
 #include <scsi/scsi_bsg_fc.h>
+#include <scsi/scsi_devinfo.h>
 
 #include "bfa_modules.h"
 #include "bfa_fcs.h"
@@ -227,6 +228,7 @@
 	struct list_head	active_aen_q;
 	struct bfa_aen_entry_s	aen_list[BFA_AEN_MAX_ENTRY];
 	spinlock_t		bfad_aen_spinlock;
+	struct list_head	vport_list;
 };
 
 /* BFAD state machine events */
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index e5db649..3153923 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -918,16 +918,70 @@
 }
 
 /*
+ * Function is invoked from the SCSI Host Template slave_alloc() entry point.
+ * It queries the LUN Mask database to check whether this LUN needs to be
+ * made visible to the SCSI mid-layer.
+ *
+ * Returns BFA_STATUS_OK if this LUN needs to be added to the OS stack.
+ * Returns -ENXIO to notify the SCSI mid-layer not to add this LUN to the OS stack.
+ */
+static int
+bfad_im_check_if_make_lun_visible(struct scsi_device *sdev,
+				  struct fc_rport *rport)
+{
+	struct bfad_itnim_data_s *itnim_data =
+				(struct bfad_itnim_data_s *) rport->dd_data;
+	struct bfa_s *bfa = itnim_data->itnim->bfa_itnim->bfa;
+	struct bfa_rport_s *bfa_rport = itnim_data->itnim->bfa_itnim->rport;
+	struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(bfa);
+	int i = 0, ret = -ENXIO;
+
+	for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+		if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE &&
+		    scsilun_to_int(&lun_list[i].lun) == sdev->lun &&
+		    lun_list[i].rp_tag == bfa_rport->rport_tag &&
+		    lun_list[i].lp_tag == (u8)bfa_rport->rport_info.lp_tag) {
+			ret = BFA_STATUS_OK;
+			break;
+		}
+	}
+	return ret;
+}
+
+/*
  * Scsi_Host template entry slave_alloc
  */
 static int
 bfad_im_slave_alloc(struct scsi_device *sdev)
 {
 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+	struct bfad_itnim_data_s *itnim_data =
+				(struct bfad_itnim_data_s *) rport->dd_data;
+	struct bfa_s *bfa = itnim_data->itnim->bfa_itnim->bfa;
 
 	if (!rport || fc_remote_port_chkready(rport))
 		return -ENXIO;
 
+	if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED) {
+		/*
+		 * We should not mask LUN 0 - since this will translate
+		 * to no LUN / TARGET for SCSI ml, resulting in no scan.
+		 */
+		if (sdev->lun == 0) {
+			sdev->sdev_bflags |= BLIST_NOREPORTLUN |
+					     BLIST_SPARSELUN;
+			goto done;
+		}
+
+		/*
+		 * Query LUN Mask configuration - to expose this LUN
+		 * to the SCSI mid-layer or to mask it.
+		 */
+		if (bfad_im_check_if_make_lun_visible(sdev, rport) !=
+							BFA_STATUS_OK)
+			return -ENXIO;
+	}
+done:
 	sdev->hostdata = rport->dd_data;
 
 	return 0;
@@ -1037,6 +1091,8 @@
 	    && (fc_rport->scsi_target_id < MAX_FCP_TARGET))
 		itnim->scsi_tgt_id = fc_rport->scsi_target_id;
 
+	itnim->channel = fc_rport->channel;
+
 	return;
 }
 
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 004b6cf..0814367 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -91,6 +91,7 @@
 	struct fc_rport *fc_rport;
 	struct bfa_itnim_s *bfa_itnim;
 	u16        scsi_tgt_id;
+	u16	   channel;
 	u16        queue_work;
 	unsigned long	last_ramp_up_time;
 	unsigned long	last_queue_full_time;
@@ -166,4 +167,30 @@
 int bfad_im_bsg_request(struct fc_bsg_job *job);
 int bfad_im_bsg_timeout(struct fc_bsg_job *job);
 
+/*
+ * Macro to set the SCSI device sdev_bflags - sdev_bflags are used by the
+ * SCSI mid-layer to choose LUN Scanning mode REPORT_LUNS vs. Sequential Scan
+ *
+ * Internally iterates over all the ITNIMs of the im_port & sets the
+ * sdev_bflags for the scsi_device associated with LUN #0.
+ */
+#define bfad_reset_sdev_bflags(__im_port, __lunmask_cfg) do {		\
+	struct scsi_device *__sdev = NULL;				\
+	struct bfad_itnim_s *__itnim = NULL;				\
+	u32 scan_flags = BLIST_NOREPORTLUN | BLIST_SPARSELUN;		\
+	list_for_each_entry(__itnim, &((__im_port)->itnim_mapped_list),	\
+			    list_entry) {				\
+		__sdev = scsi_device_lookup((__im_port)->shost,		\
+					    __itnim->channel,		\
+					    __itnim->scsi_tgt_id, 0);	\
+		if (__sdev) {						\
+			if ((__lunmask_cfg) == BFA_TRUE)		\
+				__sdev->sdev_bflags |= scan_flags;	\
+			else						\
+				__sdev->sdev_bflags &= ~scan_flags;	\
+			scsi_device_put(__sdev);			\
+		}							\
+	}								\
+} while (0)
+
 #endif
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index c5360ff..d3ff9cd 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -1868,8 +1868,9 @@
 
 	tdata->skb = alloc_skb(cdev->skb_tx_rsvd + headroom, GFP_ATOMIC);
 	if (!tdata->skb) {
-		pr_warn("alloc skb %u+%u, opcode 0x%x failed.\n",
-			cdev->skb_tx_rsvd, headroom, opcode);
+		struct cxgbi_sock *csk = cconn->cep->csk;
+		struct net_device *ndev = cdev->ports[csk->port_id];
+		ndev->stats.tx_dropped++;
 		return -ENOMEM;
 	}
 
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 4ef0212..04c5cea 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -466,6 +466,11 @@
 			 * Power On, Reset, or Bus Device Reset, just retry.
 			 */
 			return ADD_TO_MLQUEUE;
+		if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x01)
+			/*
+			 * Mode Parameters Changed
+			 */
+			return ADD_TO_MLQUEUE;
 		if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06)
 			/*
 			 * ALUA state changed
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 841ebf4..53a31c7 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -953,6 +953,8 @@
 	if (!kmpath_rdacd) {
 		scsi_unregister_device_handler(&rdac_dh);
 		printk(KERN_ERR "kmpath_rdacd creation failed.\n");
+
+		r = -EINVAL;
 	}
 done:
 	return r;
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 8d67467..e959960 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -58,7 +58,11 @@
 MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for "	\
 		 "Direct Data Placement (DDP).");
 
-DEFINE_MUTEX(fcoe_config_mutex);
+unsigned int fcoe_debug_logging;
+module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
+
+static DEFINE_MUTEX(fcoe_config_mutex);
 
 static struct workqueue_struct *fcoe_wq;
 
@@ -67,8 +71,8 @@
 
 /* fcoe host list */
 /* must only by accessed under the RTNL mutex */
-LIST_HEAD(fcoe_hostlist);
-DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
+static LIST_HEAD(fcoe_hostlist);
+static DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
 
 /* Function Prototypes */
 static int fcoe_reset(struct Scsi_Host *);
@@ -157,7 +161,7 @@
 	.lport_set_port_id = fcoe_set_port_id,
 };
 
-struct fc_function_template fcoe_nport_fc_functions = {
+static struct fc_function_template fcoe_nport_fc_functions = {
 	.show_host_node_name = 1,
 	.show_host_port_name = 1,
 	.show_host_supported_classes = 1,
@@ -197,7 +201,7 @@
 	.bsg_request = fc_lport_bsg_request,
 };
 
-struct fc_function_template fcoe_vport_fc_functions = {
+static struct fc_function_template fcoe_vport_fc_functions = {
 	.show_host_node_name = 1,
 	.show_host_port_name = 1,
 	.show_host_supported_classes = 1,
@@ -433,7 +437,7 @@
  *
  * Caller must be holding the RTNL mutex
  */
-void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
+static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
 {
 	struct net_device *netdev = fcoe->netdev;
 	struct fcoe_ctlr *fip = &fcoe->ctlr;
@@ -748,7 +752,7 @@
  *
  * Returns: True for read types I/O, otherwise returns false.
  */
-bool fcoe_oem_match(struct fc_frame *fp)
+static bool fcoe_oem_match(struct fc_frame *fp)
 {
 	struct fc_frame_header *fh = fc_frame_header_get(fp);
 	struct fcp_cmnd *fcp;
@@ -756,11 +760,12 @@
 	if (fc_fcp_is_read(fr_fsp(fp)) &&
 	    (fr_fsp(fp)->data_len > fcoe_ddp_min))
 		return true;
-	else if (!(ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)) {
+	else if ((fr_fsp(fp) == NULL) &&
+		 (fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) &&
+		 (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN)) {
 		fcp = fc_frame_payload_get(fp, sizeof(*fcp));
-		if (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN &&
-		    fcp && (ntohl(fcp->fc_dl) > fcoe_ddp_min) &&
-		    (fcp->fc_flags & FCP_CFL_WRDATA))
+		if ((fcp->fc_flags & FCP_CFL_WRDATA) &&
+		    (ntohl(fcp->fc_dl) > fcoe_ddp_min))
 			return true;
 	}
 	return false;
@@ -1106,7 +1111,7 @@
  *
  * Returns: 0 on success
  */
-int __exit fcoe_if_exit(void)
+static int __exit fcoe_if_exit(void)
 {
 	fc_release_transport(fcoe_nport_scsi_transport);
 	fc_release_transport(fcoe_vport_scsi_transport);
@@ -1295,7 +1300,7 @@
  *
  * Returns: 0 for success
  */
-int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
+static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
 	     struct packet_type *ptype, struct net_device *olddev)
 {
 	struct fc_lport *lport;
@@ -1451,7 +1456,7 @@
  *
  * Return: 0 for success
  */
-int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
+static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
 {
 	int wlen;
 	u32 crc;
@@ -1671,8 +1676,7 @@
 			skb->dev ? skb->dev->name : "<NULL>");
 
 	port = lport_priv(lport);
-	if (skb_is_nonlinear(skb))
-		skb_linearize(skb);	/* not ideal */
+	skb_linearize(skb); /* the skb_is_nonlinear check is done within skb_linearize */
 
 	/*
 	 * Frame length checks and setting up the header pointers
@@ -1728,7 +1732,7 @@
  *
  * Return: 0 for success
  */
-int fcoe_percpu_receive_thread(void *arg)
+static int fcoe_percpu_receive_thread(void *arg)
 {
 	struct fcoe_percpu_s *p = arg;
 	struct sk_buff *skb;
@@ -2146,7 +2150,7 @@
  * Returns: 0 if the ethtool query was successful
  *          -1 if the ethtool query failed
  */
-int fcoe_link_speed_update(struct fc_lport *lport)
+static int fcoe_link_speed_update(struct fc_lport *lport)
 {
 	struct net_device *netdev = fcoe_netdev(lport);
 	struct ethtool_cmd ecmd;
@@ -2180,7 +2184,7 @@
  * Returns: 0 if link is UP and OK, -1 if not
  *
  */
-int fcoe_link_ok(struct fc_lport *lport)
+static int fcoe_link_ok(struct fc_lport *lport)
 {
 	struct net_device *netdev = fcoe_netdev(lport);
 
@@ -2200,7 +2204,7 @@
  * there no packets that will be handled by the lport, but also that any
  * threads already handling packet have returned.
  */
-void fcoe_percpu_clean(struct fc_lport *lport)
+static void fcoe_percpu_clean(struct fc_lport *lport)
 {
 	struct fcoe_percpu_s *pp;
 	struct fcoe_rcv_info *fr;
@@ -2251,7 +2255,7 @@
  *
  * Returns: Always 0 (return value required by FC transport template)
  */
-int fcoe_reset(struct Scsi_Host *shost)
+static int fcoe_reset(struct Scsi_Host *shost)
 {
 	struct fc_lport *lport = shost_priv(shost);
 	struct fcoe_port *port = lport_priv(lport);
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index 6c6884b..bcc89e6 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -40,9 +40,7 @@
 #define FCOE_MIN_XID		0x0000	/* the min xid supported by fcoe_sw */
 #define FCOE_MAX_XID		0x0FFF	/* the max xid supported by fcoe_sw */
 
-unsigned int fcoe_debug_logging;
-module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
+extern unsigned int fcoe_debug_logging;
 
 #define FCOE_LOGGING	    0x01 /* General logging, not categorized */
 #define FCOE_NETDEV_LOGGING 0x02 /* Netdevice logging */
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 5140f5d..b96962c 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -4271,7 +4271,9 @@
 	remove_ctlr_from_lockup_detector_list(h);
 	/* If the list of ctlr's to monitor is empty, stop the thread */
 	if (list_empty(&hpsa_ctlr_list)) {
+		spin_unlock_irqrestore(&lockup_detector_lock, flags);
 		kthread_stop(hpsa_lockup_detector);
+		spin_lock_irqsave(&lockup_detector_lock, flags);
 		hpsa_lockup_detector = NULL;
 	}
 	spin_unlock_irqrestore(&lockup_detector_lock, flags);
diff --git a/drivers/scsi/isci/firmware/Makefile b/drivers/scsi/isci/firmware/Makefile
deleted file mode 100644
index 5f54461..0000000
--- a/drivers/scsi/isci/firmware/Makefile
+++ /dev/null
@@ -1,19 +0,0 @@
-# Makefile for create_fw
-#
-CC=gcc
-CFLAGS=-c -Wall -O2 -g
-LDFLAGS=
-SOURCES=create_fw.c
-OBJECTS=$(SOURCES:.cpp=.o)
-EXECUTABLE=create_fw
-
-all: $(SOURCES) $(EXECUTABLE)
-
-$(EXECUTABLE): $(OBJECTS)
-	$(CC) $(LDFLAGS) $(OBJECTS) -o $@
-
-.c.o:
-	$(CC) $(CFLAGS) $< -O $@
-
-clean:
-	rm -f *.o $(EXECUTABLE)
diff --git a/drivers/scsi/isci/firmware/README b/drivers/scsi/isci/firmware/README
deleted file mode 100644
index 8056d2b..0000000
--- a/drivers/scsi/isci/firmware/README
+++ /dev/null
@@ -1,36 +0,0 @@
-This defines the temporary binary blow we are to pass to the SCU
-driver to emulate the binary firmware that we will eventually be
-able to access via NVRAM on the SCU controller.
-
-The current size of the binary blob is expected to be 149 bytes or larger
-
-Header Types:
-0x1: Phy Masks
-0x2: Phy Gens
-0x3: SAS Addrs
-0xff: End of Data
-
-ID string - u8[12]: "#SCU MAGIC#\0"
-Version - u8: 1
-SubVersion - u8: 0
-
-Header Type - u8: 0x1
-Size - u8: 8
-Phy Mask - u32[8]
-
-Header Type - u8: 0x2
-Size - u8: 8
-Phy Gen - u32[8]
-
-Header Type - u8: 0x3
-Size - u8: 8
-Sas Addr - u64[8]
-
-Header Type - u8: 0xf
-
-
-==============================================================================
-
-Place isci_firmware.bin in /lib/firmware
-Be sure to recreate the initramfs image to include the firmware.
-
diff --git a/drivers/scsi/isci/firmware/create_fw.c b/drivers/scsi/isci/firmware/create_fw.c
deleted file mode 100644
index c7a2887..0000000
--- a/drivers/scsi/isci/firmware/create_fw.c
+++ /dev/null
@@ -1,99 +0,0 @@
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <string.h>
-#include <errno.h>
-#include <asm/types.h>
-#include <strings.h>
-#include <stdint.h>
-
-#include "create_fw.h"
-#include "../probe_roms.h"
-
-int write_blob(struct isci_orom *isci_orom)
-{
-	FILE *fd;
-	int err;
-	size_t count;
-
-	fd = fopen(blob_name, "w+");
-	if (!fd) {
-		perror("Open file for write failed");
-		fclose(fd);
-		return -EIO;
-	}
-
-	count = fwrite(isci_orom, sizeof(struct isci_orom), 1, fd);
-	if (count != 1) {
-		perror("Write data failed");
-		fclose(fd);
-		return -EIO;
-	}
-
-	fclose(fd);
-
-	return 0;
-}
-
-void set_binary_values(struct isci_orom *isci_orom)
-{
-	int ctrl_idx, phy_idx, port_idx;
-
-	/* setting OROM signature */
-	strncpy(isci_orom->hdr.signature, sig, strlen(sig));
-	isci_orom->hdr.version = version;
-	isci_orom->hdr.total_block_length = sizeof(struct isci_orom);
-	isci_orom->hdr.hdr_length = sizeof(struct sci_bios_oem_param_block_hdr);
-	isci_orom->hdr.num_elements = num_elements;
-
-	for (ctrl_idx = 0; ctrl_idx < 2; ctrl_idx++) {
-		isci_orom->ctrl[ctrl_idx].controller.mode_type = mode_type;
-		isci_orom->ctrl[ctrl_idx].controller.max_concurrent_dev_spin_up =
-			max_num_concurrent_dev_spin_up;
-		isci_orom->ctrl[ctrl_idx].controller.do_enable_ssc =
-			enable_ssc;
-
-		for (port_idx = 0; port_idx < 4; port_idx++)
-			isci_orom->ctrl[ctrl_idx].ports[port_idx].phy_mask =
-				phy_mask[ctrl_idx][port_idx];
-
-		for (phy_idx = 0; phy_idx < 4; phy_idx++) {
-			isci_orom->ctrl[ctrl_idx].phys[phy_idx].sas_address.high =
-				(__u32)(sas_addr[ctrl_idx][phy_idx] >> 32);
-			isci_orom->ctrl[ctrl_idx].phys[phy_idx].sas_address.low =
-				(__u32)(sas_addr[ctrl_idx][phy_idx]);
-
-			isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control0 =
-				afe_tx_amp_control0;
-			isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control1 =
-				afe_tx_amp_control1;
-			isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control2 =
-				afe_tx_amp_control2;
-			isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control3 =
-				afe_tx_amp_control3;
-		}
-	}
-}
-
-int main(void)
-{
-	int err;
-	struct isci_orom *isci_orom;
-
-	isci_orom = malloc(sizeof(struct isci_orom));
-	memset(isci_orom, 0, sizeof(struct isci_orom));
-
-	set_binary_values(isci_orom);
-
-	err = write_blob(isci_orom);
-	if (err < 0) {
-		free(isci_orom);
-		return err;
-	}
-
-	free(isci_orom);
-	return 0;
-}
diff --git a/drivers/scsi/isci/firmware/create_fw.h b/drivers/scsi/isci/firmware/create_fw.h
deleted file mode 100644
index 5f29882..0000000
--- a/drivers/scsi/isci/firmware/create_fw.h
+++ /dev/null
@@ -1,77 +0,0 @@
-#ifndef _CREATE_FW_H_
-#define _CREATE_FW_H_
-#include "../probe_roms.h"
-
-
-/* we are configuring for 2 SCUs */
-static const int num_elements = 2;
-
-/*
- * For all defined arrays:
- * elements 0-3 are for SCU0, ports 0-3
- * elements 4-7 are for SCU1, ports 0-3
- *
- * valid configurations for one SCU are:
- *  P0  P1  P2  P3
- * ----------------
- * 0xF,0x0,0x0,0x0 # 1 x4 port
- * 0x3,0x0,0x4,0x8 # Phys 0 and 1 are a x2 port, phy 2 and phy 3 are each x1
- *                 # ports
- * 0x1,0x2,0xC,0x0 # Phys 0 and 1 are each x1 ports, phy 2 and phy 3 are a x2
- *                 # port
- * 0x3,0x0,0xC,0x0 # Phys 0 and 1 are a x2 port, phy 2 and phy 3 are a x2 port
- * 0x1,0x2,0x4,0x8 # Each phy is a x1 port (this is the default configuration)
- *
- * if there is a port/phy on which you do not wish to override the default
- * values, use the value assigned to UNINIT_PARAM (255).
- */
-
-/* discovery mode type (port auto config mode by default ) */
-
-/*
- * if there is a port/phy on which you do not wish to override the default
- * values, use the value "0000000000000000". SAS address of zero's is
- * considered invalid and will not be used.
- */
-#ifdef MPC
-static const int mode_type = SCIC_PORT_MANUAL_CONFIGURATION_MODE;
-static const __u8 phy_mask[2][4] = { {1, 2, 4, 8},
-				     {1, 2, 4, 8} };
-static const unsigned long long sas_addr[2][4] = { { 0x5FCFFFFFF0000001ULL,
-						     0x5FCFFFFFF0000002ULL,
-						     0x5FCFFFFFF0000003ULL,
-						     0x5FCFFFFFF0000004ULL },
-						   { 0x5FCFFFFFF0000005ULL,
-						     0x5FCFFFFFF0000006ULL,
-						     0x5FCFFFFFF0000007ULL,
-						     0x5FCFFFFFF0000008ULL } };
-#else	/* APC (default) */
-static const int mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
-static const __u8 phy_mask[2][4];
-static const unsigned long long sas_addr[2][4] = { { 0x5FCFFFFF00000001ULL,
-						     0x5FCFFFFF00000001ULL,
-						     0x5FCFFFFF00000001ULL,
-						     0x5FCFFFFF00000001ULL },
-						   { 0x5FCFFFFF00000002ULL,
-						     0x5FCFFFFF00000002ULL,
-						     0x5FCFFFFF00000002ULL,
-						     0x5FCFFFFF00000002ULL } };
-#endif
-
-/* Maximum number of concurrent device spin up */
-static const int max_num_concurrent_dev_spin_up = 1;
-
-/* enable of ssc operation */
-static const int enable_ssc;
-
-/* AFE_TX_AMP_CONTROL */
-static const unsigned int afe_tx_amp_control0 = 0x000bdd08;
-static const unsigned int afe_tx_amp_control1 = 0x000ffc00;
-static const unsigned int afe_tx_amp_control2 = 0x000b7c09;
-static const unsigned int afe_tx_amp_control3 = 0x000afc6e;
-
-static const char blob_name[] = "isci_firmware.bin";
-static const char sig[] = "ISCUOEMB";
-static const unsigned char version = 0x10;
-
-#endif
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index e7fe9c4..1a65d65 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -899,7 +899,8 @@
 			 */
 			if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
 			    (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
-			    (iphy->is_in_link_training == true && is_phy_starting(iphy))) {
+			    (iphy->is_in_link_training == true && is_phy_starting(iphy)) ||
+			    (ihost->port_agent.phy_ready_mask != ihost->port_agent.phy_configured_mask)) {
 				is_controller_start_complete = false;
 				break;
 			}
@@ -1666,6 +1667,9 @@
 	/* Default to no SSC operation. */
 	ihost->oem_parameters.controller.do_enable_ssc = false;
 
+	/* Default to short cables on all phys. */
+	ihost->oem_parameters.controller.cable_selection_mask = 0;
+
 	/* Initialize all of the port parameter information to narrow ports. */
 	for (index = 0; index < SCI_MAX_PORTS; index++) {
 		ihost->oem_parameters.ports[index].phy_mask = 0;
@@ -1673,8 +1677,9 @@
 
 	/* Initialize all of the phy parameter information. */
 	for (index = 0; index < SCI_MAX_PHYS; index++) {
-		/* Default to 6G (i.e. Gen 3) for now. */
-		ihost->user_parameters.phys[index].max_speed_generation = 3;
+		/* Default to 3G (i.e. Gen 2). */
+		ihost->user_parameters.phys[index].max_speed_generation =
+			SCIC_SDS_PARM_GEN2_SPEED;
 
 		/* the frequencies cannot be 0 */
 		ihost->user_parameters.phys[index].align_insertion_frequency = 0x7f;
@@ -1694,7 +1699,7 @@
 	ihost->user_parameters.ssp_inactivity_timeout = 5;
 	ihost->user_parameters.stp_max_occupancy_timeout = 5;
 	ihost->user_parameters.ssp_max_occupancy_timeout = 20;
-	ihost->user_parameters.no_outbound_task_timeout = 20;
+	ihost->user_parameters.no_outbound_task_timeout = 2;
 }
 
 static void controller_timeout(unsigned long data)
@@ -1759,7 +1764,7 @@
 	return sci_controller_reset(ihost);
 }
 
-int sci_oem_parameters_validate(struct sci_oem_params *oem)
+int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version)
 {
 	int i;
 
@@ -1791,18 +1796,61 @@
 	    oem->controller.max_concurr_spin_up < 1)
 		return -EINVAL;
 
+	if (oem->controller.do_enable_ssc) {
+		if (version < ISCI_ROM_VER_1_1 && oem->controller.do_enable_ssc != 1)
+			return -EINVAL;
+
+		if (version >= ISCI_ROM_VER_1_1) {
+			u8 test = oem->controller.ssc_sata_tx_spread_level;
+
+			switch (test) {
+			case 0:
+			case 2:
+			case 3:
+			case 6:
+			case 7:
+				break;
+			default:
+				return -EINVAL;
+			}
+
+			test = oem->controller.ssc_sas_tx_spread_level;
+			if (oem->controller.ssc_sas_tx_type == 0) {
+				switch (test) {
+				case 0:
+				case 2:
+				case 3:
+					break;
+				default:
+					return -EINVAL;
+				}
+			} else if (oem->controller.ssc_sas_tx_type == 1) {
+				switch (test) {
+				case 0:
+				case 3:
+				case 6:
+					break;
+				default:
+					return -EINVAL;
+				}
+			}
+		}
+	}
+
 	return 0;
 }
 
 static enum sci_status sci_oem_parameters_set(struct isci_host *ihost)
 {
 	u32 state = ihost->sm.current_state_id;
+	struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
 
 	if (state == SCIC_RESET ||
 	    state == SCIC_INITIALIZING ||
 	    state == SCIC_INITIALIZED) {
 
-		if (sci_oem_parameters_validate(&ihost->oem_parameters))
+		if (sci_oem_parameters_validate(&ihost->oem_parameters,
+						pci_info->orom->hdr.version))
 			return SCI_FAILURE_INVALID_PARAMETER_VALUE;
 
 		return SCI_SUCCESS;
@@ -1857,6 +1905,31 @@
 		ihost->power_control.phys_waiting--;
 		ihost->power_control.phys_granted_power++;
 		sci_phy_consume_power_handler(iphy);
+
+		if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) {
+			u8 j;
+
+			for (j = 0; j < SCI_MAX_PHYS; j++) {
+				struct isci_phy *requester = ihost->power_control.requesters[j];
+
+				/*
+				 * Search the power_control queue to see if there are other phys
+				 * attached to the same remote device. If found, take all of
+				 * them out of await_sas_power state.
+				 */
+				if (requester != NULL && requester != iphy) {
+					u8 other = memcmp(requester->frame_rcvd.iaf.sas_addr,
+							  iphy->frame_rcvd.iaf.sas_addr,
+							  sizeof(requester->frame_rcvd.iaf.sas_addr));
+
+					if (other == 0) {
+						ihost->power_control.requesters[j] = NULL;
+						ihost->power_control.phys_waiting--;
+						sci_phy_consume_power_handler(requester);
+					}
+				}
+			}
+		}
 	}
 
 	/*
@@ -1891,9 +1964,34 @@
 		ihost->power_control.timer_started = true;
 
 	} else {
-		/* Add the phy in the waiting list */
-		ihost->power_control.requesters[iphy->phy_index] = iphy;
-		ihost->power_control.phys_waiting++;
+		/*
+		 * If there are phys attached to the same SAS address as this phy
+		 * that are already in READY state, this phy does not need to wait.
+		 */
+		u8 i;
+		struct isci_phy *current_phy;
+
+		for (i = 0; i < SCI_MAX_PHYS; i++) {
+			u8 other;
+			current_phy = &ihost->phys[i];
+
+			other = memcmp(current_phy->frame_rcvd.iaf.sas_addr,
+				       iphy->frame_rcvd.iaf.sas_addr,
+				       sizeof(current_phy->frame_rcvd.iaf.sas_addr));
+
+			if (current_phy->sm.current_state_id == SCI_PHY_READY &&
+			    current_phy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS &&
+			    other == 0) {
+				sci_phy_consume_power_handler(iphy);
+				break;
+			}
+		}
+
+		if (i == SCI_MAX_PHYS) {
+			/* Add the phy in the waiting list */
+			ihost->power_control.requesters[iphy->phy_index] = iphy;
+			ihost->power_control.phys_waiting++;
+		}
 	}
 }
 
@@ -1908,162 +2006,250 @@
 	ihost->power_control.requesters[iphy->phy_index] = NULL;
 }
 
+static int is_long_cable(int phy, unsigned char selection_byte)
+{
+	return !!(selection_byte & (1 << phy));
+}
+
+static int is_medium_cable(int phy, unsigned char selection_byte)
+{
+	return !!(selection_byte & (1 << (phy + 4)));
+}
+
+static enum cable_selections decode_selection_byte(
+	int phy,
+	unsigned char selection_byte)
+{
+	return ((selection_byte & (1 << phy)) ? 1 : 0)
+		+ (selection_byte & (1 << (phy + 4)) ? 2 : 0);
+}
+
+static unsigned char *to_cable_select(struct isci_host *ihost)
+{
+	if (is_cable_select_overridden())
+		return ((unsigned char *)&cable_selection_override)
+			+ ihost->id;
+	else
+		return &ihost->oem_parameters.controller.cable_selection_mask;
+}
+
+enum cable_selections decode_cable_selection(struct isci_host *ihost, int phy)
+{
+	return decode_selection_byte(phy, *to_cable_select(ihost));
+}
+
+char *lookup_cable_names(enum cable_selections selection)
+{
+	static char *cable_names[] = {
+		[short_cable]     = "short",
+		[long_cable]      = "long",
+		[medium_cable]    = "medium",
+		[undefined_cable] = "<undefined, assumed long>" /* bit 0==1 */
+	};
+	return (selection <= undefined_cable) ? cable_names[selection]
+					      : cable_names[undefined_cable];
+}
+
 #define AFE_REGISTER_WRITE_DELAY 10
 
-/* Initialize the AFE for this phy index. We need to read the AFE setup from
- * the OEM parameters
- */
 static void sci_controller_afe_initialization(struct isci_host *ihost)
 {
+	struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe;
 	const struct sci_oem_params *oem = &ihost->oem_parameters;
 	struct pci_dev *pdev = ihost->pdev;
 	u32 afe_status;
 	u32 phy_id;
+	unsigned char cable_selection_mask = *to_cable_select(ihost);
 
 	/* Clear DFX Status registers */
-	writel(0x0081000f, &ihost->scu_registers->afe.afe_dfx_master_control0);
+	writel(0x0081000f, &afe->afe_dfx_master_control0);
 	udelay(AFE_REGISTER_WRITE_DELAY);
 
-	if (is_b0(pdev)) {
+	if (is_b0(pdev) || is_c0(pdev) || is_c1(pdev)) {
 		/* PM Rx Equalization Save, PM SPhy Rx Acknowledgement
-		 * Timer, PM Stagger Timer */
-		writel(0x0007BFFF, &ihost->scu_registers->afe.afe_pmsn_master_control2);
+		 * Timer, PM Stagger Timer
+		 */
+		writel(0x0007FFFF, &afe->afe_pmsn_master_control2);
 		udelay(AFE_REGISTER_WRITE_DELAY);
 	}
 
 	/* Configure bias currents to normal */
 	if (is_a2(pdev))
-		writel(0x00005A00, &ihost->scu_registers->afe.afe_bias_control);
+		writel(0x00005A00, &afe->afe_bias_control);
 	else if (is_b0(pdev) || is_c0(pdev))
-		writel(0x00005F00, &ihost->scu_registers->afe.afe_bias_control);
+		writel(0x00005F00, &afe->afe_bias_control);
+	else if (is_c1(pdev))
+		writel(0x00005500, &afe->afe_bias_control);
 
 	udelay(AFE_REGISTER_WRITE_DELAY);
 
 	/* Enable PLL */
-	if (is_b0(pdev) || is_c0(pdev))
-		writel(0x80040A08, &ihost->scu_registers->afe.afe_pll_control0);
-	else
-		writel(0x80040908, &ihost->scu_registers->afe.afe_pll_control0);
+	if (is_a2(pdev))
+		writel(0x80040908, &afe->afe_pll_control0);
+	else if (is_b0(pdev) || is_c0(pdev))
+		writel(0x80040A08, &afe->afe_pll_control0);
+	else if (is_c1(pdev)) {
+		writel(0x80000B08, &afe->afe_pll_control0);
+		udelay(AFE_REGISTER_WRITE_DELAY);
+		writel(0x00000B08, &afe->afe_pll_control0);
+		udelay(AFE_REGISTER_WRITE_DELAY);
+		writel(0x80000B08, &afe->afe_pll_control0);
+	}
 
 	udelay(AFE_REGISTER_WRITE_DELAY);
 
 	/* Wait for the PLL to lock */
 	do {
-		afe_status = readl(&ihost->scu_registers->afe.afe_common_block_status);
+		afe_status = readl(&afe->afe_common_block_status);
 		udelay(AFE_REGISTER_WRITE_DELAY);
 	} while ((afe_status & 0x00001000) == 0);
 
 	if (is_a2(pdev)) {
-		/* Shorten SAS SNW lock time (RxLock timer value from 76 us to 50 us) */
-		writel(0x7bcc96ad, &ihost->scu_registers->afe.afe_pmsn_master_control0);
+		/* Shorten SAS SNW lock time (RxLock timer value from 76
+		 * us to 50 us)
+		 */
+		writel(0x7bcc96ad, &afe->afe_pmsn_master_control0);
 		udelay(AFE_REGISTER_WRITE_DELAY);
 	}
 
 	for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
+		struct scu_afe_transceiver *xcvr = &afe->scu_afe_xcvr[phy_id];
 		const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];
+		int cable_length_long =
+			is_long_cable(phy_id, cable_selection_mask);
+		int cable_length_medium =
+			is_medium_cable(phy_id, cable_selection_mask);
 
-		if (is_b0(pdev)) {
-			 /* Configure transmitter SSC parameters */
-			writel(0x00030000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
+		if (is_a2(pdev)) {
+			/* All defaults, except the Receive Word
+			 * Alignment/Comma Detect Enable....(0xe800)
+			 */
+			writel(0x00004512, &xcvr->afe_xcvr_control0);
+			udelay(AFE_REGISTER_WRITE_DELAY);
+
+			writel(0x0050100F, &xcvr->afe_xcvr_control1);
+			udelay(AFE_REGISTER_WRITE_DELAY);
+		} else if (is_b0(pdev)) {
+			/* Configure transmitter SSC parameters */
+			writel(0x00030000, &xcvr->afe_tx_ssc_control);
 			udelay(AFE_REGISTER_WRITE_DELAY);
 		} else if (is_c0(pdev)) {
-			 /* Configure transmitter SSC parameters */
-			writel(0x0003000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
+			/* Configure transmitter SSC parameters */
+			writel(0x00010202, &xcvr->afe_tx_ssc_control);
 			udelay(AFE_REGISTER_WRITE_DELAY);
 
-			/*
-			 * All defaults, except the Receive Word Alignament/Comma Detect
-			 * Enable....(0xe800) */
-			writel(0x00004500, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
+			/* All defaults, except the Receive Word
+			 * Alignment/Comma Detect Enable....(0xe800)
+			 */
+			writel(0x00014500, &xcvr->afe_xcvr_control0);
 			udelay(AFE_REGISTER_WRITE_DELAY);
-		} else {
-			/*
-			 * All defaults, except the Receive Word Alignament/Comma Detect
-			 * Enable....(0xe800) */
-			writel(0x00004512, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
+		} else if (is_c1(pdev)) {
+			/* Configure transmitter SSC parameters */
+			writel(0x00010202, &xcvr->afe_tx_ssc_control);
 			udelay(AFE_REGISTER_WRITE_DELAY);
 
-			writel(0x0050100F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control1);
+			/* All defaults, except the Receive Word
+			 * Alignment/Comma Detect Enable....(0xe800)
+			 */
+			writel(0x0001C500, &xcvr->afe_xcvr_control0);
 			udelay(AFE_REGISTER_WRITE_DELAY);
 		}
 
-		/*
-		 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
-		 * & increase TX int & ext bias 20%....(0xe85c) */
+		/* Power up TX and RX out from power down (PWRDNTX and
+		 * PWRDNRX) & increase TX int & ext bias 20%....(0xe85c)
+		 */
 		if (is_a2(pdev))
-			writel(0x000003F0, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+			writel(0x000003F0, &xcvr->afe_channel_control);
 		else if (is_b0(pdev)) {
-			 /* Power down TX and RX (PWRDNTX and PWRDNRX) */
-			writel(0x000003D7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+			writel(0x000003D7, &xcvr->afe_channel_control);
 			udelay(AFE_REGISTER_WRITE_DELAY);
 
-			/*
-			 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
-			 * & increase TX int & ext bias 20%....(0xe85c) */
-			writel(0x000003D4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
-		} else {
-			writel(0x000001E7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+			writel(0x000003D4, &xcvr->afe_channel_control);
+		} else if (is_c0(pdev)) {
+			writel(0x000001E7, &xcvr->afe_channel_control);
 			udelay(AFE_REGISTER_WRITE_DELAY);
 
-			/*
-			 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
-			 * & increase TX int & ext bias 20%....(0xe85c) */
-			writel(0x000001E4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+			writel(0x000001E4, &xcvr->afe_channel_control);
+		} else if (is_c1(pdev)) {
+			writel(cable_length_long ? 0x000002F7 : 0x000001F7,
+			       &xcvr->afe_channel_control);
+			udelay(AFE_REGISTER_WRITE_DELAY);
+
+			writel(cable_length_long ? 0x000002F4 : 0x000001F4,
+			       &xcvr->afe_channel_control);
 		}
 		udelay(AFE_REGISTER_WRITE_DELAY);
 
 		if (is_a2(pdev)) {
 			/* Enable TX equalization (0xe824) */
-			writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
+			writel(0x00040000, &xcvr->afe_tx_control);
 			udelay(AFE_REGISTER_WRITE_DELAY);
 		}
 
-		/*
-		 * RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, TPD=0x0(TX Power On),
-		 * RDD=0x0(RX Detect Enabled) ....(0xe800) */
-		writel(0x00004100, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
+		if (is_a2(pdev) || is_b0(pdev))
+			/* RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0,
+			 * TPD=0x0(TX Power On), RDD=0x0(RX Detect
+			 * Enabled) ....(0xe800)
+			 */
+			writel(0x00004100, &xcvr->afe_xcvr_control0);
+		else if (is_c0(pdev))
+			writel(0x00014100, &xcvr->afe_xcvr_control0);
+		else if (is_c1(pdev))
+			writel(0x0001C100, &xcvr->afe_xcvr_control0);
 		udelay(AFE_REGISTER_WRITE_DELAY);
 
 		/* Leave DFE/FFE on */
 		if (is_a2(pdev))
-			writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
+			writel(0x3F11103F, &xcvr->afe_rx_ssc_control0);
 		else if (is_b0(pdev)) {
-			writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
+			writel(0x3F11103F, &xcvr->afe_rx_ssc_control0);
 			udelay(AFE_REGISTER_WRITE_DELAY);
 			/* Enable TX equalization (0xe824) */
-			writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
-		} else {
-			writel(0x0140DF0F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control1);
+			writel(0x00040000, &xcvr->afe_tx_control);
+		} else if (is_c0(pdev)) {
+			writel(0x01400C0F, &xcvr->afe_rx_ssc_control1);
 			udelay(AFE_REGISTER_WRITE_DELAY);
 
-			writel(0x3F6F103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
+			writel(0x3F6F103F, &xcvr->afe_rx_ssc_control0);
 			udelay(AFE_REGISTER_WRITE_DELAY);
 
 			/* Enable TX equalization (0xe824) */
-			writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
+			writel(0x00040000, &xcvr->afe_tx_control);
+		} else if (is_c1(pdev)) {
+			writel(cable_length_long ? 0x01500C0C :
+			       cable_length_medium ? 0x01400C0D : 0x02400C0D,
+			       &xcvr->afe_xcvr_control1);
+			udelay(AFE_REGISTER_WRITE_DELAY);
+
+			writel(0x000003E0, &xcvr->afe_dfx_rx_control1);
+			udelay(AFE_REGISTER_WRITE_DELAY);
+
+			writel(cable_length_long ? 0x33091C1F :
+			       cable_length_medium ? 0x3315181F : 0x2B17161F,
+			       &xcvr->afe_rx_ssc_control0);
+			udelay(AFE_REGISTER_WRITE_DELAY);
+
+			/* Enable TX equalization (0xe824) */
+			writel(0x00040000, &xcvr->afe_tx_control);
 		}
 
 		udelay(AFE_REGISTER_WRITE_DELAY);
 
-		writel(oem_phy->afe_tx_amp_control0,
-			&ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control0);
+		writel(oem_phy->afe_tx_amp_control0, &xcvr->afe_tx_amp_control0);
 		udelay(AFE_REGISTER_WRITE_DELAY);
 
-		writel(oem_phy->afe_tx_amp_control1,
-			&ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control1);
+		writel(oem_phy->afe_tx_amp_control1, &xcvr->afe_tx_amp_control1);
 		udelay(AFE_REGISTER_WRITE_DELAY);
 
-		writel(oem_phy->afe_tx_amp_control2,
-			&ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control2);
+		writel(oem_phy->afe_tx_amp_control2, &xcvr->afe_tx_amp_control2);
 		udelay(AFE_REGISTER_WRITE_DELAY);
 
-		writel(oem_phy->afe_tx_amp_control3,
-			&ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control3);
+		writel(oem_phy->afe_tx_amp_control3, &xcvr->afe_tx_amp_control3);
 		udelay(AFE_REGISTER_WRITE_DELAY);
 	}
 
 	/* Transfer control to the PEs */
-	writel(0x00010f00, &ihost->scu_registers->afe.afe_dfx_master_control0);
+	writel(0x00010f00, &afe->afe_dfx_master_control0);
 	udelay(AFE_REGISTER_WRITE_DELAY);
 }
 
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index 646051a..5477f0f 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -435,11 +435,36 @@
 
 static inline bool is_c0(struct pci_dev *pdev)
 {
-	if (pdev->revision >= 5)
+	if (pdev->revision == 5)
 		return true;
 	return false;
 }
 
+static inline bool is_c1(struct pci_dev *pdev)
+{
+	if (pdev->revision >= 6)
+		return true;
+	return false;
+}
+
+enum cable_selections {
+	short_cable     = 0,
+	long_cable      = 1,
+	medium_cable    = 2,
+	undefined_cable = 3
+};
+
+#define CABLE_OVERRIDE_DISABLED (0x10000)
+
+static inline int is_cable_select_overridden(void)
+{
+	return cable_selection_override < CABLE_OVERRIDE_DISABLED;
+}
+
+enum cable_selections decode_cable_selection(struct isci_host *ihost, int phy);
+void validate_cable_selections(struct isci_host *ihost);
+char *lookup_cable_names(enum cable_selections);
+
 /* set hw control for 'activity', even though active enclosures seem to drive
  * the activity led on their own.  Skip setting FSENG control on 'status' due
  * to unexpected operation and 'error' due to not being a supported automatic
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index a97edab..17c4c2c 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -65,7 +65,7 @@
 #include "probe_roms.h"
 
 #define MAJ 1
-#define MIN 0
+#define MIN 1
 #define BUILD 0
 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
 	__stringify(BUILD)
@@ -94,7 +94,7 @@
 
 /* linux isci specific settings */
 
-unsigned char no_outbound_task_to = 20;
+unsigned char no_outbound_task_to = 2;
 module_param(no_outbound_task_to, byte, 0);
 MODULE_PARM_DESC(no_outbound_task_to, "No Outbound Task Timeout (1us incr)");
 
@@ -114,7 +114,7 @@
 module_param(stp_inactive_to, ushort, 0);
 MODULE_PARM_DESC(stp_inactive_to, "STP inactivity timeout (100us incr)");
 
-unsigned char phy_gen = 3;
+unsigned char phy_gen = SCIC_SDS_PARM_GEN2_SPEED;
 module_param(phy_gen, byte, 0);
 MODULE_PARM_DESC(phy_gen, "PHY generation (1: 1.5Gbps 2: 3.0Gbps 3: 6.0Gbps)");
 
@@ -122,6 +122,14 @@
 module_param(max_concurr_spinup, byte, 0);
 MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup");
 
+uint cable_selection_override = CABLE_OVERRIDE_DISABLED;
+module_param(cable_selection_override, uint, 0);
+
+MODULE_PARM_DESC(cable_selection_override,
+		 "This field indicates length of the SAS/SATA cable between "
+		 "host and device. If any bits > 15 are set (default) "
+		 "indicates \"use platform defaults\"");
+
 static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev);
@@ -412,6 +420,14 @@
 		return NULL;
 	isci_host->shost = shost;
 
+	dev_info(&pdev->dev, "%sSCU controller %d: phy 3-0 cables: "
+		 "{%s, %s, %s, %s}\n",
+		 (is_cable_select_overridden() ? "* " : ""), isci_host->id,
+		 lookup_cable_names(decode_cable_selection(isci_host, 3)),
+		 lookup_cable_names(decode_cable_selection(isci_host, 2)),
+		 lookup_cable_names(decode_cable_selection(isci_host, 1)),
+		 lookup_cable_names(decode_cable_selection(isci_host, 0)));
+
 	err = isci_host_init(isci_host);
 	if (err)
 		goto err_shost;
@@ -466,7 +482,8 @@
 		orom = isci_request_oprom(pdev);
 
 	for (i = 0; orom && i < ARRAY_SIZE(orom->ctrl); i++) {
-		if (sci_oem_parameters_validate(&orom->ctrl[i])) {
+		if (sci_oem_parameters_validate(&orom->ctrl[i],
+						orom->hdr.version)) {
 			dev_warn(&pdev->dev,
 				 "[%d]: invalid oem parameters detected, falling back to firmware\n", i);
 			devm_kfree(&pdev->dev, orom);
diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h
index 8efeb6b..234ab46 100644
--- a/drivers/scsi/isci/isci.h
+++ b/drivers/scsi/isci/isci.h
@@ -480,6 +480,7 @@
 extern u16 stp_inactive_to;
 extern unsigned char phy_gen;
 extern unsigned char max_concurr_spinup;
+extern uint cable_selection_override;
 
 irqreturn_t isci_msix_isr(int vec, void *data);
 irqreturn_t isci_intx_isr(int vec, void *data);
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
index 35f50c2..fe18acf 100644
--- a/drivers/scsi/isci/phy.c
+++ b/drivers/scsi/isci/phy.c
@@ -91,22 +91,23 @@
 
 static enum sci_status
 sci_phy_link_layer_initialization(struct isci_phy *iphy,
-				  struct scu_link_layer_registers __iomem *reg)
+				  struct scu_link_layer_registers __iomem *llr)
 {
 	struct isci_host *ihost = iphy->owning_port->owning_controller;
+	struct sci_phy_user_params *phy_user;
+	struct sci_phy_oem_params *phy_oem;
 	int phy_idx = iphy->phy_index;
-	struct sci_phy_user_params *phy_user = &ihost->user_parameters.phys[phy_idx];
-	struct sci_phy_oem_params *phy_oem =
-		&ihost->oem_parameters.phys[phy_idx];
-	u32 phy_configuration;
 	struct sci_phy_cap phy_cap;
+	u32 phy_configuration;
 	u32 parity_check = 0;
 	u32 parity_count = 0;
 	u32 llctl, link_rate;
 	u32 clksm_value = 0;
 	u32 sp_timeouts = 0;
 
-	iphy->link_layer_registers = reg;
+	phy_user = &ihost->user_parameters.phys[phy_idx];
+	phy_oem = &ihost->oem_parameters.phys[phy_idx];
+	iphy->link_layer_registers = llr;
 
 	/* Set our IDENTIFY frame data */
 	#define SCI_END_DEVICE 0x01
@@ -116,32 +117,26 @@
 	       SCU_SAS_TIID_GEN_BIT(STP_INITIATOR) |
 	       SCU_SAS_TIID_GEN_BIT(DA_SATA_HOST) |
 	       SCU_SAS_TIID_GEN_VAL(DEVICE_TYPE, SCI_END_DEVICE),
-	       &iphy->link_layer_registers->transmit_identification);
+	       &llr->transmit_identification);
 
 	/* Write the device SAS Address */
-	writel(0xFEDCBA98,
-	       &iphy->link_layer_registers->sas_device_name_high);
-	writel(phy_idx, &iphy->link_layer_registers->sas_device_name_low);
+	writel(0xFEDCBA98, &llr->sas_device_name_high);
+	writel(phy_idx, &llr->sas_device_name_low);
 
 	/* Write the source SAS Address */
-	writel(phy_oem->sas_address.high,
-		&iphy->link_layer_registers->source_sas_address_high);
-	writel(phy_oem->sas_address.low,
-		&iphy->link_layer_registers->source_sas_address_low);
+	writel(phy_oem->sas_address.high, &llr->source_sas_address_high);
+	writel(phy_oem->sas_address.low, &llr->source_sas_address_low);
 
 	/* Clear and Set the PHY Identifier */
-	writel(0, &iphy->link_layer_registers->identify_frame_phy_id);
-	writel(SCU_SAS_TIPID_GEN_VALUE(ID, phy_idx),
-		&iphy->link_layer_registers->identify_frame_phy_id);
+	writel(0, &llr->identify_frame_phy_id);
+	writel(SCU_SAS_TIPID_GEN_VALUE(ID, phy_idx), &llr->identify_frame_phy_id);
 
 	/* Change the initial state of the phy configuration register */
-	phy_configuration =
-		readl(&iphy->link_layer_registers->phy_configuration);
+	phy_configuration = readl(&llr->phy_configuration);
 
 	/* Hold OOB state machine in reset */
 	phy_configuration |=  SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
-	writel(phy_configuration,
-		&iphy->link_layer_registers->phy_configuration);
+	writel(phy_configuration, &llr->phy_configuration);
 
 	/* Configure the SNW capabilities */
 	phy_cap.all = 0;
@@ -149,15 +144,64 @@
 	phy_cap.gen3_no_ssc = 1;
 	phy_cap.gen2_no_ssc = 1;
 	phy_cap.gen1_no_ssc = 1;
-	if (ihost->oem_parameters.controller.do_enable_ssc == true) {
+	if (ihost->oem_parameters.controller.do_enable_ssc) {
+		struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe;
+		struct scu_afe_transceiver *xcvr = &afe->scu_afe_xcvr[phy_idx];
+		struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
+		bool en_sas = false;
+		bool en_sata = false;
+		u32 sas_type = 0;
+		u32 sata_spread = 0x2;
+		u32 sas_spread = 0x2;
+
 		phy_cap.gen3_ssc = 1;
 		phy_cap.gen2_ssc = 1;
 		phy_cap.gen1_ssc = 1;
+
+		if (pci_info->orom->hdr.version < ISCI_ROM_VER_1_1)
+			en_sas = en_sata = true;
+		else {
+			sata_spread = ihost->oem_parameters.controller.ssc_sata_tx_spread_level;
+			sas_spread = ihost->oem_parameters.controller.ssc_sas_tx_spread_level;
+
+			if (sata_spread)
+				en_sata = true;
+
+			if (sas_spread) {
+				en_sas = true;
+				sas_type = ihost->oem_parameters.controller.ssc_sas_tx_type;
+			}
+
+		}
+
+		if (en_sas) {
+			u32 reg;
+
+			reg = readl(&xcvr->afe_xcvr_control0);
+			reg |= (0x00100000 | (sas_type << 19));
+			writel(reg, &xcvr->afe_xcvr_control0);
+
+			reg = readl(&xcvr->afe_tx_ssc_control);
+			reg |= sas_spread << 8;
+			writel(reg, &xcvr->afe_tx_ssc_control);
+		}
+
+		if (en_sata) {
+			u32 reg;
+
+			reg = readl(&xcvr->afe_tx_ssc_control);
+			reg |= sata_spread;
+			writel(reg, &xcvr->afe_tx_ssc_control);
+
+			reg = readl(&llr->stp_control);
+			reg |= 1 << 12;
+			writel(reg, &llr->stp_control);
+		}
 	}
 
-	/*
-	 * The SAS specification indicates that the phy_capabilities that
-	 * are transmitted shall have an even parity.  Calculate the parity. */
+	/* The SAS specification indicates that the phy_capabilities that
+	 * are transmitted shall have an even parity.  Calculate the parity.
+	 */
 	parity_check = phy_cap.all;
 	while (parity_check != 0) {
 		if (parity_check & 0x1)
@@ -165,20 +209,20 @@
 		parity_check >>= 1;
 	}
 
-	/*
-	 * If parity indicates there are an odd number of bits set, then
-	 * set the parity bit to 1 in the phy capabilities. */
+	/* If parity indicates there are an odd number of bits set, then
+	 * set the parity bit to 1 in the phy capabilities.
+	 */
 	if ((parity_count % 2) != 0)
 		phy_cap.parity = 1;
 
-	writel(phy_cap.all, &iphy->link_layer_registers->phy_capabilities);
+	writel(phy_cap.all, &llr->phy_capabilities);
 
 	/* Set the enable spinup period but disable the ability to send
 	 * notify enable spinup
 	 */
 	writel(SCU_ENSPINUP_GEN_VAL(COUNT,
 			phy_user->notify_enable_spin_up_insertion_frequency),
-		&iphy->link_layer_registers->notify_enable_spinup_control);
+		&llr->notify_enable_spinup_control);
 
 	/* Write the ALIGN Insertion Frequency for connected phy and
 	 * independent of connected state
@@ -189,11 +233,13 @@
 	clksm_value |= SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(GENERAL,
 			phy_user->align_insertion_frequency);
 
-	writel(clksm_value, &iphy->link_layer_registers->clock_skew_management);
+	writel(clksm_value, &llr->clock_skew_management);
 
-	/* @todo Provide a way to write this register correctly */
-	writel(0x02108421,
-		&iphy->link_layer_registers->afe_lookup_table_control);
+	if (is_c0(ihost->pdev) || is_c1(ihost->pdev)) {
+		writel(0x04210400, &llr->afe_lookup_table_control);
+		writel(0x020A7C05, &llr->sas_primitive_timeout);
+	} else
+		writel(0x02108421, &llr->afe_lookup_table_control);
 
 	llctl = SCU_SAS_LLCTL_GEN_VAL(NO_OUTBOUND_TASK_TIMEOUT,
 		(u8)ihost->user_parameters.no_outbound_task_timeout);
@@ -210,9 +256,9 @@
 		break;
 	}
 	llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate);
-	writel(llctl, &iphy->link_layer_registers->link_layer_control);
+	writel(llctl, &llr->link_layer_control);
 
-	sp_timeouts = readl(&iphy->link_layer_registers->sas_phy_timeouts);
+	sp_timeouts = readl(&llr->sas_phy_timeouts);
 
 	/* Clear the default 0x36 (54us) RATE_CHANGE timeout value. */
 	sp_timeouts &= ~SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0xFF);
@@ -222,20 +268,23 @@
 	 */
 	sp_timeouts |= SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0x3B);
 
-	writel(sp_timeouts, &iphy->link_layer_registers->sas_phy_timeouts);
+	writel(sp_timeouts, &llr->sas_phy_timeouts);
 
 	if (is_a2(ihost->pdev)) {
-		/* Program the max ARB time for the PHY to 700us so we inter-operate with
-		 * the PMC expander which shuts down PHYs if the expander PHY generates too
-		 * many breaks.  This time value will guarantee that the initiator PHY will
-		 * generate the break.
+		/* Program the max ARB time for the PHY to 700us so we
+		 * inter-operate with the PMC expander which shuts down
+		 * PHYs if the expander PHY generates too many breaks.
+		 * This time value will guarantee that the initiator PHY
+		 * will generate the break.
 		 */
 		writel(SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME,
-			&iphy->link_layer_registers->maximum_arbitration_wait_timer_timeout);
+		       &llr->maximum_arbitration_wait_timer_timeout);
 	}
 
-	/* Disable link layer hang detection, rely on the OS timeout for I/O timeouts. */
-	writel(0, &iphy->link_layer_registers->link_layer_hang_detection_timeout);
+	/* Disable link layer hang detection, rely on the OS timeout for
+	 * I/O timeouts.
+	 */
+	writel(0, &llr->link_layer_hang_detection_timeout);
 
 	/* We can exit the initial state to the stopped state */
 	sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
@@ -1049,24 +1098,25 @@
 	writel(enable_spinup_value, &iphy->link_layer_registers->notify_enable_spinup_control);
 }
 
-/**
- *
- *
- * This method will start the OOB/SN state machine for this struct isci_phy object.
- */
-static void scu_link_layer_start_oob(
-	struct isci_phy *iphy)
+static void scu_link_layer_start_oob(struct isci_phy *iphy)
 {
-	u32 scu_sas_pcfg_value;
+	struct scu_link_layer_registers __iomem *ll = iphy->link_layer_registers;
+	u32 val;
 
-	scu_sas_pcfg_value =
-		readl(&iphy->link_layer_registers->phy_configuration);
-	scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
-	scu_sas_pcfg_value &=
-		~(SCU_SAS_PCFG_GEN_BIT(OOB_RESET) |
-		SCU_SAS_PCFG_GEN_BIT(HARD_RESET));
-	writel(scu_sas_pcfg_value,
-	       &iphy->link_layer_registers->phy_configuration);
+	/** Reset OOB sequence - start */
+	val = readl(&ll->phy_configuration);
+	val &= ~(SCU_SAS_PCFG_GEN_BIT(OOB_RESET) |
+		 SCU_SAS_PCFG_GEN_BIT(HARD_RESET));
+	writel(val, &ll->phy_configuration);
+	readl(&ll->phy_configuration); /* flush */
+	/** Reset OOB sequence - end */
+
+	/** Start OOB sequence - start */
+	val = readl(&ll->phy_configuration);
+	val |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
+	writel(val, &ll->phy_configuration);
+	readl(&ll->phy_configuration); /* flush */
+	/** Start OOB sequence - end */
 }
 
 /**
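
As background for the parity loop retained above: the SAS-mandated even parity on the transmitted phy capabilities amounts to counting the set bits and setting the parity bit when the count is odd. A minimal standalone sketch of the same calculation (helper name and parity-bit position are hypothetical; the driver sets the dedicated parity field of struct sci_phy_cap):

    #include <stdint.h>
    #include <stdio.h>

    /* Mirror the parity loop in sci_phy_link_layer_initialization(): count
     * the set bits in the capabilities word and, if the count is odd, set a
     * parity bit so the transmitted word has even parity overall.
     */
    static uint32_t phy_cap_with_parity(uint32_t cap)
    {
            uint32_t parity_check = cap;
            uint32_t parity_count = 0;

            while (parity_check != 0) {
                    if (parity_check & 0x1)
                            parity_count++;
                    parity_check >>= 1;
            }

            if ((parity_count % 2) != 0)
                    cap |= 1u << 31;    /* hypothetical parity bit position */

            return cap;
    }

    int main(void)
    {
            /* 0xe has three bits set (odd), so the parity bit gets set. */
            printf("0x%08x\n", (unsigned int)phy_cap_with_parity(0x0000000e));
            /* 0xf has four bits set (even), so the word is unchanged. */
            printf("0x%08x\n", (unsigned int)phy_cap_with_parity(0x0000000f));
            return 0;
    }
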
diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c
index ac7f277..7c6ac58 100644
--- a/drivers/scsi/isci/port.c
+++ b/drivers/scsi/isci/port.c
@@ -114,7 +114,7 @@
  * value is returned if the specified port is not valid.  When this value is
  * returned, no data is copied to the properties output parameter.
  */
-static enum sci_status sci_port_get_properties(struct isci_port *iport,
+enum sci_status sci_port_get_properties(struct isci_port *iport,
 						struct sci_port_properties *prop)
 {
 	if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT)
@@ -647,19 +647,26 @@
 	}
 }
 
-static void sci_port_activate_phy(struct isci_port *iport, struct isci_phy *iphy,
-				  bool do_notify_user)
+static void sci_port_resume_phy(struct isci_port *iport, struct isci_phy *iphy)
+{
+	sci_phy_resume(iphy);
+	iport->enabled_phy_mask |= 1 << iphy->phy_index;
+}
+
+static void sci_port_activate_phy(struct isci_port *iport,
+				  struct isci_phy *iphy,
+				  u8 flags)
 {
 	struct isci_host *ihost = iport->owning_controller;
 
-	if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA)
+	if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA && (flags & PF_RESUME))
 		sci_phy_resume(iphy);
 
 	iport->active_phy_mask |= 1 << iphy->phy_index;
 
 	sci_controller_clear_invalid_phy(ihost, iphy);
 
-	if (do_notify_user == true)
+	if (flags & PF_NOTIFY)
 		isci_port_link_up(ihost, iport, iphy);
 }
 
@@ -669,14 +676,19 @@
 	struct isci_host *ihost = iport->owning_controller;
 
 	iport->active_phy_mask &= ~(1 << iphy->phy_index);
+	iport->enabled_phy_mask &= ~(1 << iphy->phy_index);
 	if (!iport->active_phy_mask)
 		iport->last_active_phy = iphy->phy_index;
 
 	iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;
 
-	/* Re-assign the phy back to the LP as if it were a narrow port */
-	writel(iphy->phy_index,
-		&iport->port_pe_configuration_register[iphy->phy_index]);
+	/* Re-assign the phy back to the LP as if it were a narrow port for APC
+	 * mode. For MPC mode, the phy will remain in the port.
+	 */
+	if (iport->owning_controller->oem_parameters.controller.mode_type ==
+		SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE)
+		writel(iphy->phy_index,
+			&iport->port_pe_configuration_register[iphy->phy_index]);
 
 	if (do_notify_user == true)
 		isci_port_link_down(ihost, iphy, iport);
@@ -701,18 +713,16 @@
  * sci_port_general_link_up_handler - phy can be assigned to port?
  * @sci_port: sci_port object for which has a phy that has gone link up.
  * @sci_phy: This is the struct isci_phy object that has gone link up.
- * @do_notify_user: This parameter specifies whether to inform the user (via
- *    sci_port_link_up()) as to the fact that a new phy as become ready.
+ * @flags: PF_RESUME, PF_NOTIFY to sci_port_activate_phy
  *
- * Determine if this phy can be assigned to this
- * port . If the phy is not a valid PHY for
- * this port then the function will notify the user. A PHY can only be
- * part of a port if it's attached SAS ADDRESS is the same as all other PHYs in
- * the same port. none
+ * Determine if this phy can be assigned to this port. If the phy is
+ * not a valid PHY for this port then the function will notify the user.
+ * A PHY can only be part of a port if its attached SAS address is the
+ * same as that of all other PHYs in the same port.
  */
 static void sci_port_general_link_up_handler(struct isci_port *iport,
-						  struct isci_phy *iphy,
-						  bool do_notify_user)
+					     struct isci_phy *iphy,
+					     u8 flags)
 {
 	struct sci_sas_address port_sas_address;
 	struct sci_sas_address phy_sas_address;
@@ -730,7 +740,7 @@
 	    iport->active_phy_mask == 0) {
 		struct sci_base_state_machine *sm = &iport->sm;
 
-		sci_port_activate_phy(iport, iphy, do_notify_user);
+		sci_port_activate_phy(iport, iphy, flags);
 		if (sm->current_state_id == SCI_PORT_RESETTING)
 			port_state_machine_change(iport, SCI_PORT_READY);
 	} else
@@ -781,11 +791,16 @@
 	struct isci_phy *iphy)
 {
 	if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) &&
-	    (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) &&
-	    sci_port_is_wide(iport)) {
-		sci_port_invalid_link_up(iport, iphy);
-
-		return false;
+	    (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA)) {
+		if (sci_port_is_wide(iport)) {
+			sci_port_invalid_link_up(iport, iphy);
+			return false;
+		} else {
+			struct isci_host *ihost = iport->owning_controller;
+			struct isci_port *dst_port = &(ihost->ports[iphy->phy_index]);
+			writel(iphy->phy_index,
+			       &dst_port->port_pe_configuration_register[iphy->phy_index]);
+		}
 	}
 
 	return true;
@@ -975,6 +990,13 @@
 	}
 }
 
+static void scic_sds_port_ready_substate_waiting_exit(
+					struct sci_base_state_machine *sm)
+{
+	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+	sci_port_resume_port_task_scheduler(iport);
+}
+
 static void sci_port_ready_substate_operational_enter(struct sci_base_state_machine *sm)
 {
 	u32 index;
@@ -988,13 +1010,13 @@
 			writel(iport->physical_port_index,
 				&iport->port_pe_configuration_register[
 					iport->phy_table[index]->phy_index]);
+			if (((iport->active_phy_mask^iport->enabled_phy_mask) & (1 << index)) != 0)
+				sci_port_resume_phy(iport, iport->phy_table[index]);
 		}
 	}
 
 	sci_port_update_viit_entry(iport);
 
-	sci_port_resume_port_task_scheduler(iport);
-
 	/*
 	 * Post the dummy task for the port so the hardware can schedule
 	 * io correctly
@@ -1061,20 +1083,9 @@
 	if (iport->active_phy_mask == 0) {
 		isci_port_not_ready(ihost, iport);
 
-		port_state_machine_change(iport,
-					  SCI_PORT_SUB_WAITING);
-	} else if (iport->started_request_count == 0)
-		port_state_machine_change(iport,
-					  SCI_PORT_SUB_OPERATIONAL);
-}
-
-static void sci_port_ready_substate_configuring_exit(struct sci_base_state_machine *sm)
-{
-	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
-
-	sci_port_suspend_port_task_scheduler(iport);
-	if (iport->ready_exit)
-		sci_port_invalidate_dummy_remote_node(iport);
+		port_state_machine_change(iport, SCI_PORT_SUB_WAITING);
+	} else
+		port_state_machine_change(iport, SCI_PORT_SUB_OPERATIONAL);
 }
 
 enum sci_status sci_port_start(struct isci_port *iport)
@@ -1252,7 +1263,7 @@
 		if (status != SCI_SUCCESS)
 			return status;
 
-		sci_port_general_link_up_handler(iport, iphy, true);
+		sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME);
 		iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
 		port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING);
 
@@ -1262,7 +1273,7 @@
 
 		if (status != SCI_SUCCESS)
 			return status;
-		sci_port_general_link_up_handler(iport, iphy, true);
+		sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY);
 
 		/* Re-enter the configuring state since this may be the last phy in
 		 * the port.
@@ -1338,13 +1349,13 @@
 		/* Since this is the first phy going link up for the port we
 		 * can just enable it and continue
 		 */
-		sci_port_activate_phy(iport, iphy, true);
+		sci_port_activate_phy(iport, iphy, PF_NOTIFY|PF_RESUME);
 
 		port_state_machine_change(iport,
 					  SCI_PORT_SUB_OPERATIONAL);
 		return SCI_SUCCESS;
 	case SCI_PORT_SUB_OPERATIONAL:
-		sci_port_general_link_up_handler(iport, iphy, true);
+		sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME);
 		return SCI_SUCCESS;
 	case SCI_PORT_RESETTING:
 		/* TODO We should  make  sure  that  the phy  that  has gone
@@ -1361,7 +1372,7 @@
 		/* In the resetting state we don't notify the user regarding
 		 * link up and link down notifications.
 		 */
-		sci_port_general_link_up_handler(iport, iphy, false);
+		sci_port_general_link_up_handler(iport, iphy, PF_RESUME);
 		return SCI_SUCCESS;
 	default:
 		dev_warn(sciport_to_dev(iport),
@@ -1584,14 +1595,14 @@
 	},
 	[SCI_PORT_SUB_WAITING] = {
 		.enter_state = sci_port_ready_substate_waiting_enter,
+		.exit_state  = scic_sds_port_ready_substate_waiting_exit,
 	},
 	[SCI_PORT_SUB_OPERATIONAL] = {
 		.enter_state = sci_port_ready_substate_operational_enter,
 		.exit_state  = sci_port_ready_substate_operational_exit
 	},
 	[SCI_PORT_SUB_CONFIGURING] = {
-		.enter_state = sci_port_ready_substate_configuring_enter,
-		.exit_state  = sci_port_ready_substate_configuring_exit
+		.enter_state = sci_port_ready_substate_configuring_enter
 	},
 	[SCI_PORT_RESETTING] = {
 		.exit_state  = sci_port_resetting_state_exit
@@ -1609,6 +1620,7 @@
 	iport->logical_port_index  = SCIC_SDS_DUMMY_PORT;
 	iport->physical_port_index = index;
 	iport->active_phy_mask     = 0;
+	iport->enabled_phy_mask    = 0;
 	iport->last_active_phy     = 0;
 	iport->ready_exit	   = false;
 
diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h
index cb5ffbc..0811609 100644
--- a/drivers/scsi/isci/port.h
+++ b/drivers/scsi/isci/port.h
@@ -63,6 +63,9 @@
 
 #define SCIC_SDS_DUMMY_PORT   0xFF
 
+#define PF_NOTIFY (1 << 0)
+#define PF_RESUME (1 << 1)
+
 struct isci_phy;
 struct isci_host;
 
@@ -83,6 +86,8 @@
  * @logical_port_index: software port index
  * @physical_port_index: hardware port index
  * @active_phy_mask: identifies phy members
+ * @enabled_phy_mask: mask of phys that are enabled and
+ *                    already part of the port
  * @reserved_tag:
  * @reserved_rni: reserved for port task scheduler workaround
  * @started_request_count: reference count for outstanding commands
@@ -104,6 +109,7 @@
 	u8 logical_port_index;
 	u8 physical_port_index;
 	u8 active_phy_mask;
+	u8 enabled_phy_mask;
 	u8 last_active_phy;
 	u16 reserved_rni;
 	u16 reserved_tag;
@@ -250,6 +256,10 @@
 	struct isci_port *iport,
 	struct isci_phy *iphy);
 
+enum sci_status sci_port_get_properties(
+	struct isci_port *iport,
+	struct sci_port_properties *prop);
+
 enum sci_status sci_port_link_up(struct isci_port *iport,
 				      struct isci_phy *iphy);
 enum sci_status sci_port_link_down(struct isci_port *iport,
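
The PF_NOTIFY/PF_RESUME flags added above replace the single do_notify_user bool so callers can request user notification and phy resume independently. A minimal, hedged sketch of how such a flags byte is consumed (flag values taken from the hunk; the handler body is illustrative only, not the driver's):

    #include <stdio.h>

    #define PF_NOTIFY (1 << 0)
    #define PF_RESUME (1 << 1)

    /* Illustrative stand-in for sci_port_activate_phy(): act on each flag
     * independently instead of taking a single bool.
     */
    static void activate_phy(unsigned int phy_index, unsigned char flags)
    {
            if (flags & PF_RESUME)
                    printf("phy%u: resume link layer\n", phy_index);
            if (flags & PF_NOTIFY)
                    printf("phy%u: notify libsas of link up\n", phy_index);
    }

    int main(void)
    {
            activate_phy(0, PF_NOTIFY | PF_RESUME); /* normal link up */
            activate_phy(1, PF_RESUME);             /* during reset: no notify */
            return 0;
    }
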
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
index 38a99d2..6d1e954 100644
--- a/drivers/scsi/isci/port_config.c
+++ b/drivers/scsi/isci/port_config.c
@@ -57,7 +57,7 @@
 
 #define SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT    (10)
 #define SCIC_SDS_APC_RECONFIGURATION_TIMEOUT    (10)
-#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION  (100)
+#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION  (250)
 
 enum SCIC_SDS_APC_ACTIVITY {
 	SCIC_SDS_APC_SKIP_PHY,
@@ -466,6 +466,23 @@
 	return sci_port_configuration_agent_validate_ports(ihost, port_agent);
 }
 
+/*
+ * This routine will restart the automatic port configuration timeout
+ * timer for the next time period. This could be caused by either a link
+ * down event or a link up event where we cannot yet tell to which port a
+ * phy belongs.
+ */
+static void sci_apc_agent_start_timer(
+	struct sci_port_configuration_agent *port_agent,
+	u32 timeout)
+{
+	if (port_agent->timer_pending)
+		sci_del_timer(&port_agent->timer);
+
+	port_agent->timer_pending = true;
+	sci_mod_timer(&port_agent->timer, timeout);
+}
+
 static void sci_apc_agent_configure_ports(struct isci_host *ihost,
 					       struct sci_port_configuration_agent *port_agent,
 					       struct isci_phy *iphy,
@@ -565,17 +582,8 @@
 		break;
 
 	case SCIC_SDS_APC_START_TIMER:
-		/*
-		 * This can occur for either a link down event, or a link
-		 * up event where we cannot yet tell the port to which a
-		 * phy belongs.
-		 */
-		if (port_agent->timer_pending)
-			sci_del_timer(&port_agent->timer);
-
-		port_agent->timer_pending = true;
-		sci_mod_timer(&port_agent->timer,
-			      SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
+		sci_apc_agent_start_timer(port_agent,
+					  SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
 		break;
 
 	case SCIC_SDS_APC_SKIP_PHY:
@@ -607,7 +615,8 @@
 	if (!iport) {
 		/* the phy is not the part of this port */
 		port_agent->phy_ready_mask |= 1 << phy_index;
-		sci_apc_agent_configure_ports(ihost, port_agent, iphy, true);
+		sci_apc_agent_start_timer(port_agent,
+					  SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
 	} else {
 		/* the phy is already the part of the port */
 		u32 port_state = iport->sm.current_state_id;
diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c
index b5f4341..9b8117b 100644
--- a/drivers/scsi/isci/probe_roms.c
+++ b/drivers/scsi/isci/probe_roms.c
@@ -147,7 +147,7 @@
 
 	memcpy(orom, fw->data, fw->size);
 
-	if (is_c0(pdev))
+	if (is_c0(pdev) || is_c1(pdev))
 		goto out;
 
 	/*
diff --git a/drivers/scsi/isci/probe_roms.h b/drivers/scsi/isci/probe_roms.h
index 2c75248..bb0e9d4 100644
--- a/drivers/scsi/isci/probe_roms.h
+++ b/drivers/scsi/isci/probe_roms.h
@@ -152,7 +152,7 @@
 #define MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT 4
 
 struct sci_oem_params;
-int sci_oem_parameters_validate(struct sci_oem_params *oem);
+int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version);
 
 struct isci_orom;
 struct isci_orom *isci_request_oprom(struct pci_dev *pdev);
@@ -191,6 +191,11 @@
 			0x1a, 0x04, 0xc6)
 #define ISCI_EFI_VAR_NAME	"RstScuO"
 
+#define ISCI_ROM_VER_1_0	0x10
+#define ISCI_ROM_VER_1_1	0x11
+#define ISCI_ROM_VER_1_3	0x13
+#define ISCI_ROM_VER_LATEST	ISCI_ROM_VER_1_3
+
 /* Allowed PORT configuration modes APC Automatic PORT configuration mode is
  * defined by the OEM configuration parameters providing no PHY_MASK parameters
  * for any PORT. i.e. There are no phys assigned to any of the ports at start.
@@ -220,8 +225,86 @@
 	struct {
 		uint8_t mode_type;
 		uint8_t max_concurr_spin_up;
-		uint8_t do_enable_ssc;
-		uint8_t reserved;
+		/*
+		 * This bitfield indicates the OEM's desired default Tx
+		 * Spread Spectrum Clocking (SSC) settings for SATA and SAS.
+		 * NOTE: Default SSC Modulation Frequency is 31.5KHz.
+		 */
+		union {
+			struct {
+			/*
+			 * NOTE: Max spread for SATA is +0 / -5000 PPM.
+			 * Down-spreading SSC (only method allowed for SATA):
+			 *  SATA SSC Tx Disabled                    = 0x0
+			 *  SATA SSC Tx at +0 / -1419 PPM Spread    = 0x2
+			 *  SATA SSC Tx at +0 / -2129 PPM Spread    = 0x3
+			 *  SATA SSC Tx at +0 / -4257 PPM Spread    = 0x6
+			 *  SATA SSC Tx at +0 / -4967 PPM Spread    = 0x7
+			 */
+				uint8_t ssc_sata_tx_spread_level:4;
+			/*
+			 * SAS SSC Tx Disabled                     = 0x0
+			 *
+			 * NOTE: Max spread for SAS down-spreading +0 /
+			 *	 -2300 PPM
+			 * Down-spreading SSC:
+			 *  SAS SSC Tx at +0 / -1419 PPM Spread     = 0x2
+			 *  SAS SSC Tx at +0 / -2129 PPM Spread     = 0x3
+			 *
+			 * NOTE: Max spread for SAS center-spreading +2300 /
+			 *	 -2300 PPM
+			 * Center-spreading SSC:
+			 *  SAS SSC Tx at +1064 / -1064 PPM Spread  = 0x3
+			 *  SAS SSC Tx at +2129 / -2129 PPM Spread  = 0x6
+			 */
+				uint8_t ssc_sas_tx_spread_level:3;
+			/*
+			 * NOTE: Refer to the SSC section of the SAS 2.x
+			 * Specification for proper setting of this field.
+			 * For standard SAS Initiator SAS PHY operation it
+			 * should be 0 for Down-spreading.
+			 * SAS SSC Tx spread type:
+			 *  Down-spreading SSC      = 0
+			 *  Center-spreading SSC    = 1
+			 */
+				uint8_t ssc_sas_tx_type:1;
+			};
+			uint8_t do_enable_ssc;
+		};
+		/*
+		 * This field indicates the length of the SAS/SATA cable
+		 * between the host and the device.
+		 * It is used to relate the analog parameters of the phy in
+		 * the silicon to the length of the cable.
+		 * Supported cable attenuation levels:
+		 * "short" - up to 3m, "medium" - 3m to 6m, and "long" - more
+		 * than 6m.
+		 *
+		 * This is a bit mask field:
+		 *
+		 * BIT:      (MSB) 7     6     5     4
+		 * ASSIGNMENT:   <phy3><phy2><phy1><phy0>  - Medium cable
+		 *                                           length assignment
+		 * BIT:            3     2     1     0  (LSB)
+		 * ASSIGNMENT:   <phy3><phy2><phy1><phy0>  - Long cable length
+		 *                                           assignment
+		 *
+		 * BITS 7-4 are set when the cable length is assigned to medium
+		 * BITS 3-0 are set when the cable length is assigned to long
+		 *
+		 * The BIT positions are clear when the cable length is
+		 * assigned to short.
+		 *
+		 * Setting the bits for both long and medium cable length is
+		 * undefined.
+		 *
+		 * A value of 0x84 would assign
+		 *    phy3 - medium
+		 *    phy2 - long
+		 *    phy1 - short
+		 *    phy0 - short
+		 */
+		uint8_t cable_selection_mask;
 	} controller;
 
 	struct {
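
To make the bit layout described in the cable_selection_mask comment concrete, here is a standalone sketch that decodes a mask the same way, using the 0x84 example from the comment (the helper name is hypothetical; in the driver the decode_cable_selection()/lookup_cable_names() helpers referenced in the init.c hunk do the real work):

    #include <stdint.h>
    #include <stdio.h>

    /* Decode the per-phy cable length from the OEM cable_selection_mask:
     * bit (4 + phy) set -> medium, bit (phy) set -> long, neither -> short.
     * (Setting both bits for a phy is undefined per the comment above.)
     */
    static const char *cable_length(uint8_t mask, unsigned int phy)
    {
            int medium   = (mask >> (4 + phy)) & 1;
            int long_len = (mask >> phy) & 1;

            if (medium && long_len)
                    return "undefined";
            if (medium)
                    return "medium";
            if (long_len)
                    return "long";
            return "short";
    }

    int main(void)
    {
            uint8_t mask = 0x84;    /* the worked example from the comment */
            unsigned int phy;

            /* Prints: phy0 short, phy1 short, phy2 long, phy3 medium */
            for (phy = 0; phy < 4; phy++)
                    printf("phy%u: %s\n", phy, cable_length(mask, phy));
            return 0;
    }
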
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index b207cd3..dd74b6c 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -53,6 +53,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 #include <scsi/sas.h>
+#include <linux/bitops.h>
 #include "isci.h"
 #include "port.h"
 #include "remote_device.h"
@@ -1101,6 +1102,7 @@
 						       struct isci_remote_device *idev)
 {
 	enum sci_status status;
+	struct sci_port_properties properties;
 	struct domain_device *dev = idev->domain_dev;
 
 	sci_remote_device_construct(iport, idev);
@@ -1110,6 +1112,11 @@
 	 * entries will be needed to store the remote node.
 	 */
 	idev->is_direct_attached = true;
+
+	sci_port_get_properties(iport, &properties);
+	/* Get accurate port width from port's phy mask for a DA device. */
+	idev->device_port_width = hweight32(properties.phy_mask);
+
 	status = sci_controller_allocate_remote_node_context(iport->owning_controller,
 								  idev,
 								  &idev->rnc.remote_node_index);
@@ -1125,9 +1132,6 @@
 
 	idev->connection_rate = sci_port_get_max_allowed_speed(iport);
 
-	/* / @todo Should I assign the port width by reading all of the phys on the port? */
-	idev->device_port_width = 1;
-
 	return SCI_SUCCESS;
 }
 
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 66ad3dc..f5a3f7d 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -496,7 +496,7 @@
 		}
 	}
 
-	isci_print_tmf(tmf);
+	isci_print_tmf(ihost, tmf);
 
 	if (tmf->status == SCI_SUCCESS)
 		ret =  TMF_RESP_FUNC_COMPLETE;
diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h
index bc78c0a..1b27b37 100644
--- a/drivers/scsi/isci/task.h
+++ b/drivers/scsi/isci/task.h
@@ -106,7 +106,6 @@
 	} resp;
 	unsigned char lun[8];
 	u16 io_tag;
-	struct isci_remote_device *device;
 	enum isci_tmf_function_codes tmf_code;
 	int status;
 
@@ -120,10 +119,10 @@
 
 };
 
-static inline void isci_print_tmf(struct isci_tmf *tmf)
+static inline void isci_print_tmf(struct isci_host *ihost, struct isci_tmf *tmf)
 {
 	if (SAS_PROTOCOL_SATA == tmf->proto)
-		dev_dbg(&tmf->device->isci_port->isci_host->pdev->dev,
+		dev_dbg(&ihost->pdev->dev,
 			"%s: status = %x\n"
 			"tmf->resp.d2h_fis.status = %x\n"
 			"tmf->resp.d2h_fis.error = %x\n",
@@ -132,7 +131,7 @@
 			tmf->resp.d2h_fis.status,
 			tmf->resp.d2h_fis.error);
 	else
-		dev_dbg(&tmf->device->isci_port->isci_host->pdev->dev,
+		dev_dbg(&ihost->pdev->dev,
 			"%s: status = %x\n"
 			"tmf->resp.resp_iu.data_present = %x\n"
 			"tmf->resp.resp_iu.status = %x\n"
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 7269e92..1d1b0c9 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -61,7 +61,7 @@
  * Locking Note: This function expects that the lport mutex is locked before
  * calling it.
  */
-void fc_disc_stop_rports(struct fc_disc *disc)
+static void fc_disc_stop_rports(struct fc_disc *disc)
 {
 	struct fc_lport *lport;
 	struct fc_rport_priv *rdata;
@@ -682,7 +682,7 @@
  * fc_disc_stop() - Stop discovery for a given lport
  * @lport: The local port that discovery should stop on
  */
-void fc_disc_stop(struct fc_lport *lport)
+static void fc_disc_stop(struct fc_lport *lport)
 {
 	struct fc_disc *disc = &lport->disc;
 
@@ -698,7 +698,7 @@
  * This function will block until discovery has been
  * completely stopped and all rports have been deleted.
  */
-void fc_disc_stop_final(struct fc_lport *lport)
+static void fc_disc_stop_final(struct fc_lport *lport)
 {
 	fc_disc_stop(lport);
 	lport->tt.rport_flush_queue();
diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c
index fb9161d..e17a28d 100644
--- a/drivers/scsi/libfc/fc_elsct.c
+++ b/drivers/scsi/libfc/fc_elsct.c
@@ -28,6 +28,7 @@
 #include <scsi/fc/fc_els.h>
 #include <scsi/libfc.h>
 #include <scsi/fc_encode.h>
+#include "fc_libfc.h"
 
 /**
  * fc_elsct_send() - Send an ELS or CT frame
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 9de9db2..4d70d96 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -91,7 +91,7 @@
  * It manages the allocation of exchange IDs.
  */
 struct fc_exch_mgr {
-	struct fc_exch_pool *pool;
+	struct fc_exch_pool __percpu *pool;
 	mempool_t	*ep_pool;
 	enum fc_class	class;
 	struct kref	kref;
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 221875e..f607314 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -155,6 +155,7 @@
 		fsp->xfer_ddp = FC_XID_UNKNOWN;
 		atomic_set(&fsp->ref_cnt, 1);
 		init_timer(&fsp->timer);
+		fsp->timer.data = (unsigned long)fsp;
 		INIT_LIST_HEAD(&fsp->list);
 		spin_lock_init(&fsp->scsi_pkt_lock);
 	}
@@ -1850,9 +1851,6 @@
 	}
 	put_cpu();
 
-	init_timer(&fsp->timer);
-	fsp->timer.data = (unsigned long)fsp;
-
 	/*
 	 * send it to the lower layer
 	 * if we get -1 return then put the request in the pending
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index e77094a..83750eb 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -677,7 +677,8 @@
  * @lport: The local port receiving the event
  * @event: The discovery event
  */
-void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
+static void fc_lport_disc_callback(struct fc_lport *lport,
+				   enum fc_disc_event event)
 {
 	switch (event) {
 	case DISC_EV_SUCCESS:
@@ -1568,7 +1569,7 @@
  * Locking Note: The lport lock is expected to be held before calling
  * this routine.
  */
-void fc_lport_enter_flogi(struct fc_lport *lport)
+static void fc_lport_enter_flogi(struct fc_lport *lport)
 {
 	struct fc_frame *fp;
 
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index b9e4348..83aa1ef 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -391,7 +391,7 @@
  * If it appears we are already logged in, ADISC is used to verify
  * the setup.
  */
-int fc_rport_login(struct fc_rport_priv *rdata)
+static int fc_rport_login(struct fc_rport_priv *rdata)
 {
 	mutex_lock(&rdata->rp_mutex);
 
@@ -451,7 +451,7 @@
  * function will hold the rport lock, call an _enter_*
  * function and then unlock the rport.
  */
-int fc_rport_logoff(struct fc_rport_priv *rdata)
+static int fc_rport_logoff(struct fc_rport_priv *rdata)
 {
 	mutex_lock(&rdata->rp_mutex);
 
@@ -653,8 +653,8 @@
  * @fp:	    The FLOGI response frame
  * @rp_arg: The remote port that received the FLOGI response
  */
-void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
-			 void *rp_arg)
+static void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
+				void *rp_arg)
 {
 	struct fc_rport_priv *rdata = rp_arg;
 	struct fc_lport *lport = rdata->local_port;
@@ -1520,7 +1520,7 @@
  *
  * Locking Note: Called with the lport lock held.
  */
-void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
+static void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
 {
 	struct fc_seq_els_data els_data;
 
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 5c17764..15eefa1 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -306,19 +306,22 @@
 	adapter->host->sg_tablesize = adapter->sglen;
 
 
-	/* use HP firmware and bios version encoding */
+	/* use HP firmware and bios version encoding
+	   Note: fw_version[0|1] and bios_version[0|1] were originally shifted
+	   right 8 bits making them zero. This 0 value was hardcoded to fix
+	   sparse warnings. */
 	if (adapter->product_info.subsysvid == HP_SUBSYS_VID) {
 		sprintf (adapter->fw_version, "%c%d%d.%d%d",
 			 adapter->product_info.fw_version[2],
-			 adapter->product_info.fw_version[1] >> 8,
+			 0,
 			 adapter->product_info.fw_version[1] & 0x0f,
-			 adapter->product_info.fw_version[0] >> 8,
+			 0,
 			 adapter->product_info.fw_version[0] & 0x0f);
 		sprintf (adapter->bios_version, "%c%d%d.%d%d",
 			 adapter->product_info.bios_version[2],
-			 adapter->product_info.bios_version[1] >> 8,
+			 0,
 			 adapter->product_info.bios_version[1] & 0x0f,
-			 adapter->product_info.bios_version[0] >> 8,
+			 0,
 			 adapter->product_info.bios_version[0] & 0x0f);
 	} else {
 		memcpy(adapter->fw_version,
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index dd94c7d..e5f416f 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
 /*
  * MegaRAID SAS Driver meta data
  */
-#define MEGASAS_VERSION				"00.00.06.12-rc1"
-#define MEGASAS_RELDATE				"Oct. 5, 2011"
-#define MEGASAS_EXT_VERSION			"Wed. Oct. 5 17:00:00 PDT 2011"
+#define MEGASAS_VERSION				"00.00.06.14-rc1"
+#define MEGASAS_RELDATE				"Jan. 6, 2012"
+#define MEGASAS_EXT_VERSION			"Fri. Jan. 6 17:00:00 PDT 2012"
 
 /*
  * Device IDs
@@ -773,7 +773,6 @@
 
 #define MFI_OB_INTR_STATUS_MASK			0x00000002
 #define MFI_POLL_TIMEOUT_SECS			60
-#define MEGASAS_COMPLETION_TIMER_INTERVAL      (HZ/10)
 
 #define MFI_REPLY_1078_MESSAGE_INTERRUPT	0x80000000
 #define MFI_REPLY_GEN2_MESSAGE_INTERRUPT	0x00000001
@@ -1353,7 +1352,6 @@
 	u32 mfiStatus;
 	u32 last_seq_num;
 
-	struct timer_list io_completion_timer;
 	struct list_head internal_reset_pending_q;
 
 	/* Ptr to hba specific information */
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 29a994f..8b300be 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,7 +18,7 @@
  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  *
  *  FILE: megaraid_sas_base.c
- *  Version : v00.00.06.12-rc1
+ *  Version : v00.00.06.14-rc1
  *
  *  Authors: LSI Corporation
  *           Sreenivas Bagalkote
@@ -59,14 +59,6 @@
 #include "megaraid_sas.h"
 
 /*
- * poll_mode_io:1- schedule complete completion from q cmd
- */
-static unsigned int poll_mode_io;
-module_param_named(poll_mode_io, poll_mode_io, int, 0);
-MODULE_PARM_DESC(poll_mode_io,
-	"Complete cmds from IO path, (default=0)");
-
-/*
  * Number of sectors per IO command
  * Will be set in megasas_init_mfi if user does not provide
  */
@@ -1439,11 +1431,6 @@
 
 	instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
 				cmd->frame_count-1, instance->reg_set);
-	/*
-	 * Check if we have pend cmds to be completed
-	 */
-	if (poll_mode_io && atomic_read(&instance->fw_outstanding))
-		tasklet_schedule(&instance->isr_tasklet);
 
 	return 0;
 out_return_cmd:
@@ -3370,47 +3357,6 @@
 	return -EINVAL;
 }
 
-/**
- * megasas_start_timer - Initializes a timer object
- * @instance:		Adapter soft state
- * @timer:		timer object to be initialized
- * @fn:			timer function
- * @interval:		time interval between timer function call
- */
-static inline void
-megasas_start_timer(struct megasas_instance *instance,
-			struct timer_list *timer,
-			void *fn, unsigned long interval)
-{
-	init_timer(timer);
-	timer->expires = jiffies + interval;
-	timer->data = (unsigned long)instance;
-	timer->function = fn;
-	add_timer(timer);
-}
-
-/**
- * megasas_io_completion_timer - Timer fn
- * @instance_addr:	Address of adapter soft state
- *
- * Schedules tasklet for cmd completion
- * if poll_mode_io is set
- */
-static void
-megasas_io_completion_timer(unsigned long instance_addr)
-{
-	struct megasas_instance *instance =
-			(struct megasas_instance *)instance_addr;
-
-	if (atomic_read(&instance->fw_outstanding))
-		tasklet_schedule(&instance->isr_tasklet);
-
-	/* Restart timer */
-	if (poll_mode_io)
-		mod_timer(&instance->io_completion_timer,
-			jiffies + MEGASAS_COMPLETION_TIMER_INTERVAL);
-}
-
 static u32
 megasas_init_adapter_mfi(struct megasas_instance *instance)
 {
@@ -3638,11 +3584,6 @@
 	tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
 		(unsigned long)instance);
 
-	/* Initialize the cmd completion timer */
-	if (poll_mode_io)
-		megasas_start_timer(instance, &instance->io_completion_timer,
-				megasas_io_completion_timer,
-				MEGASAS_COMPLETION_TIMER_INTERVAL);
 	return 0;
 
 fail_init_adapter:
@@ -4369,9 +4310,6 @@
 	host = instance->host;
 	instance->unload = 1;
 
-	if (poll_mode_io)
-		del_timer_sync(&instance->io_completion_timer);
-
 	megasas_flush_cache(instance);
 	megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
 
@@ -4511,12 +4449,6 @@
 	}
 
 	instance->instancet->enable_intr(instance->reg_set);
-
-	/* Initialize the cmd completion timer */
-	if (poll_mode_io)
-		megasas_start_timer(instance, &instance->io_completion_timer,
-				megasas_io_completion_timer,
-				MEGASAS_COMPLETION_TIMER_INTERVAL);
 	instance->unload = 0;
 
 	/*
@@ -4570,9 +4502,6 @@
 	host = instance->host;
 	fusion = instance->ctrl_context;
 
-	if (poll_mode_io)
-		del_timer_sync(&instance->io_completion_timer);
-
 	scsi_remove_host(instance->host);
 	megasas_flush_cache(instance);
 	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
@@ -4773,6 +4702,8 @@
 	memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
 	cmd->frame->hdr.context = cmd->index;
 	cmd->frame->hdr.pad_0 = 0;
+	cmd->frame->hdr.flags &= ~(MFI_FRAME_IEEE | MFI_FRAME_SGL64 |
+				   MFI_FRAME_SENSE64);
 
 	/*
 	 * The management interface between applications and the fw uses
@@ -5219,60 +5150,6 @@
 static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUSR, megasas_sysfs_show_dbg_lvl,
 		megasas_sysfs_set_dbg_lvl);
 
-static ssize_t
-megasas_sysfs_show_poll_mode_io(struct device_driver *dd, char *buf)
-{
-	return sprintf(buf, "%u\n", poll_mode_io);
-}
-
-static ssize_t
-megasas_sysfs_set_poll_mode_io(struct device_driver *dd,
-				const char *buf, size_t count)
-{
-	int retval = count;
-	int tmp = poll_mode_io;
-	int i;
-	struct megasas_instance *instance;
-
-	if (sscanf(buf, "%u", &poll_mode_io) < 1) {
-		printk(KERN_ERR "megasas: could not set poll_mode_io\n");
-		retval = -EINVAL;
-	}
-
-	/*
-	 * Check if poll_mode_io is already set or is same as previous value
-	 */
-	if ((tmp && poll_mode_io) || (tmp == poll_mode_io))
-		goto out;
-
-	if (poll_mode_io) {
-		/*
-		 * Start timers for all adapters
-		 */
-		for (i = 0; i < megasas_mgmt_info.max_index; i++) {
-			instance = megasas_mgmt_info.instance[i];
-			if (instance) {
-				megasas_start_timer(instance,
-					&instance->io_completion_timer,
-					megasas_io_completion_timer,
-					MEGASAS_COMPLETION_TIMER_INTERVAL);
-			}
-		}
-	} else {
-		/*
-		 * Delete timers for all adapters
-		 */
-		for (i = 0; i < megasas_mgmt_info.max_index; i++) {
-			instance = megasas_mgmt_info.instance[i];
-			if (instance)
-				del_timer_sync(&instance->io_completion_timer);
-		}
-	}
-
-out:
-	return retval;
-}
-
 static void
 megasas_aen_polling(struct work_struct *work)
 {
@@ -5502,11 +5379,6 @@
 	kfree(ev);
 }
 
-
-static DRIVER_ATTR(poll_mode_io, S_IRUGO|S_IWUSR,
-		megasas_sysfs_show_poll_mode_io,
-		megasas_sysfs_set_poll_mode_io);
-
 /**
  * megasas_init - Driver load entry point
  */
@@ -5566,11 +5438,6 @@
 	if (rval)
 		goto err_dcf_dbg_lvl;
 	rval = driver_create_file(&megasas_pci_driver.driver,
-				  &driver_attr_poll_mode_io);
-	if (rval)
-		goto err_dcf_poll_mode_io;
-
-	rval = driver_create_file(&megasas_pci_driver.driver,
 				&driver_attr_support_device_change);
 	if (rval)
 		goto err_dcf_support_device_change;
@@ -5579,10 +5446,6 @@
 
 err_dcf_support_device_change:
 	driver_remove_file(&megasas_pci_driver.driver,
-		  &driver_attr_poll_mode_io);
-
-err_dcf_poll_mode_io:
-	driver_remove_file(&megasas_pci_driver.driver,
 			   &driver_attr_dbg_lvl);
 err_dcf_dbg_lvl:
 	driver_remove_file(&megasas_pci_driver.driver,
@@ -5607,8 +5470,6 @@
 static void __exit megasas_exit(void)
 {
 	driver_remove_file(&megasas_pci_driver.driver,
-			   &driver_attr_poll_mode_io);
-	driver_remove_file(&megasas_pci_driver.driver,
 			   &driver_attr_dbg_lvl);
 	driver_remove_file(&megasas_pci_driver.driver,
 			&driver_attr_support_poll_for_event);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 5255dd6..294abb0 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -282,7 +282,9 @@
 	else {
 		*pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. */
 		if ((raid->level >= 5) &&
-		    (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER))
+		    ((instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) ||
+		     (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER &&
+		      raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))
 			pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
 		else if (raid->level == 1) {
 			/* Get alternate Pd. */
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 22a3ff0..bfe6854 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -150,6 +150,8 @@
 #define QL4_SESS_RECOVERY_TMO		120	/* iSCSI session */
 						/* recovery timeout */
 
+#define MSB(x) ((uint8_t)((uint16_t)(x) >> 8))
+#define LSW(x) ((uint16_t)(x))
 #define LSDW(x) ((u32)((u64)(x)))
 #define MSDW(x) ((u32)((((u64)(x)) >> 16) >> 16))
 
@@ -671,6 +673,7 @@
 	uint16_t pri_ddb_idx;
 	uint16_t sec_ddb_idx;
 	int is_reset;
+	uint16_t temperature;
 };
 
 struct ql4_task_data {
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 1bdfa81..90614f3 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -697,6 +697,9 @@
 			writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
 			       &ha->reg->ctrl_status);
 			readl(&ha->reg->ctrl_status);
+			writel(set_rmask(CSR_SCSI_COMPLETION_INTR),
+			       &ha->reg->ctrl_status);
+			readl(&ha->reg->ctrl_status);
 			spin_unlock_irqrestore(&ha->hardware_lock, flags);
 			if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS) {
 				DEBUG2(printk("scsi%ld: %s: Get firmware "
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index c259378..e1e66a4 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -219,6 +219,13 @@
 		ha->mailbox_timeout_count++;
 		mbx_sts[0] = (-1);
 		set_bit(DPC_RESET_HA, &ha->dpc_flags);
+		if (is_qla8022(ha)) {
+			ql4_printk(KERN_INFO, ha,
+				   "disabling pause transmit on port 0 & 1.\n");
+			qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+					CRB_NIU_XG_PAUSE_CTL_P0 |
+					CRB_NIU_XG_PAUSE_CTL_P1);
+		}
 		goto mbox_exit;
 	}
 
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 8d6bc1b..78f1111 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -1875,6 +1875,11 @@
 int qla4_8xxx_load_risc(struct scsi_qla_host *ha)
 {
 	int retval;
+
+	/* clear the interrupt */
+	writel(0, &ha->qla4_8xxx_reg->host_int);
+	readl(&ha->qla4_8xxx_reg->host_int);
+
 	retval = qla4_8xxx_device_state_handler(ha);
 
 	if (retval == QLA_SUCCESS && !test_bit(AF_INIT_DONE, &ha->flags))
diff --git a/drivers/scsi/qla4xxx/ql4_nx.h b/drivers/scsi/qla4xxx/ql4_nx.h
index 35376a1..dc45ac9 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.h
+++ b/drivers/scsi/qla4xxx/ql4_nx.h
@@ -19,12 +19,28 @@
 #define PHAN_PEG_RCV_INITIALIZED	0xff01
 
 /*CRB_RELATED*/
-#define QLA82XX_CRB_BASE	QLA82XX_CAM_RAM(0x200)
-#define QLA82XX_REG(X)		(QLA82XX_CRB_BASE+(X))
-
+#define QLA82XX_CRB_BASE		(QLA82XX_CAM_RAM(0x200))
+#define QLA82XX_REG(X)			(QLA82XX_CRB_BASE+(X))
 #define CRB_CMDPEG_STATE		QLA82XX_REG(0x50)
 #define CRB_RCVPEG_STATE		QLA82XX_REG(0x13c)
 #define CRB_DMA_SHIFT			QLA82XX_REG(0xcc)
+#define CRB_TEMP_STATE			QLA82XX_REG(0x1b4)
+
+#define qla82xx_get_temp_val(x)		((x) >> 16)
+#define qla82xx_get_temp_state(x)	((x) & 0xffff)
+#define qla82xx_encode_temp(val, state)	(((val) << 16) | (state))
+
+/*
+ * Temperature control.
+ */
+enum {
+	QLA82XX_TEMP_NORMAL = 0x1,	/* Normal operating range */
+	QLA82XX_TEMP_WARN,	/* Sound alert, temperature getting high */
+	QLA82XX_TEMP_PANIC	/* Fatal error, hardware has shut down. */
+};
+
+#define CRB_NIU_XG_PAUSE_CTL_P0		0x1
+#define CRB_NIU_XG_PAUSE_CTL_P1		0x8
 
 #define QLA82XX_HW_H0_CH_HUB_ADR	0x05
 #define QLA82XX_HW_H1_CH_HUB_ADR	0x0E
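
To make the new CRB_TEMP_STATE encoding above concrete: the qla82xx_* macros pack the temperature value into the upper 16 bits of the register and the state into the lower 16 bits. A hedged standalone sketch of that packing (the TEMP_* values mirror the QLA82XX_TEMP_* enum; everything else is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Same layout as the qla82xx_* macros above: temperature value in
     * bits 31:16, temperature state in bits 15:0.
     */
    #define TEMP_NORMAL 0x1
    #define TEMP_WARN   0x2
    #define TEMP_PANIC  0x3

    static uint32_t encode_temp(uint32_t val, uint32_t state)
    {
            return (val << 16) | state;
    }

    int main(void)
    {
            uint32_t reg = encode_temp(55, TEMP_WARN); /* 55 C, warning state */

            printf("temperature: %u C, state: 0x%x\n",
                   (unsigned int)(reg >> 16),      /* qla82xx_get_temp_val()   */
                   (unsigned int)(reg & 0xffff));  /* qla82xx_get_temp_state() */
            return 0;
    }
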
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index ec393a0..ce6d3b7 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -35,43 +35,44 @@
 int ql4xdisablesysfsboot = 1;
 module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(ql4xdisablesysfsboot,
-		"Set to disable exporting boot targets to sysfs\n"
-		" 0 - Export boot targets\n"
-		" 1 - Do not export boot targets (Default)");
+		 " Set to disable exporting boot targets to sysfs.\n"
+		 "\t\t  0 - Export boot targets\n"
+		 "\t\t  1 - Do not export boot targets (Default)");
 
 int ql4xdontresethba = 0;
 module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(ql4xdontresethba,
-		"Don't reset the HBA for driver recovery \n"
-		" 0 - It will reset HBA (Default)\n"
-		" 1 - It will NOT reset HBA");
+		 " Don't reset the HBA for driver recovery.\n"
+		 "\t\t  0 - It will reset HBA (Default)\n"
+		 "\t\t  1 - It will NOT reset HBA");
 
-int ql4xextended_error_logging = 0; /* 0 = off, 1 = log errors */
+int ql4xextended_error_logging;
 module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(ql4xextended_error_logging,
-		 "Option to enable extended error logging, "
-		 "Default is 0 - no logging, 1 - debug logging");
+		 " Option to enable extended error logging.\n"
+		 "\t\t  0 - no logging (Default)\n"
+		 "\t\t  2 - debug logging");
 
 int ql4xenablemsix = 1;
 module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(ql4xenablemsix,
-		"Set to enable MSI or MSI-X interrupt mechanism.\n"
-		" 0 = enable INTx interrupt mechanism.\n"
-		" 1 = enable MSI-X interrupt mechanism (Default).\n"
-		" 2 = enable MSI interrupt mechanism.");
+		 " Set to enable MSI or MSI-X interrupt mechanism.\n"
+		 "\t\t  0 = enable INTx interrupt mechanism.\n"
+		 "\t\t  1 = enable MSI-X interrupt mechanism (Default).\n"
+		 "\t\t  2 = enable MSI interrupt mechanism.");
 
 #define QL4_DEF_QDEPTH 32
 static int ql4xmaxqdepth = QL4_DEF_QDEPTH;
 module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(ql4xmaxqdepth,
-		"Maximum queue depth to report for target devices.\n"
-		" Default: 32.");
+		 " Maximum queue depth to report for target devices.\n"
+		 "\t\t  Default: 32.");
 
 static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
 module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
 MODULE_PARM_DESC(ql4xsess_recovery_tmo,
 		"Target Session Recovery Timeout.\n"
-		" Default: 120 sec.");
+		"\t\t  Default: 120 sec.");
 
 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
 /*
@@ -1630,7 +1631,9 @@
 
 	/* Update timers after login */
 	ddb_entry->default_relogin_timeout =
-				le16_to_cpu(fw_ddb_entry->def_timeout);
+		(le16_to_cpu(fw_ddb_entry->def_timeout) > LOGIN_TOV) &&
+		 (le16_to_cpu(fw_ddb_entry->def_timeout) < LOGIN_TOV * 10) ?
+		 le16_to_cpu(fw_ddb_entry->def_timeout) : LOGIN_TOV;
 	ddb_entry->default_time2wait =
 				le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
 
@@ -1970,6 +1973,42 @@
 }
 
 /**
+ * qla4_8xxx_check_temp - Check the ISP82XX temperature.
+ * @ha: adapter block pointer.
+ *
+ * Note: The caller should not hold the idc lock.
+ **/
+static int qla4_8xxx_check_temp(struct scsi_qla_host *ha)
+{
+	uint32_t temp, temp_state, temp_val;
+	int status = QLA_SUCCESS;
+
+	temp = qla4_8xxx_rd_32(ha, CRB_TEMP_STATE);
+
+	temp_state = qla82xx_get_temp_state(temp);
+	temp_val = qla82xx_get_temp_val(temp);
+
+	if (temp_state == QLA82XX_TEMP_PANIC) {
+		ql4_printk(KERN_WARNING, ha, "Device temperature %d degrees C"
+			   " exceeds maximum allowed. Hardware has been shut"
+			   " down.\n", temp_val);
+		status = QLA_ERROR;
+	} else if (temp_state == QLA82XX_TEMP_WARN) {
+		if (ha->temperature == QLA82XX_TEMP_NORMAL)
+			ql4_printk(KERN_WARNING, ha, "Device temperature %d"
+				   " degrees C exceeds operating range."
+				   " Immediate action needed.\n", temp_val);
+	} else {
+		if (ha->temperature == QLA82XX_TEMP_WARN)
+			ql4_printk(KERN_INFO, ha, "Device temperature is"
+				   " now %d degrees C in normal range.\n",
+				   temp_val);
+	}
+	ha->temperature = temp_state;
+	return status;
+}
+
+/**
  * qla4_8xxx_check_fw_alive  - Check firmware health
  * @ha: Pointer to host adapter structure.
  *
@@ -2040,7 +2079,16 @@
 	    test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
 	    test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
 		dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
-		if (dev_state == QLA82XX_DEV_NEED_RESET &&
+
+		if (qla4_8xxx_check_temp(ha)) {
+			ql4_printk(KERN_INFO, ha, "disabling pause"
+				   " transmit on port 0 & 1.\n");
+			qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+					CRB_NIU_XG_PAUSE_CTL_P0 |
+					CRB_NIU_XG_PAUSE_CTL_P1);
+			set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
+			qla4xxx_wake_dpc(ha);
+		} else if (dev_state == QLA82XX_DEV_NEED_RESET &&
 		    !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
 			if (!ql4xdontresethba) {
 				ql4_printk(KERN_INFO, ha, "%s: HW State: "
@@ -2057,9 +2105,21 @@
 		} else  {
 			/* Check firmware health */
 			if (qla4_8xxx_check_fw_alive(ha)) {
+				ql4_printk(KERN_INFO, ha, "disabling pause"
+					   " transmit on port 0 & 1.\n");
+				qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+						CRB_NIU_XG_PAUSE_CTL_P0 |
+						CRB_NIU_XG_PAUSE_CTL_P1);
 				halt_status = qla4_8xxx_rd_32(ha,
 						QLA82XX_PEG_HALT_STATUS1);
 
+				if (LSW(MSB(halt_status)) == 0x67)
+					ql4_printk(KERN_ERR, ha, "%s:"
+						   " Firmware aborted with"
+						   " error code 0x00006700."
+						   " Device is being reset\n",
+						   __func__);
+
 				/* Since we cannot change dev_state in interrupt
 				 * context, set appropriate DPC flag then wakeup
 				 * DPC */
@@ -2078,7 +2138,7 @@
 	}
 }
 
-void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
+static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
 {
 	struct iscsi_session *sess;
 	struct ddb_entry *ddb_entry;
@@ -3826,16 +3886,14 @@
 	return ret;
 }
 
-static void qla4xxx_free_nt_list(struct list_head *list_nt)
+static void qla4xxx_free_ddb_list(struct list_head *list_ddb)
 {
-	struct qla_ddb_index  *nt_ddb_idx, *nt_ddb_idx_tmp;
+	struct qla_ddb_index  *ddb_idx, *ddb_idx_tmp;
 
-	/* Free up the normaltargets list */
-	list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
-		list_del_init(&nt_ddb_idx->list);
-		vfree(nt_ddb_idx);
+	list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
+		list_del_init(&ddb_idx->list);
+		vfree(ddb_idx);
 	}
-
 }
 
 static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
@@ -3884,6 +3942,8 @@
 static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
 					  struct ddb_entry *ddb_entry)
 {
+	uint16_t def_timeout;
+
 	ddb_entry->ddb_type = FLASH_DDB;
 	ddb_entry->fw_ddb_index = INVALID_ENTRY;
 	ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
@@ -3894,9 +3954,10 @@
 	atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
 	atomic_set(&ddb_entry->relogin_timer, 0);
 	atomic_set(&ddb_entry->relogin_retry_count, 0);
-
+	def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
 	ddb_entry->default_relogin_timeout =
-		le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
+		(def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
+		def_timeout : LOGIN_TOV;
 	ddb_entry->default_time2wait =
 		le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
 }
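
The default_relogin_timeout change in this hunk (and the matching one in the login-done path earlier in ql4_os.c) bounds the firmware-supplied def_timeout: the value is honored only if it lies strictly between LOGIN_TOV and ten times LOGIN_TOV, otherwise LOGIN_TOV is used. A standalone sketch of that clamp, with an assumed LOGIN_TOV of 30 seconds purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define LOGIN_TOV 30    /* assumed value, for illustration only */

    /* Mirror the ternary used for default_relogin_timeout above: accept the
     * firmware-supplied value only when LOGIN_TOV < def_timeout <
     * LOGIN_TOV * 10, otherwise fall back to LOGIN_TOV.
     */
    static uint16_t relogin_timeout(uint16_t def_timeout)
    {
            if (def_timeout > LOGIN_TOV && def_timeout < LOGIN_TOV * 10)
                    return def_timeout;
            return LOGIN_TOV;
    }

    int main(void)
    {
            printf("%u\n", (unsigned int)relogin_timeout(5));   /* too small -> 30  */
            printf("%u\n", (unsigned int)relogin_timeout(120)); /* in range  -> 120 */
            printf("%u\n", (unsigned int)relogin_timeout(900)); /* too large -> 30  */
            return 0;
    }
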
@@ -3934,7 +3995,6 @@
 			    ip_state == IP_ADDRSTATE_DEPRICATED ||
 			    ip_state == IP_ADDRSTATE_DISABLING)
 				ip_idx[idx] = -1;
-
 		}
 
 		/* Break if all IP states checked */
@@ -3947,58 +4007,37 @@
 	} while (time_after(wtime, jiffies));
 }
 
-void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
+static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
+				  struct list_head *list_st)
 {
+	struct qla_ddb_index  *st_ddb_idx;
 	int max_ddbs;
+	int fw_idx_size;
+	struct dev_db_entry *fw_ddb_entry;
+	dma_addr_t fw_ddb_dma;
 	int ret;
 	uint32_t idx = 0, next_idx = 0;
 	uint32_t state = 0, conn_err = 0;
-	uint16_t conn_id;
-	struct dev_db_entry *fw_ddb_entry;
-	struct ddb_entry *ddb_entry = NULL;
-	dma_addr_t fw_ddb_dma;
-	struct iscsi_cls_session *cls_sess;
-	struct iscsi_session *sess;
-	struct iscsi_cls_conn *cls_conn;
-	struct iscsi_endpoint *ep;
-	uint16_t cmds_max = 32, tmo = 0;
-	uint32_t initial_cmdsn = 0;
-	struct list_head list_st, list_nt; /* List of sendtargets */
-	struct qla_ddb_index  *st_ddb_idx, *st_ddb_idx_tmp;
-	int fw_idx_size;
-	unsigned long wtime;
-	struct qla_ddb_index  *nt_ddb_idx;
-
-	if (!test_bit(AF_LINK_UP, &ha->flags)) {
-		set_bit(AF_BUILD_DDB_LIST, &ha->flags);
-		ha->is_reset = is_reset;
-		return;
-	}
-	max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
-				     MAX_DEV_DB_ENTRIES;
+	uint16_t conn_id = 0;
 
 	fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
 				      &fw_ddb_dma);
 	if (fw_ddb_entry == NULL) {
 		DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
-		goto exit_ddb_list;
+		goto exit_st_list;
 	}
 
-	INIT_LIST_HEAD(&list_st);
-	INIT_LIST_HEAD(&list_nt);
+	max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+				     MAX_DEV_DB_ENTRIES;
 	fw_idx_size = sizeof(struct qla_ddb_index);
 
 	for (idx = 0; idx < max_ddbs; idx = next_idx) {
-		ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry,
-					      fw_ddb_dma, NULL,
-					      &next_idx, &state, &conn_err,
-					      NULL, &conn_id);
+		ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
+					      NULL, &next_idx, &state,
+					      &conn_err, NULL, &conn_id);
 		if (ret == QLA_ERROR)
 			break;
 
-		if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
-			goto continue_next_st;
-
 		/* Check if ST, add to the list_st */
 		if (strlen((char *) fw_ddb_entry->iscsi_name) != 0)
 			goto continue_next_st;
@@ -4009,12 +4048,235 @@
 
 		st_ddb_idx->fw_ddb_idx = idx;
 
-		list_add_tail(&st_ddb_idx->list, &list_st);
+		list_add_tail(&st_ddb_idx->list, list_st);
 continue_next_st:
 		if (next_idx == 0)
 			break;
 	}
 
+exit_st_list:
+	if (fw_ddb_entry)
+		dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+}
+
+/**
+ * qla4xxx_remove_failed_ddb - Remove inactive or failed ddb from list
+ * @ha: pointer to adapter structure
+ * @list_ddb: List from which failed DDBs are to be removed
+ *
+ * Iterate over the list of DDBs and find and remove DDBs that are either in
+ * no connection active state or failed state
+ **/
+static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha,
+				      struct list_head *list_ddb)
+{
+	struct qla_ddb_index  *ddb_idx, *ddb_idx_tmp;
+	uint32_t next_idx = 0;
+	uint32_t state = 0, conn_err = 0;
+	int ret;
+
+	list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
+		ret = qla4xxx_get_fwddb_entry(ha, ddb_idx->fw_ddb_idx,
+					      NULL, 0, NULL, &next_idx, &state,
+					      &conn_err, NULL, NULL);
+		if (ret == QLA_ERROR)
+			continue;
+
+		if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
+		    state == DDB_DS_SESSION_FAILED) {
+			list_del_init(&ddb_idx->list);
+			vfree(ddb_idx);
+		}
+	}
+}
+
+static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
+				   struct dev_db_entry *fw_ddb_entry,
+				   int is_reset)
+{
+	struct iscsi_cls_session *cls_sess;
+	struct iscsi_session *sess;
+	struct iscsi_cls_conn *cls_conn;
+	struct iscsi_endpoint *ep;
+	uint16_t cmds_max = 32;
+	uint16_t conn_id = 0;
+	uint32_t initial_cmdsn = 0;
+	int ret = QLA_SUCCESS;
+
+	struct ddb_entry *ddb_entry = NULL;
+
+	/* Create session object with INVALID_ENTRY;
+	 * the target_id will get set when we issue the login.
+	 */
+	cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host,
+				       cmds_max, sizeof(struct ddb_entry),
+				       sizeof(struct ql4_task_data),
+				       initial_cmdsn, INVALID_ENTRY);
+	if (!cls_sess) {
+		ret = QLA_ERROR;
+		goto exit_setup;
+	}
+
+	/*
+	 * iscsi_session_setup() takes a module reference, so call
+	 * module_put() here to decrement the reference count.
+	 **/
+	module_put(qla4xxx_iscsi_transport.owner);
+	sess = cls_sess->dd_data;
+	ddb_entry = sess->dd_data;
+	ddb_entry->sess = cls_sess;
+
+	cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
+	memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
+	       sizeof(struct dev_db_entry));
+
+	qla4xxx_setup_flash_ddb_entry(ha, ddb_entry);
+
+	cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id);
+
+	if (!cls_conn) {
+		ret = QLA_ERROR;
+		goto exit_setup;
+	}
+
+	ddb_entry->conn = cls_conn;
+
+	/* Setup ep, for displaying attributes in sysfs */
+	ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
+	if (ep) {
+		ep->conn = cls_conn;
+		cls_conn->ep = ep;
+	} else {
+		DEBUG2(ql4_printk(KERN_ERR, ha, "Unable to get ep\n"));
+		ret = QLA_ERROR;
+		goto exit_setup;
+	}
+
+	/* Update sess/conn params */
+	qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
+
+	if (is_reset == RESET_ADAPTER) {
+		iscsi_block_session(cls_sess);
+		/* Use the relogin path to discover new devices
+		 *  by short-circuiting the logic of setting
+		 *  timer to relogin - instead set the flags
+		 *  to initiate login right away.
+		 */
+		set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+		set_bit(DF_RELOGIN, &ddb_entry->flags);
+	}
+
+exit_setup:
+	return ret;
+}
+
+static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
+				  struct list_head *list_nt, int is_reset)
+{
+	struct dev_db_entry *fw_ddb_entry;
+	dma_addr_t fw_ddb_dma;
+	int max_ddbs;
+	int fw_idx_size;
+	int ret;
+	uint32_t idx = 0, next_idx = 0;
+	uint32_t state = 0, conn_err = 0;
+	uint16_t conn_id = 0;
+	struct qla_ddb_index  *nt_ddb_idx;
+
+	fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+				      &fw_ddb_dma);
+	if (fw_ddb_entry == NULL) {
+		DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
+		goto exit_nt_list;
+	}
+	max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+				     MAX_DEV_DB_ENTRIES;
+	fw_idx_size = sizeof(struct qla_ddb_index);
+
+	for (idx = 0; idx < max_ddbs; idx = next_idx) {
+		ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
+					      NULL, &next_idx, &state,
+					      &conn_err, NULL, &conn_id);
+		if (ret == QLA_ERROR)
+			break;
+
+		if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
+			goto continue_next_nt;
+
+		/* Check if NT, then add it to the list */
+		if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
+			goto continue_next_nt;
+
+		if (!(state == DDB_DS_NO_CONNECTION_ACTIVE ||
+		    state == DDB_DS_SESSION_FAILED))
+			goto continue_next_nt;
+
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "Adding  DDB to session = 0x%x\n", idx));
+		if (is_reset == INIT_ADAPTER) {
+			nt_ddb_idx = vmalloc(fw_idx_size);
+			if (!nt_ddb_idx)
+				break;
+
+			nt_ddb_idx->fw_ddb_idx = idx;
+
+			memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
+			       sizeof(struct dev_db_entry));
+
+			if (qla4xxx_is_flash_ddb_exists(ha, list_nt,
+					fw_ddb_entry) == QLA_SUCCESS) {
+				vfree(nt_ddb_idx);
+				goto continue_next_nt;
+			}
+			list_add_tail(&nt_ddb_idx->list, list_nt);
+		} else if (is_reset == RESET_ADAPTER) {
+			if (qla4xxx_is_session_exists(ha, fw_ddb_entry) ==
+								QLA_SUCCESS)
+				goto continue_next_nt;
+		}
+
+		ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset);
+		if (ret == QLA_ERROR)
+			goto exit_nt_list;
+
+continue_next_nt:
+		if (next_idx == 0)
+			break;
+	}
+
+exit_nt_list:
+	if (fw_ddb_entry)
+		dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+}
+
+/**
+ * qla4xxx_build_ddb_list - Build the ddb list and set up sessions
+ * @ha: pointer to adapter structure
+ * @is_reset: Is this init path or reset path
+ *
+ * Create a list of sendtargets (st) from the firmware DDBs, issue the send
+ * targets using connection open, then create the list of normal targets (nt)
+ * from the firmware DDBs. Based on the nt list, set up session and
+ * connection objects.
+ **/
+void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
+{
+	uint16_t tmo = 0;
+	struct list_head list_st, list_nt;
+	struct qla_ddb_index  *st_ddb_idx, *st_ddb_idx_tmp;
+	unsigned long wtime;
+
+	if (!test_bit(AF_LINK_UP, &ha->flags)) {
+		set_bit(AF_BUILD_DDB_LIST, &ha->flags);
+		ha->is_reset = is_reset;
+		return;
+	}
+
+	INIT_LIST_HEAD(&list_st);
+	INIT_LIST_HEAD(&list_nt);
+
+	qla4xxx_build_st_list(ha, &list_st);
+
 	/* Before issuing conn open mbox, ensure all IPs states are configured
 	 * Note, conn open fails if IPs are not configured
 	 */
@@ -4026,153 +4288,32 @@
 	}
 
 	/* Wait to ensure all sendtargets are done for min 12 sec wait */
-	tmo = ((ha->def_timeout < LOGIN_TOV) ? LOGIN_TOV : ha->def_timeout);
+	tmo = ((ha->def_timeout > LOGIN_TOV) &&
+	       (ha->def_timeout < LOGIN_TOV * 10) ?
+	       ha->def_timeout : LOGIN_TOV);
+
 	DEBUG2(ql4_printk(KERN_INFO, ha,
 			  "Default time to wait for build ddb %d\n", tmo));
 
 	wtime = jiffies + (HZ * tmo);
 	do {
-		list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st,
-					 list) {
-			ret = qla4xxx_get_fwddb_entry(ha,
-						      st_ddb_idx->fw_ddb_idx,
-						      NULL, 0, NULL, &next_idx,
-						      &state, &conn_err, NULL,
-						      NULL);
-			if (ret == QLA_ERROR)
-				continue;
+		if (list_empty(&list_st))
+			break;
 
-			if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
-			    state == DDB_DS_SESSION_FAILED) {
-				list_del_init(&st_ddb_idx->list);
-				vfree(st_ddb_idx);
-			}
-		}
+		qla4xxx_remove_failed_ddb(ha, &list_st);
 		schedule_timeout_uninterruptible(HZ / 10);
 	} while (time_after(wtime, jiffies));
 
 	/* Free up the sendtargets list */
-	list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
-		list_del_init(&st_ddb_idx->list);
-		vfree(st_ddb_idx);
-	}
+	qla4xxx_free_ddb_list(&list_st);
 
-	for (idx = 0; idx < max_ddbs; idx = next_idx) {
-		ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry,
-					      fw_ddb_dma, NULL,
-					      &next_idx, &state, &conn_err,
-					      NULL, &conn_id);
-		if (ret == QLA_ERROR)
-			break;
+	qla4xxx_build_nt_list(ha, &list_nt, is_reset);
 
-		if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
-			goto continue_next_nt;
-
-		/* Check if NT, then add to list it */
-		if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
-			goto continue_next_nt;
-
-		if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
-		    state == DDB_DS_SESSION_FAILED) {
-			DEBUG2(ql4_printk(KERN_INFO, ha,
-					  "Adding  DDB to session = 0x%x\n",
-					  idx));
-			if (is_reset == INIT_ADAPTER) {
-				nt_ddb_idx = vmalloc(fw_idx_size);
-				if (!nt_ddb_idx)
-					break;
-
-				nt_ddb_idx->fw_ddb_idx = idx;
-
-				memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
-				       sizeof(struct dev_db_entry));
-
-				if (qla4xxx_is_flash_ddb_exists(ha, &list_nt,
-						fw_ddb_entry) == QLA_SUCCESS) {
-					vfree(nt_ddb_idx);
-					goto continue_next_nt;
-				}
-				list_add_tail(&nt_ddb_idx->list, &list_nt);
-			} else if (is_reset == RESET_ADAPTER) {
-				if (qla4xxx_is_session_exists(ha,
-						   fw_ddb_entry) == QLA_SUCCESS)
-					goto continue_next_nt;
-			}
-
-			/* Create session object, with INVALID_ENTRY,
-			 * the targer_id would get set when we issue the login
-			 */
-			cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport,
-						ha->host, cmds_max,
-						sizeof(struct ddb_entry),
-						sizeof(struct ql4_task_data),
-						initial_cmdsn, INVALID_ENTRY);
-			if (!cls_sess)
-				goto exit_ddb_list;
-
-			/*
-			 * iscsi_session_setup increments the driver reference
-			 * count which wouldn't let the driver to be unloaded.
-			 * so calling module_put function to decrement the
-			 * reference count.
-			 **/
-			module_put(qla4xxx_iscsi_transport.owner);
-			sess = cls_sess->dd_data;
-			ddb_entry = sess->dd_data;
-			ddb_entry->sess = cls_sess;
-
-			cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
-			memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
-			       sizeof(struct dev_db_entry));
-
-			qla4xxx_setup_flash_ddb_entry(ha, ddb_entry);
-
-			cls_conn = iscsi_conn_setup(cls_sess,
-						    sizeof(struct qla_conn),
-						    conn_id);
-			if (!cls_conn)
-				goto exit_ddb_list;
-
-			ddb_entry->conn = cls_conn;
-
-			/* Setup ep, for displaying attributes in sysfs */
-			ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
-			if (ep) {
-				ep->conn = cls_conn;
-				cls_conn->ep = ep;
-			} else {
-				DEBUG2(ql4_printk(KERN_ERR, ha,
-						  "Unable to get ep\n"));
-			}
-
-			/* Update sess/conn params */
-			qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess,
-						 cls_conn);
-
-			if (is_reset == RESET_ADAPTER) {
-				iscsi_block_session(cls_sess);
-				/* Use the relogin path to discover new devices
-				 *  by short-circuting the logic of setting
-				 *  timer to relogin - instead set the flags
-				 *  to initiate login right away.
-				 */
-				set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
-				set_bit(DF_RELOGIN, &ddb_entry->flags);
-			}
-		}
-continue_next_nt:
-		if (next_idx == 0)
-			break;
-	}
-exit_ddb_list:
-	qla4xxx_free_nt_list(&list_nt);
-	if (fw_ddb_entry)
-		dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+	qla4xxx_free_ddb_list(&list_nt);
 
 	qla4xxx_free_ddb_index(ha);
 }
 
-
 /**
  * qla4xxx_probe_adapter - callback function to probe HBA
  * @pdev: pointer to pci_dev structure
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 26a3fa3..133989b 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
 
-#define QLA4XXX_DRIVER_VERSION	"5.02.00-k10"
+#define QLA4XXX_DRIVER_VERSION	"5.02.00-k12"
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index f85cfa6..b2c95db 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1316,15 +1316,10 @@
 	}
 
 	if (scsi_target_is_busy(starget)) {
-		if (list_empty(&sdev->starved_entry))
-			list_add_tail(&sdev->starved_entry,
-				      &shost->starved_list);
+		list_move_tail(&sdev->starved_entry, &shost->starved_list);
 		return 0;
 	}
 
-	/* We're OK to process the command, so we can't be starved */
-	if (!list_empty(&sdev->starved_entry))
-		list_del_init(&sdev->starved_entry);
 	return 1;
 }
 
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 1b21491..f59d4a0 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3048,7 +3048,8 @@
 
 		spin_lock_irqsave(shost->host_lock, flags);
 		rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
-				  FC_RPORT_DEVLOSS_PENDING);
+				  FC_RPORT_DEVLOSS_PENDING |
+				  FC_RPORT_DEVLOSS_CALLBK_DONE);
 		spin_unlock_irqrestore(shost->host_lock, flags);
 
 		/* ensure any stgt delete functions are done */
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 02d9998..eacd46b 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -2368,16 +2368,15 @@
 sg_proc_write_adio(struct file *filp, const char __user *buffer,
 		   size_t count, loff_t *off)
 {
-	int num;
-	char buff[11];
+	int err;
+	unsigned long num;
 
 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
 		return -EACCES;
-	num = (count < 10) ? count : 10;
-	if (copy_from_user(buff, buffer, num))
-		return -EFAULT;
-	buff[num] = '\0';
-	sg_allow_dio = simple_strtoul(buff, NULL, 10) ? 1 : 0;
+	err = kstrtoul_from_user(buffer, count, 0, &num);
+	if (err)
+		return err;
+	sg_allow_dio = num ? 1 : 0;
 	return count;
 }
 
@@ -2390,17 +2389,15 @@
 sg_proc_write_dressz(struct file *filp, const char __user *buffer,
 		     size_t count, loff_t *off)
 {
-	int num;
+	int err;
 	unsigned long k = ULONG_MAX;
-	char buff[11];
 
 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
 		return -EACCES;
-	num = (count < 10) ? count : 10;
-	if (copy_from_user(buff, buffer, num))
-		return -EFAULT;
-	buff[num] = '\0';
-	k = simple_strtoul(buff, NULL, 10);
+
+	err = kstrtoul_from_user(buffer, count, 0, &k);
+	if (err)
+		return err;
 	if (k <= 1048576) {	/* limit "big buff" to 1 MB */
 		sg_big_buff = k;
 		return count;
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index b4543f5..36d1ed7 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -839,6 +839,10 @@
 	struct sym_lcb *lp = sym_lp(tp, sdev->lun);
 	unsigned long flags;
 
+	/* if slave_alloc returned before allocating a sym_lcb, return */
+	if (!lp)
+		return;
+
 	spin_lock_irqsave(np->s.host->host_lock, flags);
 
 	if (lp->busy_itlq || lp->busy_itl) {
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 8599545..ac44af1 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -27,8 +27,7 @@
 #include <scsi/scsi_device.h>
 #include <scsi/iscsi_proto.h>
 #include <target/target_core_base.h>
-#include <target/target_core_tmr.h>
-#include <target/target_core_transport.h>
+#include <target/target_core_fabric.h>
 
 #include "iscsi_target_core.h"
 #include "iscsi_target_parameters.h"
@@ -284,8 +283,8 @@
 			sock_in6 = (struct sockaddr_in6 *)sockaddr;
 			sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr;
 
-			if (!memcmp((void *)&sock_in6->sin6_addr.in6_u,
-				    (void *)&sock_in6_e->sin6_addr.in6_u,
+			if (!memcmp(&sock_in6->sin6_addr.in6_u,
+				    &sock_in6_e->sin6_addr.in6_u,
 				    sizeof(struct in6_addr)))
 				ip_match = 1;
 
@@ -1225,7 +1224,7 @@
 
 	crypto_hash_init(hash);
 
-	sg_init_one(&sg, (u8 *)buf, payload_length);
+	sg_init_one(&sg, buf, payload_length);
 	crypto_hash_update(hash, &sg, payload_length);
 
 	if (padding) {
@@ -1603,7 +1602,7 @@
 		/*
 		 * Attach ping data to struct iscsi_cmd->buf_ptr.
 		 */
-		cmd->buf_ptr = (void *)ping_data;
+		cmd->buf_ptr = ping_data;
 		cmd->buf_ptr_size = payload_length;
 
 		pr_debug("Got %u bytes of NOPOUT ping"
@@ -3197,7 +3196,7 @@
 			end_of_buf = 1;
 			goto eob;
 		}
-		memcpy((void *)payload + payload_len, buf, len);
+		memcpy(payload + payload_len, buf, len);
 		payload_len += len;
 
 		spin_lock(&tiqn->tiqn_tpg_lock);
@@ -3229,7 +3228,7 @@
 					end_of_buf = 1;
 					goto eob;
 				}
-				memcpy((void *)payload + payload_len, buf, len);
+				memcpy(payload + payload_len, buf, len);
 				payload_len += len;
 			}
 			spin_unlock(&tpg->tpg_np_lock);
@@ -3486,7 +3485,7 @@
 	struct iscsi_conn *conn;
 	struct iscsi_queue_req *qr = NULL;
 	struct se_cmd *se_cmd;
-	struct iscsi_thread_set *ts = (struct iscsi_thread_set *)arg;
+	struct iscsi_thread_set *ts = arg;
 	/*
 	 * Allow ourselves to be interrupted by SIGINT so that a
 	 * connection recovery / failure event can be triggered externally.
@@ -3775,7 +3774,7 @@
 	u8 buffer[ISCSI_HDR_LEN], opcode;
 	u32 checksum = 0, digest = 0;
 	struct iscsi_conn *conn = NULL;
-	struct iscsi_thread_set *ts = (struct iscsi_thread_set *)arg;
+	struct iscsi_thread_set *ts = arg;
 	struct kvec iov;
 	/*
 	 * Allow ourselves to be interrupted by SIGINT so that a
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index 1cd6ce3..db0cf7c 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -82,7 +82,7 @@
 	unsigned int *c_len)
 {
 	unsigned char challenge_asciihex[CHAP_CHALLENGE_LENGTH * 2 + 1];
-	struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;
+	struct iscsi_chap *chap = conn->auth_protocol;
 
 	memset(challenge_asciihex, 0, CHAP_CHALLENGE_LENGTH * 2 + 1);
 
@@ -120,7 +120,7 @@
 	if (!conn->auth_protocol)
 		return NULL;
 
-	chap = (struct iscsi_chap *) conn->auth_protocol;
+	chap = conn->auth_protocol;
 	/*
 	 * We only support MD5 MDA presently.
 	 */
@@ -165,14 +165,15 @@
 	unsigned int *nr_out_len)
 {
 	char *endptr;
-	unsigned char id, digest[MD5_SIGNATURE_SIZE];
+	unsigned long id;
+	unsigned char digest[MD5_SIGNATURE_SIZE];
 	unsigned char type, response[MD5_SIGNATURE_SIZE * 2 + 2];
 	unsigned char identifier[10], *challenge = NULL;
 	unsigned char *challenge_binhex = NULL;
 	unsigned char client_digest[MD5_SIGNATURE_SIZE];
 	unsigned char server_digest[MD5_SIGNATURE_SIZE];
 	unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH];
-	struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;
+	struct iscsi_chap *chap = conn->auth_protocol;
 	struct crypto_hash *tfm;
 	struct hash_desc desc;
 	struct scatterlist sg;
@@ -246,7 +247,7 @@
 		goto out;
 	}
 
-	sg_init_one(&sg, (void *)&chap->id, 1);
+	sg_init_one(&sg, &chap->id, 1);
 	ret = crypto_hash_update(&desc, &sg, 1);
 	if (ret < 0) {
 		pr_err("crypto_hash_update() failed for id\n");
@@ -254,7 +255,7 @@
 		goto out;
 	}
 
-	sg_init_one(&sg, (void *)&auth->password, strlen(auth->password));
+	sg_init_one(&sg, &auth->password, strlen(auth->password));
 	ret = crypto_hash_update(&desc, &sg, strlen(auth->password));
 	if (ret < 0) {
 		pr_err("crypto_hash_update() failed for password\n");
@@ -262,7 +263,7 @@
 		goto out;
 	}
 
-	sg_init_one(&sg, (void *)chap->challenge, CHAP_CHALLENGE_LENGTH);
+	sg_init_one(&sg, chap->challenge, CHAP_CHALLENGE_LENGTH);
 	ret = crypto_hash_update(&desc, &sg, CHAP_CHALLENGE_LENGTH);
 	if (ret < 0) {
 		pr_err("crypto_hash_update() failed for challenge\n");
@@ -305,14 +306,17 @@
 	}
 
 	if (type == HEX)
-		id = (unsigned char)simple_strtoul((char *)&identifier[2],
-					&endptr, 0);
+		id = simple_strtoul(&identifier[2], &endptr, 0);
 	else
-		id = (unsigned char)simple_strtoul(identifier, &endptr, 0);
+		id = simple_strtoul(identifier, &endptr, 0);
+	if (id > 255) {
+		pr_err("chap identifier: %lu greater than 255\n", id);
+		goto out;
+	}
 	/*
 	 * RFC 1994 says Identifier is no more than octet (8 bits).
 	 */
-	pr_debug("[server] Got CHAP_I=%d\n", id);
+	pr_debug("[server] Got CHAP_I=%lu\n", id);
 	/*
 	 * Get CHAP_C.
 	 */
@@ -351,7 +355,7 @@
 		goto out;
 	}
 
-	sg_init_one(&sg, (void *)&id, 1);
+	sg_init_one(&sg, &id, 1);
 	ret = crypto_hash_update(&desc, &sg, 1);
 	if (ret < 0) {
 		pr_err("crypto_hash_update() failed for id\n");
@@ -359,7 +363,7 @@
 		goto out;
 	}
 
-	sg_init_one(&sg, (void *)auth->password_mutual,
+	sg_init_one(&sg, auth->password_mutual,
 				strlen(auth->password_mutual));
 	ret = crypto_hash_update(&desc, &sg, strlen(auth->password_mutual));
 	if (ret < 0) {
@@ -371,7 +375,7 @@
 	/*
 	 * Convert received challenge to binary hex.
 	 */
-	sg_init_one(&sg, (void *)challenge_binhex, challenge_len);
+	sg_init_one(&sg, challenge_binhex, challenge_len);
 	ret = crypto_hash_update(&desc, &sg, challenge_len);
 	if (ret < 0) {
 		pr_err("crypto_hash_update() failed for ma challenge\n");
@@ -414,7 +418,7 @@
 	char *nr_out_ptr,
 	unsigned int *nr_out_len)
 {
-	struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;
+	struct iscsi_chap *chap = conn->auth_protocol;
 
 	switch (chap->digest_type) {
 	case CHAP_DIGEST_MD5:
@@ -437,7 +441,7 @@
 	int *in_len,
 	int *out_len)
 {
-	struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;
+	struct iscsi_chap *chap = conn->auth_protocol;
 
 	if (!chap) {
 		chap = chap_server_open(conn, auth, in_text, out_text, out_len);
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index db32784..3468caa 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -22,12 +22,8 @@
 #include <linux/configfs.h>
 #include <linux/export.h>
 #include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric.h>
 #include <target/target_core_fabric_configfs.h>
-#include <target/target_core_fabric_lib.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tpg.h>
 #include <target/target_core_configfs.h>
 #include <target/configfs_macros.h>
 
@@ -56,8 +52,7 @@
 {
 	struct se_portal_group *se_tpg = container_of(to_config_group(item),
 					struct se_portal_group, tpg_group);
-	struct iscsi_portal_group *tpg =
-			(struct iscsi_portal_group *)se_tpg->se_tpg_fabric_ptr;
+	struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
 	int ret;
 
 	if (!tpg) {
@@ -1225,7 +1220,7 @@
 
 	ret = core_tpg_register(
 			&lio_target_fabric_configfs->tf_ops,
-			wwn, &tpg->tpg_se_tpg, (void *)tpg,
+			wwn, &tpg->tpg_se_tpg, tpg,
 			TRANSPORT_TPG_TYPE_NORMAL);
 	if (ret < 0)
 		return NULL;
diff --git a/drivers/target/iscsi/iscsi_target_device.c b/drivers/target/iscsi/iscsi_target_device.c
index a19fa5e..f63ea35 100644
--- a/drivers/target/iscsi/iscsi_target_device.c
+++ b/drivers/target/iscsi/iscsi_target_device.c
@@ -21,8 +21,7 @@
 
 #include <scsi/scsi_device.h>
 #include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_transport.h>
+#include <target/target_core_fabric.h>
 
 #include "iscsi_target_core.h"
 #include "iscsi_target_device.h"
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index b7ffc3c..4784511 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -21,7 +21,7 @@
 
 #include <scsi/iscsi_proto.h>
 #include <target/target_core_base.h>
-#include <target/target_core_transport.h>
+#include <target/target_core_fabric.h>
 
 #include "iscsi_target_core.h"
 #include "iscsi_target_seq_pdu_list.h"
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index 101b1be..255c0d6 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -21,7 +21,7 @@
 #include <linux/list.h>
 #include <scsi/iscsi_proto.h>
 #include <target/target_core_base.h>
-#include <target/target_core_transport.h>
+#include <target/target_core_fabric.h>
 
 #include "iscsi_target_core.h"
 #include "iscsi_target_seq_pdu_list.h"
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
index 0b8404c..1af1f21 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.c
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -21,7 +21,7 @@
 
 #include <scsi/iscsi_proto.h>
 #include <target/target_core_base.h>
-#include <target/target_core_transport.h>
+#include <target/target_core_fabric.h>
 
 #include "iscsi_target_core.h"
 #include "iscsi_target_datain_values.h"
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index d734bde..373b0cc 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -23,7 +23,7 @@
 #include <linux/crypto.h>
 #include <scsi/iscsi_proto.h>
 #include <target/target_core_base.h>
-#include <target/target_core_transport.h>
+#include <target/target_core_fabric.h>
 
 #include "iscsi_target_core.h"
 #include "iscsi_target_tq.h"
@@ -143,7 +143,7 @@
 	list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
 			sess_list) {
 
-		sess_p = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+		sess_p = se_sess->fabric_sess_ptr;
 		spin_lock(&sess_p->conn_lock);
 		if (atomic_read(&sess_p->session_fall_back_to_erl0) ||
 		    atomic_read(&sess_p->session_logout) ||
@@ -151,9 +151,9 @@
 			spin_unlock(&sess_p->conn_lock);
 			continue;
 		}
-		if (!memcmp((void *)sess_p->isid, (void *)conn->sess->isid, 6) &&
-		   (!strcmp((void *)sess_p->sess_ops->InitiatorName,
-			    (void *)initiatorname_param->value) &&
+		if (!memcmp(sess_p->isid, conn->sess->isid, 6) &&
+		   (!strcmp(sess_p->sess_ops->InitiatorName,
+			    initiatorname_param->value) &&
 		   (sess_p->sess_ops->SessionType == sessiontype))) {
 			atomic_set(&sess_p->session_reinstatement, 1);
 			spin_unlock(&sess_p->conn_lock);
@@ -229,7 +229,7 @@
 
 	iscsi_login_set_conn_values(sess, conn, pdu->cid);
 	sess->init_task_tag	= pdu->itt;
-	memcpy((void *)&sess->isid, (void *)pdu->isid, 6);
+	memcpy(&sess->isid, pdu->isid, 6);
 	sess->exp_cmd_sn	= pdu->cmdsn;
 	INIT_LIST_HEAD(&sess->sess_conn_list);
 	INIT_LIST_HEAD(&sess->sess_ooo_cmdsn_list);
@@ -440,8 +440,7 @@
 		    atomic_read(&sess_p->session_logout) ||
 		   (sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED))
 			continue;
-		if (!memcmp((const void *)sess_p->isid,
-		     (const void *)pdu->isid, 6) &&
+		if (!memcmp(sess_p->isid, pdu->isid, 6) &&
 		     (sess_p->tsih == pdu->tsih)) {
 			iscsit_inc_session_usage_count(sess_p);
 			iscsit_stop_time2retain_timer(sess_p);
@@ -654,7 +653,7 @@
 
 	spin_lock_bh(&se_tpg->session_lock);
 	__transport_register_session(&sess->tpg->tpg_se_tpg,
-			se_sess->se_node_acl, se_sess, (void *)sess);
+			se_sess->se_node_acl, se_sess, sess);
 	pr_debug("Moving to TARG_SESS_STATE_LOGGED_IN.\n");
 	sess->session_state = TARG_SESS_STATE_LOGGED_IN;
 
@@ -811,7 +810,7 @@
 	 * Setup the np->np_sockaddr from the passed sockaddr setup
 	 * in iscsi_target_configfs.c code..
 	 */
-	memcpy((void *)&np->np_sockaddr, (void *)sockaddr,
+	memcpy(&np->np_sockaddr, sockaddr,
 			sizeof(struct __kernel_sockaddr_storage));
 
 	if (sockaddr->ss_family == AF_INET6)
@@ -821,6 +820,7 @@
 	/*
 	 * Set SO_REUSEADDR, and disable Nagel Algorithm with TCP_NODELAY.
 	 */
+	/* FIXME: Someone please explain why this is endian-safe */
 	opt = 1;
 	if (np->np_network_transport == ISCSI_TCP) {
 		ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_NODELAY,
@@ -832,6 +832,7 @@
 		}
 	}
 
+	/* FIXME: Someone please explain why this is endian-safe */
 	ret = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
 			(char *)&opt, sizeof(opt));
 	if (ret < 0) {
@@ -1206,7 +1207,7 @@
 
 int iscsi_target_login_thread(void *arg)
 {
-	struct iscsi_np *np = (struct iscsi_np *)arg;
+	struct iscsi_np *np = arg;
 	int ret;
 
 	allow_signal(SIGINT);
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 98936cb..e89fa74 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -21,7 +21,7 @@
 #include <linux/ctype.h>
 #include <scsi/iscsi_proto.h>
 #include <target/target_core_base.h>
-#include <target/target_core_tpg.h>
+#include <target/target_core_fabric.h>
 
 #include "iscsi_target_core.h"
 #include "iscsi_target_parameters.h"
@@ -732,7 +732,7 @@
 	u32 iqn_size = strlen(param_buf), i;
 
 	for (i = 0; i < iqn_size; i++) {
-		c = (char *)&param_buf[i];
+		c = &param_buf[i];
 		if (!isupper(*c))
 			continue;
 
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.c b/drivers/target/iscsi/iscsi_target_nodeattrib.c
index aeafbe0..b3c699c 100644
--- a/drivers/target/iscsi/iscsi_target_nodeattrib.c
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.c
@@ -19,7 +19,6 @@
  ******************************************************************************/
 
 #include <target/target_core_base.h>
-#include <target/target_core_transport.h>
 
 #include "iscsi_target_core.h"
 #include "iscsi_target_device.h"
@@ -135,7 +134,7 @@
 		spin_lock_bh(&se_nacl->nacl_sess_lock);
 		se_sess = se_nacl->nacl_sess;
 		if (se_sess) {
-			sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+			sess = se_sess->fabric_sess_ptr;
 
 			spin_lock(&sess->conn_lock);
 			list_for_each_entry(conn, &sess->sess_conn_list,
diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c
index f1db830..421d694 100644
--- a/drivers/target/iscsi/iscsi_target_stat.c
+++ b/drivers/target/iscsi/iscsi_target_stat.c
@@ -23,7 +23,6 @@
 #include <linux/export.h>
 #include <scsi/iscsi_proto.h>
 #include <target/target_core_base.h>
-#include <target/target_core_transport.h>
 #include <target/configfs_macros.h>
 
 #include "iscsi_target_core.h"
@@ -746,7 +745,7 @@
 	spin_lock_bh(&se_nacl->nacl_sess_lock);
 	se_sess = se_nacl->nacl_sess;
 	if (se_sess) {
-		sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+		sess = se_sess->fabric_sess_ptr;
 		if (sess)
 			ret = snprintf(page, PAGE_SIZE, "%u\n",
 				sess->sess_ops->SessionType ? 0 : ISCSI_NODE_INDEX);
@@ -770,7 +769,7 @@
 	spin_lock_bh(&se_nacl->nacl_sess_lock);
 	se_sess = se_nacl->nacl_sess;
 	if (se_sess) {
-		sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+		sess = se_sess->fabric_sess_ptr;
 		if (sess)
 			ret = snprintf(page, PAGE_SIZE, "%u\n",
 					sess->session_index);
@@ -794,7 +793,7 @@
 	spin_lock_bh(&se_nacl->nacl_sess_lock);
 	se_sess = se_nacl->nacl_sess;
 	if (se_sess) {
-		sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+		sess = se_sess->fabric_sess_ptr;
 		if (sess)
 			ret = snprintf(page, PAGE_SIZE, "%u\n", sess->cmd_pdus);
 	}
@@ -817,7 +816,7 @@
 	spin_lock_bh(&se_nacl->nacl_sess_lock);
 	se_sess = se_nacl->nacl_sess;
 	if (se_sess) {
-		sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+		sess = se_sess->fabric_sess_ptr;
 		if (sess)
 			ret = snprintf(page, PAGE_SIZE, "%u\n", sess->rsp_pdus);
 	}
@@ -840,7 +839,7 @@
 	spin_lock_bh(&se_nacl->nacl_sess_lock);
 	se_sess = se_nacl->nacl_sess;
 	if (se_sess) {
-		sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+		sess = se_sess->fabric_sess_ptr;
 		if (sess)
 			ret = snprintf(page, PAGE_SIZE, "%llu\n",
 				(unsigned long long)sess->tx_data_octets);
@@ -864,7 +863,7 @@
 	spin_lock_bh(&se_nacl->nacl_sess_lock);
 	se_sess = se_nacl->nacl_sess;
 	if (se_sess) {
-		sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+		sess = se_sess->fabric_sess_ptr;
 		if (sess)
 			ret = snprintf(page, PAGE_SIZE, "%llu\n",
 				(unsigned long long)sess->rx_data_octets);
@@ -888,7 +887,7 @@
 	spin_lock_bh(&se_nacl->nacl_sess_lock);
 	se_sess = se_nacl->nacl_sess;
 	if (se_sess) {
-		sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+		sess = se_sess->fabric_sess_ptr;
 		if (sess)
 			ret = snprintf(page, PAGE_SIZE, "%u\n",
 					sess->conn_digest_errors);
@@ -912,7 +911,7 @@
 	spin_lock_bh(&se_nacl->nacl_sess_lock);
 	se_sess = se_nacl->nacl_sess;
 	if (se_sess) {
-		sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+		sess = se_sess->fabric_sess_ptr;
 		if (sess)
 			ret = snprintf(page, PAGE_SIZE, "%u\n",
 					sess->conn_timeout_errors);
diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
index 490207e..255ed35 100644
--- a/drivers/target/iscsi/iscsi_target_tmr.c
+++ b/drivers/target/iscsi/iscsi_target_tmr.c
@@ -21,7 +21,7 @@
 #include <asm/unaligned.h>
 #include <scsi/iscsi_proto.h>
 #include <target/target_core_base.h>
-#include <target/target_core_transport.h>
+#include <target/target_core_fabric.h>
 
 #include "iscsi_target_core.h"
 #include "iscsi_target_seq_pdu_list.h"
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index d4cf2cd..879d8d0 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -19,10 +19,8 @@
  ******************************************************************************/
 
 #include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric.h>
 #include <target/target_core_configfs.h>
-#include <target/target_core_tpg.h>
 
 #include "iscsi_target_core.h"
 #include "iscsi_target_erl0.h"
@@ -72,7 +70,7 @@
 
 	ret = core_tpg_register(
 			&lio_target_fabric_configfs->tf_ops,
-			NULL, &tpg->tpg_se_tpg, (void *)tpg,
+			NULL, &tpg->tpg_se_tpg, tpg,
 			TRANSPORT_TPG_TYPE_DISCOVERY);
 	if (ret < 0) {
 		kfree(tpg);
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 02348f7..a05ca1c 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -22,9 +22,7 @@
 #include <scsi/scsi_tcq.h>
 #include <scsi/iscsi_proto.h>
 #include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_tmr.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric.h>
 #include <target/target_core_configfs.h>
 
 #include "iscsi_target_core.h"
@@ -289,7 +287,7 @@
 	}
 
 	se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd,
-				(void *)cmd->tmr_req, tcm_function,
+				cmd->tmr_req, tcm_function,
 				GFP_KERNEL);
 	if (!se_cmd->se_tmr_req)
 		goto out;
@@ -1066,7 +1064,7 @@
 	if (tiqn) {
 		spin_lock_bh(&tiqn->sess_err_stats.lock);
 		strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
-				(void *)conn->sess->sess_ops->InitiatorName);
+				conn->sess->sess_ops->InitiatorName);
 		tiqn->sess_err_stats.last_sess_failure_type =
 				ISCSI_SESS_ERR_CXN_TIMEOUT;
 		tiqn->sess_err_stats.cxn_timeout_errors++;
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 81d5832..c47ff7f 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -33,14 +33,9 @@
 #include <scsi/scsi_cmnd.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric.h>
 #include <target/target_core_fabric_configfs.h>
-#include <target/target_core_fabric_lib.h>
 #include <target/target_core_configfs.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tpg.h>
-#include <target/target_core_tmr.h>
 
 #include "tcm_loop.h"
 
@@ -421,11 +416,11 @@
 	.queuecommand		= tcm_loop_queuecommand,
 	.change_queue_depth	= tcm_loop_change_queue_depth,
 	.eh_device_reset_handler = tcm_loop_device_reset,
-	.can_queue		= TL_SCSI_CAN_QUEUE,
+	.can_queue		= 1024,
 	.this_id		= -1,
-	.sg_tablesize		= TL_SCSI_SG_TABLESIZE,
-	.cmd_per_lun		= TL_SCSI_CMD_PER_LUN,
-	.max_sectors		= TL_SCSI_MAX_SECTORS,
+	.sg_tablesize		= 256,
+	.cmd_per_lun		= 1024,
+	.max_sectors		= 0xFFFF,
 	.use_clustering		= DISABLE_CLUSTERING,
 	.slave_alloc		= tcm_loop_slave_alloc,
 	.slave_configure	= tcm_loop_slave_configure,
@@ -564,8 +559,7 @@
 
 static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg)
 {
-	struct tcm_loop_tpg *tl_tpg =
-			(struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
+	struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
 	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
 	/*
 	 * tl_proto_id is set at tcm_loop_configfs.c:tcm_loop_make_scsi_hba()
@@ -592,8 +586,7 @@
 
 static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
 {
-	struct tcm_loop_tpg *tl_tpg =
-		(struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
+	struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
 	/*
 	 * Return the passed NAA identifier for the SAS Target Port
 	 */
@@ -602,8 +595,7 @@
 
 static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
 {
-	struct tcm_loop_tpg *tl_tpg =
-		(struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
+	struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
 	/*
 	 * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
 	 * to represent the SCSI Target Port.
@@ -623,8 +615,7 @@
 	int *format_code,
 	unsigned char *buf)
 {
-	struct tcm_loop_tpg *tl_tpg =
-			(struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
+	struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
 	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
 
 	switch (tl_hba->tl_proto_id) {
@@ -653,8 +644,7 @@
 	struct t10_pr_registration *pr_reg,
 	int *format_code)
 {
-	struct tcm_loop_tpg *tl_tpg =
-			(struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
+	struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
 	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
 
 	switch (tl_hba->tl_proto_id) {
@@ -687,8 +677,7 @@
 	u32 *out_tid_len,
 	char **port_nexus_ptr)
 {
-	struct tcm_loop_tpg *tl_tpg =
-			(struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
+	struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
 	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
 
 	switch (tl_hba->tl_proto_id) {
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
index 6b76c7a..15a0364 100644
--- a/drivers/target/loopback/tcm_loop.h
+++ b/drivers/target/loopback/tcm_loop.h
@@ -1,16 +1,7 @@
 #define TCM_LOOP_VERSION		"v2.1-rc1"
 #define TL_WWN_ADDR_LEN			256
 #define TL_TPGS_PER_HBA			32
-/*
- * Defaults for struct scsi_host_template tcm_loop_driver_template
- *
- * We use large can_queue and cmd_per_lun here and let TCM enforce
- * the underlying se_device_t->queue_depth.
- */
-#define TL_SCSI_CAN_QUEUE		1024
-#define TL_SCSI_CMD_PER_LUN		1024
-#define TL_SCSI_MAX_SECTORS		1024
-#define TL_SCSI_SG_TABLESIZE		256
+
 /*
  * Used in tcm_loop_driver_probe() for struct Scsi_Host->max_cmd_len
  */
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 1dcbef4..1b1edd1 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -32,13 +32,12 @@
 #include <scsi/scsi_cmnd.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
 #include <target/target_core_configfs.h>
 
+#include "target_core_internal.h"
 #include "target_core_alua.h"
-#include "target_core_hba.h"
 #include "target_core_ua.h"
 
 static int core_alua_check_transition(int state, int *primary);
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 831468b..2f2235e 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -29,10 +29,11 @@
 #include <scsi/scsi.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
+
+#include "target_core_internal.h"
 #include "target_core_ua.h"
-#include "target_core_cdb.h"
 
 static void
 target_fill_alua_data(struct se_port *port, unsigned char *buf)
@@ -94,6 +95,18 @@
 	buf[2] = dev->transport->get_device_rev(dev);
 
 	/*
+	 * NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2
+	 *
+	 * SPC4 says:
+	 *   A RESPONSE DATA FORMAT field set to 2h indicates that the
+	 *   standard INQUIRY data is in the format defined in this
+	 *   standard. Response data format values less than 2h are
+	 *   obsolete. Response data format values greater than 2h are
+	 *   reserved.
+	 */
+	buf[3] = 2;
+
+	/*
 	 * Enable SCCS and TPGS fields for Emulated ALUA
 	 */
 	if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED)
@@ -115,11 +128,9 @@
 		goto out;
 	}
 
-	snprintf((unsigned char *)&buf[8], 8, "LIO-ORG");
-	snprintf((unsigned char *)&buf[16], 16, "%s",
-		 &dev->se_sub_dev->t10_wwn.model[0]);
-	snprintf((unsigned char *)&buf[32], 4, "%s",
-		 &dev->se_sub_dev->t10_wwn.revision[0]);
+	snprintf(&buf[8], 8, "LIO-ORG");
+	snprintf(&buf[16], 16, "%s", dev->se_sub_dev->t10_wwn.model);
+	snprintf(&buf[32], 4, "%s", dev->se_sub_dev->t10_wwn.revision);
 	buf[4] = 31; /* Set additional length to 31 */
 
 out:
@@ -138,8 +149,7 @@
 			SDF_EMULATED_VPD_UNIT_SERIAL) {
 		u32 unit_serial_len;
 
-		unit_serial_len =
-			strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]);
+		unit_serial_len = strlen(dev->se_sub_dev->t10_wwn.unit_serial);
 		unit_serial_len++; /* For NULL Terminator */
 
 		if (((len + 4) + unit_serial_len) > cmd->data_length) {
@@ -148,8 +158,8 @@
 			buf[3] = (len & 0xff);
 			return 0;
 		}
-		len += sprintf((unsigned char *)&buf[4], "%s",
-			&dev->se_sub_dev->t10_wwn.unit_serial[0]);
+		len += sprintf(&buf[4], "%s",
+			dev->se_sub_dev->t10_wwn.unit_serial);
 		len++; /* Extra Byte for NULL Terminator */
 		buf[3] = len;
 	}
@@ -279,14 +289,13 @@
 			len += (prod_len + unit_serial_len);
 			goto check_port;
 		}
-		id_len += sprintf((unsigned char *)&buf[off+12],
-				"%s:%s", prod,
+		id_len += sprintf(&buf[off+12], "%s:%s", prod,
 				&dev->se_sub_dev->t10_wwn.unit_serial[0]);
 	}
 	buf[off] = 0x2; /* ASCII */
 	buf[off+1] = 0x1; /* T10 Vendor ID */
 	buf[off+2] = 0x0;
-	memcpy((unsigned char *)&buf[off+4], "LIO-ORG", 8);
+	memcpy(&buf[off+4], "LIO-ORG", 8);
 	/* Extra Byte for NULL Terminator */
 	id_len++;
 	/* Identifier Length */
diff --git a/drivers/target/target_core_cdb.h b/drivers/target/target_core_cdb.h
deleted file mode 100644
index ad6b1e3..0000000
--- a/drivers/target/target_core_cdb.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef TARGET_CORE_CDB_H
-#define TARGET_CORE_CDB_H
-
-int target_emulate_inquiry(struct se_task *task);
-int target_emulate_readcapacity(struct se_task *task);
-int target_emulate_readcapacity_16(struct se_task *task);
-int target_emulate_modesense(struct se_task *task);
-int target_emulate_request_sense(struct se_task *task);
-int target_emulate_unmap(struct se_task *task);
-int target_emulate_write_same(struct se_task *task);
-int target_emulate_synchronize_cache(struct se_task *task);
-int target_emulate_noop(struct se_task *task);
-
-#endif /* TARGET_CORE_CDB_H */
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 93d4f6a..0955bb8 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -39,18 +39,16 @@
 #include <linux/spinlock.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
 #include <target/target_core_fabric_configfs.h>
 #include <target/target_core_configfs.h>
 #include <target/configfs_macros.h>
 
+#include "target_core_internal.h"
 #include "target_core_alua.h"
-#include "target_core_hba.h"
 #include "target_core_pr.h"
 #include "target_core_rd.h"
-#include "target_core_stat.h"
 
 extern struct t10_alua_lu_gp *default_lu_gp;
 
@@ -1452,7 +1450,7 @@
 		return -ENOMEM;
 
 	orig = opts;
-	while ((ptr = strsep(&opts, ",")) != NULL) {
+	while ((ptr = strsep(&opts, ",\n")) != NULL) {
 		if (!*ptr)
 			continue;
 
@@ -1631,7 +1629,7 @@
 
 static ssize_t target_core_show_dev_info(void *p, char *page)
 {
-	struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+	struct se_subsystem_dev *se_dev = p;
 	struct se_hba *hba = se_dev->se_dev_hba;
 	struct se_subsystem_api *t = hba->transport;
 	int bl = 0;
@@ -1659,7 +1657,7 @@
 	const char *page,
 	size_t count)
 {
-	struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+	struct se_subsystem_dev *se_dev = p;
 	struct se_hba *hba = se_dev->se_dev_hba;
 	struct se_subsystem_api *t = hba->transport;
 
@@ -1682,7 +1680,7 @@
 
 static ssize_t target_core_show_dev_alias(void *p, char *page)
 {
-	struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+	struct se_subsystem_dev *se_dev = p;
 
 	if (!(se_dev->su_dev_flags & SDF_USING_ALIAS))
 		return 0;
@@ -1695,7 +1693,7 @@
 	const char *page,
 	size_t count)
 {
-	struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+	struct se_subsystem_dev *se_dev = p;
 	struct se_hba *hba = se_dev->se_dev_hba;
 	ssize_t read_bytes;
 
@@ -1710,6 +1708,9 @@
 	read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN,
 			"%s", page);
 
+	if (se_dev->se_dev_alias[read_bytes - 1] == '\n')
+		se_dev->se_dev_alias[read_bytes - 1] = '\0';
+
 	pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n",
 		config_item_name(&hba->hba_group.cg_item),
 		config_item_name(&se_dev->se_dev_group.cg_item),
@@ -1728,7 +1729,7 @@
 
 static ssize_t target_core_show_dev_udev_path(void *p, char *page)
 {
-	struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+	struct se_subsystem_dev *se_dev = p;
 
 	if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH))
 		return 0;
@@ -1741,7 +1742,7 @@
 	const char *page,
 	size_t count)
 {
-	struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+	struct se_subsystem_dev *se_dev = p;
 	struct se_hba *hba = se_dev->se_dev_hba;
 	ssize_t read_bytes;
 
@@ -1756,6 +1757,9 @@
 	read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN,
 			"%s", page);
 
+	if (se_dev->se_dev_udev_path[read_bytes - 1] == '\n')
+		se_dev->se_dev_udev_path[read_bytes - 1] = '\0';
+
 	pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
 		config_item_name(&hba->hba_group.cg_item),
 		config_item_name(&se_dev->se_dev_group.cg_item),
@@ -1777,7 +1781,7 @@
 	const char *page,
 	size_t count)
 {
-	struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+	struct se_subsystem_dev *se_dev = p;
 	struct se_device *dev;
 	struct se_hba *hba = se_dev->se_dev_hba;
 	struct se_subsystem_api *t = hba->transport;
@@ -1822,7 +1826,7 @@
 static ssize_t target_core_show_alua_lu_gp(void *p, char *page)
 {
 	struct se_device *dev;
-	struct se_subsystem_dev *su_dev = (struct se_subsystem_dev *)p;
+	struct se_subsystem_dev *su_dev = p;
 	struct config_item *lu_ci;
 	struct t10_alua_lu_gp *lu_gp;
 	struct t10_alua_lu_gp_member *lu_gp_mem;
@@ -1860,7 +1864,7 @@
 	size_t count)
 {
 	struct se_device *dev;
-	struct se_subsystem_dev *su_dev = (struct se_subsystem_dev *)p;
+	struct se_subsystem_dev *su_dev = p;
 	struct se_hba *hba = su_dev->se_dev_hba;
 	struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL;
 	struct t10_alua_lu_gp_member *lu_gp_mem;
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 9b86394..0c5992f 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -42,13 +42,11 @@
 #include <scsi/scsi_device.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tpg.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
 
+#include "target_core_internal.h"
 #include "target_core_alua.h"
-#include "target_core_hba.h"
 #include "target_core_pr.h"
 #include "target_core_ua.h"
 
@@ -1134,8 +1132,6 @@
  */
 int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
 {
-	u32 orig_queue_depth = dev->queue_depth;
-
 	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
 		pr_err("dev[%p]: Unable to change SE Device TCQ while"
 			" dev_export_obj: %d count exists\n", dev,
@@ -1169,11 +1165,6 @@
 	}
 
 	dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth;
-	if (queue_depth > orig_queue_depth)
-		atomic_add(queue_depth - orig_queue_depth, &dev->depth_left);
-	else if (queue_depth < orig_queue_depth)
-		atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left);
-
 	pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
 			dev, queue_depth);
 	return 0;
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 09b6f87..4f77cce 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -36,18 +36,14 @@
 #include <linux/configfs.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tpg.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric.h>
 #include <target/target_core_fabric_configfs.h>
 #include <target/target_core_configfs.h>
 #include <target/configfs_macros.h>
 
+#include "target_core_internal.h"
 #include "target_core_alua.h"
-#include "target_core_hba.h"
 #include "target_core_pr.h"
-#include "target_core_stat.h"
 
 #define TF_CIT_SETUP(_name, _item_ops, _group_ops, _attrs)		\
 static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
index ec4249b..283a36e 100644
--- a/drivers/target/target_core_fabric_lib.c
+++ b/drivers/target/target_core_fabric_lib.c
@@ -34,13 +34,10 @@
 #include <scsi/scsi_cmnd.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_lib.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric.h>
 #include <target/target_core_configfs.h>
 
-#include "target_core_hba.h"
+#include "target_core_internal.h"
 #include "target_core_pr.h"
 
 /*
@@ -402,7 +399,7 @@
 		add_len = ((buf[2] >> 8) & 0xff);
 		add_len |= (buf[3] & 0xff);
 
-		tid_len = strlen((char *)&buf[4]);
+		tid_len = strlen(&buf[4]);
 		tid_len += 4; /* Add four bytes for iSCSI Transport ID header */
 		tid_len += 1; /* Add one byte for NULL terminator */
 		padding = ((-tid_len) & 3);
@@ -423,11 +420,11 @@
 	 * format.
 	 */
 	if (format_code == 0x40) {
-		p = strstr((char *)&buf[4], ",i,0x");
+		p = strstr(&buf[4], ",i,0x");
 		if (!p) {
 			pr_err("Unable to locate \",i,0x\" seperator"
 				" for Initiator port identifier: %s\n",
-				(char *)&buf[4]);
+				&buf[4]);
 			return NULL;
 		}
 		*p = '\0'; /* Terminate iSCSI Name */
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index b4864fb..7ed58e2 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -37,8 +37,7 @@
 #include <scsi/scsi_host.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_transport.h>
+#include <target/target_core_backend.h>
 
 #include "target_core_file.h"
 
@@ -86,7 +85,7 @@
 static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
 {
 	struct fd_dev *fd_dev;
-	struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
+	struct fd_host *fd_host = hba->hba_ptr;
 
 	fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
 	if (!fd_dev) {
@@ -114,8 +113,8 @@
 	struct se_device *dev;
 	struct se_dev_limits dev_limits;
 	struct queue_limits *limits;
-	struct fd_dev *fd_dev = (struct fd_dev *) p;
-	struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
+	struct fd_dev *fd_dev = p;
+	struct fd_host *fd_host = hba->hba_ptr;
 	mm_segment_t old_fs;
 	struct file *file;
 	struct inode *inode = NULL;
@@ -240,7 +239,7 @@
  */
 static void fd_free_device(void *p)
 {
-	struct fd_dev *fd_dev = (struct fd_dev *) p;
+	struct fd_dev *fd_dev = p;
 
 	if (fd_dev->fd_file) {
 		filp_close(fd_dev->fd_file, NULL);
@@ -498,7 +497,7 @@
 
 	orig = opts;
 
-	while ((ptr = strsep(&opts, ",")) != NULL) {
+	while ((ptr = strsep(&opts, ",\n")) != NULL) {
 		if (!*ptr)
 			continue;
 
@@ -559,7 +558,7 @@
 
 static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
 {
-	struct fd_dev *fd_dev = (struct fd_dev *) se_dev->se_dev_su_ptr;
+	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
 
 	if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
 		pr_err("Missing fd_dev_name=\n");
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
index c68019d..3dd1bd4 100644
--- a/drivers/target/target_core_hba.c
+++ b/drivers/target/target_core_hba.c
@@ -37,11 +37,10 @@
 #include <net/tcp.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tpg.h>
-#include <target/target_core_transport.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
 
-#include "target_core_hba.h"
+#include "target_core_internal.h"
 
 static LIST_HEAD(subsystem_list);
 static DEFINE_MUTEX(subsystem_mutex);
diff --git a/drivers/target/target_core_hba.h b/drivers/target/target_core_hba.h
deleted file mode 100644
index bb0fea5..0000000
--- a/drivers/target/target_core_hba.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef TARGET_CORE_HBA_H
-#define TARGET_CORE_HBA_H
-
-extern struct se_hba *core_alloc_hba(const char *, u32, u32);
-extern int core_delete_hba(struct se_hba *);
-
-#endif /* TARGET_CORE_HBA_H */
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 4aa9922..cc8e6b5 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -42,8 +42,7 @@
 #include <scsi/scsi_host.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_transport.h>
+#include <target/target_core_backend.h>
 
 #include "target_core_iblock.h"
 
@@ -391,7 +390,7 @@
 
 	orig = opts;
 
-	while ((ptr = strsep(&opts, ",")) != NULL) {
+	while ((ptr = strsep(&opts, ",\n")) != NULL) {
 		if (!*ptr)
 			continue;
 
@@ -465,7 +464,7 @@
 	if (bd) {
 		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
 			MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
-			"" : (bd->bd_holder == (struct iblock_dev *)ibd) ?
+			"" : (bd->bd_holder == ibd) ?
 			"CLAIMED: IBLOCK" : "CLAIMED: OS");
 	} else {
 		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
new file mode 100644
index 0000000..26f135e
--- /dev/null
+++ b/drivers/target/target_core_internal.h
@@ -0,0 +1,123 @@
+#ifndef TARGET_CORE_INTERNAL_H
+#define TARGET_CORE_INTERNAL_H
+
+/* target_core_alua.c */
+extern struct t10_alua_lu_gp *default_lu_gp;
+
+/* target_core_cdb.c */
+int	target_emulate_inquiry(struct se_task *task);
+int	target_emulate_readcapacity(struct se_task *task);
+int	target_emulate_readcapacity_16(struct se_task *task);
+int	target_emulate_modesense(struct se_task *task);
+int	target_emulate_request_sense(struct se_task *task);
+int	target_emulate_unmap(struct se_task *task);
+int	target_emulate_write_same(struct se_task *task);
+int	target_emulate_synchronize_cache(struct se_task *task);
+int	target_emulate_noop(struct se_task *task);
+
+/* target_core_device.c */
+struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16);
+int	core_free_device_list_for_node(struct se_node_acl *,
+		struct se_portal_group *);
+void	core_dec_lacl_count(struct se_node_acl *, struct se_cmd *);
+void	core_update_device_list_access(u32, u32, struct se_node_acl *);
+int	core_update_device_list_for_node(struct se_lun *, struct se_lun_acl *,
+		u32, u32, struct se_node_acl *, struct se_portal_group *, int);
+void	core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *);
+int	core_dev_export(struct se_device *, struct se_portal_group *,
+		struct se_lun *);
+void	core_dev_unexport(struct se_device *, struct se_portal_group *,
+		struct se_lun *);
+int	target_report_luns(struct se_task *);
+void	se_release_device_for_hba(struct se_device *);
+void	se_release_vpd_for_dev(struct se_device *);
+int	se_free_virtual_device(struct se_device *, struct se_hba *);
+int	se_dev_check_online(struct se_device *);
+int	se_dev_check_shutdown(struct se_device *);
+void	se_dev_set_default_attribs(struct se_device *, struct se_dev_limits *);
+int	se_dev_set_task_timeout(struct se_device *, u32);
+int	se_dev_set_max_unmap_lba_count(struct se_device *, u32);
+int	se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
+int	se_dev_set_unmap_granularity(struct se_device *, u32);
+int	se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
+int	se_dev_set_emulate_dpo(struct se_device *, int);
+int	se_dev_set_emulate_fua_write(struct se_device *, int);
+int	se_dev_set_emulate_fua_read(struct se_device *, int);
+int	se_dev_set_emulate_write_cache(struct se_device *, int);
+int	se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int);
+int	se_dev_set_emulate_tas(struct se_device *, int);
+int	se_dev_set_emulate_tpu(struct se_device *, int);
+int	se_dev_set_emulate_tpws(struct se_device *, int);
+int	se_dev_set_enforce_pr_isids(struct se_device *, int);
+int	se_dev_set_is_nonrot(struct se_device *, int);
+int	se_dev_set_emulate_rest_reord(struct se_device *dev, int);
+int	se_dev_set_queue_depth(struct se_device *, u32);
+int	se_dev_set_max_sectors(struct se_device *, u32);
+int	se_dev_set_optimal_sectors(struct se_device *, u32);
+int	se_dev_set_block_size(struct se_device *, u32);
+struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_hba *,
+		struct se_device *, u32);
+int	core_dev_del_lun(struct se_portal_group *, u32);
+struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
+struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
+		u32, char *, int *);
+int	core_dev_add_initiator_node_lun_acl(struct se_portal_group *,
+		struct se_lun_acl *, u32, u32);
+int	core_dev_del_initiator_node_lun_acl(struct se_portal_group *,
+		struct se_lun *, struct se_lun_acl *);
+void	core_dev_free_initiator_node_lun_acl(struct se_portal_group *,
+		struct se_lun_acl *lacl);
+int	core_dev_setup_virtual_lun0(void);
+void	core_dev_release_virtual_lun0(void);
+
+/* target_core_hba.c */
+struct se_hba *core_alloc_hba(const char *, u32, u32);
+int	core_delete_hba(struct se_hba *);
+
+/* target_core_tmr.c */
+int	core_tmr_lun_reset(struct se_device *, struct se_tmr_req *,
+		struct list_head *, struct se_cmd *);
+
+/* target_core_tpg.c */
+extern struct se_device *g_lun0_dev;
+
+struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
+		const char *);
+struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
+		unsigned char *);
+void	core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *);
+void	core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
+struct se_lun *core_tpg_pre_addlun(struct se_portal_group *, u32);
+int	core_tpg_post_addlun(struct se_portal_group *, struct se_lun *,
+		u32, void *);
+struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32, int *);
+int	core_tpg_post_dellun(struct se_portal_group *, struct se_lun *);
+
+/* target_core_transport.c */
+extern struct kmem_cache *se_tmr_req_cache;
+
+int	init_se_kmem_caches(void);
+void	release_se_kmem_caches(void);
+u32	scsi_get_new_index(scsi_index_t);
+void	transport_subsystem_check_init(void);
+void	transport_cmd_finish_abort(struct se_cmd *, int);
+void	__transport_remove_task_from_execute_queue(struct se_task *,
+		struct se_device *);
+unsigned char *transport_dump_cmd_direction(struct se_cmd *);
+void	transport_dump_dev_state(struct se_device *, char *, int *);
+void	transport_dump_dev_info(struct se_device *, struct se_lun *,
+		unsigned long long, char *, int *);
+void	transport_dump_vpd_proto_id(struct t10_vpd *, unsigned char *, int);
+int	transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int);
+int	transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int);
+int	transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
+bool	target_stop_task(struct se_task *task, unsigned long *flags);
+int	transport_clear_lun_from_sessions(struct se_lun *);
+void	transport_send_task_abort(struct se_cmd *);
+
+/* target_core_stat.c */
+void	target_stat_setup_dev_default_groups(struct se_subsystem_dev *);
+void	target_stat_setup_port_default_groups(struct se_lun *);
+void	target_stat_setup_mappedlun_default_groups(struct se_lun_acl *);
+
+#endif /* TARGET_CORE_INTERNAL_H */
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 95dee70..429ad72 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -33,14 +33,11 @@
 #include <asm/unaligned.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tmr.h>
-#include <target/target_core_tpg.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
 #include <target/target_core_configfs.h>
 
-#include "target_core_hba.h"
+#include "target_core_internal.h"
 #include "target_core_pr.h"
 #include "target_core_ua.h"
 
@@ -2984,21 +2981,6 @@
 	}
 }
 
-int core_scsi3_check_cdb_abort_and_preempt(
-	struct list_head *preempt_and_abort_list,
-	struct se_cmd *cmd)
-{
-	struct t10_pr_registration *pr_reg, *pr_reg_tmp;
-
-	list_for_each_entry_safe(pr_reg, pr_reg_tmp, preempt_and_abort_list,
-				pr_reg_abort_list) {
-		if (pr_reg->pr_res_key == cmd->pr_res_key)
-			return 0;
-	}
-
-	return 1;
-}
-
 static int core_scsi3_pro_preempt(
 	struct se_cmd *cmd,
 	int type,
diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
index b97f694..7a233fe 100644
--- a/drivers/target/target_core_pr.h
+++ b/drivers/target/target_core_pr.h
@@ -60,8 +60,6 @@
 					     struct se_node_acl *);
 extern void core_scsi3_free_all_registrations(struct se_device *);
 extern unsigned char *core_scsi3_pr_dump_type(int);
-extern int core_scsi3_check_cdb_abort_and_preempt(struct list_head *,
-						  struct se_cmd *);
 
 extern int target_scsi3_emulate_pr_in(struct se_task *task);
 extern int target_scsi3_emulate_pr_out(struct se_task *task);
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 8b15e56..d35467d 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -44,8 +44,7 @@
 #include <scsi/scsi_tcq.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_transport.h>
+#include <target/target_core_backend.h>
 
 #include "target_core_pscsi.h"
 
@@ -105,7 +104,7 @@
 
 static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
 {
-	struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr;
+	struct pscsi_hba_virt *phv = hba->hba_ptr;
 	struct Scsi_Host *sh = phv->phv_lld_host;
 	/*
 	 * Release the struct Scsi_Host
@@ -351,7 +350,6 @@
 	 * scsi_device_put() and the pdv->pdv_sd cleared.
 	 */
 	pdv->pdv_sd = sd;
-
 	dev = transport_add_device_to_core_hba(hba, &pscsi_template,
 				se_dev, dev_flags, pdv,
 				&dev_limits, NULL, NULL);
@@ -406,7 +404,7 @@
 	__releases(sh->host_lock)
 {
 	struct se_device *dev;
-	struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
+	struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
 	struct Scsi_Host *sh = sd->host;
 	struct block_device *bd;
 	u32 dev_flags = 0;
@@ -454,7 +452,7 @@
 	__releases(sh->host_lock)
 {
 	struct se_device *dev;
-	struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
+	struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
 	struct Scsi_Host *sh = sd->host;
 	u32 dev_flags = 0;
 
@@ -489,7 +487,7 @@
 	__releases(sh->host_lock)
 {
 	struct se_device *dev;
-	struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
+	struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
 	struct Scsi_Host *sh = sd->host;
 	u32 dev_flags = 0;
 
@@ -510,10 +508,10 @@
 	struct se_subsystem_dev *se_dev,
 	void *p)
 {
-	struct pscsi_dev_virt *pdv = (struct pscsi_dev_virt *)p;
+	struct pscsi_dev_virt *pdv = p;
 	struct se_device *dev;
 	struct scsi_device *sd;
-	struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr;
+	struct pscsi_hba_virt *phv = hba->hba_ptr;
 	struct Scsi_Host *sh = phv->phv_lld_host;
 	int legacy_mode_enable = 0;
 
@@ -818,7 +816,7 @@
 
 	orig = opts;
 
-	while ((ptr = strsep(&opts, ",")) != NULL) {
+	while ((ptr = strsep(&opts, ",\n")) != NULL) {
 		if (!*ptr)
 			continue;
 
@@ -1144,7 +1142,7 @@
 {
 	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
 
-	return (unsigned char *)&pt->pscsi_sense[0];
+	return pt->pscsi_sense;
 }
 
 /*	pscsi_get_device_rev():
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 02e51fa..8b68f7b 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -37,9 +37,7 @@
 #include <scsi/scsi_host.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_backend.h>
 
 #include "target_core_rd.h"
 
@@ -474,7 +472,7 @@
 
 	orig = opts;
 
-	while ((ptr = strsep(&opts, ",")) != NULL) {
+	while ((ptr = strsep(&opts, ",\n")) != NULL) {
 		if (!*ptr)
 			continue;
 
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index 874152a..f8c2d2c 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -43,12 +43,12 @@
 #include <scsi/scsi_host.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
 #include <target/target_core_configfs.h>
 #include <target/configfs_macros.h>
 
-#include "target_core_hba.h"
+#include "target_core_internal.h"
 
 #ifndef INITIAL_JIFFIES
 #define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
@@ -1755,8 +1755,7 @@
 	/* scsiAttIntrPortName+scsiAttIntrPortIdentifier */
 	memset(buf, 0, 64);
 	if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL)
-		tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
-				(unsigned char *)&buf[0], 64);
+		tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, buf, 64);
 
 	ret = snprintf(page, PAGE_SIZE, "%s+i+%s\n", nacl->initiatorname, buf);
 	spin_unlock_irq(&nacl->nacl_sess_lock);
diff --git a/drivers/target/target_core_stat.h b/drivers/target/target_core_stat.h
deleted file mode 100644
index 86c252f..0000000
--- a/drivers/target/target_core_stat.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef TARGET_CORE_STAT_H
-#define TARGET_CORE_STAT_H
-
-extern void target_stat_setup_dev_default_groups(struct se_subsystem_dev *);
-extern void target_stat_setup_port_default_groups(struct se_lun *);
-extern void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *);
-
-#endif   /*** TARGET_CORE_STAT_H ***/
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 6845228..dcb0618 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -32,12 +32,11 @@
 #include <scsi/scsi_cmnd.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tmr.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
 #include <target/target_core_configfs.h>
 
+#include "target_core_internal.h"
 #include "target_core_alua.h"
 #include "target_core_pr.h"
 
@@ -101,6 +100,21 @@
 	transport_cmd_finish_abort(cmd, 0);
 }
 
+static int target_check_cdb_and_preempt(struct list_head *list,
+		struct se_cmd *cmd)
+{
+	struct t10_pr_registration *reg;
+
+	if (!list)
+		return 0;
+	list_for_each_entry(reg, list, pr_reg_abort_list) {
+		if (reg->pr_res_key == cmd->pr_res_key)
+			return 0;
+	}
+
+	return 1;
+}
+
 static void core_tmr_drain_tmr_list(
 	struct se_device *dev,
 	struct se_tmr_req *tmr,
@@ -132,9 +146,7 @@
 		 * parameter (e.g. for the PROUT PREEMPT_AND_ABORT service action),
 		 * skip non-registration-key matching TMRs.
 		 */
-		if (preempt_and_abort_list &&
-		    (core_scsi3_check_cdb_abort_and_preempt(
-					preempt_and_abort_list, cmd) != 0))
+		if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
 			continue;
 
 		spin_lock(&cmd->t_state_lock);
@@ -211,9 +223,7 @@
 		 * For PREEMPT_AND_ABORT usage, only process commands
 		 * with a matching reservation key.
 		 */
-		if (preempt_and_abort_list &&
-		    (core_scsi3_check_cdb_abort_and_preempt(
-					preempt_and_abort_list, cmd) != 0))
+		if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
 			continue;
 		/*
 		 * Not aborting PROUT PREEMPT_AND_ABORT CDB..
@@ -222,7 +232,7 @@
 			continue;
 
 		list_move_tail(&task->t_state_list, &drain_task_list);
-		atomic_set(&task->task_state_active, 0);
+		task->t_state_active = false;
 		/*
 		 * Remove from task execute list before processing drain_task_list
 		 */
@@ -321,9 +331,7 @@
 		 * For PREEMPT_AND_ABORT usage, only process commands
 		 * with a matching reservation key.
 		 */
-		if (preempt_and_abort_list &&
-		    (core_scsi3_check_cdb_abort_and_preempt(
-					preempt_and_abort_list, cmd) != 0))
+		if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
 			continue;
 		/*
 		 * Not aborting PROUT PREEMPT_AND_ABORT CDB..
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 8ddd133..b766802 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -39,13 +39,10 @@
 #include <scsi/scsi_cmnd.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tpg.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
 
-#include "target_core_hba.h"
-#include "target_core_stat.h"
+#include "target_core_internal.h"
 
 extern struct se_device *g_lun0_dev;
 
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 0257658..d3ddd13 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -45,16 +45,12 @@
 #include <scsi/scsi_tcq.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tmr.h>
-#include <target/target_core_tpg.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
 #include <target/target_core_configfs.h>
 
+#include "target_core_internal.h"
 #include "target_core_alua.h"
-#include "target_core_cdb.h"
-#include "target_core_hba.h"
 #include "target_core_pr.h"
 #include "target_core_ua.h"
 
@@ -72,7 +68,7 @@
 
 static int transport_generic_write_pending(struct se_cmd *);
 static int transport_processing_thread(void *param);
-static int __transport_execute_tasks(struct se_device *dev);
+static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *);
 static void transport_complete_task_attr(struct se_cmd *cmd);
 static void transport_handle_queue_full(struct se_cmd *cmd,
 		struct se_device *dev);
@@ -212,14 +208,13 @@
 	return new_index;
 }
 
-void transport_init_queue_obj(struct se_queue_obj *qobj)
+static void transport_init_queue_obj(struct se_queue_obj *qobj)
 {
 	atomic_set(&qobj->queue_cnt, 0);
 	INIT_LIST_HEAD(&qobj->qobj_list);
 	init_waitqueue_head(&qobj->thread_wq);
 	spin_lock_init(&qobj->cmd_queue_lock);
 }
-EXPORT_SYMBOL(transport_init_queue_obj);
 
 void transport_subsystem_check_init(void)
 {
@@ -426,18 +421,18 @@
 		if (task->task_flags & TF_ACTIVE)
 			continue;
 
-		if (!atomic_read(&task->task_state_active))
-			continue;
-
 		spin_lock_irqsave(&dev->execute_task_lock, flags);
-		list_del(&task->t_state_list);
-		pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
-			cmd->se_tfo->get_task_tag(cmd), dev, task);
-		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+		if (task->t_state_active) {
+			pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
+				cmd->se_tfo->get_task_tag(cmd), dev, task);
 
-		atomic_set(&task->task_state_active, 0);
-		atomic_dec(&cmd->t_task_cdbs_ex_left);
+			list_del(&task->t_state_list);
+			atomic_dec(&cmd->t_task_cdbs_ex_left);
+			task->t_state_active = false;
+		}
+		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
 	}
+
 }
 
 /*	transport_cmd_check_stop():
@@ -696,12 +691,6 @@
 	struct se_cmd *cmd = task->task_se_cmd;
 	struct se_device *dev = cmd->se_dev;
 	unsigned long flags;
-#if 0
-	pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task,
-			cmd->t_task_cdb[0], dev);
-#endif
-	if (dev)
-		atomic_inc(&dev->depth_left);
 
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	task->task_flags &= ~TF_ACTIVE;
@@ -714,7 +703,7 @@
 	if (dev && dev->transport->transport_complete) {
 		if (dev->transport->transport_complete(task) != 0) {
 			cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
-			task->task_sense = 1;
+			task->task_flags |= TF_HAS_SENSE;
 			success = 1;
 		}
 	}
@@ -743,13 +732,7 @@
 	}
 
 	if (cmd->t_tasks_failed) {
-		if (!task->task_error_status) {
-			task->task_error_status =
-				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-			cmd->scsi_sense_reason =
-				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-		}
-
+		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 		INIT_WORK(&cmd->work, target_complete_failure_work);
 	} else {
 		atomic_set(&cmd->t_transport_complete, 1);
@@ -824,7 +807,7 @@
 	head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
 	atomic_inc(&dev->execute_tasks);
 
-	if (atomic_read(&task->task_state_active))
+	if (task->t_state_active)
 		return;
 	/*
 	 * Determine if this task needs to go to HEAD_OF_QUEUE for the
@@ -838,7 +821,7 @@
 	else
 		list_add_tail(&task->t_state_list, &dev->state_task_list);
 
-	atomic_set(&task->task_state_active, 1);
+	task->t_state_active = true;
 
 	pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
 		task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd),
@@ -853,29 +836,26 @@
 
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	list_for_each_entry(task, &cmd->t_task_list, t_list) {
-		if (atomic_read(&task->task_state_active))
-			continue;
-
 		spin_lock(&dev->execute_task_lock);
-		list_add_tail(&task->t_state_list, &dev->state_task_list);
-		atomic_set(&task->task_state_active, 1);
+		if (!task->t_state_active) {
+			list_add_tail(&task->t_state_list,
+				      &dev->state_task_list);
+			task->t_state_active = true;
 
-		pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
-			task->task_se_cmd->se_tfo->get_task_tag(
-			task->task_se_cmd), task, dev);
-
+			pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
+				task->task_se_cmd->se_tfo->get_task_tag(
+				task->task_se_cmd), task, dev);
+		}
 		spin_unlock(&dev->execute_task_lock);
 	}
 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 }
 
-static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
+static void __transport_add_tasks_from_cmd(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
 	struct se_task *task, *task_prev = NULL;
-	unsigned long flags;
 
-	spin_lock_irqsave(&dev->execute_task_lock, flags);
 	list_for_each_entry(task, &cmd->t_task_list, t_list) {
 		if (!list_empty(&task->t_execute_list))
 			continue;
@@ -886,6 +866,15 @@
 		__transport_add_task_to_execute_queue(task, task_prev, dev);
 		task_prev = task;
 	}
+}
+
+static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
+{
+	unsigned long flags;
+	struct se_device *dev = cmd->se_dev;
+
+	spin_lock_irqsave(&dev->execute_task_lock, flags);
+	__transport_add_tasks_from_cmd(cmd);
 	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
 }
 
@@ -896,7 +885,7 @@
 	atomic_dec(&dev->execute_tasks);
 }
 
-void transport_remove_task_from_execute_queue(
+static void transport_remove_task_from_execute_queue(
 	struct se_task *task,
 	struct se_device *dev)
 {
@@ -983,9 +972,8 @@
 		break;
 	}
 
-	*bl += sprintf(b + *bl, "  Execute/Left/Max Queue Depth: %d/%d/%d",
-		atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left),
-		dev->queue_depth);
+	*bl += sprintf(b + *bl, "  Execute/Max Queue Depth: %d/%d",
+		atomic_read(&dev->execute_tasks), dev->queue_depth);
 	*bl += sprintf(b + *bl, "  SectorSize: %u  MaxSectors: %u\n",
 		dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors);
 	*bl += sprintf(b + *bl, "        ");
@@ -1340,9 +1328,6 @@
 	spin_lock_init(&dev->se_port_lock);
 	spin_lock_init(&dev->se_tmr_lock);
 	spin_lock_init(&dev->qf_cmd_lock);
-
-	dev->queue_depth	= dev_limits->queue_depth;
-	atomic_set(&dev->depth_left, dev->queue_depth);
 	atomic_set(&dev->dev_ordered_id, 0);
 
 	se_dev_set_default_attribs(dev, dev_limits);
@@ -1654,6 +1639,80 @@
 }
 EXPORT_SYMBOL(transport_handle_cdb_direct);
 
+/**
+ * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
+ *
+ * @se_cmd: command descriptor to submit
+ * @se_sess: associated se_sess for endpoint
+ * @cdb: pointer to SCSI CDB
+ * @sense: pointer to SCSI sense buffer
+ * @unpacked_lun: unpacked LUN to reference for struct se_lun
+ * @data_length: fabric expected data transfer length
+ * @task_attr: SAM task attribute
+ * @data_dir: DMA data direction
+ * @flags: flags for command submission from target_sc_flags_tables
+ *
+ * This may only be called from process context, and also currently
+ * assumes internal allocation of fabric payload buffer by target-core.
+ **/
+int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
+		unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
+		u32 data_length, int task_attr, int data_dir, int flags)
+{
+	struct se_portal_group *se_tpg;
+	int rc;
+
+	se_tpg = se_sess->se_tpg;
+	BUG_ON(!se_tpg);
+	BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
+	BUG_ON(in_interrupt());
+	/*
+	 * Initialize se_cmd for target operation.  From this point
+	 * exceptions are handled by sending exception status via
+	 * target_core_fabric_ops->queue_status() callback
+	 */
+	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
+				data_length, data_dir, task_attr, sense);
+	/*
+	 * Obtain struct se_cmd->cmd_kref reference and add new cmd to
+	 * se_sess->sess_cmd_list.  A second kref_get here is necessary
+	 * for fabrics using TARGET_SCF_ACK_KREF that expect a second
+	 * kref_put() to happen during fabric packet acknowledgement.
+	 */
+	target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
+	/*
+	 * Signal bidirectional data payloads to target-core
+	 */
+	if (flags & TARGET_SCF_BIDI_OP)
+		se_cmd->se_cmd_flags |= SCF_BIDI;
+	/*
+	 * Locate se_lun pointer and attach it to struct se_cmd
+	 */
+	if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0)
+		goto out_check_cond;
+	/*
+	 * Sanitize CDBs via transport_generic_cmd_sequencer() and
+	 * allocate the necessary tasks to complete the received CDB+data
+	 */
+	rc = transport_generic_allocate_tasks(se_cmd, cdb);
+	if (rc != 0)
+		goto out_check_cond;
+	/*
+	 * Dispatch se_cmd descriptor to se_lun->lun_se_dev backend
+	 * for immediate execution of READs, otherwise wait for
+	 * transport_generic_handle_data() to be called for WRITEs
+	 * when fabric has filled the incoming buffer.
+	 */
+	transport_handle_cdb_direct(se_cmd);
+	return 0;
+
+out_check_cond:
+	transport_send_check_condition_and_sense(se_cmd,
+				se_cmd->scsi_sense_reason, 0);
+	return 0;
+}
+EXPORT_SYMBOL(target_submit_cmd);
+
 /*
  * Used by fabric module frontends defining a TFO->new_cmd_map() caller
  * to  queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
@@ -1920,18 +1979,6 @@
 	spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
 }
 
-static inline int transport_tcq_window_closed(struct se_device *dev)
-{
-	if (dev->dev_tcq_window_closed++ <
-			PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) {
-		msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT);
-	} else
-		msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG);
-
-	wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
-	return 0;
-}
-
 /*
  * Called from Fabric Module context from transport_execute_tasks()
  *
@@ -2014,13 +2061,7 @@
 static int transport_execute_tasks(struct se_cmd *cmd)
 {
 	int add_tasks;
-
-	if (se_dev_check_online(cmd->se_dev) != 0) {
-		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-		transport_generic_request_failure(cmd);
-		return 0;
-	}
-
+	struct se_device *se_dev = cmd->se_dev;
 	/*
 	 * Call transport_cmd_check_stop() to see if a fabric exception
 	 * has occurred that prevents execution.
@@ -2034,19 +2075,16 @@
 		if (!add_tasks)
 			goto execute_tasks;
 		/*
-		 * This calls transport_add_tasks_from_cmd() to handle
-		 * HEAD_OF_QUEUE ordering for SAM Task Attribute emulation
-		 * (if enabled) in __transport_add_task_to_execute_queue() and
-		 * transport_add_task_check_sam_attr().
+		 * __transport_execute_tasks() -> __transport_add_tasks_from_cmd()
+		 * adds associated se_tasks while holding dev->execute_task_lock
+		 * before I/O dispatch to avoid a double spinlock access.
 		 */
-		transport_add_tasks_from_cmd(cmd);
+		__transport_execute_tasks(se_dev, cmd);
+		return 0;
 	}
-	/*
-	 * Kick the execution queue for the cmd associated struct se_device
-	 * storage object.
-	 */
+
 execute_tasks:
-	__transport_execute_tasks(cmd->se_dev);
+	__transport_execute_tasks(se_dev, NULL);
 	return 0;
 }
 
@@ -2056,24 +2094,18 @@
  *
  * Called from transport_processing_thread()
  */
-static int __transport_execute_tasks(struct se_device *dev)
+static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *new_cmd)
 {
 	int error;
 	struct se_cmd *cmd = NULL;
 	struct se_task *task = NULL;
 	unsigned long flags;
 
-	/*
-	 * Check if there is enough room in the device and HBA queue to send
-	 * struct se_tasks to the selected transport.
-	 */
 check_depth:
-	if (!atomic_read(&dev->depth_left))
-		return transport_tcq_window_closed(dev);
-
-	dev->dev_tcq_window_closed = 0;
-
 	spin_lock_irq(&dev->execute_task_lock);
+	if (new_cmd != NULL)
+		__transport_add_tasks_from_cmd(new_cmd);
+
 	if (list_empty(&dev->execute_task_list)) {
 		spin_unlock_irq(&dev->execute_task_lock);
 		return 0;
@@ -2083,10 +2115,7 @@
 	__transport_remove_task_from_execute_queue(task, dev);
 	spin_unlock_irq(&dev->execute_task_lock);
 
-	atomic_dec(&dev->depth_left);
-
 	cmd = task->task_se_cmd;
-
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	task->task_flags |= (TF_ACTIVE | TF_SENT);
 	atomic_inc(&cmd->t_task_cdbs_sent);
@@ -2107,10 +2136,10 @@
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		atomic_set(&cmd->t_transport_sent, 0);
 		transport_stop_tasks_for_cmd(cmd);
-		atomic_inc(&dev->depth_left);
 		transport_generic_request_failure(cmd);
 	}
 
+	new_cmd = NULL;
 	goto check_depth;
 
 	return 0;
@@ -2351,7 +2380,7 @@
 
 	list_for_each_entry_safe(task, task_tmp,
 				&cmd->t_task_list, t_list) {
-		if (!task->task_sense)
+		if (!(task->task_flags & TF_HAS_SENSE))
 			continue;
 
 		if (!dev->transport->get_sense_buffer) {
@@ -3346,6 +3375,32 @@
 }
 
 /**
+ * transport_release_cmd - free a command
+ * @cmd:       command to free
+ *
+ * This routine unconditionally frees a command, and reference counting
+ * or list removal must be done in the caller.
+ */
+static void transport_release_cmd(struct se_cmd *cmd)
+{
+	BUG_ON(!cmd->se_tfo);
+
+	if (cmd->se_tmr_req)
+		core_tmr_release_req(cmd->se_tmr_req);
+	if (cmd->t_task_cdb != cmd->__t_task_cdb)
+		kfree(cmd->t_task_cdb);
+	/*
+	 * If this cmd has been setup with target_get_sess_cmd(), drop
+	 * the kref and call ->release_cmd() in kref callback.
+	 */
+	if (cmd->check_release != 0) {
+		target_put_sess_cmd(cmd->se_sess, cmd);
+		return;
+	}
+	cmd->se_tfo->release_cmd(cmd);
+}
+
+/**
  * transport_put_cmd - release a reference to a command
  * @cmd:       command to release
  *
@@ -3870,33 +3925,6 @@
 	return 0;
 }
 
-/**
- * transport_release_cmd - free a command
- * @cmd:       command to free
- *
- * This routine unconditionally frees a command, and reference counting
- * or list removal must be done in the caller.
- */
-void transport_release_cmd(struct se_cmd *cmd)
-{
-	BUG_ON(!cmd->se_tfo);
-
-	if (cmd->se_tmr_req)
-		core_tmr_release_req(cmd->se_tmr_req);
-	if (cmd->t_task_cdb != cmd->__t_task_cdb)
-		kfree(cmd->t_task_cdb);
-	/*
-	 * Check if target_wait_for_sess_cmds() is expecting to
-	 * release se_cmd directly here..
-	 */
-	if (cmd->check_release != 0 && cmd->se_tfo->check_release_cmd)
-		if (cmd->se_tfo->check_release_cmd(cmd) != 0)
-			return;
-
-	cmd->se_tfo->release_cmd(cmd);
-}
-EXPORT_SYMBOL(transport_release_cmd);
-
 void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
 {
 	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
@@ -3923,11 +3951,22 @@
 /* target_get_sess_cmd - Add command to active ->sess_cmd_list
  * @se_sess:	session to reference
  * @se_cmd:	command descriptor to add
+ * @ack_kref:	Signal that fabric will perform an ack target_put_sess_cmd()
  */
-void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
+void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
+			bool ack_kref)
 {
 	unsigned long flags;
 
+	kref_init(&se_cmd->cmd_kref);
+	/*
+	 * Add a second kref if the fabric caller is expecting to handle
+	 * fabric acknowledgement that requires two target_put_sess_cmd()
+	 * invocations before se_cmd descriptor release.
+	 */
+	if (ack_kref == true)
+		kref_get(&se_cmd->cmd_kref);
+
 	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
 	list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
 	se_cmd->check_release = 1;
@@ -3935,30 +3974,36 @@
 }
 EXPORT_SYMBOL(target_get_sess_cmd);
 
-/* target_put_sess_cmd - Check for active I/O shutdown or list delete
- * @se_sess: 	session to reference
- * @se_cmd:	command descriptor to drop
- */
-int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
+static void target_release_cmd_kref(struct kref *kref)
 {
+	struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
+	struct se_session *se_sess = se_cmd->se_sess;
 	unsigned long flags;
 
 	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
 	if (list_empty(&se_cmd->se_cmd_list)) {
 		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 		WARN_ON(1);
-		return 0;
+		return;
 	}
-
 	if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
 		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 		complete(&se_cmd->cmd_wait_comp);
-		return 1;
+		return;
 	}
 	list_del(&se_cmd->se_cmd_list);
 	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 
-	return 0;
+	se_cmd->se_tfo->release_cmd(se_cmd);
+}
+
+/* target_put_sess_cmd - Check for active I/O shutdown via kref_put
+ * @se_sess:	session to reference
+ * @se_cmd:	command descriptor to drop
+ */
+int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
+{
+	return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
 }
 EXPORT_SYMBOL(target_put_sess_cmd);
 
@@ -4174,7 +4219,7 @@
 
 static int transport_clear_lun_thread(void *p)
 {
-	struct se_lun *lun = (struct se_lun *)p;
+	struct se_lun *lun = p;
 
 	__transport_clear_lun_from_sessions(lun);
 	complete(&lun->lun_shutdown_comp);
@@ -4353,6 +4398,7 @@
 	case TCM_NON_EXISTENT_LUN:
 		/* CURRENT ERROR */
 		buffer[offset] = 0x70;
+		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
 		/* ILLEGAL REQUEST */
 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
 		/* LOGICAL UNIT NOT SUPPORTED */
@@ -4362,6 +4408,7 @@
 	case TCM_SECTOR_COUNT_TOO_MANY:
 		/* CURRENT ERROR */
 		buffer[offset] = 0x70;
+		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
 		/* ILLEGAL REQUEST */
 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
 		/* INVALID COMMAND OPERATION CODE */
@@ -4370,6 +4417,7 @@
 	case TCM_UNKNOWN_MODE_PAGE:
 		/* CURRENT ERROR */
 		buffer[offset] = 0x70;
+		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
 		/* ILLEGAL REQUEST */
 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
 		/* INVALID FIELD IN CDB */
@@ -4378,6 +4426,7 @@
 	case TCM_CHECK_CONDITION_ABORT_CMD:
 		/* CURRENT ERROR */
 		buffer[offset] = 0x70;
+		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
 		/* ABORTED COMMAND */
 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
 		/* BUS DEVICE RESET FUNCTION OCCURRED */
@@ -4387,6 +4436,7 @@
 	case TCM_INCORRECT_AMOUNT_OF_DATA:
 		/* CURRENT ERROR */
 		buffer[offset] = 0x70;
+		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
 		/* ABORTED COMMAND */
 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
 		/* WRITE ERROR */
@@ -4397,6 +4447,7 @@
 	case TCM_INVALID_CDB_FIELD:
 		/* CURRENT ERROR */
 		buffer[offset] = 0x70;
+		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
 		/* ABORTED COMMAND */
 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
 		/* INVALID FIELD IN CDB */
@@ -4405,6 +4456,7 @@
 	case TCM_INVALID_PARAMETER_LIST:
 		/* CURRENT ERROR */
 		buffer[offset] = 0x70;
+		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
 		/* ABORTED COMMAND */
 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
 		/* INVALID FIELD IN PARAMETER LIST */
@@ -4413,6 +4465,7 @@
 	case TCM_UNEXPECTED_UNSOLICITED_DATA:
 		/* CURRENT ERROR */
 		buffer[offset] = 0x70;
+		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
 		/* ABORTED COMMAND */
 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
 		/* WRITE ERROR */
@@ -4423,6 +4476,7 @@
 	case TCM_SERVICE_CRC_ERROR:
 		/* CURRENT ERROR */
 		buffer[offset] = 0x70;
+		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
 		/* ABORTED COMMAND */
 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
 		/* PROTOCOL SERVICE CRC ERROR */
@@ -4433,6 +4487,7 @@
 	case TCM_SNACK_REJECTED:
 		/* CURRENT ERROR */
 		buffer[offset] = 0x70;
+		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
 		/* ABORTED COMMAND */
 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
 		/* READ ERROR */
@@ -4443,6 +4498,7 @@
 	case TCM_WRITE_PROTECTED:
 		/* CURRENT ERROR */
 		buffer[offset] = 0x70;
+		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
 		/* DATA PROTECT */
 		buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
 		/* WRITE PROTECTED */
@@ -4451,6 +4507,7 @@
 	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
 		/* CURRENT ERROR */
 		buffer[offset] = 0x70;
+		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
 		/* UNIT ATTENTION */
 		buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
 		core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
@@ -4460,6 +4517,7 @@
 	case TCM_CHECK_CONDITION_NOT_READY:
 		/* CURRENT ERROR */
 		buffer[offset] = 0x70;
+		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
 		/* Not Ready */
 		buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
 		transport_get_sense_codes(cmd, &asc, &ascq);
@@ -4470,6 +4528,7 @@
 	default:
 		/* CURRENT ERROR */
 		buffer[offset] = 0x70;
+		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
 		/* ILLEGAL REQUEST */
 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
 		/* LOGICAL UNIT COMMUNICATION FAILURE */
@@ -4545,11 +4604,7 @@
 	cmd->se_tfo->queue_status(cmd);
 }
 
-/*	transport_generic_do_tmr():
- *
- *
- */
-int transport_generic_do_tmr(struct se_cmd *cmd)
+static int transport_generic_do_tmr(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
 	struct se_tmr_req *tmr = cmd->se_tmr_req;
@@ -4597,7 +4652,7 @@
 {
 	int ret;
 	struct se_cmd *cmd;
-	struct se_device *dev = (struct se_device *) param;
+	struct se_device *dev = param;
 
 	while (!kthread_should_stop()) {
 		ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
@@ -4607,8 +4662,6 @@
 			goto out;
 
 get_cmd:
-		__transport_execute_tasks(dev);
-
 		cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
 		if (!cmd)
 			continue;
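
As a reference for fabric drivers, here is a minimal sketch (not part of the patch) of how the new target_submit_cmd() helper above is meant to be driven. The my_fabric_* names, the MSG_SIMPLE_TAG attribute and the DMA direction choice are illustrative assumptions; the signature, the unconditional zero return and the TARGET_SCF_ACK_KREF flag follow the hunk above.

#include <linux/types.h>
#include <linux/dma-direction.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

/* hypothetical per-command state kept by the fabric driver */
struct my_fabric_cmd {
	struct se_cmd	se_cmd;
	unsigned char	sense_buf[TRANSPORT_SENSE_BUFFER];
};

static int my_fabric_queue_scsi_cmd(struct se_session *se_sess,
				    struct my_fabric_cmd *fcmd,
				    unsigned char *cdb, u32 unpacked_lun,
				    u32 data_length, bool is_write)
{
	/*
	 * One call initializes the se_cmd, takes the cmd_kref reference,
	 * resolves the LUN and dispatches the CDB.  Failures are reported
	 * back as CHECK_CONDITION by target-core itself, so this version
	 * of the interface always returns 0.
	 */
	return target_submit_cmd(&fcmd->se_cmd, se_sess, cdb,
				 fcmd->sense_buf, unpacked_lun, data_length,
				 MSG_SIMPLE_TAG,
				 is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
				 0 /* or TARGET_SCF_ACK_KREF */);
}

The tcm_fc conversion further down in this diff uses the same call with flags of 0, since it releases the command from its ft_check_stop_free callback.
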
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index 50a480d..3e12f6b 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -30,13 +30,11 @@
 #include <scsi/scsi_cmnd.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric.h>
 #include <target/target_core_configfs.h>
 
+#include "target_core_internal.h"
 #include "target_core_alua.h"
-#include "target_core_hba.h"
 #include "target_core_pr.h"
 #include "target_core_ua.h"
 
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 71fc9ce..addc18f 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -39,12 +39,8 @@
 #include <scsi/fc_encode.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tpg.h>
+#include <target/target_core_fabric.h>
 #include <target/target_core_configfs.h>
-#include <target/target_core_tmr.h>
 #include <target/configfs_macros.h>
 
 #include "tcm_fc.h"
@@ -367,6 +363,11 @@
 	struct ft_sess *sess;
 	u8 tm_func;
 
+	transport_init_se_cmd(&cmd->se_cmd, &ft_configfs->tf_ops,
+			cmd->sess->se_sess, 0, DMA_NONE, 0,
+			&cmd->ft_sense_buffer[0]);
+	target_get_sess_cmd(cmd->sess->se_sess, &cmd->se_cmd, false);
+
 	fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
 
 	switch (fcp->fc_tm_flags) {
@@ -420,7 +421,6 @@
 			sess = cmd->sess;
 			transport_send_check_condition_and_sense(&cmd->se_cmd,
 				cmd->se_cmd.scsi_sense_reason, 0);
-			transport_generic_free_cmd(&cmd->se_cmd, 0);
 			ft_sess_put(sess);
 			return;
 		}
@@ -536,7 +536,6 @@
 {
 	struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);
 	struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame);
-	struct se_cmd *se_cmd;
 	struct fcp_cmnd *fcp;
 	int data_dir = 0;
 	u32 data_len;
@@ -591,15 +590,6 @@
 		data_len = ntohl(fcp->fc_dl);
 		cmd->cdb = fcp->fc_cdb;
 	}
-
-	se_cmd = &cmd->se_cmd;
-	/*
-	 * Initialize struct se_cmd descriptor from target_core_mod
-	 * infrastructure
-	 */
-	transport_init_se_cmd(se_cmd, &ft_configfs->tf_ops, cmd->sess->se_sess,
-			      data_len, data_dir, task_attr,
-			      &cmd->ft_sense_buffer[0]);
 	/*
 	 * Check for FCP task management flags
 	 */
@@ -607,39 +597,20 @@
 		ft_send_tm(cmd);
 		return;
 	}
-
 	fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
-
 	cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
-	ret = transport_lookup_cmd_lun(&cmd->se_cmd, cmd->lun);
+	/*
+	 * Use a single se_cmd->cmd_kref as we expect to release se_cmd
+	 * directly from ft_check_stop_free callback in response path.
+	 */
+	ret = target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, cmd->cdb,
+				&cmd->ft_sense_buffer[0], cmd->lun, data_len,
+				task_attr, data_dir, 0);
+	pr_debug("r_ctl %x alloc target_submit_cmd %d\n", fh->fh_r_ctl, ret);
 	if (ret < 0) {
 		ft_dump_cmd(cmd, __func__);
-		transport_send_check_condition_and_sense(&cmd->se_cmd,
-			cmd->se_cmd.scsi_sense_reason, 0);
 		return;
 	}
-
-	ret = transport_generic_allocate_tasks(se_cmd, cmd->cdb);
-
-	pr_debug("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret);
-	ft_dump_cmd(cmd, __func__);
-
-	if (ret == -ENOMEM) {
-		transport_send_check_condition_and_sense(se_cmd,
-				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
-		transport_generic_free_cmd(se_cmd, 0);
-		return;
-	}
-	if (ret == -EINVAL) {
-		if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
-			ft_queue_status(se_cmd);
-		else
-			transport_send_check_condition_and_sense(se_cmd,
-					se_cmd->scsi_sense_reason, 0);
-		transport_generic_free_cmd(se_cmd, 0);
-		return;
-	}
-	transport_handle_cdb_direct(se_cmd);
 	return;
 
 err:
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 9402b73..73852fb 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -41,12 +41,8 @@
 #include <scsi/libfc.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric.h>
 #include <target/target_core_fabric_configfs.h>
-#include <target/target_core_fabric_lib.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tpg.h>
 #include <target/target_core_configfs.h>
 #include <target/configfs_macros.h>
 
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index 1369b1c..d8cabc2 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -48,10 +48,7 @@
 #include <scsi/fc_encode.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tpg.h>
+#include <target/target_core_fabric.h>
 #include <target/target_core_configfs.h>
 #include <target/configfs_macros.h>
 
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index 3269213..4c0507c 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -40,10 +40,7 @@
 #include <scsi/libfc.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tpg.h>
+#include <target/target_core_fabric.h>
 #include <target/target_core_configfs.h>
 #include <target/configfs_macros.h>
 
diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
index 66bc74d..378276c 100644
--- a/drivers/video/backlight/adp8860_bl.c
+++ b/drivers/video/backlight/adp8860_bl.c
@@ -146,7 +146,7 @@
 
 	ret = adp8860_read(client, reg, &reg_val);
 
-	if (!ret && ((reg_val & bit_mask) == 0)) {
+	if (!ret && ((reg_val & bit_mask) != bit_mask)) {
 		reg_val |= bit_mask;
 		ret = adp8860_write(client, reg, reg_val);
 	}
diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c
index 6c68a68..6735059 100644
--- a/drivers/video/backlight/adp8870_bl.c
+++ b/drivers/video/backlight/adp8870_bl.c
@@ -160,7 +160,7 @@
 
 	ret = adp8870_read(client, reg, &reg_val);
 
-	if (!ret && ((reg_val & bit_mask) == 0)) {
+	if (!ret && ((reg_val & bit_mask) != bit_mask)) {
 		reg_val |= bit_mask;
 		ret = adp8870_write(client, reg, reg_val);
 	}
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c
index 4f5d1c4..27d1d7a 100644
--- a/drivers/video/backlight/l4f00242t03.c
+++ b/drivers/video/backlight/l4f00242t03.c
@@ -190,6 +190,7 @@
 
 	priv->io_reg = regulator_get(&spi->dev, "vdd");
 	if (IS_ERR(priv->io_reg)) {
+		ret = PTR_ERR(priv->io_reg);
 		dev_err(&spi->dev, "%s: Unable to get the IO regulator\n",
 		       __func__);
 		goto err3;
@@ -197,6 +198,7 @@
 
 	priv->core_reg = regulator_get(&spi->dev, "vcore");
 	if (IS_ERR(priv->core_reg)) {
+		ret = PTR_ERR(priv->core_reg);
 		dev_err(&spi->dev, "%s: Unable to get the core regulator\n",
 		       __func__);
 		goto err4;
diff --git a/drivers/xen/biomerge.c b/drivers/xen/biomerge.c
index ba6eda4..0edb91c 100644
--- a/drivers/xen/biomerge.c
+++ b/drivers/xen/biomerge.c
@@ -1,5 +1,6 @@
 #include <linux/bio.h>
 #include <linux/io.h>
+#include <linux/export.h>
 #include <xen/page.h>
 
 bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
@@ -11,3 +12,4 @@
 	return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&
 		((mfn1 == mfn2) || ((mfn1+1) == mfn2));
 }
+EXPORT_SYMBOL(xen_biovec_phys_mergeable);
diff --git a/firmware/Makefile b/firmware/Makefile
index 5f43bfb..0d15a3d 100644
--- a/firmware/Makefile
+++ b/firmware/Makefile
@@ -82,7 +82,6 @@
 fw-shipped-$(CONFIG_PCMCIA_SMC91C92) += ositech/Xilinx7OD.bin
 fw-shipped-$(CONFIG_SCSI_ADVANSYS) += advansys/mcode.bin advansys/38C1600.bin \
 				      advansys/3550.bin advansys/38C0800.bin
-fw-shipped-$(CONFIG_SCSI_ISCI) += isci/isci_firmware.bin
 fw-shipped-$(CONFIG_SCSI_QLOGIC_1280) += qlogic/1040.bin qlogic/1280.bin \
 					 qlogic/12160.bin
 fw-shipped-$(CONFIG_SCSI_QLOGICPTI) += qlogic/isp1000.bin
diff --git a/firmware/isci/isci_firmware.bin.ihex b/firmware/isci/isci_firmware.bin.ihex
deleted file mode 100644
index 2e66195..0000000
--- a/firmware/isci/isci_firmware.bin.ihex
+++ /dev/null
@@ -1,16 +0,0 @@
-:10000000495343554F454D42E80018100002000087
-:1000100000000000000000000101000000000000DE
-:10002000FFFFCF5F0100000008DD0B0000FC0F00A8
-:10003000097C0B006EFC0A00FFFFCF5F010000008F
-:1000400008DD0B0000FC0F00097C0B006EFC0A00B1
-:10005000FFFFCF5F0100000008DD0B0000FC0F0078
-:10006000097C0B006EFC0A00FFFFCF5F010000005F
-:1000700008DD0B0000FC0F00097C0B006EFC0A0081
-:100080000101000000000000FFFFCF5F0200000040
-:1000900008DD0B0000FC0F00097C0B006EFC0A0061
-:1000A000FFFFCF5F0200000008DD0B0000FC0F0027
-:1000B000097C0B006EFC0A00FFFFCF5F020000000E
-:1000C00008DD0B0000FC0F00097C0B006EFC0A0031
-:1000D000FFFFCF5F0200000008DD0B0000FC0F00F7
-:0800E000097C0B006EFC0A0014
-:00000001FF
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index f66cc16..0554b00 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -140,7 +140,6 @@
 
 config CIFS_FSCACHE
 	  bool "Provide CIFS client caching support (EXPERIMENTAL)"
-	  depends on EXPERIMENTAL
 	  depends on CIFS=m && FSCACHE || CIFS=y && FSCACHE=y
 	  help
 	    Makes CIFS FS-Cache capable. Say Y here if you want your CIFS data
@@ -149,7 +148,7 @@
 
 config CIFS_ACL
 	  bool "Provide CIFS ACL support (EXPERIMENTAL)"
-	  depends on EXPERIMENTAL && CIFS_XATTR && KEYS
+	  depends on CIFS_XATTR && KEYS
 	  help
 	    Allows to fetch CIFS/NTFS ACL from the server.  The DACL blob
 	    is handed over to the application/caller.
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 84e8c07..24b3dfc 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -676,14 +676,23 @@
 {
 	char c;
 	int rc;
+	static bool warned;
 
 	rc = get_user(c, buffer);
 	if (rc)
 		return rc;
 	if (c == '0' || c == 'n' || c == 'N')
 		multiuser_mount = 0;
-	else if (c == '1' || c == 'y' || c == 'Y')
+	else if (c == '1' || c == 'y' || c == 'Y') {
 		multiuser_mount = 1;
+		if (!warned) {
+			warned = true;
+			printk(KERN_WARNING "CIFS VFS: The legacy multiuser "
+				"mount code is scheduled to be deprecated in "
+				"3.5. Please switch to using the multiuser "
+				"mount option.");
+		}
+	}
 
 	return count;
 }
diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c
index 2272fd5..e622863 100644
--- a/fs/cifs/cifs_spnego.c
+++ b/fs/cifs/cifs_spnego.c
@@ -113,9 +113,11 @@
 		   MAX_MECH_STR_LEN +
 		   UID_KEY_LEN + (sizeof(uid_t) * 2) +
 		   CREDUID_KEY_LEN + (sizeof(uid_t) * 2) +
-		   USER_KEY_LEN + strlen(sesInfo->user_name) +
 		   PID_KEY_LEN + (sizeof(pid_t) * 2) + 1;
 
+	if (sesInfo->user_name)
+		desc_len += USER_KEY_LEN + strlen(sesInfo->user_name);
+
 	spnego_key = ERR_PTR(-ENOMEM);
 	description = kzalloc(desc_len, GFP_KERNEL);
 	if (description == NULL)
@@ -152,8 +154,10 @@
 	dp = description + strlen(description);
 	sprintf(dp, ";creduid=0x%x", sesInfo->cred_uid);
 
-	dp = description + strlen(description);
-	sprintf(dp, ";user=%s", sesInfo->user_name);
+	if (sesInfo->user_name) {
+		dp = description + strlen(description);
+		sprintf(dp, ";user=%s", sesInfo->user_name);
+	}
 
 	dp = description + strlen(description);
 	sprintf(dp, ";pid=0x%x", current->pid);
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index 1b2e180..fbb9da9 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -27,17 +27,17 @@
 #include "cifs_debug.h"
 
 /*
- * cifs_ucs2_bytes - how long will a string be after conversion?
- * @ucs - pointer to input string
+ * cifs_utf16_bytes - how long will a string be after conversion?
+ * @utf16 - pointer to input string
  * @maxbytes - don't go past this many bytes of input string
  * @codepage - destination codepage
  *
- * Walk a ucs2le string and return the number of bytes that the string will
+ * Walk a utf16le string and return the number of bytes that the string will
  * be after being converted to the given charset, not including any null
  * termination required. Don't walk past maxbytes in the source buffer.
  */
 int
-cifs_ucs2_bytes(const __le16 *from, int maxbytes,
+cifs_utf16_bytes(const __le16 *from, int maxbytes,
 		const struct nls_table *codepage)
 {
 	int i;
@@ -122,7 +122,7 @@
 }
 
 /*
- * cifs_from_ucs2 - convert utf16le string to local charset
+ * cifs_from_utf16 - convert utf16le string to local charset
  * @to - destination buffer
  * @from - source buffer
  * @tolen - destination buffer size (in bytes)
@@ -130,7 +130,7 @@
  * @codepage - codepage to which characters should be converted
  * @mapchar - should characters be remapped according to the mapchars option?
  *
- * Convert a little-endian ucs2le string (as sent by the server) to a string
+ * Convert a little-endian utf16le string (as sent by the server) to a string
  * in the provided codepage. The tolen and fromlen parameters are to ensure
  * that the code doesn't walk off of the end of the buffer (which is always
  * a danger if the alignment of the source buffer is off). The destination
@@ -139,12 +139,12 @@
  * null terminator).
  *
  * Note that some windows versions actually send multiword UTF-16 characters
- * instead of straight UCS-2. The linux nls routines however aren't able to
+ * instead of straight UCS-2. The linux nls routines however aren't able to
  * deal with those characters properly. In the event that we get some of
  * those characters, they won't be translated properly.
  */
 int
-cifs_from_ucs2(char *to, const __le16 *from, int tolen, int fromlen,
+cifs_from_utf16(char *to, const __le16 *from, int tolen, int fromlen,
 		 const struct nls_table *codepage, bool mapchar)
 {
 	int i, charlen, safelen;
@@ -190,13 +190,13 @@
 }
 
 /*
- * NAME:	cifs_strtoUCS()
+ * NAME:	cifs_strtoUTF16()
  *
  * FUNCTION:	Convert character string to unicode string
  *
  */
 int
-cifs_strtoUCS(__le16 *to, const char *from, int len,
+cifs_strtoUTF16(__le16 *to, const char *from, int len,
 	      const struct nls_table *codepage)
 {
 	int charlen;
@@ -206,7 +206,7 @@
 	for (i = 0; len && *from; i++, from += charlen, len -= charlen) {
 		charlen = codepage->char2uni(from, len, &wchar_to);
 		if (charlen < 1) {
-			cERROR(1, "strtoUCS: char2uni of 0x%x returned %d",
+			cERROR(1, "strtoUTF16: char2uni of 0x%x returned %d",
 				*from, charlen);
 			/* A question mark */
 			wchar_to = 0x003f;
@@ -220,7 +220,8 @@
 }
 
 /*
- * cifs_strndup_from_ucs - copy a string from wire format to the local codepage
+ * cifs_strndup_from_utf16 - copy a string from wire format to the local
+ * codepage
  * @src - source string
  * @maxlen - don't walk past this many bytes in the source string
  * @is_unicode - is this a unicode string?
@@ -231,19 +232,19 @@
  * error.
  */
 char *
-cifs_strndup_from_ucs(const char *src, const int maxlen, const bool is_unicode,
-	     const struct nls_table *codepage)
+cifs_strndup_from_utf16(const char *src, const int maxlen,
+			const bool is_unicode, const struct nls_table *codepage)
 {
 	int len;
 	char *dst;
 
 	if (is_unicode) {
-		len = cifs_ucs2_bytes((__le16 *) src, maxlen, codepage);
+		len = cifs_utf16_bytes((__le16 *) src, maxlen, codepage);
 		len += nls_nullsize(codepage);
 		dst = kmalloc(len, GFP_KERNEL);
 		if (!dst)
 			return NULL;
-		cifs_from_ucs2(dst, (__le16 *) src, len, maxlen, codepage,
+		cifs_from_utf16(dst, (__le16 *) src, len, maxlen, codepage,
 			       false);
 	} else {
 		len = strnlen(src, maxlen);
@@ -264,7 +265,7 @@
  * names are little endian 16 bit Unicode on the wire
  */
 int
-cifsConvertToUCS(__le16 *target, const char *source, int srclen,
+cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
 		 const struct nls_table *cp, int mapChars)
 {
 	int i, j, charlen;
@@ -273,7 +274,7 @@
 	wchar_t tmp;
 
 	if (!mapChars)
-		return cifs_strtoUCS(target, source, PATH_MAX, cp);
+		return cifs_strtoUTF16(target, source, PATH_MAX, cp);
 
 	for (i = 0, j = 0; i < srclen; j++) {
 		src_char = source[i];
@@ -281,7 +282,7 @@
 		switch (src_char) {
 		case 0:
 			put_unaligned(0, &target[j]);
-			goto ctoUCS_out;
+			goto ctoUTF16_out;
 		case ':':
 			dst_char = cpu_to_le16(UNI_COLON);
 			break;
@@ -326,7 +327,7 @@
 		put_unaligned(dst_char, &target[j]);
 	}
 
-ctoUCS_out:
+ctoUTF16_out:
 	return i;
 }
 
diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h
index 6d02fd5..a513a54 100644
--- a/fs/cifs/cifs_unicode.h
+++ b/fs/cifs/cifs_unicode.h
@@ -74,16 +74,16 @@
 #endif				/* UNIUPR_NOLOWER */
 
 #ifdef __KERNEL__
-int cifs_from_ucs2(char *to, const __le16 *from, int tolen, int fromlen,
-		   const struct nls_table *codepage, bool mapchar);
-int cifs_ucs2_bytes(const __le16 *from, int maxbytes,
-		    const struct nls_table *codepage);
-int cifs_strtoUCS(__le16 *, const char *, int, const struct nls_table *);
-char *cifs_strndup_from_ucs(const char *src, const int maxlen,
-			    const bool is_unicode,
-			    const struct nls_table *codepage);
-extern int cifsConvertToUCS(__le16 *target, const char *source, int maxlen,
-			const struct nls_table *cp, int mapChars);
+int cifs_from_utf16(char *to, const __le16 *from, int tolen, int fromlen,
+		    const struct nls_table *codepage, bool mapchar);
+int cifs_utf16_bytes(const __le16 *from, int maxbytes,
+		     const struct nls_table *codepage);
+int cifs_strtoUTF16(__le16 *, const char *, int, const struct nls_table *);
+char *cifs_strndup_from_utf16(const char *src, const int maxlen,
+			      const bool is_unicode,
+			      const struct nls_table *codepage);
+extern int cifsConvertToUTF16(__le16 *target, const char *source, int maxlen,
+			      const struct nls_table *cp, int mapChars);
 
 #endif
 
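
The renamed conversion helpers keep their old pairing: size the output with cifs_utf16_bytes(), then convert with cifs_from_utf16(), which is exactly what cifs_strndup_from_utf16() does internally. A small sketch under that assumption (the wrapper name and the GFP_KERNEL choice are illustrative):

#include <linux/types.h>
#include <linux/nls.h>
#include <linux/slab.h>
#include "cifs_unicode.h"

/* duplicate an on-the-wire UTF-16LE string into the local codepage */
static char *example_wire_to_local(const __le16 *wire, int max_wire_bytes,
				   const struct nls_table *nls)
{
	int len;
	char *out;

	/* size first: converted length plus the local null terminator... */
	len = cifs_utf16_bytes(wire, max_wire_bytes, nls) + nls_nullsize(nls);
	out = kmalloc(len, GFP_KERNEL);
	if (!out)
		return NULL;
	/* ...then convert, without the mapchars remapping */
	cifs_from_utf16(out, wire, len, max_wire_bytes, nls, false);
	return out;
}
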
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index 72ddf23..c1b2544 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -909,6 +909,8 @@
 		umode_t group_mask = S_IRWXG;
 		umode_t other_mask = S_IRWXU | S_IRWXG | S_IRWXO;
 
+		if (num_aces > ULONG_MAX / sizeof(struct cifs_ace *))
+			return;
 		ppace = kmalloc(num_aces * sizeof(struct cifs_ace *),
 				GFP_KERNEL);
 		if (!ppace) {
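
The added bounds check guards the multiplication num_aces * sizeof(struct cifs_ace *): num_aces comes from the server-supplied security descriptor, so an overflowing product would make kmalloc() return a buffer smaller than the ACE-pointer array requires. A generic sketch of the same guard (the helper name is illustrative; kcalloc() performs an equivalent overflow check internally):

#include <linux/kernel.h>
#include <linux/slab.h>

/* allocate nmemb elements of 'size' bytes, refusing wrapped products */
static void *alloc_array_checked(size_t nmemb, size_t size)
{
	if (size && nmemb > ULONG_MAX / size)
		return NULL;	/* nmemb * size would overflow */
	return kmalloc(nmemb * size, GFP_KERNEL);
}
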
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 5d9b9ac..63c460e 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -327,7 +327,7 @@
 	attrptr->type = cpu_to_le16(NTLMSSP_AV_NB_DOMAIN_NAME);
 	attrptr->length = cpu_to_le16(2 * dlen);
 	blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name);
-	cifs_strtoUCS((__le16 *)blobptr, ses->domainName, dlen, nls_cp);
+	cifs_strtoUTF16((__le16 *)blobptr, ses->domainName, dlen, nls_cp);
 
 	return 0;
 }
@@ -376,7 +376,7 @@
 					kmalloc(attrsize + 1, GFP_KERNEL);
 				if (!ses->domainName)
 						return -ENOMEM;
-				cifs_from_ucs2(ses->domainName,
+				cifs_from_utf16(ses->domainName,
 					(__le16 *)blobptr, attrsize, attrsize,
 					nls_cp, false);
 				break;
@@ -420,15 +420,20 @@
 	}
 
 	/* convert ses->user_name to unicode and uppercase */
-	len = strlen(ses->user_name);
+	len = ses->user_name ? strlen(ses->user_name) : 0;
 	user = kmalloc(2 + (len * 2), GFP_KERNEL);
 	if (user == NULL) {
 		cERROR(1, "calc_ntlmv2_hash: user mem alloc failure\n");
 		rc = -ENOMEM;
 		return rc;
 	}
-	len = cifs_strtoUCS((__le16 *)user, ses->user_name, len, nls_cp);
-	UniStrupr(user);
+
+	if (len) {
+		len = cifs_strtoUTF16((__le16 *)user, ses->user_name, len, nls_cp);
+		UniStrupr(user);
+	} else {
+		memset(user, '\0', 2);
+	}
 
 	rc = crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash,
 				(char *)user, 2 * len);
@@ -448,8 +453,8 @@
 			rc = -ENOMEM;
 			return rc;
 		}
-		len = cifs_strtoUCS((__le16 *)domain, ses->domainName, len,
-					nls_cp);
+		len = cifs_strtoUTF16((__le16 *)domain, ses->domainName, len,
+				      nls_cp);
 		rc =
 		crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash,
 					(char *)domain, 2 * len);
@@ -468,7 +473,7 @@
 			rc = -ENOMEM;
 			return rc;
 		}
-		len = cifs_strtoUCS((__le16 *)server, ses->serverName, len,
+		len = cifs_strtoUTF16((__le16 *)server, ses->serverName, len,
 					nls_cp);
 		rc =
 		crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash,
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index ba53c1c..76e7d8b 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -879,6 +879,8 @@
 #define   CIFSSEC_MASK          0xB70B7 /* current flags supported if weak */
 #endif /* UPCALL */
 #else /* do not allow weak pw hash */
+#define   CIFSSEC_MUST_LANMAN	0
+#define   CIFSSEC_MUST_PLNTXT	0
 #ifdef CONFIG_CIFS_UPCALL
 #define   CIFSSEC_MASK          0x8F08F /* flags supported if no weak allowed */
 #else
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 6600aa2..8b7794c 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -821,8 +821,8 @@
 
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
 		name_len =
-		    cifsConvertToUCS((__le16 *) pSMB->FileName, fileName,
-				     PATH_MAX, nls_codepage, remap);
+		    cifsConvertToUTF16((__le16 *) pSMB->FileName, fileName,
+				       PATH_MAX, nls_codepage, remap);
 		name_len++;	/* trailing null */
 		name_len *= 2;
 	} else { /* BB add path length overrun check */
@@ -893,8 +893,8 @@
 
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
 		name_len =
-		    cifsConvertToUCS((__le16 *) pSMB->fileName, fileName,
-				     PATH_MAX, nls_codepage, remap);
+		    cifsConvertToUTF16((__le16 *) pSMB->fileName, fileName,
+				       PATH_MAX, nls_codepage, remap);
 		name_len++;	/* trailing null */
 		name_len *= 2;
 	} else {		/* BB improve check for buffer overruns BB */
@@ -938,8 +938,8 @@
 		return rc;
 
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
-		name_len = cifsConvertToUCS((__le16 *) pSMB->DirName, dirName,
-					 PATH_MAX, nls_codepage, remap);
+		name_len = cifsConvertToUTF16((__le16 *) pSMB->DirName, dirName,
+					      PATH_MAX, nls_codepage, remap);
 		name_len++;	/* trailing null */
 		name_len *= 2;
 	} else {		/* BB improve check for buffer overruns BB */
@@ -981,8 +981,8 @@
 		return rc;
 
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
-		name_len = cifsConvertToUCS((__le16 *) pSMB->DirName, name,
-					    PATH_MAX, nls_codepage, remap);
+		name_len = cifsConvertToUTF16((__le16 *) pSMB->DirName, name,
+					      PATH_MAX, nls_codepage, remap);
 		name_len++;	/* trailing null */
 		name_len *= 2;
 	} else {		/* BB improve check for buffer overruns BB */
@@ -1030,8 +1030,8 @@
 
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
 		name_len =
-		    cifsConvertToUCS((__le16 *) pSMB->FileName, name,
-				     PATH_MAX, nls_codepage, remap);
+		    cifsConvertToUTF16((__le16 *) pSMB->FileName, name,
+				       PATH_MAX, nls_codepage, remap);
 		name_len++;	/* trailing null */
 		name_len *= 2;
 	} else {	/* BB improve the check for buffer overruns BB */
@@ -1197,8 +1197,8 @@
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
 		count = 1;      /* account for one byte pad to word boundary */
 		name_len =
-		   cifsConvertToUCS((__le16 *) (pSMB->fileName + 1),
-				    fileName, PATH_MAX, nls_codepage, remap);
+		   cifsConvertToUTF16((__le16 *) (pSMB->fileName + 1),
+				      fileName, PATH_MAX, nls_codepage, remap);
 		name_len++;     /* trailing null */
 		name_len *= 2;
 	} else {                /* BB improve check for buffer overruns BB */
@@ -1304,8 +1304,8 @@
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
 		count = 1;	/* account for one byte pad to word boundary */
 		name_len =
-		    cifsConvertToUCS((__le16 *) (pSMB->fileName + 1),
-				     fileName, PATH_MAX, nls_codepage, remap);
+		    cifsConvertToUTF16((__le16 *) (pSMB->fileName + 1),
+				       fileName, PATH_MAX, nls_codepage, remap);
 		name_len++;	/* trailing null */
 		name_len *= 2;
 		pSMB->NameLength = cpu_to_le16(name_len);
@@ -2649,16 +2649,16 @@
 
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
 		name_len =
-		    cifsConvertToUCS((__le16 *) pSMB->OldFileName, fromName,
-				     PATH_MAX, nls_codepage, remap);
+		    cifsConvertToUTF16((__le16 *) pSMB->OldFileName, fromName,
+				       PATH_MAX, nls_codepage, remap);
 		name_len++;	/* trailing null */
 		name_len *= 2;
 		pSMB->OldFileName[name_len] = 0x04;	/* pad */
 	/* protocol requires ASCII signature byte on Unicode string */
 		pSMB->OldFileName[name_len + 1] = 0x00;
 		name_len2 =
-		    cifsConvertToUCS((__le16 *)&pSMB->OldFileName[name_len + 2],
-				     toName, PATH_MAX, nls_codepage, remap);
+		    cifsConvertToUTF16((__le16 *)&pSMB->OldFileName[name_len+2],
+				       toName, PATH_MAX, nls_codepage, remap);
 		name_len2 += 1 /* trailing null */  + 1 /* Signature word */ ;
 		name_len2 *= 2;	/* convert to bytes */
 	} else {	/* BB improve the check for buffer overruns BB */
@@ -2738,10 +2738,12 @@
 	/* unicode only call */
 	if (target_name == NULL) {
 		sprintf(dummy_string, "cifs%x", pSMB->hdr.Mid);
-		len_of_str = cifsConvertToUCS((__le16 *)rename_info->target_name,
+		len_of_str =
+			cifsConvertToUTF16((__le16 *)rename_info->target_name,
 					dummy_string, 24, nls_codepage, remap);
 	} else {
-		len_of_str = cifsConvertToUCS((__le16 *)rename_info->target_name,
+		len_of_str =
+			cifsConvertToUTF16((__le16 *)rename_info->target_name,
 					target_name, PATH_MAX, nls_codepage,
 					remap);
 	}
@@ -2795,17 +2797,17 @@
 	pSMB->Flags = cpu_to_le16(flags & COPY_TREE);
 
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
-		name_len = cifsConvertToUCS((__le16 *) pSMB->OldFileName,
-					    fromName, PATH_MAX, nls_codepage,
-					    remap);
+		name_len = cifsConvertToUTF16((__le16 *) pSMB->OldFileName,
+					      fromName, PATH_MAX, nls_codepage,
+					      remap);
 		name_len++;     /* trailing null */
 		name_len *= 2;
 		pSMB->OldFileName[name_len] = 0x04;     /* pad */
 		/* protocol requires ASCII signature byte on Unicode string */
 		pSMB->OldFileName[name_len + 1] = 0x00;
 		name_len2 =
-		    cifsConvertToUCS((__le16 *)&pSMB->OldFileName[name_len + 2],
-				toName, PATH_MAX, nls_codepage, remap);
+		    cifsConvertToUTF16((__le16 *)&pSMB->OldFileName[name_len+2],
+				       toName, PATH_MAX, nls_codepage, remap);
 		name_len2 += 1 /* trailing null */  + 1 /* Signature word */ ;
 		name_len2 *= 2; /* convert to bytes */
 	} else { 	/* BB improve the check for buffer overruns BB */
@@ -2861,9 +2863,9 @@
 
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
 		name_len =
-		    cifs_strtoUCS((__le16 *) pSMB->FileName, fromName, PATH_MAX
-				  /* find define for this maxpathcomponent */
-				  , nls_codepage);
+		    cifs_strtoUTF16((__le16 *) pSMB->FileName, fromName,
+				    /* find define for this maxpathcomponent */
+				    PATH_MAX, nls_codepage);
 		name_len++;	/* trailing null */
 		name_len *= 2;
 
@@ -2885,9 +2887,9 @@
 	data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
 		name_len_target =
-		    cifs_strtoUCS((__le16 *) data_offset, toName, PATH_MAX
-				  /* find define for this maxpathcomponent */
-				  , nls_codepage);
+		    cifs_strtoUTF16((__le16 *) data_offset, toName, PATH_MAX
+				    /* find define for this maxpathcomponent */
+				    , nls_codepage);
 		name_len_target++;	/* trailing null */
 		name_len_target *= 2;
 	} else {	/* BB improve the check for buffer overruns BB */
@@ -2949,8 +2951,8 @@
 		return rc;
 
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
-		name_len = cifsConvertToUCS((__le16 *) pSMB->FileName, toName,
-					    PATH_MAX, nls_codepage, remap);
+		name_len = cifsConvertToUTF16((__le16 *) pSMB->FileName, toName,
+					      PATH_MAX, nls_codepage, remap);
 		name_len++;	/* trailing null */
 		name_len *= 2;
 
@@ -2972,8 +2974,8 @@
 	data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
 		name_len_target =
-		    cifsConvertToUCS((__le16 *) data_offset, fromName, PATH_MAX,
-				     nls_codepage, remap);
+		    cifsConvertToUTF16((__le16 *) data_offset, fromName,
+				       PATH_MAX, nls_codepage, remap);
 		name_len_target++;	/* trailing null */
 		name_len_target *= 2;
 	} else {	/* BB improve the check for buffer overruns BB */
@@ -3042,8 +3044,8 @@
 
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
 		name_len =
-		    cifsConvertToUCS((__le16 *) pSMB->OldFileName, fromName,
-				     PATH_MAX, nls_codepage, remap);
+		    cifsConvertToUTF16((__le16 *) pSMB->OldFileName, fromName,
+				       PATH_MAX, nls_codepage, remap);
 		name_len++;	/* trailing null */
 		name_len *= 2;
 
@@ -3051,8 +3053,8 @@
 		pSMB->OldFileName[name_len] = 0x04;
 		pSMB->OldFileName[name_len + 1] = 0x00; /* pad */
 		name_len2 =
-		    cifsConvertToUCS((__le16 *)&pSMB->OldFileName[name_len + 2],
-				     toName, PATH_MAX, nls_codepage, remap);
+		    cifsConvertToUTF16((__le16 *)&pSMB->OldFileName[name_len+2],
+				       toName, PATH_MAX, nls_codepage, remap);
 		name_len2 += 1 /* trailing null */  + 1 /* Signature word */ ;
 		name_len2 *= 2;	/* convert to bytes */
 	} else {	/* BB improve the check for buffer overruns BB */
@@ -3108,8 +3110,8 @@
 
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
 		name_len =
-		    cifs_strtoUCS((__le16 *) pSMB->FileName, searchName,
-				  PATH_MAX, nls_codepage);
+			cifs_strtoUTF16((__le16 *) pSMB->FileName, searchName,
+					PATH_MAX, nls_codepage);
 		name_len++;	/* trailing null */
 		name_len *= 2;
 	} else {	/* BB improve the check for buffer overruns BB */
@@ -3166,8 +3168,8 @@
 				is_unicode = false;
 
 			/* BB FIXME investigate remapping reserved chars here */
-			*symlinkinfo = cifs_strndup_from_ucs(data_start, count,
-						    is_unicode, nls_codepage);
+			*symlinkinfo = cifs_strndup_from_utf16(data_start,
+					count, is_unicode, nls_codepage);
 			if (!*symlinkinfo)
 				rc = -ENOMEM;
 		}
@@ -3450,8 +3452,9 @@
 
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
 		name_len =
-			cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
-					 PATH_MAX, nls_codepage, remap);
+			cifsConvertToUTF16((__le16 *) pSMB->FileName,
+					   searchName, PATH_MAX, nls_codepage,
+					   remap);
 		name_len++;     /* trailing null */
 		name_len *= 2;
 		pSMB->FileName[name_len] = 0;
@@ -3537,8 +3540,8 @@
 		return rc;
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
 		name_len =
-			cifsConvertToUCS((__le16 *) pSMB->FileName, fileName,
-				      PATH_MAX, nls_codepage, remap);
+			cifsConvertToUTF16((__le16 *) pSMB->FileName, fileName,
+					   PATH_MAX, nls_codepage, remap);
 		name_len++;     /* trailing null */
 		name_len *= 2;
 	} else {	/* BB improve the check for buffer overruns BB */
@@ -3948,8 +3951,9 @@
 
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
 		name_len =
-			cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
-					PATH_MAX, nls_codepage, remap);
+			cifsConvertToUTF16((__le16 *) pSMB->FileName,
+					   searchName, PATH_MAX, nls_codepage,
+					   remap);
 		name_len++;     /* trailing null */
 		name_len *= 2;
 	} else {
@@ -4086,8 +4090,8 @@
 
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
 		name_len =
-		    cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
-				     PATH_MAX, nls_codepage, remap);
+		    cifsConvertToUTF16((__le16 *) pSMB->FileName, searchName,
+				       PATH_MAX, nls_codepage, remap);
 		name_len++;	/* trailing null */
 		name_len *= 2;
 	} else {	/* BB improve the check for buffer overruns BB */
@@ -4255,8 +4259,8 @@
 
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
 		name_len =
-		    cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
-				  PATH_MAX, nls_codepage, remap);
+		    cifsConvertToUTF16((__le16 *) pSMB->FileName, searchName,
+				       PATH_MAX, nls_codepage, remap);
 		name_len++;	/* trailing null */
 		name_len *= 2;
 	} else {	/* BB improve the check for buffer overruns BB */
@@ -4344,8 +4348,8 @@
 
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
 		name_len =
-		    cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
-				 PATH_MAX, nls_codepage, remap);
+		    cifsConvertToUTF16((__le16 *) pSMB->FileName, searchName,
+				       PATH_MAX, nls_codepage, remap);
 		/* We cannot add the asterisk earlier in case
 		it got remapped to 0xF03A as if it were part of the
 		directory name instead of a wildcard */
@@ -4656,8 +4660,9 @@
 
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
 		name_len =
-			cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
-					 PATH_MAX, nls_codepage, remap);
+			cifsConvertToUTF16((__le16 *) pSMB->FileName,
+					   searchName, PATH_MAX, nls_codepage,
+					   remap);
 		name_len++;     /* trailing null */
 		name_len *= 2;
 	} else {	/* BB improve the check for buffer overruns BB */
@@ -4794,9 +4799,9 @@
 				rc = -ENOMEM;
 				goto parse_DFS_referrals_exit;
 			}
-			cifsConvertToUCS((__le16 *) tmp, searchName,
-					PATH_MAX, nls_codepage, remap);
-			node->path_consumed = cifs_ucs2_bytes(tmp,
+			cifsConvertToUTF16((__le16 *) tmp, searchName,
+					   PATH_MAX, nls_codepage, remap);
+			node->path_consumed = cifs_utf16_bytes(tmp,
 					le16_to_cpu(pSMBr->PathConsumed),
 					nls_codepage);
 			kfree(tmp);
@@ -4809,8 +4814,8 @@
 		/* copy DfsPath */
 		temp = (char *)ref + le16_to_cpu(ref->DfsPathOffset);
 		max_len = data_end - temp;
-		node->path_name = cifs_strndup_from_ucs(temp, max_len,
-						      is_unicode, nls_codepage);
+		node->path_name = cifs_strndup_from_utf16(temp, max_len,
+						is_unicode, nls_codepage);
 		if (!node->path_name) {
 			rc = -ENOMEM;
 			goto parse_DFS_referrals_exit;
@@ -4819,8 +4824,8 @@
 		/* copy link target UNC */
 		temp = (char *)ref + le16_to_cpu(ref->NetworkAddressOffset);
 		max_len = data_end - temp;
-		node->node_name = cifs_strndup_from_ucs(temp, max_len,
-						      is_unicode, nls_codepage);
+		node->node_name = cifs_strndup_from_utf16(temp, max_len,
+						is_unicode, nls_codepage);
 		if (!node->node_name)
 			rc = -ENOMEM;
 	}
@@ -4873,8 +4878,9 @@
 	if (ses->capabilities & CAP_UNICODE) {
 		pSMB->hdr.Flags2 |= SMBFLG2_UNICODE;
 		name_len =
-		    cifsConvertToUCS((__le16 *) pSMB->RequestFileName,
-				     searchName, PATH_MAX, nls_codepage, remap);
+		    cifsConvertToUTF16((__le16 *) pSMB->RequestFileName,
+				       searchName, PATH_MAX, nls_codepage,
+				       remap);
 		name_len++;	/* trailing null */
 		name_len *= 2;
 	} else {	/* BB improve the check for buffer overruns BB */
@@ -5506,8 +5512,8 @@
 
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
 		name_len =
-		    cifsConvertToUCS((__le16 *) pSMB->FileName, fileName,
-				     PATH_MAX, nls_codepage, remap);
+		    cifsConvertToUTF16((__le16 *) pSMB->FileName, fileName,
+				       PATH_MAX, nls_codepage, remap);
 		name_len++;	/* trailing null */
 		name_len *= 2;
 	} else {	/* BB improve the check for buffer overruns BB */
@@ -5796,8 +5802,8 @@
 
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
 		name_len =
-		    cifsConvertToUCS((__le16 *) pSMB->FileName, fileName,
-				     PATH_MAX, nls_codepage, remap);
+		    cifsConvertToUTF16((__le16 *) pSMB->FileName, fileName,
+				       PATH_MAX, nls_codepage, remap);
 		name_len++;	/* trailing null */
 		name_len *= 2;
 	} else {	/* BB improve the check for buffer overruns BB */
@@ -5877,8 +5883,8 @@
 
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
 		name_len =
-			ConvertToUCS((__le16 *) pSMB->fileName, fileName,
-				PATH_MAX, nls_codepage);
+			ConvertToUTF16((__le16 *) pSMB->fileName, fileName,
+				       PATH_MAX, nls_codepage);
 		name_len++;     /* trailing null */
 		name_len *= 2;
 	} else {	/* BB improve the check for buffer overruns BB */
@@ -6030,8 +6036,8 @@
 
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
 		name_len =
-		    cifsConvertToUCS((__le16 *) pSMB->FileName, fileName,
-				     PATH_MAX, nls_codepage, remap);
+		    cifsConvertToUTF16((__le16 *) pSMB->FileName, fileName,
+				       PATH_MAX, nls_codepage, remap);
 		name_len++;	/* trailing null */
 		name_len *= 2;
 	} else {	/* BB improve the check for buffer overruns BB */
@@ -6123,8 +6129,8 @@
 
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
 		list_len =
-		    cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
-				     PATH_MAX, nls_codepage, remap);
+		    cifsConvertToUTF16((__le16 *) pSMB->FileName, searchName,
+				       PATH_MAX, nls_codepage, remap);
 		list_len++;	/* trailing null */
 		list_len *= 2;
 	} else {	/* BB improve the check for buffer overruns BB */
@@ -6301,8 +6307,8 @@
 
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
 		name_len =
-		    cifsConvertToUCS((__le16 *) pSMB->FileName, fileName,
-				     PATH_MAX, nls_codepage, remap);
+		    cifsConvertToUTF16((__le16 *) pSMB->FileName, fileName,
+				       PATH_MAX, nls_codepage, remap);
 		name_len++;	/* trailing null */
 		name_len *= 2;
 	} else {	/* BB improve the check for buffer overruns BB */
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 4666780..986709a 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -38,6 +38,7 @@
 #include <asm/processor.h>
 #include <linux/inet.h>
 #include <linux/module.h>
+#include <keys/user-type.h>
 #include <net/ipv6.h>
 #include "cifspdu.h"
 #include "cifsglob.h"
@@ -225,74 +226,90 @@
 
 static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
 {
-	struct smb_t2_rsp *pSMB2 = (struct smb_t2_rsp *)psecond;
+	struct smb_t2_rsp *pSMBs = (struct smb_t2_rsp *)psecond;
 	struct smb_t2_rsp *pSMBt  = (struct smb_t2_rsp *)pTargetSMB;
-	char *data_area_of_target;
-	char *data_area_of_buf2;
+	char *data_area_of_tgt;
+	char *data_area_of_src;
 	int remaining;
-	unsigned int byte_count, total_in_buf;
-	__u16 total_data_size, total_in_buf2;
+	unsigned int byte_count, total_in_tgt;
+	__u16 tgt_total_cnt, src_total_cnt, total_in_src;
 
-	total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount);
+	src_total_cnt = get_unaligned_le16(&pSMBs->t2_rsp.TotalDataCount);
+	tgt_total_cnt = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount);
 
-	if (total_data_size !=
-	    get_unaligned_le16(&pSMB2->t2_rsp.TotalDataCount))
-		cFYI(1, "total data size of primary and secondary t2 differ");
+	if (tgt_total_cnt != src_total_cnt)
+		cFYI(1, "total data count of primary and secondary t2 differ "
+			"source=%hu target=%hu", src_total_cnt, tgt_total_cnt);
 
-	total_in_buf = get_unaligned_le16(&pSMBt->t2_rsp.DataCount);
+	total_in_tgt = get_unaligned_le16(&pSMBt->t2_rsp.DataCount);
 
-	remaining = total_data_size - total_in_buf;
+	remaining = tgt_total_cnt - total_in_tgt;
 
-	if (remaining < 0)
+	if (remaining < 0) {
+		cFYI(1, "Server sent too much data. tgt_total_cnt=%hu "
+			"total_in_tgt=%hu", tgt_total_cnt, total_in_tgt);
 		return -EPROTO;
-
-	if (remaining == 0) /* nothing to do, ignore */
-		return 0;
-
-	total_in_buf2 = get_unaligned_le16(&pSMB2->t2_rsp.DataCount);
-	if (remaining < total_in_buf2) {
-		cFYI(1, "transact2 2nd response contains too much data");
 	}
 
+	if (remaining == 0) {
+		/* nothing to do, ignore */
+		cFYI(1, "no more data remains");
+		return 0;
+	}
+
+	total_in_src = get_unaligned_le16(&pSMBs->t2_rsp.DataCount);
+	if (remaining < total_in_src)
+		cFYI(1, "transact2 2nd response contains too much data");
+
 	/* find end of first SMB data area */
-	data_area_of_target = (char *)&pSMBt->hdr.Protocol +
+	data_area_of_tgt = (char *)&pSMBt->hdr.Protocol +
 				get_unaligned_le16(&pSMBt->t2_rsp.DataOffset);
+
 	/* validate target area */
+	data_area_of_src = (char *)&pSMBs->hdr.Protocol +
+				get_unaligned_le16(&pSMBs->t2_rsp.DataOffset);
 
-	data_area_of_buf2 = (char *)&pSMB2->hdr.Protocol +
-				get_unaligned_le16(&pSMB2->t2_rsp.DataOffset);
+	data_area_of_tgt += total_in_tgt;
 
-	data_area_of_target += total_in_buf;
-
-	/* copy second buffer into end of first buffer */
-	total_in_buf += total_in_buf2;
+	total_in_tgt += total_in_src;
 	/* is the result too big for the field? */
-	if (total_in_buf > USHRT_MAX)
+	if (total_in_tgt > USHRT_MAX) {
+		cFYI(1, "coalesced DataCount too large (%u)", total_in_tgt);
 		return -EPROTO;
-	put_unaligned_le16(total_in_buf, &pSMBt->t2_rsp.DataCount);
+	}
+	put_unaligned_le16(total_in_tgt, &pSMBt->t2_rsp.DataCount);
 
 	/* fix up the BCC */
 	byte_count = get_bcc(pTargetSMB);
-	byte_count += total_in_buf2;
+	byte_count += total_in_src;
 	/* is the result too big for the field? */
-	if (byte_count > USHRT_MAX)
+	if (byte_count > USHRT_MAX) {
+		cFYI(1, "coalesced BCC too large (%u)", byte_count);
 		return -EPROTO;
+	}
 	put_bcc(byte_count, pTargetSMB);
 
 	byte_count = be32_to_cpu(pTargetSMB->smb_buf_length);
-	byte_count += total_in_buf2;
+	byte_count += total_in_src;
 	/* don't allow buffer to overflow */
-	if (byte_count > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4)
+	if (byte_count > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
+		cFYI(1, "coalesced BCC exceeds buffer size (%u)", byte_count);
 		return -ENOBUFS;
+	}
 	pTargetSMB->smb_buf_length = cpu_to_be32(byte_count);
 
-	memcpy(data_area_of_target, data_area_of_buf2, total_in_buf2);
+	/* copy second buffer into end of first buffer */
+	memcpy(data_area_of_tgt, data_area_of_src, total_in_src);
 
-	if (remaining == total_in_buf2) {
-		cFYI(1, "found the last secondary response");
-		return 0; /* we are done */
-	} else /* more responses to go */
+	if (remaining != total_in_src) {
+		/* more responses to go */
+		cFYI(1, "waiting for more secondary responses");
 		return 1;
+	}
+
+	/* we are done */
+	cFYI(1, "found the last secondary response");
+	return 0;
 }
 
 static void
@@ -1578,11 +1595,14 @@
 		}
 	}
 
-	if (vol->multiuser && !(vol->secFlg & CIFSSEC_MAY_KRB5)) {
-		cERROR(1, "Multiuser mounts currently require krb5 "
-			  "authentication!");
+#ifndef CONFIG_KEYS
+	/* Multiuser mounts require CONFIG_KEYS support */
+	if (vol->multiuser) {
+		cERROR(1, "Multiuser mounts require kernels with "
+			  "CONFIG_KEYS enabled.");
 		goto cifs_parse_mount_err;
 	}
+#endif
 
 	if (vol->UNCip == NULL)
 		vol->UNCip = &vol->UNC[2];
@@ -1981,10 +2001,16 @@
 			return 0;
 		break;
 	default:
+		/* NULL username means anonymous session */
+		if (ses->user_name == NULL) {
+			if (!vol->nullauth)
+				return 0;
+			break;
+		}
+
 		/* anything else takes username/password */
-		if (ses->user_name == NULL)
-			return 0;
-		if (strncmp(ses->user_name, vol->username,
+		if (strncmp(ses->user_name,
+			    vol->username ? vol->username : "",
 			    MAX_USERNAME_SIZE))
 			return 0;
 		if (strlen(vol->username) != 0 &&
@@ -2039,6 +2065,132 @@
 	cifs_put_tcp_session(server);
 }
 
+#ifdef CONFIG_KEYS
+
+/* strlen("cifs:a:") + INET6_ADDRSTRLEN + 1 */
+#define CIFSCREDS_DESC_SIZE (7 + INET6_ADDRSTRLEN + 1)
+
+/* Populate username and pw fields from keyring if possible */
+static int
+cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
+{
+	int rc = 0;
+	char *desc, *delim, *payload;
+	ssize_t len;
+	struct key *key;
+	struct TCP_Server_Info *server = ses->server;
+	struct sockaddr_in *sa;
+	struct sockaddr_in6 *sa6;
+	struct user_key_payload *upayload;
+
+	desc = kmalloc(CIFSCREDS_DESC_SIZE, GFP_KERNEL);
+	if (!desc)
+		return -ENOMEM;
+
+	/* try to find an address key first */
+	switch (server->dstaddr.ss_family) {
+	case AF_INET:
+		sa = (struct sockaddr_in *)&server->dstaddr;
+		sprintf(desc, "cifs:a:%pI4", &sa->sin_addr.s_addr);
+		break;
+	case AF_INET6:
+		sa6 = (struct sockaddr_in6 *)&server->dstaddr;
+		sprintf(desc, "cifs:a:%pI6c", &sa6->sin6_addr.s6_addr);
+		break;
+	default:
+		cFYI(1, "Bad ss_family (%hu)", server->dstaddr.ss_family);
+		rc = -EINVAL;
+		goto out_err;
+	}
+
+	cFYI(1, "%s: desc=%s", __func__, desc);
+	key = request_key(&key_type_logon, desc, "");
+	if (IS_ERR(key)) {
+		if (!ses->domainName) {
+			cFYI(1, "domainName is NULL");
+			rc = PTR_ERR(key);
+			goto out_err;
+		}
+
+		/* didn't work, try to find a domain key */
+		sprintf(desc, "cifs:d:%s", ses->domainName);
+		cFYI(1, "%s: desc=%s", __func__, desc);
+		key = request_key(&key_type_logon, desc, "");
+		if (IS_ERR(key)) {
+			rc = PTR_ERR(key);
+			goto out_err;
+		}
+	}
+
+	down_read(&key->sem);
+	upayload = key->payload.data;
+	if (IS_ERR_OR_NULL(upayload)) {
+		rc = PTR_ERR(key);
+		goto out_key_put;
+	}
+
+	/* find first : in payload */
+	payload = (char *)upayload->data;
+	delim = strnchr(payload, upayload->datalen, ':');
+	cFYI(1, "payload=%s", payload);
+	if (!delim) {
+		cFYI(1, "Unable to find ':' in payload (datalen=%d)",
+				upayload->datalen);
+		rc = -EINVAL;
+		goto out_key_put;
+	}
+
+	len = delim - payload;
+	if (len > MAX_USERNAME_SIZE || len <= 0) {
+		cFYI(1, "Bad value from username search (len=%ld)", len);
+		rc = -EINVAL;
+		goto out_key_put;
+	}
+
+	vol->username = kstrndup(payload, len, GFP_KERNEL);
+	if (!vol->username) {
+		cFYI(1, "Unable to allocate %ld bytes for username", len);
+		rc = -ENOMEM;
+		goto out_key_put;
+	}
+	cFYI(1, "%s: username=%s", __func__, vol->username);
+
+	len = key->datalen - (len + 1);
+	if (len > MAX_PASSWORD_SIZE || len <= 0) {
+		cFYI(1, "Bad len for password search (len=%ld)", len);
+		rc = -EINVAL;
+		kfree(vol->username);
+		vol->username = NULL;
+		goto out_key_put;
+	}
+
+	++delim;
+	vol->password = kstrndup(delim, len, GFP_KERNEL);
+	if (!vol->password) {
+		cFYI(1, "Unable to allocate %ld bytes for password", len);
+		rc = -ENOMEM;
+		kfree(vol->username);
+		vol->username = NULL;
+		goto out_key_put;
+	}
+
+out_key_put:
+	up_read(&key->sem);
+	key_put(key);
+out_err:
+	kfree(desc);
+	cFYI(1, "%s: returning %d", __func__, rc);
+	return rc;
+}
+#else /* ! CONFIG_KEYS */
+static inline int
+cifs_set_cifscreds(struct smb_vol *vol __attribute__((unused)),
+		   struct cifs_ses *ses __attribute__((unused)))
+{
+	return -ENOSYS;
+}
+#endif /* CONFIG_KEYS */
+
 static bool warned_on_ntlm;  /* globals init to false automatically */
 
 static struct cifs_ses *
@@ -2914,18 +3066,33 @@
 #define CIFS_DEFAULT_IOSIZE (1024 * 1024)
 
 /*
- * Windows only supports a max of 60k reads. Default to that when posix
- * extensions aren't in force.
+ * Windows only supports a max of 60kb reads and 65535 byte writes. Default to
+ * those values when posix extensions aren't in force. In actuality here, we
+ * use 65536 to allow for a write that is a multiple of 4k. Most servers seem
+ * to be ok with the extra byte even though Windows doesn't send writes that
+ * are that large.
+ *
+ * Citation:
+ *
+ * http://blogs.msdn.com/b/openspecification/archive/2009/04/10/smb-maximum-transmit-buffer-size-and-performance-tuning.aspx
  */
 #define CIFS_DEFAULT_NON_POSIX_RSIZE (60 * 1024)
+#define CIFS_DEFAULT_NON_POSIX_WSIZE (65536)
 
 static unsigned int
 cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
 {
 	__u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
 	struct TCP_Server_Info *server = tcon->ses->server;
-	unsigned int wsize = pvolume_info->wsize ? pvolume_info->wsize :
-				CIFS_DEFAULT_IOSIZE;
+	unsigned int wsize;
+
+	/* start with specified wsize, or default */
+	if (pvolume_info->wsize)
+		wsize = pvolume_info->wsize;
+	else if (tcon->unix_ext && (unix_cap & CIFS_UNIX_LARGE_WRITE_CAP))
+		wsize = CIFS_DEFAULT_IOSIZE;
+	else
+		wsize = CIFS_DEFAULT_NON_POSIX_WSIZE;
 
 	/* can server support 24-bit write sizes? (via UNIX extensions) */
 	if (!tcon->unix_ext || !(unix_cap & CIFS_UNIX_LARGE_WRITE_CAP))
@@ -3136,10 +3303,9 @@
 		return -EINVAL;
 
 	if (volume_info->nullauth) {
-		cFYI(1, "null user");
-		volume_info->username = kzalloc(1, GFP_KERNEL);
-		if (volume_info->username == NULL)
-			return -ENOMEM;
+		cFYI(1, "Anonymous login");
+		kfree(volume_info->username);
+		volume_info->username = NULL;
 	} else if (volume_info->username) {
 		/* BB fixme parse for domain name here */
 		cFYI(1, "Username: %s", volume_info->username);
@@ -3478,7 +3644,7 @@
 	if (ses->capabilities & CAP_UNICODE) {
 		smb_buffer->Flags2 |= SMBFLG2_UNICODE;
 		length =
-		    cifs_strtoUCS((__le16 *) bcc_ptr, tree,
+		    cifs_strtoUTF16((__le16 *) bcc_ptr, tree,
 			6 /* max utf8 char length in bytes */ *
 			(/* server len*/ + 256 /* share len */), nls_codepage);
 		bcc_ptr += 2 * length;	/* convert num 16 bit words to bytes */
@@ -3533,7 +3699,7 @@
 
 		/* mostly informational -- no need to fail on error here */
 		kfree(tcon->nativeFileSystem);
-		tcon->nativeFileSystem = cifs_strndup_from_ucs(bcc_ptr,
+		tcon->nativeFileSystem = cifs_strndup_from_utf16(bcc_ptr,
 						      bytes_left, is_unicode,
 						      nls_codepage);
 
@@ -3657,16 +3823,38 @@
 	return rc;
 }
 
+static int
+cifs_set_vol_auth(struct smb_vol *vol, struct cifs_ses *ses)
+{
+	switch (ses->server->secType) {
+	case Kerberos:
+		vol->secFlg = CIFSSEC_MUST_KRB5;
+		return 0;
+	case NTLMv2:
+		vol->secFlg = CIFSSEC_MUST_NTLMV2;
+		break;
+	case NTLM:
+		vol->secFlg = CIFSSEC_MUST_NTLM;
+		break;
+	case RawNTLMSSP:
+		vol->secFlg = CIFSSEC_MUST_NTLMSSP;
+		break;
+	case LANMAN:
+		vol->secFlg = CIFSSEC_MUST_LANMAN;
+		break;
+	}
+
+	return cifs_set_cifscreds(vol, ses);
+}
+
 static struct cifs_tcon *
 cifs_construct_tcon(struct cifs_sb_info *cifs_sb, uid_t fsuid)
 {
+	int rc;
 	struct cifs_tcon *master_tcon = cifs_sb_master_tcon(cifs_sb);
 	struct cifs_ses *ses;
 	struct cifs_tcon *tcon = NULL;
 	struct smb_vol *vol_info;
-	char username[28]; /* big enough for "krb50x" + hex of ULONG_MAX 6+16 */
-			   /* We used to have this as MAX_USERNAME which is   */
-			   /* way too big now (256 instead of 32) */
 
 	vol_info = kzalloc(sizeof(*vol_info), GFP_KERNEL);
 	if (vol_info == NULL) {
@@ -3674,8 +3862,6 @@
 		goto out;
 	}
 
-	snprintf(username, sizeof(username), "krb50x%x", fsuid);
-	vol_info->username = username;
 	vol_info->local_nls = cifs_sb->local_nls;
 	vol_info->linux_uid = fsuid;
 	vol_info->cred_uid = fsuid;
@@ -3685,8 +3871,11 @@
 	vol_info->local_lease = master_tcon->local_lease;
 	vol_info->no_linux_ext = !master_tcon->unix_ext;
 
-	/* FIXME: allow for other secFlg settings */
-	vol_info->secFlg = CIFSSEC_MUST_KRB5;
+	rc = cifs_set_vol_auth(vol_info, master_tcon->ses);
+	if (rc) {
+		tcon = ERR_PTR(rc);
+		goto out;
+	}
 
 	/* get a reference for the same TCP session */
 	spin_lock(&cifs_tcp_ses_lock);
@@ -3709,6 +3898,8 @@
 	if (ses->capabilities & CAP_UNIX)
 		reset_cifs_unix_caps(0, tcon, NULL, vol_info);
 out:
+	kfree(vol_info->username);
+	kfree(vol_info->password);
 	kfree(vol_info);
 
 	return tcon;
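
The cifs_set_cifscreds() hunk above resolves credentials from the kernel keyring: it
looks up a "logon" key described as either cifs:a:<server address> or cifs:d:<domain>,
whose payload is a single "username:password" string. As a minimal, illustrative sketch
(not part of this series), such a key could be provisioned from userspace with
libkeyutils; the server address and credentials below are made-up placeholders.

/*
 * Illustrative sketch only: add a "logon" key in the format consumed by
 * cifs_set_cifscreds() above.  Build with -lkeyutils; address, username
 * and password are placeholders.
 */
#include <keyutils.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* "cifs:a:<address>" for per-server creds, "cifs:d:<domain>" for a domain */
	const char *desc = "cifs:a:192.168.0.1";
	const char *payload = "someuser:somepass";	/* "username:password" */
	key_serial_t key;

	key = add_key("logon", desc, payload, strlen(payload),
		      KEY_SPEC_SESSION_KEYRING);
	if (key < 0) {
		perror("add_key");
		return 1;
	}
	printf("added logon key %d\n", key);
	return 0;
}
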
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index a090bbe..e2bbc68 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -647,10 +647,11 @@
 
 		name.name = scratch_buf;
 		name.len =
-			cifs_from_ucs2((char *)name.name, (__le16 *)de.name,
-				       UNICODE_NAME_MAX,
-				       min(de.namelen, (size_t)max_len), nlt,
-				       cifs_sb->mnt_cifs_flags &
+			cifs_from_utf16((char *)name.name, (__le16 *)de.name,
+					UNICODE_NAME_MAX,
+					min_t(size_t, de.namelen,
+					      (size_t)max_len), nlt,
+					cifs_sb->mnt_cifs_flags &
 						CIFS_MOUNT_MAP_SPECIAL_CHR);
 		name.len -= nls_nullsize(nlt);
 	} else {
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 4ec3ee9..d85efad 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -167,16 +167,16 @@
 	int bytes_ret = 0;
 
 	/* Copy OS version */
-	bytes_ret = cifs_strtoUCS((__le16 *)bcc_ptr, "Linux version ", 32,
-				  nls_cp);
+	bytes_ret = cifs_strtoUTF16((__le16 *)bcc_ptr, "Linux version ", 32,
+				    nls_cp);
 	bcc_ptr += 2 * bytes_ret;
-	bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, init_utsname()->release,
-				  32, nls_cp);
+	bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, init_utsname()->release,
+				    32, nls_cp);
 	bcc_ptr += 2 * bytes_ret;
 	bcc_ptr += 2; /* trailing null */
 
-	bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, CIFS_NETWORK_OPSYS,
-				  32, nls_cp);
+	bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, CIFS_NETWORK_OPSYS,
+				    32, nls_cp);
 	bcc_ptr += 2 * bytes_ret;
 	bcc_ptr += 2; /* trailing null */
 
@@ -197,8 +197,8 @@
 		*(bcc_ptr+1) = 0;
 		bytes_ret = 0;
 	} else
-		bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->domainName,
-					  256, nls_cp);
+		bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, ses->domainName,
+					    256, nls_cp);
 	bcc_ptr += 2 * bytes_ret;
 	bcc_ptr += 2;  /* account for null terminator */
 
@@ -226,8 +226,8 @@
 		*bcc_ptr = 0;
 		*(bcc_ptr+1) = 0;
 	} else {
-		bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->user_name,
-					  MAX_USERNAME_SIZE, nls_cp);
+		bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, ses->user_name,
+					    MAX_USERNAME_SIZE, nls_cp);
 	}
 	bcc_ptr += 2 * bytes_ret;
 	bcc_ptr += 2; /* account for null termination */
@@ -287,7 +287,7 @@
 	cFYI(1, "bleft %d", bleft);
 
 	kfree(ses->serverOS);
-	ses->serverOS = cifs_strndup_from_ucs(data, bleft, true, nls_cp);
+	ses->serverOS = cifs_strndup_from_utf16(data, bleft, true, nls_cp);
 	cFYI(1, "serverOS=%s", ses->serverOS);
 	len = (UniStrnlen((wchar_t *) data, bleft / 2) * 2) + 2;
 	data += len;
@@ -296,7 +296,7 @@
 		return;
 
 	kfree(ses->serverNOS);
-	ses->serverNOS = cifs_strndup_from_ucs(data, bleft, true, nls_cp);
+	ses->serverNOS = cifs_strndup_from_utf16(data, bleft, true, nls_cp);
 	cFYI(1, "serverNOS=%s", ses->serverNOS);
 	len = (UniStrnlen((wchar_t *) data, bleft / 2) * 2) + 2;
 	data += len;
@@ -305,7 +305,7 @@
 		return;
 
 	kfree(ses->serverDomain);
-	ses->serverDomain = cifs_strndup_from_ucs(data, bleft, true, nls_cp);
+	ses->serverDomain = cifs_strndup_from_utf16(data, bleft, true, nls_cp);
 	cFYI(1, "serverDomain=%s", ses->serverDomain);
 
 	return;
@@ -502,8 +502,8 @@
 		tmp += 2;
 	} else {
 		int len;
-		len = cifs_strtoUCS((__le16 *)tmp, ses->domainName,
-				    MAX_USERNAME_SIZE, nls_cp);
+		len = cifs_strtoUTF16((__le16 *)tmp, ses->domainName,
+				      MAX_USERNAME_SIZE, nls_cp);
 		len *= 2; /* unicode is 2 bytes each */
 		sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
 		sec_blob->DomainName.Length = cpu_to_le16(len);
@@ -518,8 +518,8 @@
 		tmp += 2;
 	} else {
 		int len;
-		len = cifs_strtoUCS((__le16 *)tmp, ses->user_name,
-				    MAX_USERNAME_SIZE, nls_cp);
+		len = cifs_strtoUTF16((__le16 *)tmp, ses->user_name,
+				      MAX_USERNAME_SIZE, nls_cp);
 		len *= 2; /* unicode is 2 bytes each */
 		sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
 		sec_blob->UserName.Length = cpu_to_le16(len);
diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c
index 80d8508..d5cd9aa 100644
--- a/fs/cifs/smbencrypt.c
+++ b/fs/cifs/smbencrypt.c
@@ -213,7 +213,7 @@
 
 	/* Password cannot be longer than 128 characters */
 	if (passwd) /* Password must be converted to NT unicode */
-		len = cifs_strtoUCS(wpwd, passwd, 128, codepage);
+		len = cifs_strtoUTF16(wpwd, passwd, 128, codepage);
 	else {
 		len = 0;
 		*wpwd = 0; /* Ensure string is null terminated */
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index f65d445..ef023ee 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -540,7 +540,7 @@
  * debugfs_print_regs32 - use seq_print to describe a set of registers
  * @s: the seq_file structure being used to generate output
  * @regs: an array of struct debugfs_reg32 structures
- * @mregs: the length of the above array
+ * @nregs: the length of the above array
  * @base: the base address to be used in reading the registers
  * @prefix: a string to be prefixed to every output line
  *
diff --git a/fs/ext2/ioctl.c b/fs/ext2/ioctl.c
index 1089f76..2de655f 100644
--- a/fs/ext2/ioctl.c
+++ b/fs/ext2/ioctl.c
@@ -77,10 +77,11 @@
 		flags = flags & EXT2_FL_USER_MODIFIABLE;
 		flags |= oldflags & ~EXT2_FL_USER_MODIFIABLE;
 		ei->i_flags = flags;
-		mutex_unlock(&inode->i_mutex);
 
 		ext2_set_inode_flags(inode);
 		inode->i_ctime = CURRENT_TIME_SEC;
+		mutex_unlock(&inode->i_mutex);
+
 		mark_inode_dirty(inode);
 setflags_out:
 		mnt_drop_write_file(filp);
@@ -88,20 +89,29 @@
 	}
 	case EXT2_IOC_GETVERSION:
 		return put_user(inode->i_generation, (int __user *) arg);
-	case EXT2_IOC_SETVERSION:
+	case EXT2_IOC_SETVERSION: {
+		__u32 generation;
+
 		if (!inode_owner_or_capable(inode))
 			return -EPERM;
 		ret = mnt_want_write_file(filp);
 		if (ret)
 			return ret;
-		if (get_user(inode->i_generation, (int __user *) arg)) {
+		if (get_user(generation, (int __user *) arg)) {
 			ret = -EFAULT;
-		} else {
-			inode->i_ctime = CURRENT_TIME_SEC;
-			mark_inode_dirty(inode);
+			goto setversion_out;
 		}
+
+		mutex_lock(&inode->i_mutex);
+		inode->i_ctime = CURRENT_TIME_SEC;
+		inode->i_generation = generation;
+		mutex_unlock(&inode->i_mutex);
+
+		mark_inode_dirty(inode);
+setversion_out:
 		mnt_drop_write_file(filp);
 		return ret;
+	}
 	case EXT2_IOC_GETRSVSZ:
 		if (test_opt(inode->i_sb, RESERVATION)
 			&& S_ISREG(inode->i_mode)
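
The ext2 hunk above makes EXT2_IOC_SETVERSION copy the new value into a local, then
update i_generation and i_ctime under i_mutex, rather than letting get_user() write
straight into the inode. For reference, a minimal, illustrative userspace caller of
this ioctl (reachable via the generic FS_IOC_SETVERSION alias) might look like the
sketch below; the path is a placeholder.

/*
 * Illustrative only: set a file's generation number through the ioctl the
 * hunk above services (FS_IOC_SETVERSION is the generic spelling of
 * EXT2_IOC_SETVERSION).
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>

int main(void)
{
	int generation = 42;	/* the kernel reads a 32-bit value via get_user() */
	int fd = open("/mnt/ext2/somefile", O_RDONLY);	/* placeholder path */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, FS_IOC_SETVERSION, &generation) < 0) {
		perror("FS_IOC_SETVERSION");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}
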
diff --git a/fs/inode.c b/fs/inode.c
index 4fa4f09..fb10d86 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -322,9 +322,6 @@
 void set_nlink(struct inode *inode, unsigned int nlink)
 {
 	if (!nlink) {
-		printk_ratelimited(KERN_INFO
-			"set_nlink() clearing i_nlink on %s inode %li\n",
-			inode->i_sb->s_type->name, inode->i_ino);
 		clear_nlink(inode);
 	} else {
 		/* Yes, some filesystems do change nlink from zero to one */
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
index 5d1a00a..05f0754 100644
--- a/fs/jbd/checkpoint.c
+++ b/fs/jbd/checkpoint.c
@@ -453,8 +453,6 @@
  *
  * Return <0 on error, 0 on success, 1 if there was nothing to clean up.
  *
- * Called with the journal lock held.
- *
  * This is the only part of the journaling code which really needs to be
  * aware of transaction aborts.  Checkpointing involves writing to the
  * main filesystem area rather than to the journal, so it can proceed
@@ -472,13 +470,14 @@
 	if (is_journal_aborted(journal))
 		return 1;
 
-	/* OK, work out the oldest transaction remaining in the log, and
+	/*
+	 * OK, work out the oldest transaction remaining in the log, and
 	 * the log block it starts at.
 	 *
 	 * If the log is now empty, we need to work out which is the
 	 * next transaction ID we will write, and where it will
-	 * start. */
-
+	 * start.
+	 */
 	spin_lock(&journal->j_state_lock);
 	spin_lock(&journal->j_list_lock);
 	transaction = journal->j_checkpoint_transactions;
@@ -504,7 +503,25 @@
 		spin_unlock(&journal->j_state_lock);
 		return 1;
 	}
+	spin_unlock(&journal->j_state_lock);
 
+	/*
+	 * We need to make sure that any blocks that were recently written out
+	 * --- perhaps by log_do_checkpoint() --- are flushed out before we
+	 * drop the transactions from the journal. It's unlikely this will be
+	 * necessary, especially with an appropriately sized journal, but we
+	 * need this to guarantee correctness.  Fortunately
+	 * cleanup_journal_tail() doesn't get called all that often.
+	 */
+	if (journal->j_flags & JFS_BARRIER)
+		blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
+
+	spin_lock(&journal->j_state_lock);
+	if (!tid_gt(first_tid, journal->j_tail_sequence)) {
+		spin_unlock(&journal->j_state_lock);
+		/* Someone else cleaned up journal so return 0 */
+		return 0;
+	}
 	/* OK, update the superblock to recover the freed space.
 	 * Physical blocks come first: have we wrapped beyond the end of
 	 * the log?  */
diff --git a/fs/jbd/recovery.c b/fs/jbd/recovery.c
index 5b43e96..008bf06 100644
--- a/fs/jbd/recovery.c
+++ b/fs/jbd/recovery.c
@@ -20,6 +20,7 @@
 #include <linux/fs.h>
 #include <linux/jbd.h>
 #include <linux/errno.h>
+#include <linux/blkdev.h>
 #endif
 
 /*
@@ -263,6 +264,9 @@
 	err2 = sync_blockdev(journal->j_fs_dev);
 	if (!err)
 		err = err2;
+	/* Flush disk caches to get replayed data on the permanent storage */
+	if (journal->j_flags & JFS_BARRIER)
+		blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
 
 	return err;
 }
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index d76ca6a..121f77c 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -77,6 +77,8 @@
 		steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
 		guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
 		guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
+		sum += kstat_cpu_irqs_sum(i);
+		sum += arch_irq_stat_cpu(i);
 
 		for (j = 0; j < NR_SOFTIRQS; j++) {
 			unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
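
The two added lines fold each CPU's IRQ counts (including the architecture-specific
ones) into the running total that /proc/stat later prints as the first number on its
"intr" line. A minimal, illustrative reader of that counter:

/*
 * Illustrative only: print the aggregate interrupt count that show_stat()
 * accumulates (now including the per-CPU sums added above) as the first
 * field of the "intr" line in /proc/stat.
 */
#include <stdio.h>

int main(void)
{
	char line[4096];	/* enough to reach the first "intr" value */
	unsigned long long total;
	FILE *fp = fopen("/proc/stat", "r");

	if (!fp) {
		perror("/proc/stat");
		return 1;
	}
	while (fgets(line, sizeof(line), fp)) {
		if (sscanf(line, "intr %llu", &total) == 1) {
			printf("total interrupts: %llu\n", total);
			break;
		}
	}
	fclose(fp);
	return 0;
}
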
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index e418c5a..7dcd2a2 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -518,6 +518,9 @@
 		if (!page)
 			continue;
 
+		if (PageReserved(page))
+			continue;
+
 		/* Clear accessed and referenced bits. */
 		ptep_test_and_clear_young(vma, addr, pte);
 		ClearPageReferenced(page);
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
index 2bfd987..6b00954 100644
--- a/fs/qnx4/inode.c
+++ b/fs/qnx4/inode.c
@@ -179,47 +179,33 @@
 	struct qnx4_inode_entry *rootdir;
 	int rd, rl;
 	int i, j;
-	int found = 0;
 
-	if (*(qnx4_sb(sb)->sb->RootDir.di_fname) != '/') {
+	if (*(qnx4_sb(sb)->sb->RootDir.di_fname) != '/')
 		return "no qnx4 filesystem (no root dir).";
-	} else {
-		QNX4DEBUG((KERN_NOTICE "QNX4 filesystem found on dev %s.\n", sb->s_id));
-		rd = le32_to_cpu(qnx4_sb(sb)->sb->RootDir.di_first_xtnt.xtnt_blk) - 1;
-		rl = le32_to_cpu(qnx4_sb(sb)->sb->RootDir.di_first_xtnt.xtnt_size);
-		for (j = 0; j < rl; j++) {
-			bh = sb_bread(sb, rd + j);	/* root dir, first block */
-			if (bh == NULL) {
-				return "unable to read root entry.";
-			}
-			for (i = 0; i < QNX4_INODES_PER_BLOCK; i++) {
-				rootdir = (struct qnx4_inode_entry *) (bh->b_data + i * QNX4_DIR_ENTRY_SIZE);
-				if (rootdir->di_fname != NULL) {
-					QNX4DEBUG((KERN_INFO "rootdir entry found : [%s]\n", rootdir->di_fname));
-					if (!strcmp(rootdir->di_fname,
-						    QNX4_BMNAME)) {
-						found = 1;
-						qnx4_sb(sb)->BitMap = kmemdup(rootdir,
-									      sizeof(struct qnx4_inode_entry),
-									      GFP_KERNEL);
-						if (!qnx4_sb(sb)->BitMap) {
-							brelse (bh);
-							return "not enough memory for bitmap inode";
-						}/* keep bitmap inode known */
-						break;
-					}
-				}
-			}
+	QNX4DEBUG((KERN_NOTICE "QNX4 filesystem found on dev %s.\n", sb->s_id));
+	rd = le32_to_cpu(qnx4_sb(sb)->sb->RootDir.di_first_xtnt.xtnt_blk) - 1;
+	rl = le32_to_cpu(qnx4_sb(sb)->sb->RootDir.di_first_xtnt.xtnt_size);
+	for (j = 0; j < rl; j++) {
+		bh = sb_bread(sb, rd + j);	/* root dir, first block */
+		if (bh == NULL)
+			return "unable to read root entry.";
+		rootdir = (struct qnx4_inode_entry *) bh->b_data;
+		for (i = 0; i < QNX4_INODES_PER_BLOCK; i++, rootdir++) {
+			QNX4DEBUG((KERN_INFO "rootdir entry found : [%s]\n", rootdir->di_fname));
+			if (strcmp(rootdir->di_fname, QNX4_BMNAME) != 0)
+				continue;
+			qnx4_sb(sb)->BitMap = kmemdup(rootdir,
+						      sizeof(struct qnx4_inode_entry),
+						      GFP_KERNEL);
 			brelse(bh);
-			if (found != 0) {
-				break;
-			}
+			if (!qnx4_sb(sb)->BitMap)
+				return "not enough memory for bitmap inode";
+			/* keep bitmap inode known */
+			return NULL;
 		}
-		if (found == 0) {
-			return "bitmap file not found.";
-		}
+		brelse(bh);
 	}
-	return NULL;
+	return "bitmap file not found.";
 }
 
 static int qnx4_fill_super(struct super_block *s, void *data, int silent)
@@ -270,7 +256,7 @@
 	if (IS_ERR(root)) {
 		printk(KERN_ERR "qnx4: get inode failed\n");
 		ret = PTR_ERR(root);
- 		goto out;
+ 		goto outb;
  	}
 
 	ret = -ENOMEM;
@@ -283,6 +269,8 @@
 
       outi:
 	iput(root);
+      outb:
+	kfree(qs->BitMap);
       out:
 	brelse(bh);
       outnobh:
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 5ec59b2..4674197 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -2125,6 +2125,8 @@
 		mutex_unlock(&dqopt->dqio_mutex);
 		goto out_file_init;
 	}
+	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
+		dqopt->info[type].dqi_flags |= DQF_SYS_FILE;
 	mutex_unlock(&dqopt->dqio_mutex);
 	spin_lock(&dq_state_lock);
 	dqopt->flags |= dquot_state_flag(flags, type);
@@ -2464,7 +2466,7 @@
 	spin_lock(&dq_data_lock);
 	ii->dqi_bgrace = mi->dqi_bgrace;
 	ii->dqi_igrace = mi->dqi_igrace;
-	ii->dqi_flags = mi->dqi_flags & DQF_MASK;
+	ii->dqi_flags = mi->dqi_flags & DQF_GETINFO_MASK;
 	ii->dqi_valid = IIF_ALL;
 	spin_unlock(&dq_data_lock);
 	mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
@@ -2490,8 +2492,8 @@
 	if (ii->dqi_valid & IIF_IGRACE)
 		mi->dqi_igrace = ii->dqi_igrace;
 	if (ii->dqi_valid & IIF_FLAGS)
-		mi->dqi_flags = (mi->dqi_flags & ~DQF_MASK) |
-				(ii->dqi_flags & DQF_MASK);
+		mi->dqi_flags = (mi->dqi_flags & ~DQF_SETINFO_MASK) |
+				(ii->dqi_flags & DQF_SETINFO_MASK);
 	spin_unlock(&dq_data_lock);
 	mark_info_dirty(sb, type);
 	/* Force write to disk */
diff --git a/fs/super.c b/fs/super.c
index de41e1e..6015c02 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -1186,6 +1186,8 @@
 			printk(KERN_ERR
 				"VFS:Filesystem freeze failed\n");
 			sb->s_frozen = SB_UNFROZEN;
+			smp_wmb();
+			wake_up(&sb->s_wait_unfrozen);
 			deactivate_locked_super(sb);
 			return ret;
 		}
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h
index fc1575f..5b5af0d 100644
--- a/include/acpi/acnames.h
+++ b/include/acpi/acnames.h
@@ -58,6 +58,7 @@
 #define METHOD_NAME__PRT        "_PRT"
 #define METHOD_NAME__CRS        "_CRS"
 #define METHOD_NAME__PRS        "_PRS"
+#define METHOD_NAME__AEI        "_AEI"
 #define METHOD_NAME__PRW        "_PRW"
 #define METHOD_NAME__SRS        "_SRS"
 
diff --git a/include/acpi/acpi_numa.h b/include/acpi/acpi_numa.h
index 1739726..451823c 100644
--- a/include/acpi/acpi_numa.h
+++ b/include/acpi/acpi_numa.h
@@ -15,6 +15,7 @@
 extern int node_to_pxm(int);
 extern void __acpi_map_pxm_to_node(int, int);
 extern int acpi_map_pxm_to_node(int);
+extern unsigned char acpi_srat_revision;
 
 #endif				/* CONFIG_ACPI_NUMA */
 #endif				/* __ACP_NUMA_H */
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 83062ed..2fe8639 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -238,13 +238,6 @@
 /*
  * Miscellaneous
  */
-acpi_status
-acpi_os_validate_address(u8 space_id, acpi_physical_address address,
-			 acpi_size length, char *name);
-acpi_status
-acpi_os_invalidate_address(u8 space_id, acpi_physical_address address,
-			 acpi_size length);
-
 u64 acpi_os_get_timer(void);
 
 acpi_status acpi_os_signal(u32 function, void *info);
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 7762bc2..a28da35 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -47,7 +47,7 @@
 
 /* Current ACPICA subsystem version in YYYYMMDD format */
 
-#define ACPI_CA_VERSION                 0x20110623
+#define ACPI_CA_VERSION                 0x20120111
 
 #include "actypes.h"
 #include "actbl.h"
@@ -74,6 +74,7 @@
 extern u32 acpi_current_gpe_count;
 extern struct acpi_table_fadt acpi_gbl_FADT;
 extern u8 acpi_gbl_system_awake_and_running;
+extern u8 acpi_gbl_reduced_hardware;	/* ACPI 5.0 */
 
 extern u32 acpi_rsdt_forced;
 /*
@@ -111,6 +112,11 @@
 
 acpi_status acpi_remove_interface(acpi_string interface_name);
 
+u32
+acpi_check_address_range(acpi_adr_space_type space_id,
+			 acpi_physical_address address,
+			 acpi_size length, u8 warn);
+
 /*
  * ACPI Memory management
  */
@@ -276,12 +282,23 @@
 acpi_status acpi_install_interface_handler(acpi_interface_handler handler);
 
 /*
- * Event interfaces
+ * Global Lock interfaces
  */
 acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle);
 
 acpi_status acpi_release_global_lock(u32 handle);
 
+/*
+ * Interfaces to AML mutex objects
+ */
+acpi_status
+acpi_acquire_mutex(acpi_handle handle, acpi_string pathname, u16 timeout);
+
+acpi_status acpi_release_mutex(acpi_handle handle, acpi_string pathname);
+
+/*
+ * Fixed Event interfaces
+ */
 acpi_status acpi_enable_event(u32 event, u32 flags);
 
 acpi_status acpi_disable_event(u32 event, u32 flags);
@@ -291,7 +308,7 @@
 acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status);
 
 /*
- * GPE Interfaces
+ * General Purpose Event (GPE) Interfaces
  */
 acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number);
 
@@ -346,6 +363,10 @@
 #endif
 
 acpi_status
+acpi_get_event_resources(acpi_handle device_handle,
+			 struct acpi_buffer *ret_buffer);
+
+acpi_status
 acpi_walk_resources(acpi_handle device,
 		    char *name,
 		    acpi_walk_resource_callback user_function, void *context);
@@ -360,6 +381,11 @@
 acpi_resource_to_address64(struct acpi_resource *resource,
 			   struct acpi_resource_address64 *out);
 
+acpi_status
+acpi_buffer_to_resource(u8 *aml_buffer,
+			u16 aml_buffer_length,
+			struct acpi_resource **resource_ptr);
+
 /*
  * Hardware (ACPI device) interfaces
  */
diff --git a/include/acpi/acrestyp.h b/include/acpi/acrestyp.h
index 0a66cc4..3506e39 100644
--- a/include/acpi/acrestyp.h
+++ b/include/acpi/acrestyp.h
@@ -61,11 +61,14 @@
 #define ACPI_WRITE_COMBINING_MEMORY     (u8) 0x02
 #define ACPI_PREFETCHABLE_MEMORY        (u8) 0x03
 
+/*! [Begin] no source code translation */
 /*
  * IO Attributes
- * The ISA IO ranges are:     n000-n0_fFh, n400-n4_fFh, n800-n8_fFh, n_c00-n_cFFh.
- * The non-ISA IO ranges are: n100-n3_fFh, n500-n7_fFh, n900-n_bFFh, n_cd0-n_fFFh.
+ * The ISA IO ranges are:     n000-n0FFh,  n400-n4FFh, n800-n8FFh, nC00-nCFFh.
+ * The non-ISA IO ranges are: n100-n3FFh,  n500-n7FFh, n900-nBFFh, nCD0-nFFFh.
  */
+/*! [End] no source code translation !*/
+
 #define ACPI_NON_ISA_ONLY_RANGES        (u8) 0x01
 #define ACPI_ISA_ONLY_RANGES            (u8) 0x02
 #define ACPI_ENTIRE_RANGE               (ACPI_NON_ISA_ONLY_RANGES | ACPI_ISA_ONLY_RANGES)
@@ -81,16 +84,26 @@
 #define ACPI_DECODE_16                  (u8) 0x01	/* 16-bit IO address decode */
 
 /*
- * IRQ Attributes
+ * Interrupt attributes - used in multiple descriptors
  */
+
+/* Triggering */
+
 #define ACPI_LEVEL_SENSITIVE            (u8) 0x00
 #define ACPI_EDGE_SENSITIVE             (u8) 0x01
 
+/* Polarity */
+
 #define ACPI_ACTIVE_HIGH                (u8) 0x00
 #define ACPI_ACTIVE_LOW                 (u8) 0x01
+#define ACPI_ACTIVE_BOTH                (u8) 0x02
+
+/* Sharing */
 
 #define ACPI_EXCLUSIVE                  (u8) 0x00
 #define ACPI_SHARED                     (u8) 0x01
+#define ACPI_EXCLUSIVE_AND_WAKE         (u8) 0x02
+#define ACPI_SHARED_AND_WAKE            (u8) 0x03
 
 /*
  * DMA Attributes
@@ -127,6 +140,8 @@
 #define ACPI_POS_DECODE                 (u8) 0x00
 #define ACPI_SUB_DECODE                 (u8) 0x01
 
+/* Producer/Consumer */
+
 #define ACPI_PRODUCER                   (u8) 0x00
 #define ACPI_CONSUMER                   (u8) 0x01
 
@@ -192,6 +207,21 @@
 	u8 address_length;
 };
 
+struct acpi_resource_fixed_dma {
+	u16 request_lines;
+	u16 channels;
+	u8 width;
+};
+
+/* Values for Width field above */
+
+#define ACPI_DMA_WIDTH8                         0
+#define ACPI_DMA_WIDTH16                        1
+#define ACPI_DMA_WIDTH32                        2
+#define ACPI_DMA_WIDTH64                        3
+#define ACPI_DMA_WIDTH128                       4
+#define ACPI_DMA_WIDTH256                       5
+
 struct acpi_resource_vendor {
 	u16 byte_length;
 	u8 byte_data[1];
@@ -329,6 +359,166 @@
 	u64 address;
 };
 
+struct acpi_resource_gpio {
+	u8 revision_id;
+	u8 connection_type;
+	u8 producer_consumer;	/* For values, see Producer/Consumer above */
+	u8 pin_config;
+	u8 sharable;		/* For values, see Interrupt Attributes above */
+	u8 io_restriction;
+	u8 triggering;		/* For values, see Interrupt Attributes above */
+	u8 polarity;		/* For values, see Interrupt Attributes above */
+	u16 drive_strength;
+	u16 debounce_timeout;
+	u16 pin_table_length;
+	u16 vendor_length;
+	struct acpi_resource_source resource_source;
+	u16 *pin_table;
+	u8 *vendor_data;
+};
+
+/* Values for GPIO connection_type field above */
+
+#define ACPI_RESOURCE_GPIO_TYPE_INT             0
+#define ACPI_RESOURCE_GPIO_TYPE_IO              1
+
+/* Values for pin_config field above */
+
+#define ACPI_PIN_CONFIG_DEFAULT                 0
+#define ACPI_PIN_CONFIG_PULLUP                  1
+#define ACPI_PIN_CONFIG_PULLDOWN                2
+#define ACPI_PIN_CONFIG_NOPULL                  3
+
+/* Values for io_restriction field above */
+
+#define ACPI_IO_RESTRICT_NONE                   0
+#define ACPI_IO_RESTRICT_INPUT                  1
+#define ACPI_IO_RESTRICT_OUTPUT                 2
+#define ACPI_IO_RESTRICT_NONE_PRESERVE          3
+
+/* Common structure for I2C, SPI, and UART serial descriptors */
+
+#define ACPI_RESOURCE_SERIAL_COMMON \
+	u8                                      revision_id; \
+	u8                                      type; \
+	u8                                      producer_consumer;   /* For values, see Producer/Consumer above */\
+	u8                                      slave_mode; \
+	u8                                      type_revision_id; \
+	u16                                     type_data_length; \
+	u16                                     vendor_length; \
+	struct acpi_resource_source             resource_source; \
+	u8                                      *vendor_data;
+
+struct acpi_resource_common_serialbus {
+ACPI_RESOURCE_SERIAL_COMMON};
+
+/* Values for the Type field above */
+
+#define ACPI_RESOURCE_SERIAL_TYPE_I2C           1
+#define ACPI_RESOURCE_SERIAL_TYPE_SPI           2
+#define ACPI_RESOURCE_SERIAL_TYPE_UART          3
+
+/* Values for slave_mode field above */
+
+#define ACPI_CONTROLLER_INITIATED               0
+#define ACPI_DEVICE_INITIATED                   1
+
+struct acpi_resource_i2c_serialbus {
+	ACPI_RESOURCE_SERIAL_COMMON u8 access_mode;
+	u16 slave_address;
+	u32 connection_speed;
+};
+
+/* Values for access_mode field above */
+
+#define ACPI_I2C_7BIT_MODE                      0
+#define ACPI_I2C_10BIT_MODE                     1
+
+struct acpi_resource_spi_serialbus {
+	ACPI_RESOURCE_SERIAL_COMMON u8 wire_mode;
+	u8 device_polarity;
+	u8 data_bit_length;
+	u8 clock_phase;
+	u8 clock_polarity;
+	u16 device_selection;
+	u32 connection_speed;
+};
+
+/* Values for wire_mode field above */
+
+#define ACPI_SPI_4WIRE_MODE                     0
+#define ACPI_SPI_3WIRE_MODE                     1
+
+/* Values for device_polarity field above */
+
+#define ACPI_SPI_ACTIVE_LOW                     0
+#define ACPI_SPI_ACTIVE_HIGH                    1
+
+/* Values for clock_phase field above */
+
+#define ACPI_SPI_FIRST_PHASE                    0
+#define ACPI_SPI_SECOND_PHASE                   1
+
+/* Values for clock_polarity field above */
+
+#define ACPI_SPI_START_LOW                      0
+#define ACPI_SPI_START_HIGH                     1
+
+struct acpi_resource_uart_serialbus {
+	ACPI_RESOURCE_SERIAL_COMMON u8 endian;
+	u8 data_bits;
+	u8 stop_bits;
+	u8 flow_control;
+	u8 parity;
+	u8 lines_enabled;
+	u16 rx_fifo_size;
+	u16 tx_fifo_size;
+	u32 default_baud_rate;
+};
+
+/* Values for Endian field above */
+
+#define ACPI_UART_LITTLE_ENDIAN                 0
+#define ACPI_UART_BIG_ENDIAN                    1
+
+/* Values for data_bits field above */
+
+#define ACPI_UART_5_DATA_BITS                   0
+#define ACPI_UART_6_DATA_BITS                   1
+#define ACPI_UART_7_DATA_BITS                   2
+#define ACPI_UART_8_DATA_BITS                   3
+#define ACPI_UART_9_DATA_BITS                   4
+
+/* Values for stop_bits field above */
+
+#define ACPI_UART_NO_STOP_BITS                  0
+#define ACPI_UART_1_STOP_BIT                    1
+#define ACPI_UART_1P5_STOP_BITS                 2
+#define ACPI_UART_2_STOP_BITS                   3
+
+/* Values for flow_control field above */
+
+#define ACPI_UART_FLOW_CONTROL_NONE             0
+#define ACPI_UART_FLOW_CONTROL_HW               1
+#define ACPI_UART_FLOW_CONTROL_XON_XOFF         2
+
+/* Values for Parity field above */
+
+#define ACPI_UART_PARITY_NONE                   0
+#define ACPI_UART_PARITY_EVEN                   1
+#define ACPI_UART_PARITY_ODD                    2
+#define ACPI_UART_PARITY_MARK                   3
+#define ACPI_UART_PARITY_SPACE                  4
+
+/* Values for lines_enabled bitfield above */
+
+#define ACPI_UART_CARRIER_DETECT                (1<<2)
+#define ACPI_UART_RING_INDICATOR                (1<<3)
+#define ACPI_UART_DATA_SET_READY                (1<<4)
+#define ACPI_UART_DATA_TERMINAL_READY           (1<<5)
+#define ACPI_UART_CLEAR_TO_SEND                 (1<<6)
+#define ACPI_UART_REQUEST_TO_SEND               (1<<7)
+
 /* ACPI_RESOURCE_TYPEs */
 
 #define ACPI_RESOURCE_TYPE_IRQ                  0
@@ -348,7 +538,10 @@
 #define ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64   14	/* ACPI 3.0 */
 #define ACPI_RESOURCE_TYPE_EXTENDED_IRQ         15
 #define ACPI_RESOURCE_TYPE_GENERIC_REGISTER     16
-#define ACPI_RESOURCE_TYPE_MAX                  16
+#define ACPI_RESOURCE_TYPE_GPIO                 17	/* ACPI 5.0 */
+#define ACPI_RESOURCE_TYPE_FIXED_DMA            18	/* ACPI 5.0 */
+#define ACPI_RESOURCE_TYPE_SERIAL_BUS           19	/* ACPI 5.0 */
+#define ACPI_RESOURCE_TYPE_MAX                  19
 
 /* Master union for resource descriptors */
 
@@ -358,6 +551,7 @@
 	struct acpi_resource_start_dependent start_dpf;
 	struct acpi_resource_io io;
 	struct acpi_resource_fixed_io fixed_io;
+	struct acpi_resource_fixed_dma fixed_dma;
 	struct acpi_resource_vendor vendor;
 	struct acpi_resource_vendor_typed vendor_typed;
 	struct acpi_resource_end_tag end_tag;
@@ -370,6 +564,11 @@
 	struct acpi_resource_extended_address64 ext_address64;
 	struct acpi_resource_extended_irq extended_irq;
 	struct acpi_resource_generic_register generic_reg;
+	struct acpi_resource_gpio gpio;
+	struct acpi_resource_i2c_serialbus i2c_serial_bus;
+	struct acpi_resource_spi_serialbus spi_serial_bus;
+	struct acpi_resource_uart_serialbus uart_serial_bus;
+	struct acpi_resource_common_serialbus common_serial_bus;
 
 	/* Common fields */
 
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index f138028..8e1b92f 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -255,6 +255,8 @@
 	struct acpi_generic_address xpm_timer_block;	/* 64-bit Extended Power Mgt Timer Ctrl Reg Blk address */
 	struct acpi_generic_address xgpe0_block;	/* 64-bit Extended General Purpose Event 0 Reg Blk address */
 	struct acpi_generic_address xgpe1_block;	/* 64-bit Extended General Purpose Event 1 Reg Blk address */
+	struct acpi_generic_address sleep_control;	/* 64-bit Sleep Control register */
+	struct acpi_generic_address sleep_status;	/* 64-bit Sleep Status register */
 };
 
 /* Masks for FADT Boot Architecture Flags (boot_flags) */
@@ -264,6 +266,7 @@
 #define ACPI_FADT_NO_VGA            (1<<2)	/* 02: [V4] It is not safe to probe for VGA hardware */
 #define ACPI_FADT_NO_MSI            (1<<3)	/* 03: [V4] Message Signaled Interrupts (MSI) must not be enabled */
 #define ACPI_FADT_NO_ASPM           (1<<4)	/* 04: [V4] PCIe ASPM control must not be enabled */
+#define ACPI_FADT_NO_CMOS_RTC       (1<<5)	/* 05: [V5] No CMOS real-time clock present */
 
 #define FADT2_REVISION_ID               3
 
@@ -289,6 +292,8 @@
 #define ACPI_FADT_REMOTE_POWER_ON   (1<<17)	/* 17: [V4] System is compatible with remote power on (ACPI 3.0) */
 #define ACPI_FADT_APIC_CLUSTER      (1<<18)	/* 18: [V4] All local APICs must use cluster model (ACPI 3.0) */
 #define ACPI_FADT_APIC_PHYSICAL     (1<<19)	/* 19: [V4] All local x_aPICs must use physical dest mode (ACPI 3.0) */
+#define ACPI_FADT_HW_REDUCED        (1<<20)	/* 20: [V5] ACPI hardware is not implemented (ACPI 5.0) */
+#define ACPI_FADT_LOW_POWER_S0      (1<<21)	/* 21: [V5] S0 power savings are equal or better than S3 (ACPI 5.0) */
 
 /* Values for preferred_profile (Preferred Power Management Profiles) */
 
@@ -299,14 +304,16 @@
 	PM_WORKSTATION = 3,
 	PM_ENTERPRISE_SERVER = 4,
 	PM_SOHO_SERVER = 5,
-	PM_APPLIANCE_PC = 6
+	PM_APPLIANCE_PC = 6,
+	PM_PERFORMANCE_SERVER = 7,
+	PM_TABLET = 8
 };
 
 /* Reset to default packing */
 
 #pragma pack()
 
-#define ACPI_FADT_OFFSET(f)             (u8) ACPI_OFFSET (struct acpi_table_fadt, f)
+#define ACPI_FADT_OFFSET(f)             (u16) ACPI_OFFSET (struct acpi_table_fadt, f)
 
 /*
  * Internal table-related structures
@@ -342,6 +349,7 @@
 
 #include <acpi/actbl1.h>
 #include <acpi/actbl2.h>
+#include <acpi/actbl3.h>
 
 /*
  * Sizes of the various flavors of FADT. We need to look closely
@@ -351,12 +359,15 @@
  * FADT is the bottom line as to what the version really is.
  *
  * For reference, the values below are as follows:
- *     FADT V1  size: 0x74
- *     FADT V2  size: 0x84
- *     FADT V3+ size: 0xF4
+ *     FADT V1  size: 0x074
+ *     FADT V2  size: 0x084
+ *     FADT V3  size: 0x0F4
+ *     FADT V4  size: 0x0F4
+ *     FADT V5  size: 0x10C
  */
 #define ACPI_FADT_V1_SIZE       (u32) (ACPI_FADT_OFFSET (flags) + 4)
 #define ACPI_FADT_V2_SIZE       (u32) (ACPI_FADT_OFFSET (reserved4[0]) + 3)
-#define ACPI_FADT_V3_SIZE       (u32) (sizeof (struct acpi_table_fadt))
+#define ACPI_FADT_V3_SIZE       (u32) (ACPI_FADT_OFFSET (sleep_control))
+#define ACPI_FADT_V5_SIZE       (u32) (sizeof (struct acpi_table_fadt))
 
 #endif				/* __ACTBL_H__ */
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 7504bc9..71e747b 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -228,7 +228,8 @@
 	ACPI_EINJ_EXECUTE_OPERATION = 5,
 	ACPI_EINJ_CHECK_BUSY_STATUS = 6,
 	ACPI_EINJ_GET_COMMAND_STATUS = 7,
-	ACPI_EINJ_ACTION_RESERVED = 8,	/* 8 and greater are reserved */
+	ACPI_EINJ_SET_ERROR_TYPE_WITH_ADDRESS = 8,
+	ACPI_EINJ_ACTION_RESERVED = 9,	/* 9 and greater are reserved */
 	ACPI_EINJ_TRIGGER_ERROR = 0xFF	/* Except for this value */
 };
 
@@ -240,7 +241,27 @@
 	ACPI_EINJ_WRITE_REGISTER = 2,
 	ACPI_EINJ_WRITE_REGISTER_VALUE = 3,
 	ACPI_EINJ_NOOP = 4,
-	ACPI_EINJ_INSTRUCTION_RESERVED = 5	/* 5 and greater are reserved */
+	ACPI_EINJ_FLUSH_CACHELINE = 5,
+	ACPI_EINJ_INSTRUCTION_RESERVED = 6	/* 6 and greater are reserved */
+};
+
+struct acpi_einj_error_type_with_addr {
+	u32 error_type;
+	u32 vendor_struct_offset;
+	u32 flags;
+	u32 apic_id;
+	u64 address;
+	u64 range;
+	u32 pcie_id;
+};
+
+struct acpi_einj_vendor {
+	u32 length;
+	u32 pcie_id;
+	u16 vendor_id;
+	u16 device_id;
+	u8 revision_id;
+	u8 reserved[3];
 };
 
 /* EINJ Trigger Error Action Table */
@@ -275,6 +296,7 @@
 #define ACPI_EINJ_PLATFORM_CORRECTABLE      (1<<9)
 #define ACPI_EINJ_PLATFORM_UNCORRECTABLE    (1<<10)
 #define ACPI_EINJ_PLATFORM_FATAL            (1<<11)
+#define ACPI_EINJ_VENDOR_DEFINED            (1<<31)
 
 /*******************************************************************************
  *
@@ -631,7 +653,9 @@
 	ACPI_MADT_TYPE_INTERRUPT_SOURCE = 8,
 	ACPI_MADT_TYPE_LOCAL_X2APIC = 9,
 	ACPI_MADT_TYPE_LOCAL_X2APIC_NMI = 10,
-	ACPI_MADT_TYPE_RESERVED = 11	/* 11 and greater are reserved */
+	ACPI_MADT_TYPE_GENERIC_INTERRUPT = 11,
+	ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR = 12,
+	ACPI_MADT_TYPE_RESERVED = 13	/* 13 and greater are reserved */
 };
 
 /*
@@ -752,11 +776,36 @@
 	u8 reserved[3];
 };
 
+/* 11: Generic Interrupt (ACPI 5.0) */
+
+struct acpi_madt_generic_interrupt {
+	struct acpi_subtable_header header;
+	u16 reserved;		/* Reserved - must be zero */
+	u32 gic_id;
+	u32 uid;
+	u32 flags;
+	u32 parking_version;
+	u32 performance_interrupt;
+	u64 parked_address;
+	u64 base_address;
+};
+
+/* 12: Generic Distributor (ACPI 5.0) */
+
+struct acpi_madt_generic_distributor {
+	struct acpi_subtable_header header;
+	u16 reserved;		/* Reserved - must be zero */
+	u32 gic_id;
+	u64 base_address;
+	u32 global_irq_base;
+	u32 reserved2;		/* Reserved - must be zero */
+};
+
 /*
  * Common flags fields for MADT subtables
  */
 
-/* MADT Local APIC flags (lapic_flags) */
+/* MADT Local APIC flags (lapic_flags) and GIC flags */
 
 #define ACPI_MADT_ENABLED           (1)	/* 00: Processor is usable if set */
 
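
The two new MADT subtable types can be picked out with an ordinary subtable
walk; as a minimal sketch (not part of the patch), count_madt_gics() below is
a hypothetical helper that counts the ACPI 5.0 Generic Interrupt entries, with
the walk open-coded instead of using the kernel's MADT parsing helpers.

#include <acpi/acpi.h>

static int count_madt_gics(struct acpi_table_madt *madt)
{
	u8 *p = (u8 *)madt + sizeof(*madt);
	u8 *end = (u8 *)madt + madt->header.length;
	int gics = 0;

	while (p < end) {
		struct acpi_subtable_header *sub =
			(struct acpi_subtable_header *)p;

		if (sub->length < sizeof(*sub))
			break;		/* malformed entry, stop the walk */
		if (sub->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT)
			gics++;
		p += sub->length;
	}
	return gics;
}
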
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
new file mode 100644
index 0000000..c22ce80
--- /dev/null
+++ b/include/acpi/actbl3.h
@@ -0,0 +1,552 @@
+/******************************************************************************
+ *
+ * Name: actbl3.h - ACPI Table Definitions
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2011, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACTBL3_H__
+#define __ACTBL3_H__
+
+/*******************************************************************************
+ *
+ * Additional ACPI Tables (3)
+ *
+ * These tables are not consumed directly by the ACPICA subsystem, but are
+ * included here to support device drivers and the AML disassembler.
+ *
+ * The tables in this file are fully defined within the ACPI specification.
+ *
+ ******************************************************************************/
+
+/*
+ * Values for description table header signatures for tables defined in this
+ * file. Useful because they make it more difficult to inadvertently type in
+ * the wrong signature.
+ */
+#define ACPI_SIG_BGRT           "BGRT"	/* Boot Graphics Resource Table */
+#define ACPI_SIG_DRTM           "DRTM"	/* Dynamic Root of Trust for Measurement table */
+#define ACPI_SIG_FPDT           "FPDT"	/* Firmware Performance Data Table */
+#define ACPI_SIG_GTDT           "GTDT"	/* Generic Timer Description Table */
+#define ACPI_SIG_MPST           "MPST"	/* Memory Power State Table */
+#define ACPI_SIG_PCCT           "PCCT"	/* Platform Communications Channel Table */
+#define ACPI_SIG_PMTT           "PMTT"	/* Platform Memory Topology Table */
+#define ACPI_SIG_RASF           "RASF"	/* RAS Feature table */
+
+#define ACPI_SIG_S3PT           "S3PT"	/* S3 Performance (sub)Table */
+#define ACPI_SIG_PCCS           "PCC"	/* PCC Shared Memory Region */
+
+/* Reserved table signatures */
+
+#define ACPI_SIG_CSRT           "CSRT"	/* Core System Resources Table */
+#define ACPI_SIG_DBG2           "DBG2"	/* Debug Port table 2 */
+#define ACPI_SIG_MATR           "MATR"	/* Memory Address Translation Table */
+#define ACPI_SIG_MSDM           "MSDM"	/* Microsoft Data Management Table */
+#define ACPI_SIG_WPBT           "WPBT"	/* Windows Platform Binary Table */
+
+/*
+ * All tables must be byte-packed to match the ACPI specification, since
+ * the tables are provided by the system BIOS.
+ */
+#pragma pack(1)
+
+/*
+ * Note about bitfields: The u8 type is used for bitfields in ACPI tables.
+ * This is the only type that is even remotely portable. Anything else is not
+ * portable, so do not use any other bitfield types.
+ */
+
+/*******************************************************************************
+ *
+ * BGRT - Boot Graphics Resource Table (ACPI 5.0)
+ *        Version 1
+ *
+ ******************************************************************************/
+
+struct acpi_table_bgrt {
+	struct acpi_table_header header;	/* Common ACPI table header */
+	u16 version;
+	u8 status;
+	u8 image_type;
+	u64 image_address;
+	u32 image_offset_x;
+	u32 image_offset_y;
+};
+
+/*******************************************************************************
+ *
+ * DRTM - Dynamic Root of Trust for Measurement table
+ *
+ ******************************************************************************/
+
+struct acpi_table_drtm {
+	struct acpi_table_header header;	/* Common ACPI table header */
+	u64 entry_base_address;
+	u64 entry_length;
+	u32 entry_address32;
+	u64 entry_address64;
+	u64 exit_address;
+	u64 log_area_address;
+	u32 log_area_length;
+	u64 arch_dependent_address;
+	u32 flags;
+};
+
+/* 1) Validated Tables List */
+
+struct acpi_drtm_vtl_list {
+	u32 validated_table_list_count;
+};
+
+/* 2) Resources List */
+
+struct acpi_drtm_resource_list {
+	u32 resource_list_count;
+};
+
+/* 3) Platform-specific Identifiers List */
+
+struct acpi_drtm_id_list {
+	u32 id_list_count;
+};
+
+/*******************************************************************************
+ *
+ * FPDT - Firmware Performance Data Table (ACPI 5.0)
+ *        Version 1
+ *
+ ******************************************************************************/
+
+struct acpi_table_fpdt {
+	struct acpi_table_header header;	/* Common ACPI table header */
+};
+
+/* FPDT subtable header */
+
+struct acpi_fpdt_header {
+	u16 type;
+	u8 length;
+	u8 revision;
+};
+
+/* Values for Type field above */
+
+enum acpi_fpdt_type {
+	ACPI_FPDT_TYPE_BOOT = 0,
+	ACPI_FPDT_TYPE_S3PERF = 1,
+};
+
+/*
+ * FPDT subtables
+ */
+
+/* 0: Firmware Basic Boot Performance Record */
+
+struct acpi_fpdt_boot {
+	struct acpi_fpdt_header header;
+	u8 reserved[4];
+	u64 reset_end;
+	u64 load_start;
+	u64 startup_start;
+	u64 exit_services_entry;
+	u64 exit_services_exit;
+};
+
+/* 1: S3 Performance Table Pointer Record */
+
+struct acpi_fpdt_s3pt_ptr {
+	struct acpi_fpdt_header header;
+	u8 reserved[4];
+	u64 address;
+};
+
+/*
+ * S3PT - S3 Performance Table. This table is pointed to by the
+ * FPDT S3 Pointer Record above.
+ */
+struct acpi_table_s3pt {
+	u8 signature[4];	/* "S3PT" */
+	u32 length;
+};
+
+/*
+ * S3PT Subtables
+ */
+struct acpi_s3pt_header {
+	u16 type;
+	u8 length;
+	u8 revision;
+};
+
+/* Values for Type field above */
+
+enum acpi_s3pt_type {
+	ACPI_S3PT_TYPE_RESUME = 0,
+	ACPI_S3PT_TYPE_SUSPEND = 1,
+};
+
+struct acpi_s3pt_resume {
+	struct acpi_s3pt_header header;
+	u32 resume_count;
+	u64 full_resume;
+	u64 average_resume;
+};
+
+struct acpi_s3pt_suspend {
+	struct acpi_s3pt_header header;
+	u64 suspend_start;
+	u64 suspend_end;
+};
+
+/*******************************************************************************
+ *
+ * GTDT - Generic Timer Description Table (ACPI 5.0)
+ *        Version 1
+ *
+ ******************************************************************************/
+
+struct acpi_table_gtdt {
+	struct acpi_table_header header;	/* Common ACPI table header */
+	u64 address;
+	u32 flags;
+	u32 secure_pl1_interrupt;
+	u32 secure_pl1_flags;
+	u32 non_secure_pl1_interrupt;
+	u32 non_secure_pl1_flags;
+	u32 virtual_timer_interrupt;
+	u32 virtual_timer_flags;
+	u32 non_secure_pl2_interrupt;
+	u32 non_secure_pl2_flags;
+};
+
+/* Values for Flags field above */
+
+#define ACPI_GTDT_MAPPED_BLOCK_PRESENT      1
+
+/* Values for all "TimerFlags" fields above */
+
+#define ACPI_GTDT_INTERRUPT_MODE            1
+#define ACPI_GTDT_INTERRUPT_POLARITY        2
+
+/*******************************************************************************
+ *
+ * MPST - Memory Power State Table (ACPI 5.0)
+ *        Version 1
+ *
+ ******************************************************************************/
+
+#define ACPI_MPST_CHANNEL_INFO \
+	u16                             reserved1; \
+	u8                              channel_id; \
+	u8                              reserved2; \
+	u16                             power_node_count;
+
+/* Main table */
+
+struct acpi_table_mpst {
+	struct acpi_table_header header;	/* Common ACPI table header */
+	 ACPI_MPST_CHANNEL_INFO	/* Platform Communication Channel */
+};
+
+/* Memory Platform Communication Channel Info */
+
+struct acpi_mpst_channel {
+	ACPI_MPST_CHANNEL_INFO	/* Platform Communication Channel */
+};
+
+/* Memory Power Node Structure */
+
+struct acpi_mpst_power_node {
+	u8 flags;
+	u8 reserved1;
+	u16 node_id;
+	u32 length;
+	u64 range_address;
+	u64 range_length;
+	u8 num_power_states;
+	u8 num_physical_components;
+	u16 reserved2;
+};
+
+/* Values for Flags field above */
+
+#define ACPI_MPST_ENABLED               1
+#define ACPI_MPST_POWER_MANAGED         2
+#define ACPI_MPST_HOT_PLUG_CAPABLE      4
+
+/* Memory Power State Structure (follows POWER_NODE above) */
+
+struct acpi_mpst_power_state {
+	u8 power_state;
+	u8 info_index;
+};
+
+/* Physical Component ID Structure (follows POWER_STATE above) */
+
+struct acpi_mpst_component {
+	u16 component_id;
+};
+
+/* Memory Power State Characteristics Structure (follows all POWER_NODEs) */
+
+struct acpi_mpst_data_hdr {
+	u16 characteristics_count;
+};
+
+struct acpi_mpst_power_data {
+	u8 revision;
+	u8 flags;
+	u16 reserved1;
+	u32 average_power;
+	u32 power_saving;
+	u64 exit_latency;
+	u64 reserved2;
+};
+
+/* Values for Flags field above */
+
+#define ACPI_MPST_PRESERVE              1
+#define ACPI_MPST_AUTOENTRY             2
+#define ACPI_MPST_AUTOEXIT              4
+
+/* Shared Memory Region (not part of an ACPI table) */
+
+struct acpi_mpst_shared {
+	u32 signature;
+	u16 pcc_command;
+	u16 pcc_status;
+	u16 command_register;
+	u16 status_register;
+	u16 power_state_id;
+	u16 power_node_id;
+	u64 energy_consumed;
+	u64 average_power;
+};
+
+/*******************************************************************************
+ *
+ * PCCT - Platform Communications Channel Table (ACPI 5.0)
+ *        Version 1
+ *
+ ******************************************************************************/
+
+struct acpi_table_pcct {
+	struct acpi_table_header header;	/* Common ACPI table header */
+	u32 flags;
+	u32 latency;
+	u32 reserved;
+};
+
+/* Values for Flags field above */
+
+#define ACPI_PCCT_DOORBELL              1
+
+/*
+ * PCCT subtables
+ */
+
+/* 0: Generic Communications Subspace */
+
+struct acpi_pcct_subspace {
+	struct acpi_subtable_header header;
+	u8 reserved[6];
+	u64 base_address;
+	u64 length;
+	struct acpi_generic_address doorbell_register;
+	u64 preserve_mask;
+	u64 write_mask;
+};
+
+/*
+ * PCC memory structures (not part of the ACPI table)
+ */
+
+/* Shared Memory Region */
+
+struct acpi_pcct_shared_memory {
+	u32 signature;
+	u16 command;
+	u16 status;
+};
+
+/*******************************************************************************
+ *
+ * PMTT - Platform Memory Topology Table (ACPI 5.0)
+ *        Version 1
+ *
+ ******************************************************************************/
+
+struct acpi_table_pmtt {
+	struct acpi_table_header header;	/* Common ACPI table header */
+	u32 reserved;
+};
+
+/* Common header for PMTT subtables that follow main table */
+
+struct acpi_pmtt_header {
+	u8 type;
+	u8 reserved1;
+	u16 length;
+	u16 flags;
+	u16 reserved2;
+};
+
+/* Values for Type field above */
+
+#define ACPI_PMTT_TYPE_SOCKET           0
+#define ACPI_PMTT_TYPE_CONTROLLER       1
+#define ACPI_PMTT_TYPE_DIMM             2
+#define ACPI_PMTT_TYPE_RESERVED         3	/* 0x03-0xFF are reserved */
+
+/* Values for Flags field above */
+
+#define ACPI_PMTT_TOP_LEVEL             0x0001
+#define ACPI_PMTT_PHYSICAL              0x0002
+#define ACPI_PMTT_MEMORY_TYPE           0x000C
+
+/*
+ * PMTT subtables, correspond to Type in struct acpi_pmtt_header
+ */
+
+/* 0: Socket Structure */
+
+struct acpi_pmtt_socket {
+	struct acpi_pmtt_header header;
+	u16 socket_id;
+	u16 reserved;
+};
+
+/* 1: Memory Controller subtable */
+
+struct acpi_pmtt_controller {
+	struct acpi_pmtt_header header;
+	u32 read_latency;
+	u32 write_latency;
+	u32 read_bandwidth;
+	u32 write_bandwidth;
+	u16 access_width;
+	u16 alignment;
+	u16 reserved;
+	u16 domain_count;
+};
+
+/* 1a: Proximity Domain substructure */
+
+struct acpi_pmtt_domain {
+	u32 proximity_domain;
+};
+
+/* 2: Physical Component Identifier (DIMM) */
+
+struct acpi_pmtt_physical_component {
+	struct acpi_pmtt_header header;
+	u16 component_id;
+	u16 reserved;
+	u32 memory_size;
+	u32 bios_handle;
+};
+
+/*******************************************************************************
+ *
+ * RASF - RAS Feature Table (ACPI 5.0)
+ *        Version 1
+ *
+ ******************************************************************************/
+
+struct acpi_table_rasf {
+	struct acpi_table_header header;	/* Common ACPI table header */
+	u8 channel_id[12];
+};
+
+/* RASF Platform Communication Channel Shared Memory Region */
+
+struct acpi_rasf_shared_memory {
+	u32 signature;
+	u16 command;
+	u16 status;
+	u64 requested_address;
+	u64 requested_length;
+	u64 actual_address;
+	u64 actual_length;
+	u16 flags;
+	u8 speed;
+};
+
+/* Masks for Flags and Speed fields above */
+
+#define ACPI_RASF_SCRUBBER_RUNNING      1
+#define ACPI_RASF_SPEED                 (7<<1)
+
+/* Channel Commands */
+
+enum acpi_rasf_commands {
+	ACPI_RASF_GET_RAS_CAPABILITIES = 1,
+	ACPI_RASF_GET_PATROL_PARAMETERS = 2,
+	ACPI_RASF_START_PATROL_SCRUBBER = 3,
+	ACPI_RASF_STOP_PATROL_SCRUBBER = 4
+};
+
+/* Channel Command flags */
+
+#define ACPI_RASF_GENERATE_SCI          (1<<15)
+
+/* Status values */
+
+enum acpi_rasf_status {
+	ACPI_RASF_SUCCESS = 0,
+	ACPI_RASF_NOT_VALID = 1,
+	ACPI_RASF_NOT_SUPPORTED = 2,
+	ACPI_RASF_BUSY = 3,
+	ACPI_RASF_FAILED = 4,
+	ACPI_RASF_ABORTED = 5,
+	ACPI_RASF_INVALID_DATA = 6
+};
+
+/* Status flags */
+
+#define ACPI_RASF_COMMAND_COMPLETE      (1)
+#define ACPI_RASF_SCI_DOORBELL          (1<<1)
+#define ACPI_RASF_ERROR                 (1<<2)
+#define ACPI_RASF_STATUS                (0x1F<<3)
+
+/* Reset to default packing */
+
+#pragma pack()
+
+#endif				/* __ACTBL3_H__ */
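
To show how the FPDT/S3PT definitions above fit together, here is a minimal
sketch (not part of the patch) that scans a mapped S3 Performance Table for
its resume record; s3pt_average_resume() is a hypothetical helper and no
particular time unit is assumed for the returned value.

#include <acpi/acpi.h>

static u64 s3pt_average_resume(struct acpi_table_s3pt *s3pt)
{
	u8 *p = (u8 *)s3pt + sizeof(*s3pt);
	u8 *end = (u8 *)s3pt + s3pt->length;

	while (p < end) {
		struct acpi_s3pt_header *hdr = (struct acpi_s3pt_header *)p;

		if (hdr->length < sizeof(*hdr))
			break;				/* malformed record */
		if (hdr->type == ACPI_S3PT_TYPE_RESUME)
			return ((struct acpi_s3pt_resume *)hdr)->average_resume;
		p += hdr->length;
	}
	return 0;					/* no resume record found */
}
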
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index ed73f67..d5dee7c 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -712,8 +712,10 @@
 #define ACPI_ADR_SPACE_CMOS             (acpi_adr_space_type) 5
 #define ACPI_ADR_SPACE_PCI_BAR_TARGET   (acpi_adr_space_type) 6
 #define ACPI_ADR_SPACE_IPMI             (acpi_adr_space_type) 7
+#define ACPI_ADR_SPACE_GPIO             (acpi_adr_space_type) 8
+#define ACPI_ADR_SPACE_GSBUS            (acpi_adr_space_type) 9
 
-#define ACPI_NUM_PREDEFINED_REGIONS     8
+#define ACPI_NUM_PREDEFINED_REGIONS     10
 
 /*
  * Special Address Spaces
@@ -957,6 +959,14 @@
 
 #define ACPI_DEFAULT_HANDLER            NULL
 
+/* Special Context data for generic_serial_bus/general_purpose_io (ACPI 5.0) */
+
+struct acpi_connection_info {
+	u8 *connection;
+	u16 length;
+	u8 access_length;
+};
+
 typedef
 acpi_status(*acpi_adr_space_setup) (acpi_handle region_handle,
 				    u32 function,
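
A minimal sketch (not part of the patch) of hooking the new
generic_serial_bus address space: the registration call is the existing
ACPICA acpi_install_address_space_handler() interface, while
my_gsbus_handler()/my_gsbus_setup()/my_gsbus_attach() are hypothetical names
and their bodies are stubs. The struct acpi_connection_info above is expected
to describe the Connection() resource for the region being accessed.

#include <acpi/acpi.h>

static acpi_status
my_gsbus_handler(u32 function, acpi_physical_address address,
		 u32 bit_width, u64 *value,
		 void *handler_context, void *region_context)
{
	/* decode function/address and talk to the serial bus here */
	return AE_OK;
}

static acpi_status my_gsbus_setup(acpi_handle region, u32 function,
				  void *handler_context, void **region_context)
{
	return AE_OK;
}

static acpi_status my_gsbus_attach(acpi_handle adapter)
{
	return acpi_install_address_space_handler(adapter,
						  ACPI_ADR_SPACE_GSBUS,
						  my_gsbus_handler,
						  my_gsbus_setup, NULL);
}
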
diff --git a/include/keys/user-type.h b/include/keys/user-type.h
index c37c342..bc9ec1d 100644
--- a/include/keys/user-type.h
+++ b/include/keys/user-type.h
@@ -17,7 +17,7 @@
 
 /*****************************************************************************/
 /*
- * the payload for a key of type "user"
+ * the payload for a key of type "user" or "logon"
  * - once filled in and attached to a key:
  *   - the payload struct is invariant may not be changed, only replaced
  *   - the payload must be read with RCU procedures or with the key semaphore
@@ -33,6 +33,7 @@
 };
 
 extern struct key_type key_type_user;
+extern struct key_type key_type_logon;
 
 extern int user_instantiate(struct key *key, const void *data, size_t datalen);
 extern int user_update(struct key *key, const void *data, size_t datalen);
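
For reference, a userspace sketch (not part of the patch) of creating a key of
the new "logon" type via libkeyutils; the "service:identifier" description
form and the example payload are assumptions made for illustration. The
defining property of "logon" keys is that the payload can be used by the
kernel but cannot be read back from userspace.

#include <stdio.h>
#include <keyutils.h>

int main(void)
{
	/* description format "service:identifier" is an assumption here;
	 * build with -lkeyutils */
	key_serial_t key = add_key("logon", "example:demo",
				   "secret-payload", 14,
				   KEY_SPEC_SESSION_KEYRING);
	if (key < 0) {
		perror("add_key");
		return 1;
	}
	printf("logon key id: %d\n", key);
	return 0;
}
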
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 627a3a4..3f96866 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -310,6 +310,11 @@
 					     u32 *mask, u32 req);
 extern void acpi_early_init(void);
 
+extern int acpi_nvs_register(__u64 start, __u64 size);
+
+extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *),
+				    void *data);
+
 #else	/* !CONFIG_ACPI */
 
 #define acpi_disabled 1
@@ -352,15 +357,18 @@
 {
 	return -1;
 }
-#endif	/* !CONFIG_ACPI */
 
-#ifdef CONFIG_ACPI_SLEEP
-int suspend_nvs_register(unsigned long start, unsigned long size);
-#else
-static inline int suspend_nvs_register(unsigned long a, unsigned long b)
+static inline int acpi_nvs_register(__u64 start, __u64 size)
 {
 	return 0;
 }
-#endif
+
+static inline int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *),
+					   void *data)
+{
+	return 0;
+}
+
+#endif	/* !CONFIG_ACPI */
 
 #endif	/*_LINUX_ACPI_H*/
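
A minimal sketch (not part of the patch) of the callback shape expected by the
new acpi_nvs_for_each_region() iterator; dump_nvs_region() is a hypothetical
name, and the early-stop-on-non-zero behaviour is an assumption about the
iterator's usual contract.

#include <linux/kernel.h>
#include <linux/acpi.h>

static int dump_nvs_region(__u64 start, __u64 size, void *data)
{
	pr_info("ACPI NVS region: 0x%llx-0x%llx\n",
		(unsigned long long)start,
		(unsigned long long)(start + size - 1));
	return 0;	/* assumed: a non-zero return stops the walk */
}

/* somewhere in init code:  acpi_nvs_for_each_region(dump_nvs_region, NULL); */
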
diff --git a/include/linux/acpi_io.h b/include/linux/acpi_io.h
index 4afd710..b0ffa21 100644
--- a/include/linux/acpi_io.h
+++ b/include/linux/acpi_io.h
@@ -12,4 +12,7 @@
 
 void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size);
 
+int acpi_os_map_generic_address(struct acpi_generic_address *addr);
+void acpi_os_unmap_generic_address(struct acpi_generic_address *addr);
+
 #endif
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 23f81de..712abcc 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -186,7 +186,14 @@
 extern int cpuidle_register_governor(struct cpuidle_governor *gov);
 extern void cpuidle_unregister_governor(struct cpuidle_governor *gov);
 
+#ifdef CONFIG_INTEL_IDLE
+extern int intel_idle_cpu_init(int cpu);
 #else
+static inline int intel_idle_cpu_init(int cpu) { return -1; }
+#endif
+
+#else
+static inline int intel_idle_cpu_init(int cpu) { return -1; }
 
 static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
 {return 0;}
diff --git a/include/linux/device.h b/include/linux/device.h
index 5b3adb8..b63fb39 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -279,11 +279,11 @@
 
 /**
  * struct subsys_interface - interfaces to device functions
- * @name        name of the device function
- * @subsystem   subsytem of the devices to attach to
- * @node        the list of functions registered at the subsystem
- * @add         device hookup to device function handler
- * @remove      device hookup to device function handler
+ * @name:       name of the device function
+ * @subsys:     subsytem of the devices to attach to
+ * @node:       the list of functions registered at the subsystem
+ * @add_dev:    device hookup to device function handler
+ * @remove_dev: device hookup to device function handler
  *
  * Simple interfaces attached to a subsystem. Multiple interfaces can
  * attach to a subsystem and its devices. Unlike drivers, they do not
@@ -612,6 +612,7 @@
  * @archdata:	For arch-specific additions.
  * @of_node:	Associated device tree node.
  * @devt:	For creating the sysfs "dev".
+ * @id:		device instance
  * @devres_lock: Spinlock to protect the resource of the device.
  * @devres_head: The resources list of the device.
  * @knode_class: The node used to add the device to the class list.
@@ -1003,6 +1004,10 @@
  * Each module may only use this macro once, and calling it replaces
  * module_init() and module_exit().
  *
+ * @__driver: driver name
+ * @__register: register function for this driver type
+ * @__unregister: unregister function for this driver type
+ *
  * Use this macro to construct bus specific macros for registering
  * drivers, and do not use it on its own.
  */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 0244082..386da09 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -396,6 +396,7 @@
 #include <linux/rculist_bl.h>
 #include <linux/atomic.h>
 #include <linux/shrinker.h>
+#include <linux/migrate_mode.h>
 
 #include <asm/byteorder.h>
 
@@ -526,7 +527,6 @@
 struct page;
 struct address_space;
 struct writeback_control;
-enum migrate_mode;
 
 struct iov_iter {
 	const struct iovec *iov;
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 2fa0901..0d7d6a1 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -50,9 +50,11 @@
  * note header.  For kdump, the code in vmcore.c runs in the context
  * of the second kernel to combine them into one note.
  */
+#ifndef KEXEC_NOTE_BYTES
 #define KEXEC_NOTE_BYTES ( (KEXEC_NOTE_HEAD_BYTES * 2) +		\
 			    KEXEC_CORE_NOTE_NAME_BYTES +		\
 			    KEXEC_CORE_NOTE_DESC_BYTES )
+#endif
 
 /*
  * This structure is used to hold the arguments that are used when loading
diff --git a/include/linux/key.h b/include/linux/key.h
index bfc014c..5253471 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -271,7 +271,7 @@
 
 extern struct key *key_lookup(key_serial_t id);
 
-static inline key_serial_t key_serial(struct key *key)
+static inline key_serial_t key_serial(const struct key *key)
 {
 	return key ? key->serial : 0;
 }
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index eaf8674..05ed282 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -3,22 +3,10 @@
 
 #include <linux/mm.h>
 #include <linux/mempolicy.h>
+#include <linux/migrate_mode.h>
 
 typedef struct page *new_page_t(struct page *, unsigned long private, int **);
 
-/*
- * MIGRATE_ASYNC means never block
- * MIGRATE_SYNC_LIGHT in the current implementation means to allow blocking
- *	on most operations but not ->writepage as the potential stall time
- *	is too significant
- * MIGRATE_SYNC will block when migrating pages
- */
-enum migrate_mode {
-	MIGRATE_ASYNC,
-	MIGRATE_SYNC_LIGHT,
-	MIGRATE_SYNC,
-};
-
 #ifdef CONFIG_MIGRATION
 #define PAGE_MIGRATION 1
 
diff --git a/include/linux/migrate_mode.h b/include/linux/migrate_mode.h
new file mode 100644
index 0000000..ebf3d89
--- /dev/null
+++ b/include/linux/migrate_mode.h
@@ -0,0 +1,16 @@
+#ifndef MIGRATE_MODE_H_INCLUDED
+#define MIGRATE_MODE_H_INCLUDED
+/*
+ * MIGRATE_ASYNC means never block
+ * MIGRATE_SYNC_LIGHT in the current implementation means to allow blocking
+ *	on most operations but not ->writepage as the potential stall time
+ *	is too significant
+ * MIGRATE_SYNC will block when migrating pages
+ */
+enum migrate_mode {
+	MIGRATE_ASYNC,
+	MIGRATE_SYNC_LIGHT,
+	MIGRATE_SYNC,
+};
+
+#endif		/* MIGRATE_MODE_H_INCLUDED */
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
new file mode 100644
index 0000000..9490a00
--- /dev/null
+++ b/include/linux/nvme.h
@@ -0,0 +1,434 @@
+/*
+ * Definitions for the NVM Express interface
+ * Copyright (c) 2011, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _LINUX_NVME_H
+#define _LINUX_NVME_H
+
+#include <linux/types.h>
+
+struct nvme_bar {
+	__u64			cap;	/* Controller Capabilities */
+	__u32			vs;	/* Version */
+	__u32			intms;	/* Interrupt Mask Set */
+	__u32			intmc;	/* Interrupt Mask Clear */
+	__u32			cc;	/* Controller Configuration */
+	__u32			rsvd1;	/* Reserved */
+	__u32			csts;	/* Controller Status */
+	__u32			rsvd2;	/* Reserved */
+	__u32			aqa;	/* Admin Queue Attributes */
+	__u64			asq;	/* Admin SQ Base Address */
+	__u64			acq;	/* Admin CQ Base Address */
+};
+
+#define NVME_CAP_TIMEOUT(cap)	(((cap) >> 24) & 0xff)
+#define NVME_CAP_STRIDE(cap)	(((cap) >> 32) & 0xf)
+
+enum {
+	NVME_CC_ENABLE		= 1 << 0,
+	NVME_CC_CSS_NVM		= 0 << 4,
+	NVME_CC_MPS_SHIFT	= 7,
+	NVME_CC_ARB_RR		= 0 << 11,
+	NVME_CC_ARB_WRRU	= 1 << 11,
+	NVME_CC_ARB_VS		= 7 << 11,
+	NVME_CC_SHN_NONE	= 0 << 14,
+	NVME_CC_SHN_NORMAL	= 1 << 14,
+	NVME_CC_SHN_ABRUPT	= 2 << 14,
+	NVME_CC_IOSQES		= 6 << 16,
+	NVME_CC_IOCQES		= 4 << 20,
+	NVME_CSTS_RDY		= 1 << 0,
+	NVME_CSTS_CFS		= 1 << 1,
+	NVME_CSTS_SHST_NORMAL	= 0 << 2,
+	NVME_CSTS_SHST_OCCUR	= 1 << 2,
+	NVME_CSTS_SHST_CMPLT	= 2 << 2,
+};
+
+struct nvme_id_power_state {
+	__le16			max_power;	/* centiwatts */
+	__u16			rsvd2;
+	__le32			entry_lat;	/* microseconds */
+	__le32			exit_lat;	/* microseconds */
+	__u8			read_tput;
+	__u8			read_lat;
+	__u8			write_tput;
+	__u8			write_lat;
+	__u8			rsvd16[16];
+};
+
+#define NVME_VS(major, minor)	(major << 16 | minor)
+
+struct nvme_id_ctrl {
+	__le16			vid;
+	__le16			ssvid;
+	char			sn[20];
+	char			mn[40];
+	char			fr[8];
+	__u8			rab;
+	__u8			ieee[3];
+	__u8			mic;
+	__u8			mdts;
+	__u8			rsvd78[178];
+	__le16			oacs;
+	__u8			acl;
+	__u8			aerl;
+	__u8			frmw;
+	__u8			lpa;
+	__u8			elpe;
+	__u8			npss;
+	__u8			rsvd264[248];
+	__u8			sqes;
+	__u8			cqes;
+	__u8			rsvd514[2];
+	__le32			nn;
+	__le16			oncs;
+	__le16			fuses;
+	__u8			fna;
+	__u8			vwc;
+	__le16			awun;
+	__le16			awupf;
+	__u8			rsvd530[1518];
+	struct nvme_id_power_state	psd[32];
+	__u8			vs[1024];
+};
+
+struct nvme_lbaf {
+	__le16			ms;
+	__u8			ds;
+	__u8			rp;
+};
+
+struct nvme_id_ns {
+	__le64			nsze;
+	__le64			ncap;
+	__le64			nuse;
+	__u8			nsfeat;
+	__u8			nlbaf;
+	__u8			flbas;
+	__u8			mc;
+	__u8			dpc;
+	__u8			dps;
+	__u8			rsvd30[98];
+	struct nvme_lbaf	lbaf[16];
+	__u8			rsvd192[192];
+	__u8			vs[3712];
+};
+
+enum {
+	NVME_NS_FEAT_THIN	= 1 << 0,
+	NVME_LBAF_RP_BEST	= 0,
+	NVME_LBAF_RP_BETTER	= 1,
+	NVME_LBAF_RP_GOOD	= 2,
+	NVME_LBAF_RP_DEGRADED	= 3,
+};
+
+struct nvme_lba_range_type {
+	__u8			type;
+	__u8			attributes;
+	__u8			rsvd2[14];
+	__u64			slba;
+	__u64			nlb;
+	__u8			guid[16];
+	__u8			rsvd48[16];
+};
+
+enum {
+	NVME_LBART_TYPE_FS	= 0x01,
+	NVME_LBART_TYPE_RAID	= 0x02,
+	NVME_LBART_TYPE_CACHE	= 0x03,
+	NVME_LBART_TYPE_SWAP	= 0x04,
+
+	NVME_LBART_ATTRIB_TEMP	= 1 << 0,
+	NVME_LBART_ATTRIB_HIDE	= 1 << 1,
+};
+
+/* I/O commands */
+
+enum nvme_opcode {
+	nvme_cmd_flush		= 0x00,
+	nvme_cmd_write		= 0x01,
+	nvme_cmd_read		= 0x02,
+	nvme_cmd_write_uncor	= 0x04,
+	nvme_cmd_compare	= 0x05,
+	nvme_cmd_dsm		= 0x09,
+};
+
+struct nvme_common_command {
+	__u8			opcode;
+	__u8			flags;
+	__u16			command_id;
+	__le32			nsid;
+	__u32			cdw2[2];
+	__le64			metadata;
+	__le64			prp1;
+	__le64			prp2;
+	__u32			cdw10[6];
+};
+
+struct nvme_rw_command {
+	__u8			opcode;
+	__u8			flags;
+	__u16			command_id;
+	__le32			nsid;
+	__u64			rsvd2;
+	__le64			metadata;
+	__le64			prp1;
+	__le64			prp2;
+	__le64			slba;
+	__le16			length;
+	__le16			control;
+	__le32			dsmgmt;
+	__le32			reftag;
+	__le16			apptag;
+	__le16			appmask;
+};
+
+enum {
+	NVME_RW_LR			= 1 << 15,
+	NVME_RW_FUA			= 1 << 14,
+	NVME_RW_DSM_FREQ_UNSPEC		= 0,
+	NVME_RW_DSM_FREQ_TYPICAL	= 1,
+	NVME_RW_DSM_FREQ_RARE		= 2,
+	NVME_RW_DSM_FREQ_READS		= 3,
+	NVME_RW_DSM_FREQ_WRITES		= 4,
+	NVME_RW_DSM_FREQ_RW		= 5,
+	NVME_RW_DSM_FREQ_ONCE		= 6,
+	NVME_RW_DSM_FREQ_PREFETCH	= 7,
+	NVME_RW_DSM_FREQ_TEMP		= 8,
+	NVME_RW_DSM_LATENCY_NONE	= 0 << 4,
+	NVME_RW_DSM_LATENCY_IDLE	= 1 << 4,
+	NVME_RW_DSM_LATENCY_NORM	= 2 << 4,
+	NVME_RW_DSM_LATENCY_LOW		= 3 << 4,
+	NVME_RW_DSM_SEQ_REQ		= 1 << 6,
+	NVME_RW_DSM_COMPRESSED		= 1 << 7,
+};
+
+/* Admin commands */
+
+enum nvme_admin_opcode {
+	nvme_admin_delete_sq		= 0x00,
+	nvme_admin_create_sq		= 0x01,
+	nvme_admin_get_log_page		= 0x02,
+	nvme_admin_delete_cq		= 0x04,
+	nvme_admin_create_cq		= 0x05,
+	nvme_admin_identify		= 0x06,
+	nvme_admin_abort_cmd		= 0x08,
+	nvme_admin_set_features		= 0x09,
+	nvme_admin_get_features		= 0x0a,
+	nvme_admin_async_event		= 0x0c,
+	nvme_admin_activate_fw		= 0x10,
+	nvme_admin_download_fw		= 0x11,
+	nvme_admin_format_nvm		= 0x80,
+	nvme_admin_security_send	= 0x81,
+	nvme_admin_security_recv	= 0x82,
+};
+
+enum {
+	NVME_QUEUE_PHYS_CONTIG	= (1 << 0),
+	NVME_CQ_IRQ_ENABLED	= (1 << 1),
+	NVME_SQ_PRIO_URGENT	= (0 << 1),
+	NVME_SQ_PRIO_HIGH	= (1 << 1),
+	NVME_SQ_PRIO_MEDIUM	= (2 << 1),
+	NVME_SQ_PRIO_LOW	= (3 << 1),
+	NVME_FEAT_ARBITRATION	= 0x01,
+	NVME_FEAT_POWER_MGMT	= 0x02,
+	NVME_FEAT_LBA_RANGE	= 0x03,
+	NVME_FEAT_TEMP_THRESH	= 0x04,
+	NVME_FEAT_ERR_RECOVERY	= 0x05,
+	NVME_FEAT_VOLATILE_WC	= 0x06,
+	NVME_FEAT_NUM_QUEUES	= 0x07,
+	NVME_FEAT_IRQ_COALESCE	= 0x08,
+	NVME_FEAT_IRQ_CONFIG	= 0x09,
+	NVME_FEAT_WRITE_ATOMIC	= 0x0a,
+	NVME_FEAT_ASYNC_EVENT	= 0x0b,
+	NVME_FEAT_SW_PROGRESS	= 0x0c,
+};
+
+struct nvme_identify {
+	__u8			opcode;
+	__u8			flags;
+	__u16			command_id;
+	__le32			nsid;
+	__u64			rsvd2[2];
+	__le64			prp1;
+	__le64			prp2;
+	__le32			cns;
+	__u32			rsvd11[5];
+};
+
+struct nvme_features {
+	__u8			opcode;
+	__u8			flags;
+	__u16			command_id;
+	__le32			nsid;
+	__u64			rsvd2[2];
+	__le64			prp1;
+	__le64			prp2;
+	__le32			fid;
+	__le32			dword11;
+	__u32			rsvd12[4];
+};
+
+struct nvme_create_cq {
+	__u8			opcode;
+	__u8			flags;
+	__u16			command_id;
+	__u32			rsvd1[5];
+	__le64			prp1;
+	__u64			rsvd8;
+	__le16			cqid;
+	__le16			qsize;
+	__le16			cq_flags;
+	__le16			irq_vector;
+	__u32			rsvd12[4];
+};
+
+struct nvme_create_sq {
+	__u8			opcode;
+	__u8			flags;
+	__u16			command_id;
+	__u32			rsvd1[5];
+	__le64			prp1;
+	__u64			rsvd8;
+	__le16			sqid;
+	__le16			qsize;
+	__le16			sq_flags;
+	__le16			cqid;
+	__u32			rsvd12[4];
+};
+
+struct nvme_delete_queue {
+	__u8			opcode;
+	__u8			flags;
+	__u16			command_id;
+	__u32			rsvd1[9];
+	__le16			qid;
+	__u16			rsvd10;
+	__u32			rsvd11[5];
+};
+
+struct nvme_download_firmware {
+	__u8			opcode;
+	__u8			flags;
+	__u16			command_id;
+	__u32			rsvd1[5];
+	__le64			prp1;
+	__le64			prp2;
+	__le32			numd;
+	__le32			offset;
+	__u32			rsvd12[4];
+};
+
+struct nvme_command {
+	union {
+		struct nvme_common_command common;
+		struct nvme_rw_command rw;
+		struct nvme_identify identify;
+		struct nvme_features features;
+		struct nvme_create_cq create_cq;
+		struct nvme_create_sq create_sq;
+		struct nvme_delete_queue delete_queue;
+		struct nvme_download_firmware dlfw;
+	};
+};
+
+enum {
+	NVME_SC_SUCCESS			= 0x0,
+	NVME_SC_INVALID_OPCODE		= 0x1,
+	NVME_SC_INVALID_FIELD		= 0x2,
+	NVME_SC_CMDID_CONFLICT		= 0x3,
+	NVME_SC_DATA_XFER_ERROR		= 0x4,
+	NVME_SC_POWER_LOSS		= 0x5,
+	NVME_SC_INTERNAL		= 0x6,
+	NVME_SC_ABORT_REQ		= 0x7,
+	NVME_SC_ABORT_QUEUE		= 0x8,
+	NVME_SC_FUSED_FAIL		= 0x9,
+	NVME_SC_FUSED_MISSING		= 0xa,
+	NVME_SC_INVALID_NS		= 0xb,
+	NVME_SC_LBA_RANGE		= 0x80,
+	NVME_SC_CAP_EXCEEDED		= 0x81,
+	NVME_SC_NS_NOT_READY		= 0x82,
+	NVME_SC_CQ_INVALID		= 0x100,
+	NVME_SC_QID_INVALID		= 0x101,
+	NVME_SC_QUEUE_SIZE		= 0x102,
+	NVME_SC_ABORT_LIMIT		= 0x103,
+	NVME_SC_ABORT_MISSING		= 0x104,
+	NVME_SC_ASYNC_LIMIT		= 0x105,
+	NVME_SC_FIRMWARE_SLOT		= 0x106,
+	NVME_SC_FIRMWARE_IMAGE		= 0x107,
+	NVME_SC_INVALID_VECTOR		= 0x108,
+	NVME_SC_INVALID_LOG_PAGE	= 0x109,
+	NVME_SC_INVALID_FORMAT		= 0x10a,
+	NVME_SC_BAD_ATTRIBUTES		= 0x180,
+	NVME_SC_WRITE_FAULT		= 0x280,
+	NVME_SC_READ_ERROR		= 0x281,
+	NVME_SC_GUARD_CHECK		= 0x282,
+	NVME_SC_APPTAG_CHECK		= 0x283,
+	NVME_SC_REFTAG_CHECK		= 0x284,
+	NVME_SC_COMPARE_FAILED		= 0x285,
+	NVME_SC_ACCESS_DENIED		= 0x286,
+};
+
+struct nvme_completion {
+	__le32	result;		/* Used by admin commands to return data */
+	__u32	rsvd;
+	__le16	sq_head;	/* how much of this queue may be reclaimed */
+	__le16	sq_id;		/* submission queue that generated this entry */
+	__u16	command_id;	/* of the command which completed */
+	__le16	status;		/* did the command fail, and if so, why? */
+};
+
+struct nvme_user_io {
+	__u8	opcode;
+	__u8	flags;
+	__u16	control;
+	__u16	nblocks;
+	__u16	rsvd;
+	__u64	metadata;
+	__u64	addr;
+	__u64	slba;
+	__u32	dsmgmt;
+	__u32	reftag;
+	__u16	apptag;
+	__u16	appmask;
+};
+
+struct nvme_admin_cmd {
+	__u8	opcode;
+	__u8	flags;
+	__u16	rsvd1;
+	__u32	nsid;
+	__u32	cdw2;
+	__u32	cdw3;
+	__u64	metadata;
+	__u64	addr;
+	__u32	metadata_len;
+	__u32	data_len;
+	__u32	cdw10;
+	__u32	cdw11;
+	__u32	cdw12;
+	__u32	cdw13;
+	__u32	cdw14;
+	__u32	cdw15;
+	__u32	timeout_ms;
+	__u32	result;
+};
+
+#define NVME_IOCTL_ID		_IO('N', 0x40)
+#define NVME_IOCTL_ADMIN_CMD	_IOWR('N', 0x41, struct nvme_admin_cmd)
+#define NVME_IOCTL_SUBMIT_IO	_IOW('N', 0x42, struct nvme_user_io)
+
+#endif /* _LINUX_NVME_H */
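
As a usage illustration (not part of the patch), a small userspace program can
drive the new NVME_IOCTL_ADMIN_CMD interface to issue an Identify Controller
command. The device node path, the CNS value of 1, the 4096-byte identify
buffer and the availability of this header to userspace are assumptions based
on the NVMe specification rather than anything stated in the patch.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/nvme.h>

int main(void)
{
	struct nvme_id_ctrl *id;
	struct nvme_admin_cmd cmd;
	int fd, ret;

	fd = open("/dev/nvme0n1", O_RDWR);	/* assumed device node */
	if (fd < 0)
		return 1;

	id = calloc(1, sizeof(*id));		/* 4096-byte identify buffer */
	if (!id)
		return 1;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = nvme_admin_identify;
	cmd.addr = (__u64)(uintptr_t)id;
	cmd.data_len = sizeof(*id);
	cmd.cdw10 = 1;				/* CNS=1: identify controller */

	ret = ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd);
	if (!ret)
		printf("model: %.40s serial: %.20s\n", id->mn, id->sn);

	free(id);
	close(fd);
	return ret ? 1 : 0;
}
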
diff --git a/include/linux/quota.h b/include/linux/quota.h
index cb78556..c09fa04 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -230,7 +230,11 @@
 struct super_block;
 
 #define DQF_MASK 0xffff		/* Mask for format specific flags */
-#define DQF_INFO_DIRTY_B 16
+#define DQF_GETINFO_MASK 0x1ffff	/* Mask for flags passed to userspace */
+#define DQF_SETINFO_MASK 0xffff		/* Mask for flags modifiable from userspace */
+#define DQF_SYS_FILE_B		16
+#define DQF_SYS_FILE (1 << DQF_SYS_FILE_B)	/* Quota file stored as system file */
+#define DQF_INFO_DIRTY_B	31
 #define DQF_INFO_DIRTY (1 << DQF_INFO_DIRTY_B)	/* Is info dirty? */
 
 extern void mark_info_dirty(struct super_block *sb, int type);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4032ec1..513f524 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2088,7 +2088,7 @@
 extern struct task_struct *idle_task(int cpu);
 /**
  * is_idle_task - is the specified task an idle task?
- * @tsk: the task in question.
+ * @p: the task in question.
  */
 static inline bool is_idle_task(struct task_struct *p)
 {
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index e4c711c..79ab255 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -48,6 +48,7 @@
 					loff_t size, unsigned long flags);
 extern int shmem_zero_setup(struct vm_area_struct *);
 extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
+extern void shmem_unlock_mapping(struct address_space *mapping);
 extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
 					pgoff_t index, gfp_t gfp_mask);
 extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 95040cc..91784a4 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -357,14 +357,29 @@
 
 static inline void lock_system_sleep(void)
 {
-	freezer_do_not_count();
+	current->flags |= PF_FREEZER_SKIP;
 	mutex_lock(&pm_mutex);
 }
 
 static inline void unlock_system_sleep(void)
 {
+	/*
+	 * Don't use freezer_count() because we don't want the call to
+	 * try_to_freeze() here.
+	 *
+	 * Reason:
+	 * Fundamentally, we just don't need it, because freezing condition
+	 * doesn't come into effect until we release the pm_mutex lock,
+	 * since the freezer always works with pm_mutex held.
+	 *
+	 * More importantly, in the case of hibernation,
+	 * unlock_system_sleep() gets called in snapshot_read() and
+	 * snapshot_write() when the freezing condition is still in effect.
+	 * Which means, if we use try_to_freeze() here, it would make them
+	 * enter the refrigerator, thus causing hibernation to lockup.
+	 */
+	current->flags &= ~PF_FREEZER_SKIP;
 	mutex_unlock(&pm_mutex);
-	freezer_count();
 }
 
 #else /* !CONFIG_PM_SLEEP */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 06061a7..3e60228 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -273,7 +273,7 @@
 #endif
 
 extern int page_evictable(struct page *page, struct vm_area_struct *vma);
-extern void scan_mapping_unevictable_pages(struct address_space *);
+extern void check_move_unevictable_pages(struct page **, int nr_pages);
 
 extern unsigned long scan_unevictable_pages;
 extern int scan_unevictable_handler(struct ctl_table *, int,
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 27a4e16..69d8457 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -1073,6 +1073,7 @@
  *	which the host controller driver should use in preference to the
  *	transfer_buffer.
  * @sg: scatter gather buffer list
+ * @num_mapped_sgs: (internal) number of mapped sg entries
  * @num_sgs: number of entries in the sg list
  * @transfer_buffer_length: How big is transfer_buffer.  The transfer may
  *	be broken up into chunks according to the current maximum packet
diff --git a/include/media/tuner.h b/include/media/tuner.h
index 89c290b..29e1920 100644
--- a/include/media/tuner.h
+++ b/include/media/tuner.h
@@ -127,7 +127,6 @@
 #define TUNER_PHILIPS_FMD1216MEX_MK3	78
 #define TUNER_PHILIPS_FM1216MK5		79
 #define TUNER_PHILIPS_FQ1216LME_MK3	80	/* Active loopthrough, no FM */
-#define TUNER_XC4000			81	/* Xceive Silicon Tuner */
 
 #define TUNER_PARTSNIC_PTI_5NF05	81
 #define TUNER_PHILIPS_CU1216L           82
@@ -136,6 +135,8 @@
 #define TUNER_PHILIPS_FQ1236_MK5	85	/* NTSC, TDA9885, no FM radio */
 #define TUNER_TENA_TNF_5337		86
 
+#define TUNER_XC4000			87	/* Xceive Silicon Tuner */
+
 /* tv card specific */
 #define TDA9887_PRESENT 		(1<<0)
 #define TDA9887_PORT1_INACTIVE 		(1<<1)
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 15f4be7..a067d30 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -1140,6 +1140,7 @@
  * @bssid: Fixed BSSID requested, maybe be %NULL, if set do not
  *	search for IBSSs with a different BSSID.
  * @channel: The channel to use if no IBSS can be found to join.
+ * @channel_type: channel type (HT mode)
  * @channel_fixed: The channel should be fixed -- do not search for
  *	IBSSs to join on other channels.
  * @ie: information element(s) to include in the beacon
@@ -1978,6 +1979,11 @@
  *	configured as RX antennas. Antenna configuration commands will be
  *	rejected unless this or @available_antennas_tx is set.
  *
+ * @probe_resp_offload:
+ *	 Bitmap of supported protocols for probe response offloading.
+ *	 See &enum nl80211_probe_resp_offload_support_attr. Only valid
+ *	 when the wiphy flag @WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD is set.
+ *
  * @max_remain_on_channel_duration: Maximum time a remain-on-channel operation
  *	may request, if implemented.
  *
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index 5d1a758..6a3922f 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -857,7 +857,7 @@
 	enum fc_lport_state	       state;
 	unsigned long		       boot_time;
 	struct fc_host_statistics      host_stats;
-	struct fcoe_dev_stats	       *dev_stats;
+	struct fcoe_dev_stats __percpu *dev_stats;
 	u8			       retry_count;
 
 	/* Fabric information */
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
new file mode 100644
index 0000000..4866499
--- /dev/null
+++ b/include/target/target_core_backend.h
@@ -0,0 +1,65 @@
+#ifndef TARGET_CORE_BACKEND_H
+#define TARGET_CORE_BACKEND_H
+
+#define TRANSPORT_PLUGIN_PHBA_PDEV		1
+#define TRANSPORT_PLUGIN_VHBA_PDEV		2
+#define TRANSPORT_PLUGIN_VHBA_VDEV		3
+
+struct se_subsystem_api {
+	struct list_head sub_api_list;
+
+	char name[16];
+	struct module *owner;
+
+	u8 transport_type;
+
+	unsigned int fua_write_emulated : 1;
+	unsigned int write_cache_emulated : 1;
+
+	int (*attach_hba)(struct se_hba *, u32);
+	void (*detach_hba)(struct se_hba *);
+	int (*pmode_enable_hba)(struct se_hba *, unsigned long);
+	void *(*allocate_virtdevice)(struct se_hba *, const char *);
+	struct se_device *(*create_virtdevice)(struct se_hba *,
+				struct se_subsystem_dev *, void *);
+	void (*free_device)(void *);
+	int (*transport_complete)(struct se_task *task);
+	struct se_task *(*alloc_task)(unsigned char *cdb);
+	int (*do_task)(struct se_task *);
+	int (*do_discard)(struct se_device *, sector_t, u32);
+	void (*do_sync_cache)(struct se_task *);
+	void (*free_task)(struct se_task *);
+	ssize_t (*check_configfs_dev_params)(struct se_hba *,
+			struct se_subsystem_dev *);
+	ssize_t (*set_configfs_dev_params)(struct se_hba *,
+			struct se_subsystem_dev *, const char *, ssize_t);
+	ssize_t (*show_configfs_dev_params)(struct se_hba *,
+			struct se_subsystem_dev *, char *);
+	u32 (*get_device_rev)(struct se_device *);
+	u32 (*get_device_type)(struct se_device *);
+	sector_t (*get_blocks)(struct se_device *);
+	unsigned char *(*get_sense_buffer)(struct se_task *);
+};
+
+int	transport_subsystem_register(struct se_subsystem_api *);
+void	transport_subsystem_release(struct se_subsystem_api *);
+
+struct se_device *transport_add_device_to_core_hba(struct se_hba *,
+		struct se_subsystem_api *, struct se_subsystem_dev *, u32,
+		void *, struct se_dev_limits *, const char *, const char *);
+
+void	transport_complete_sync_cache(struct se_cmd *, int);
+void	transport_complete_task(struct se_task *, int);
+
+void	target_get_task_cdb(struct se_task *, unsigned char *);
+
+void	transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *);
+int	transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);
+int	transport_set_vpd_ident_type(struct t10_vpd *, unsigned char *);
+int	transport_set_vpd_ident(struct t10_vpd *, unsigned char *);
+
+/* core helpers also used by command snooping in pscsi */
+void	*transport_kmap_first_data_page(struct se_cmd *);
+void	transport_kunmap_first_data_page(struct se_cmd *);
+
+#endif /* TARGET_CORE_BACKEND_H */
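
To show what a consumer of the split-out backend header looks like, here is a
minimal module skeleton (not part of the patch); the "example" names are
hypothetical and every callback a real backend must implement is elided.

#include <linux/module.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>

static struct se_subsystem_api example_template = {
	.name			= "example",
	.owner			= THIS_MODULE,
	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
	/* .attach_hba, .alloc_task, .do_task, ... filled in by a real backend */
};

static int __init example_module_init(void)
{
	return transport_subsystem_register(&example_template);
}

static void __exit example_module_exit(void)
{
	transport_subsystem_release(&example_template);
}

module_init(example_module_init);
module_exit(example_module_exit);
MODULE_LICENSE("GPL");
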
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 6873c7d..daf532b 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -10,6 +10,7 @@
 #include <net/tcp.h>
 
 #define TARGET_CORE_MOD_VERSION		"v4.1.0-rc1-ml"
+#define TARGET_CORE_VERSION		TARGET_CORE_MOD_VERSION
 
 /* Maximum Number of LUNs per Target Portal Group */
 /* Don't raise above 511 or REPORT_LUNS needs to handle >1 page */
@@ -34,6 +35,7 @@
 #define TRANSPORT_SENSE_BUFFER			SCSI_SENSE_BUFFERSIZE
 /* Used by transport_send_check_condition_and_sense() */
 #define SPC_SENSE_KEY_OFFSET			2
+#define SPC_ADD_SENSE_LEN_OFFSET		7
 #define SPC_ASC_KEY_OFFSET			12
 #define SPC_ASCQ_KEY_OFFSET			13
 #define TRANSPORT_IQN_LEN			224
@@ -53,6 +55,72 @@
 /* Used by transport_get_inquiry_vpd_device_ident() */
 #define INQUIRY_VPD_DEVICE_IDENTIFIER_LEN	254
 
+/* Attempts before moving from SHORT to LONG */
+#define PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD	3
+#define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT	3  /* In milliseconds */
+#define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG	10 /* In milliseconds */
+
+#define PYX_TRANSPORT_STATUS_INTERVAL		5 /* In seconds */
+
+/*
+ * struct se_subsystem_dev->su_dev_flags
+*/
+#define SDF_FIRMWARE_VPD_UNIT_SERIAL		0x00000001
+#define SDF_EMULATED_VPD_UNIT_SERIAL		0x00000002
+#define SDF_USING_UDEV_PATH			0x00000004
+#define SDF_USING_ALIAS				0x00000008
+
+/*
+ * struct se_device->dev_flags
+ */
+#define DF_READ_ONLY				0x00000001
+#define DF_SPC2_RESERVATIONS			0x00000002
+#define DF_SPC2_RESERVATIONS_WITH_ISID		0x00000004
+
+/* struct se_dev_attrib sanity values */
+/* Default max_unmap_lba_count */
+#define DA_MAX_UNMAP_LBA_COUNT			0
+/* Default max_unmap_block_desc_count */
+#define DA_MAX_UNMAP_BLOCK_DESC_COUNT		0
+/* Default unmap_granularity */
+#define DA_UNMAP_GRANULARITY_DEFAULT		0
+/* Default unmap_granularity_alignment */
+#define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT	0
+/* Emulation for Direct Page Out */
+#define DA_EMULATE_DPO				0
+/* Emulation for Forced Unit Access WRITEs */
+#define DA_EMULATE_FUA_WRITE			1
+/* Emulation for Forced Unit Access READs */
+#define DA_EMULATE_FUA_READ			0
+/* Emulation for WriteCache and SYNCHRONIZE_CACHE */
+#define DA_EMULATE_WRITE_CACHE			0
+/* Emulation for UNIT ATTENTION Interlock Control */
+#define DA_EMULATE_UA_INTLLCK_CTRL		0
+/* Emulation for TASK_ABORTED status (TAS) by default */
+#define DA_EMULATE_TAS				1
+/* Emulation for Thin Provisioning UNMAP using block/blk-lib.c:blkdev_issue_discard() */
+#define DA_EMULATE_TPU				0
+/*
+ * Emulation for Thin Provisioning WRITE_SAME w/ UNMAP=1 bit using
+ * block/blk-lib.c:blkdev_issue_discard()
+ */
+#define DA_EMULATE_TPWS				0
+/* No Emulation for PSCSI by default */
+#define DA_EMULATE_RESERVATIONS			0
+/* No Emulation for PSCSI by default */
+#define DA_EMULATE_ALUA				0
+/* Enforce SCSI Initiator Port TransportID with 'ISID' for PR */
+#define DA_ENFORCE_PR_ISIDS			1
+#define DA_STATUS_MAX_SECTORS_MIN		16
+#define DA_STATUS_MAX_SECTORS_MAX		8192
+/* By default don't report non-rotating (solid state) medium */
+#define DA_IS_NONROT				0
+/* Queue Algorithm Modifier default for restricted reordering in control mode page */
+#define DA_EMULATE_REST_REORD			0
+
+#define SE_MODE_PAGE_BUF			512
+
+
 /* struct se_hba->hba_flags */
 enum hba_flags_table {
 	HBA_FLAGS_INTERNAL_USE	= 0x01,
@@ -71,11 +139,12 @@
 	TRANSPORT_TPG_TYPE_DISCOVERY = 1,
 };
 
-/* Used for generate timer flags */
+/* struct se_task->task_flags */
 enum se_task_flags {
 	TF_ACTIVE		= (1 << 0),
 	TF_SENT			= (1 << 1),
 	TF_REQUEST_STOP		= (1 << 2),
+	TF_HAS_SENSE		= (1 << 3),
 };
 
 /* Special transport agnostic struct se_cmd->t_states */
@@ -158,9 +227,38 @@
 	TCM_RESERVATION_CONFLICT		= 0x10,
 };
 
+enum target_sc_flags_table {
+	TARGET_SCF_BIDI_OP		= 0x01,
+	TARGET_SCF_ACK_KREF		= 0x02,
+};
+
+/* fabric independent task management function values */
+enum tcm_tmreq_table {
+	TMR_ABORT_TASK		= 1,
+	TMR_ABORT_TASK_SET	= 2,
+	TMR_CLEAR_ACA		= 3,
+	TMR_CLEAR_TASK_SET	= 4,
+	TMR_LUN_RESET		= 5,
+	TMR_TARGET_WARM_RESET	= 6,
+	TMR_TARGET_COLD_RESET	= 7,
+	TMR_FABRIC_TMR		= 255,
+};
+
+/* fabric independent task management response values */
+enum tcm_tmrsp_table {
+	TMR_FUNCTION_COMPLETE		= 0,
+	TMR_TASK_DOES_NOT_EXIST		= 1,
+	TMR_LUN_DOES_NOT_EXIST		= 2,
+	TMR_TASK_STILL_ALLEGIANT	= 3,
+	TMR_TASK_FAILOVER_NOT_SUPPORTED	= 4,
+	TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED	= 5,
+	TMR_FUNCTION_AUTHORIZATION_FAILED = 6,
+	TMR_FUNCTION_REJECTED		= 255,
+};
+
 struct se_obj {
 	atomic_t obj_access_count;
-} ____cacheline_aligned;
+};
 
 /*
  * Used by TCM Core internally to signal if ALUA emulation is enabled or
@@ -207,7 +305,7 @@
 	struct config_group alua_tg_pt_gps_group;
 	int (*alua_state_check)(struct se_cmd *, unsigned char *, u8 *);
 	struct list_head tg_pt_gps_list;
-} ____cacheline_aligned;
+};
 
 struct t10_alua_lu_gp {
 	u16	lu_gp_id;
@@ -218,7 +316,7 @@
 	struct config_group lu_gp_group;
 	struct list_head lu_gp_node;
 	struct list_head lu_gp_mem_list;
-} ____cacheline_aligned;
+};
 
 struct t10_alua_lu_gp_member {
 	bool lu_gp_assoc;
@@ -227,7 +325,7 @@
 	struct t10_alua_lu_gp *lu_gp;
 	struct se_device *lu_gp_mem_dev;
 	struct list_head lu_gp_mem_list;
-} ____cacheline_aligned;
+};
 
 struct t10_alua_tg_pt_gp {
 	u16	tg_pt_gp_id;
@@ -250,7 +348,7 @@
 	struct config_group tg_pt_gp_group;
 	struct list_head tg_pt_gp_list;
 	struct list_head tg_pt_gp_mem_list;
-} ____cacheline_aligned;
+};
 
 struct t10_alua_tg_pt_gp_member {
 	bool tg_pt_gp_assoc;
@@ -259,7 +357,7 @@
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
 	struct se_port *tg_pt;
 	struct list_head tg_pt_gp_mem_list;
-} ____cacheline_aligned;
+};
 
 struct t10_vpd {
 	unsigned char device_identifier[INQUIRY_VPD_DEVICE_IDENTIFIER_LEN];
@@ -269,7 +367,7 @@
 	u32 association;
 	u32 device_identifier_type;
 	struct list_head vpd_list;
-} ____cacheline_aligned;
+};
 
 struct t10_wwn {
 	char vendor[8];
@@ -280,7 +378,7 @@
 	struct se_subsystem_dev *t10_sub_dev;
 	struct config_group t10_wwn_group;
 	struct list_head t10_vpd_list;
-} ____cacheline_aligned;
+};
 
 
 /*
@@ -333,7 +431,7 @@
 	struct list_head pr_reg_aptpl_list;
 	struct list_head pr_reg_atp_list;
 	struct list_head pr_reg_atp_mem_list;
-} ____cacheline_aligned;
+};
 
 /*
  * This set of function pointer ops is set based upon SPC3_PERSISTENT_RESERVATIONS,
@@ -374,20 +472,20 @@
 	struct list_head registration_list;
 	struct list_head aptpl_reg_list;
 	struct t10_reservation_ops pr_ops;
-} ____cacheline_aligned;
+};
 
 struct se_queue_req {
 	int			state;
 	struct se_cmd		*cmd;
 	struct list_head	qr_list;
-} ____cacheline_aligned;
+};
 
 struct se_queue_obj {
 	atomic_t		queue_cnt;
 	spinlock_t		cmd_queue_lock;
 	struct list_head	qobj_list;
 	wait_queue_head_t	thread_wq;
-} ____cacheline_aligned;
+};
 
 struct se_task {
 	unsigned long long	task_lba;
@@ -397,16 +495,14 @@
 	struct scatterlist	*task_sg;
 	u32			task_sg_nents;
 	u16			task_flags;
-	u8			task_sense;
 	u8			task_scsi_status;
-	int			task_error_status;
 	enum dma_data_direction	task_data_direction;
-	atomic_t		task_state_active;
 	struct list_head	t_list;
 	struct list_head	t_execute_list;
 	struct list_head	t_state_list;
+	bool			t_state_active;
 	struct completion	task_stop_comp;
-} ____cacheline_aligned;
+};
 
 struct se_cmd {
 	/* SAM response code being sent to initiator */
@@ -451,6 +547,7 @@
 	struct list_head	se_queue_node;
 	struct list_head	se_cmd_list;
 	struct completion	cmd_wait_comp;
+	struct kref		cmd_kref;
 	struct target_core_fabric_ops *se_tfo;
 	int (*execute_task)(struct se_task *);
 	void (*transport_complete_callback)(struct se_cmd *);
@@ -492,7 +589,7 @@
 	struct list_head	t_task_list;
 	u32			t_task_list_num;
 
-} ____cacheline_aligned;
+};
 
 struct se_tmr_req {
 	/* Task Management function to be preformed */
@@ -510,7 +607,7 @@
 	struct se_device	*tmr_dev;
 	struct se_lun		*tmr_lun;
 	struct list_head	tmr_list;
-} ____cacheline_aligned;
+};
 
 struct se_ua {
 	u8			ua_asc;
@@ -518,7 +615,7 @@
 	struct se_node_acl	*ua_nacl;
 	struct list_head	ua_dev_list;
 	struct list_head	ua_nacl_list;
-} ____cacheline_aligned;
+};
 
 struct se_node_acl {
 	char			initiatorname[TRANSPORT_IQN_LEN];
@@ -545,7 +642,7 @@
 	struct config_group	*acl_default_groups[5];
 	struct list_head	acl_list;
 	struct list_head	acl_sess_list;
-} ____cacheline_aligned;
+};
 
 struct se_session {
 	unsigned		sess_tearing_down:1;
@@ -558,7 +655,7 @@
 	struct list_head	sess_cmd_list;
 	struct list_head	sess_wait_list;
 	spinlock_t		sess_cmd_lock;
-} ____cacheline_aligned;
+};
 
 struct se_device;
 struct se_transform_info;
@@ -578,7 +675,7 @@
 	struct list_head	lacl_list;
 	struct config_group	se_lun_group;
 	struct se_ml_stat_grps	ml_stat_grps;
-}  ____cacheline_aligned;
+};
 
 struct se_dev_entry {
 	bool			def_pr_registered;
@@ -603,7 +700,7 @@
 	struct se_lun		*se_lun;
 	struct list_head	alua_port_list;
 	struct list_head	ua_list;
-}  ____cacheline_aligned;
+};
 
 struct se_dev_limits {
 	/* Max supported HW queue depth */
@@ -612,7 +709,7 @@
 	u32		queue_depth;
 	/* From include/linux/blkdev.h for the other HW/SW limits. */
 	struct queue_limits limits;
-} ____cacheline_aligned;
+};
 
 struct se_dev_attrib {
 	int		emulate_dpo;
@@ -641,7 +738,7 @@
 	u32		unmap_granularity_alignment;
 	struct se_subsystem_dev *da_sub_dev;
 	struct config_group da_group;
-} ____cacheline_aligned;
+};
 
 struct se_dev_stat_grps {
 	struct config_group stat_group;
@@ -674,7 +771,7 @@
 	struct config_group se_dev_pr_group;
 	/* For target_core_stat.c groups */
 	struct se_dev_stat_grps dev_stat_grps;
-} ____cacheline_aligned;
+};
 
 struct se_device {
 	/* RELATIVE TARGET PORT IDENTIFER Counter */
@@ -685,7 +782,6 @@
 	u32			dev_port_count;
 	/* See transport_device_status_table */
 	u32			dev_status;
-	u32			dev_tcq_window_closed;
 	/* Physical device queue depth */
 	u32			queue_depth;
 	/* Used for SPC-2 reservations enforce of ISIDs */
@@ -702,7 +798,6 @@
 	spinlock_t		stats_lock;
 	/* Active commands on this virtual SE device */
 	atomic_t		simple_cmds;
-	atomic_t		depth_left;
 	atomic_t		dev_ordered_id;
 	atomic_t		execute_tasks;
 	atomic_t		dev_ordered_sync;
@@ -740,7 +835,7 @@
 	struct se_subsystem_api *transport;
 	/* Linked list for struct se_hba struct se_device list */
 	struct list_head	dev_list;
-}  ____cacheline_aligned;
+};
 
 struct se_hba {
 	u16			hba_tpgt;
@@ -759,7 +854,7 @@
 	struct config_group	hba_group;
 	struct mutex		hba_access_mutex;
 	struct se_subsystem_api *transport;
-}  ____cacheline_aligned;
+};
 
 struct se_port_stat_grps {
 	struct config_group stat_group;
@@ -785,13 +880,13 @@
 	struct se_port		*lun_sep;
 	struct config_group	lun_group;
 	struct se_port_stat_grps port_stat_grps;
-} ____cacheline_aligned;
+};
 
 struct scsi_port_stats {
        u64     cmd_pdus;
        u64     tx_data_octets;
        u64     rx_data_octets;
-} ____cacheline_aligned;
+};
 
 struct se_port {
 	/* RELATIVE TARGET PORT IDENTIFER */
@@ -811,12 +906,12 @@
 	struct se_portal_group *sep_tpg;
 	struct list_head sep_alua_list;
 	struct list_head sep_list;
-} ____cacheline_aligned;
+};
 
 struct se_tpg_np {
 	struct se_portal_group *tpg_np_parent;
 	struct config_group	tpg_np_group;
-} ____cacheline_aligned;
+};
 
 struct se_portal_group {
 	/* Type of target portal group, see transport_tpg_type_table */
@@ -849,13 +944,13 @@
 	struct config_group	tpg_acl_group;
 	struct config_group	tpg_attrib_group;
 	struct config_group	tpg_param_group;
-} ____cacheline_aligned;
+};
 
 struct se_wwn {
 	struct target_fabric_configfs *wwn_tf;
 	struct config_group	wwn_group;
 	struct config_group	*wwn_default_groups[2];
 	struct config_group	fabric_stat_group;
-} ____cacheline_aligned;
+};
 
 #endif /* TARGET_CORE_BASE_H */
diff --git a/include/target/target_core_device.h b/include/target/target_core_device.h
deleted file mode 100644
index 2be31ff..0000000
--- a/include/target/target_core_device.h
+++ /dev/null
@@ -1,63 +0,0 @@
-#ifndef TARGET_CORE_DEVICE_H
-#define TARGET_CORE_DEVICE_H
-
-extern int transport_lookup_cmd_lun(struct se_cmd *, u32);
-extern int transport_lookup_tmr_lun(struct se_cmd *, u32);
-extern struct se_dev_entry *core_get_se_deve_from_rtpi(
-					struct se_node_acl *, u16);
-extern int core_free_device_list_for_node(struct se_node_acl *,
-					struct se_portal_group *);
-extern void core_dec_lacl_count(struct se_node_acl *, struct se_cmd *);
-extern void core_update_device_list_access(u32, u32, struct se_node_acl *);
-extern int core_update_device_list_for_node(struct se_lun *, struct se_lun_acl *, u32,
-					u32, struct se_node_acl *,
-					struct se_portal_group *, int);
-extern void core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *);
-extern int core_dev_export(struct se_device *, struct se_portal_group *,
-					struct se_lun *);
-extern void core_dev_unexport(struct se_device *, struct se_portal_group *,
-					struct se_lun *);
-extern int target_report_luns(struct se_task *);
-extern void se_release_device_for_hba(struct se_device *);
-extern void se_release_vpd_for_dev(struct se_device *);
-extern void se_clear_dev_ports(struct se_device *);
-extern int se_free_virtual_device(struct se_device *, struct se_hba *);
-extern int se_dev_check_online(struct se_device *);
-extern int se_dev_check_shutdown(struct se_device *);
-extern void se_dev_set_default_attribs(struct se_device *, struct se_dev_limits *);
-extern int se_dev_set_task_timeout(struct se_device *, u32);
-extern int se_dev_set_max_unmap_lba_count(struct se_device *, u32);
-extern int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
-extern int se_dev_set_unmap_granularity(struct se_device *, u32);
-extern int se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
-extern int se_dev_set_emulate_dpo(struct se_device *, int);
-extern int se_dev_set_emulate_fua_write(struct se_device *, int);
-extern int se_dev_set_emulate_fua_read(struct se_device *, int);
-extern int se_dev_set_emulate_write_cache(struct se_device *, int);
-extern int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int);
-extern int se_dev_set_emulate_tas(struct se_device *, int);
-extern int se_dev_set_emulate_tpu(struct se_device *, int);
-extern int se_dev_set_emulate_tpws(struct se_device *, int);
-extern int se_dev_set_enforce_pr_isids(struct se_device *, int);
-extern int se_dev_set_is_nonrot(struct se_device *, int);
-extern int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
-extern int se_dev_set_queue_depth(struct se_device *, u32);
-extern int se_dev_set_max_sectors(struct se_device *, u32);
-extern int se_dev_set_optimal_sectors(struct se_device *, u32);
-extern int se_dev_set_block_size(struct se_device *, u32);
-extern struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_hba *,
-					struct se_device *, u32);
-extern int core_dev_del_lun(struct se_portal_group *, u32);
-extern struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
-extern struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
-							u32, char *, int *);
-extern int core_dev_add_initiator_node_lun_acl(struct se_portal_group *,
-						struct se_lun_acl *, u32, u32);
-extern int core_dev_del_initiator_node_lun_acl(struct se_portal_group *,
-						struct se_lun *, struct se_lun_acl *);
-extern void core_dev_free_initiator_node_lun_acl(struct se_portal_group *,
-						struct se_lun_acl *lacl);
-extern int core_dev_setup_virtual_lun0(void);
-extern void core_dev_release_virtual_lun0(void);
-
-#endif /* TARGET_CORE_DEVICE_H */
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
new file mode 100644
index 0000000..523e8bc
--- /dev/null
+++ b/include/target/target_core_fabric.h
@@ -0,0 +1,187 @@
+#ifndef TARGET_CORE_FABRIC_H
+#define TARGET_CORE_FABRIC_H
+
+struct target_core_fabric_ops {
+	struct configfs_subsystem *tf_subsys;
+	/*
+	 * Optional to signal struct se_task->task_sg[] padding entries
+	 * for scatterlist chaining using transport_do_task_sg_link(),
+	 * disabled by default
+	 */
+	bool task_sg_chaining;
+	char *(*get_fabric_name)(void);
+	u8 (*get_fabric_proto_ident)(struct se_portal_group *);
+	char *(*tpg_get_wwn)(struct se_portal_group *);
+	u16 (*tpg_get_tag)(struct se_portal_group *);
+	u32 (*tpg_get_default_depth)(struct se_portal_group *);
+	u32 (*tpg_get_pr_transport_id)(struct se_portal_group *,
+				struct se_node_acl *,
+				struct t10_pr_registration *, int *,
+				unsigned char *);
+	u32 (*tpg_get_pr_transport_id_len)(struct se_portal_group *,
+				struct se_node_acl *,
+				struct t10_pr_registration *, int *);
+	char *(*tpg_parse_pr_out_transport_id)(struct se_portal_group *,
+				const char *, u32 *, char **);
+	int (*tpg_check_demo_mode)(struct se_portal_group *);
+	int (*tpg_check_demo_mode_cache)(struct se_portal_group *);
+	int (*tpg_check_demo_mode_write_protect)(struct se_portal_group *);
+	int (*tpg_check_prod_mode_write_protect)(struct se_portal_group *);
+	/*
+	 * Optionally used by fabrics to allow demo-mode login, but not
+	 * expose any TPG LUNs, and return 'not connected' in standard
+	 * inquiry response
+	 */
+	int (*tpg_check_demo_mode_login_only)(struct se_portal_group *);
+	struct se_node_acl *(*tpg_alloc_fabric_acl)(
+					struct se_portal_group *);
+	void (*tpg_release_fabric_acl)(struct se_portal_group *,
+					struct se_node_acl *);
+	u32 (*tpg_get_inst_index)(struct se_portal_group *);
+	/*
+	 * Optional function pointer for TCM to perform command map
+	 * from TCM processing thread context, for those struct se_cmd
+	 * initially allocated in interrupt context.
+	 */
+	int (*new_cmd_map)(struct se_cmd *);
+	/*
+	 * Optional to release struct se_cmd and fabric dependent allocated
+	 * I/O descriptor in transport_cmd_check_stop().
+	 *
+	 * Returning 1 will signal a descriptor has been released.
+	 * Returning 0 will signal a descriptor has not been released.
+	 */
+	int (*check_stop_free)(struct se_cmd *);
+	void (*release_cmd)(struct se_cmd *);
+	/*
+	 * Called with spin_lock_bh(struct se_portal_group->session_lock) held.
+	 */
+	int (*shutdown_session)(struct se_session *);
+	void (*close_session)(struct se_session *);
+	void (*stop_session)(struct se_session *, int, int);
+	void (*fall_back_to_erl0)(struct se_session *);
+	int (*sess_logged_in)(struct se_session *);
+	u32 (*sess_get_index)(struct se_session *);
+	/*
+	 * Used only for SCSI fabrics that contain multi-value TransportIDs
+	 * (like iSCSI).  All other SCSI fabrics should set this to NULL.
+	 */
+	u32 (*sess_get_initiator_sid)(struct se_session *,
+				      unsigned char *, u32);
+	int (*write_pending)(struct se_cmd *);
+	int (*write_pending_status)(struct se_cmd *);
+	void (*set_default_node_attributes)(struct se_node_acl *);
+	u32 (*get_task_tag)(struct se_cmd *);
+	int (*get_cmd_state)(struct se_cmd *);
+	int (*queue_data_in)(struct se_cmd *);
+	int (*queue_status)(struct se_cmd *);
+	int (*queue_tm_rsp)(struct se_cmd *);
+	u16 (*set_fabric_sense_len)(struct se_cmd *, u32);
+	u16 (*get_fabric_sense_len)(void);
+	int (*is_state_remove)(struct se_cmd *);
+	/*
+	 * fabric module calls for target_core_fabric_configfs.c
+	 */
+	struct se_wwn *(*fabric_make_wwn)(struct target_fabric_configfs *,
+				struct config_group *, const char *);
+	void (*fabric_drop_wwn)(struct se_wwn *);
+	struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *,
+				struct config_group *, const char *);
+	void (*fabric_drop_tpg)(struct se_portal_group *);
+	int (*fabric_post_link)(struct se_portal_group *,
+				struct se_lun *);
+	void (*fabric_pre_unlink)(struct se_portal_group *,
+				struct se_lun *);
+	struct se_tpg_np *(*fabric_make_np)(struct se_portal_group *,
+				struct config_group *, const char *);
+	void (*fabric_drop_np)(struct se_tpg_np *);
+	struct se_node_acl *(*fabric_make_nodeacl)(struct se_portal_group *,
+				struct config_group *, const char *);
+	void (*fabric_drop_nodeacl)(struct se_node_acl *);
+};
+
+struct se_session *transport_init_session(void);
+void	__transport_register_session(struct se_portal_group *,
+		struct se_node_acl *, struct se_session *, void *);
+void	transport_register_session(struct se_portal_group *,
+		struct se_node_acl *, struct se_session *, void *);
+void	transport_free_session(struct se_session *);
+void	transport_deregister_session_configfs(struct se_session *);
+void	transport_deregister_session(struct se_session *);
+
+
+void	transport_init_se_cmd(struct se_cmd *, struct target_core_fabric_ops *,
+		struct se_session *, u32, int, int, unsigned char *);
+int	transport_lookup_cmd_lun(struct se_cmd *, u32);
+int	transport_generic_allocate_tasks(struct se_cmd *, unsigned char *);
+int	target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *,
+		unsigned char *, u32, u32, int, int, int);
+int	transport_handle_cdb_direct(struct se_cmd *);
+int	transport_generic_handle_cdb_map(struct se_cmd *);
+int	transport_generic_handle_data(struct se_cmd *);
+int	transport_generic_map_mem_to_cmd(struct se_cmd *cmd,
+		struct scatterlist *, u32, struct scatterlist *, u32);
+void	transport_do_task_sg_chain(struct se_cmd *);
+int	transport_generic_new_cmd(struct se_cmd *);
+
+void	transport_generic_process_write(struct se_cmd *);
+
+void	transport_generic_free_cmd(struct se_cmd *, int);
+
+bool	transport_wait_for_tasks(struct se_cmd *);
+int	transport_check_aborted_status(struct se_cmd *, int);
+int	transport_send_check_condition_and_sense(struct se_cmd *, u8, int);
+
+void	target_get_sess_cmd(struct se_session *, struct se_cmd *, bool);
+int	target_put_sess_cmd(struct se_session *, struct se_cmd *);
+void	target_splice_sess_cmd_list(struct se_session *);
+void	target_wait_for_sess_cmds(struct se_session *, int);
+
+int	core_alua_check_nonop_delay(struct se_cmd *);
+
+struct se_tmr_req *core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t);
+void	core_tmr_release_req(struct se_tmr_req *);
+int	transport_generic_handle_tmr(struct se_cmd *);
+int	transport_lookup_tmr_lun(struct se_cmd *, u32);
+
+struct se_node_acl *core_tpg_check_initiator_node_acl(struct se_portal_group *,
+		unsigned char *);
+void	core_tpg_clear_object_luns(struct se_portal_group *);
+struct se_node_acl *core_tpg_add_initiator_node_acl(struct se_portal_group *,
+		struct se_node_acl *, const char *, u32);
+int	core_tpg_del_initiator_node_acl(struct se_portal_group *,
+		struct se_node_acl *, int);
+int	core_tpg_set_initiator_node_queue_depth(struct se_portal_group *,
+		unsigned char *, u32, int);
+int	core_tpg_register(struct target_core_fabric_ops *, struct se_wwn *,
+		struct se_portal_group *, void *, int);
+int	core_tpg_deregister(struct se_portal_group *);
+
+/* SAS helpers */
+u8	sas_get_fabric_proto_ident(struct se_portal_group *);
+u32	sas_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
+		struct t10_pr_registration *, int *, unsigned char *);
+u32	sas_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
+		struct t10_pr_registration *, int *);
+char	*sas_parse_pr_out_transport_id(struct se_portal_group *, const char *,
+		u32 *, char **);
+
+/* FC helpers */
+u8	fc_get_fabric_proto_ident(struct se_portal_group *);
+u32	fc_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
+		struct t10_pr_registration *, int *, unsigned char *);
+u32	fc_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
+		struct t10_pr_registration *, int *);
+char	*fc_parse_pr_out_transport_id(struct se_portal_group *, const char *,
+		u32 *, char **);
+
+/* iSCSI helpers */
+u8	iscsi_get_fabric_proto_ident(struct se_portal_group *);
+u32	iscsi_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
+		struct t10_pr_registration *, int *, unsigned char *);
+u32	iscsi_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
+		struct t10_pr_registration *, int *);
+char	*iscsi_parse_pr_out_transport_id(struct se_portal_group *, const char *,
+		u32 *, char **);
+
+#endif /* TARGET_CORE_FABRIC_H */
diff --git a/include/target/target_core_fabric_lib.h b/include/target/target_core_fabric_lib.h
deleted file mode 100644
index c2f8d0e..0000000
--- a/include/target/target_core_fabric_lib.h
+++ /dev/null
@@ -1,28 +0,0 @@
-#ifndef TARGET_CORE_FABRIC_LIB_H
-#define TARGET_CORE_FABRIC_LIB_H
-
-extern u8 sas_get_fabric_proto_ident(struct se_portal_group *);
-extern u32 sas_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
-			struct t10_pr_registration *, int *, unsigned char *);
-extern u32 sas_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
-			struct t10_pr_registration *, int *);
-extern char *sas_parse_pr_out_transport_id(struct se_portal_group *,
-			const char *, u32 *, char **);
-
-extern u8 fc_get_fabric_proto_ident(struct se_portal_group *);
-extern u32 fc_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
-			struct t10_pr_registration *, int *, unsigned char *);
-extern u32 fc_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
-			struct t10_pr_registration *, int *);
-extern char *fc_parse_pr_out_transport_id(struct se_portal_group *,
-			const char *, u32 *, char **);
-
-extern u8 iscsi_get_fabric_proto_ident(struct se_portal_group *);
-extern u32 iscsi_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
-			struct t10_pr_registration *, int *, unsigned char *);
-extern u32 iscsi_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
-			struct t10_pr_registration *, int *);
-extern char *iscsi_parse_pr_out_transport_id(struct se_portal_group *,
-			const char *, u32 *, char **);
-
-#endif /* TARGET_CORE_FABRIC_LIB_H */
diff --git a/include/target/target_core_fabric_ops.h b/include/target/target_core_fabric_ops.h
deleted file mode 100644
index 0256825..0000000
--- a/include/target/target_core_fabric_ops.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/* Defined in target_core_configfs.h */
-struct target_fabric_configfs;
-
-struct target_core_fabric_ops {
-	struct configfs_subsystem *tf_subsys;
-	/*
-	 * Optional to signal struct se_task->task_sg[] padding entries
-	 * for scatterlist chaining using transport_do_task_sg_link(),
-	 * disabled by default
-	 */
-	bool task_sg_chaining;
-	char *(*get_fabric_name)(void);
-	u8 (*get_fabric_proto_ident)(struct se_portal_group *);
-	char *(*tpg_get_wwn)(struct se_portal_group *);
-	u16 (*tpg_get_tag)(struct se_portal_group *);
-	u32 (*tpg_get_default_depth)(struct se_portal_group *);
-	u32 (*tpg_get_pr_transport_id)(struct se_portal_group *,
-				struct se_node_acl *,
-				struct t10_pr_registration *, int *,
-				unsigned char *);
-	u32 (*tpg_get_pr_transport_id_len)(struct se_portal_group *,
-				struct se_node_acl *,
-				struct t10_pr_registration *, int *);
-	char *(*tpg_parse_pr_out_transport_id)(struct se_portal_group *,
-				const char *, u32 *, char **);
-	int (*tpg_check_demo_mode)(struct se_portal_group *);
-	int (*tpg_check_demo_mode_cache)(struct se_portal_group *);
-	int (*tpg_check_demo_mode_write_protect)(struct se_portal_group *);
-	int (*tpg_check_prod_mode_write_protect)(struct se_portal_group *);
-	/*
-	 * Optionally used by fabrics to allow demo-mode login, but not
-	 * expose any TPG LUNs, and return 'not connected' in standard
-	 * inquiry response
-	 */
-	int (*tpg_check_demo_mode_login_only)(struct se_portal_group *);
-	struct se_node_acl *(*tpg_alloc_fabric_acl)(
-					struct se_portal_group *);
-	void (*tpg_release_fabric_acl)(struct se_portal_group *,
-					struct se_node_acl *);
-	u32 (*tpg_get_inst_index)(struct se_portal_group *);
-	/*
-	 * Optional function pointer for TCM to perform command map
-	 * from TCM processing thread context, for those struct se_cmd
-	 * initially allocated in interrupt context.
-	 */
-	int (*new_cmd_map)(struct se_cmd *);
-	/*
-	 * Optional to release struct se_cmd and fabric dependent allocated
-	 * I/O descriptor in transport_cmd_check_stop().
-	 *
-	 * Returning 1 will signal a descriptor has been released.
-	 * Returning 0 will signal a descriptor has not been released.
-	 */
-	int (*check_stop_free)(struct se_cmd *);
-	/*
-	 * Optional check for active I/O shutdown
-	 */
-	int (*check_release_cmd)(struct se_cmd *);
-	void (*release_cmd)(struct se_cmd *);
-	/*
-	 * Called with spin_lock_bh(struct se_portal_group->session_lock held.
-	 */
-	int (*shutdown_session)(struct se_session *);
-	void (*close_session)(struct se_session *);
-	void (*stop_session)(struct se_session *, int, int);
-	void (*fall_back_to_erl0)(struct se_session *);
-	int (*sess_logged_in)(struct se_session *);
-	u32 (*sess_get_index)(struct se_session *);
-	/*
-	 * Used only for SCSI fabrics that contain multi-value TransportIDs
-	 * (like iSCSI).  All other SCSI fabrics should set this to NULL.
-	 */
-	u32 (*sess_get_initiator_sid)(struct se_session *,
-				      unsigned char *, u32);
-	int (*write_pending)(struct se_cmd *);
-	int (*write_pending_status)(struct se_cmd *);
-	void (*set_default_node_attributes)(struct se_node_acl *);
-	u32 (*get_task_tag)(struct se_cmd *);
-	int (*get_cmd_state)(struct se_cmd *);
-	int (*queue_data_in)(struct se_cmd *);
-	int (*queue_status)(struct se_cmd *);
-	int (*queue_tm_rsp)(struct se_cmd *);
-	u16 (*set_fabric_sense_len)(struct se_cmd *, u32);
-	u16 (*get_fabric_sense_len)(void);
-	int (*is_state_remove)(struct se_cmd *);
-	/*
-	 * fabric module calls for target_core_fabric_configfs.c
-	 */
-	struct se_wwn *(*fabric_make_wwn)(struct target_fabric_configfs *,
-				struct config_group *, const char *);
-	void (*fabric_drop_wwn)(struct se_wwn *);
-	struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *,
-				struct config_group *, const char *);
-	void (*fabric_drop_tpg)(struct se_portal_group *);
-	int (*fabric_post_link)(struct se_portal_group *,
-				struct se_lun *);
-	void (*fabric_pre_unlink)(struct se_portal_group *,
-				struct se_lun *);
-	struct se_tpg_np *(*fabric_make_np)(struct se_portal_group *,
-				struct config_group *, const char *);
-	void (*fabric_drop_np)(struct se_tpg_np *);
-	struct se_node_acl *(*fabric_make_nodeacl)(struct se_portal_group *,
-				struct config_group *, const char *);
-	void (*fabric_drop_nodeacl)(struct se_node_acl *);
-};
diff --git a/include/target/target_core_tmr.h b/include/target/target_core_tmr.h
deleted file mode 100644
index d5876e1..0000000
--- a/include/target/target_core_tmr.h
+++ /dev/null
@@ -1,35 +0,0 @@
-#ifndef TARGET_CORE_TMR_H
-#define TARGET_CORE_TMR_H
-
-/* fabric independent task management function values */
-enum tcm_tmreq_table {
-	TMR_ABORT_TASK		= 1,
-	TMR_ABORT_TASK_SET	= 2,
-	TMR_CLEAR_ACA		= 3,
-	TMR_CLEAR_TASK_SET	= 4,
-	TMR_LUN_RESET		= 5,
-	TMR_TARGET_WARM_RESET	= 6,
-	TMR_TARGET_COLD_RESET	= 7,
-	TMR_FABRIC_TMR		= 255,
-};
-
-/* fabric independent task management response values */
-enum tcm_tmrsp_table {
-	TMR_FUNCTION_COMPLETE		= 0,
-	TMR_TASK_DOES_NOT_EXIST		= 1,
-	TMR_LUN_DOES_NOT_EXIST		= 2,
-	TMR_TASK_STILL_ALLEGIANT	= 3,
-	TMR_TASK_FAILOVER_NOT_SUPPORTED	= 4,
-	TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED	= 5,
-	TMR_FUNCTION_AUTHORIZATION_FAILED = 6,
-	TMR_FUNCTION_REJECTED		= 255,
-};
-
-extern struct kmem_cache *se_tmr_req_cache;
-
-extern struct se_tmr_req *core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t);
-extern void core_tmr_release_req(struct se_tmr_req *);
-extern int core_tmr_lun_reset(struct se_device *, struct se_tmr_req *,
-				struct list_head *, struct se_cmd *);
-
-#endif /* TARGET_CORE_TMR_H */
diff --git a/include/target/target_core_tpg.h b/include/target/target_core_tpg.h
deleted file mode 100644
index 77e1872..0000000
--- a/include/target/target_core_tpg.h
+++ /dev/null
@@ -1,35 +0,0 @@
-#ifndef TARGET_CORE_TPG_H
-#define TARGET_CORE_TPG_H
-
-extern struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
-						const char *);
-extern struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
-						unsigned char *);
-extern void core_tpg_add_node_to_devs(struct se_node_acl *,
-						struct se_portal_group *);
-extern struct se_node_acl *core_tpg_check_initiator_node_acl(
-						struct se_portal_group *,
-						unsigned char *);
-extern void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
-extern void core_tpg_wait_for_mib_ref(struct se_node_acl *);
-extern void core_tpg_clear_object_luns(struct se_portal_group *);
-extern struct se_node_acl *core_tpg_add_initiator_node_acl(
-					struct se_portal_group *,
-					struct se_node_acl *,
-					const char *, u32);
-extern int core_tpg_del_initiator_node_acl(struct se_portal_group *,
-						struct se_node_acl *, int);
-extern int core_tpg_set_initiator_node_queue_depth(struct se_portal_group *,
-						unsigned char *, u32, int);
-extern int core_tpg_register(struct target_core_fabric_ops *,
-					struct se_wwn *,
-					struct se_portal_group *, void *,
-					int);
-extern int core_tpg_deregister(struct se_portal_group *);
-extern struct se_lun *core_tpg_pre_addlun(struct se_portal_group *, u32);
-extern int core_tpg_post_addlun(struct se_portal_group *, struct se_lun *, u32,
-				void *);
-extern struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32, int *);
-extern int core_tpg_post_dellun(struct se_portal_group *, struct se_lun *);
-
-#endif /* TARGET_CORE_TPG_H */
diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h
deleted file mode 100644
index dac4f2d..0000000
--- a/include/target/target_core_transport.h
+++ /dev/null
@@ -1,287 +0,0 @@
-#ifndef TARGET_CORE_TRANSPORT_H
-#define TARGET_CORE_TRANSPORT_H
-
-#define TARGET_CORE_VERSION			TARGET_CORE_MOD_VERSION
-
-/* Attempts before moving from SHORT to LONG */
-#define PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD	3
-#define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT	3  /* In milliseconds */
-#define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG	10 /* In milliseconds */
-
-#define PYX_TRANSPORT_STATUS_INTERVAL		5 /* In seconds */
-
-#define TRANSPORT_PLUGIN_PHBA_PDEV		1
-#define TRANSPORT_PLUGIN_VHBA_PDEV		2
-#define TRANSPORT_PLUGIN_VHBA_VDEV		3
-
-/*
- * struct se_subsystem_dev->su_dev_flags
-*/
-#define SDF_FIRMWARE_VPD_UNIT_SERIAL		0x00000001
-#define SDF_EMULATED_VPD_UNIT_SERIAL		0x00000002
-#define SDF_USING_UDEV_PATH			0x00000004
-#define SDF_USING_ALIAS				0x00000008
-
-/*
- * struct se_device->dev_flags
- */
-#define DF_READ_ONLY				0x00000001
-#define DF_SPC2_RESERVATIONS			0x00000002
-#define DF_SPC2_RESERVATIONS_WITH_ISID		0x00000004
-
-/* struct se_dev_attrib sanity values */
-/* Default max_unmap_lba_count */
-#define DA_MAX_UNMAP_LBA_COUNT			0
-/* Default max_unmap_block_desc_count */
-#define DA_MAX_UNMAP_BLOCK_DESC_COUNT		0
-/* Default unmap_granularity */
-#define DA_UNMAP_GRANULARITY_DEFAULT		0
-/* Default unmap_granularity_alignment */
-#define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT	0
-/* Emulation for Direct Page Out */
-#define DA_EMULATE_DPO				0
-/* Emulation for Forced Unit Access WRITEs */
-#define DA_EMULATE_FUA_WRITE			1
-/* Emulation for Forced Unit Access READs */
-#define DA_EMULATE_FUA_READ			0
-/* Emulation for WriteCache and SYNCHRONIZE_CACHE */
-#define DA_EMULATE_WRITE_CACHE			0
-/* Emulation for UNIT ATTENTION Interlock Control */
-#define DA_EMULATE_UA_INTLLCK_CTRL		0
-/* Emulation for TASK_ABORTED status (TAS) by default */
-#define DA_EMULATE_TAS				1
-/* Emulation for Thin Provisioning UNMAP using block/blk-lib.c:blkdev_issue_discard() */
-#define DA_EMULATE_TPU				0
-/*
- * Emulation for Thin Provisioning WRITE_SAME w/ UNMAP=1 bit using
- * block/blk-lib.c:blkdev_issue_discard()
- */
-#define DA_EMULATE_TPWS				0
-/* No Emulation for PSCSI by default */
-#define DA_EMULATE_RESERVATIONS			0
-/* No Emulation for PSCSI by default */
-#define DA_EMULATE_ALUA				0
-/* Enforce SCSI Initiator Port TransportID with 'ISID' for PR */
-#define DA_ENFORCE_PR_ISIDS			1
-#define DA_STATUS_MAX_SECTORS_MIN		16
-#define DA_STATUS_MAX_SECTORS_MAX		8192
-/* By default don't report non-rotating (solid state) medium */
-#define DA_IS_NONROT				0
-/* Queue Algorithm Modifier default for restricted reordering in control mode page */
-#define DA_EMULATE_REST_REORD			0
-
-#define SE_MODE_PAGE_BUF			512
-
-#define MOD_MAX_SECTORS(ms, bs)			(ms % (PAGE_SIZE / bs))
-
-struct se_subsystem_api;
-
-extern int init_se_kmem_caches(void);
-extern void release_se_kmem_caches(void);
-extern u32 scsi_get_new_index(scsi_index_t);
-extern void transport_init_queue_obj(struct se_queue_obj *);
-extern void transport_subsystem_check_init(void);
-extern int transport_subsystem_register(struct se_subsystem_api *);
-extern void transport_subsystem_release(struct se_subsystem_api *);
-extern void transport_load_plugins(void);
-extern struct se_session *transport_init_session(void);
-extern void __transport_register_session(struct se_portal_group *,
-					struct se_node_acl *,
-					struct se_session *, void *);
-extern void transport_register_session(struct se_portal_group *,
-					struct se_node_acl *,
-					struct se_session *, void *);
-extern void transport_free_session(struct se_session *);
-extern void transport_deregister_session_configfs(struct se_session *);
-extern void transport_deregister_session(struct se_session *);
-extern void transport_cmd_finish_abort(struct se_cmd *, int);
-extern void transport_complete_sync_cache(struct se_cmd *, int);
-extern void transport_complete_task(struct se_task *, int);
-extern void transport_add_task_to_execute_queue(struct se_task *,
-						struct se_task *,
-						struct se_device *);
-extern void transport_remove_task_from_execute_queue(struct se_task *,
-						struct se_device *);
-extern void __transport_remove_task_from_execute_queue(struct se_task *,
-						struct se_device *);
-unsigned char *transport_dump_cmd_direction(struct se_cmd *);
-extern void transport_dump_dev_state(struct se_device *, char *, int *);
-extern void transport_dump_dev_info(struct se_device *, struct se_lun *,
-					unsigned long long, char *, int *);
-extern void transport_dump_vpd_proto_id(struct t10_vpd *,
-					unsigned char *, int);
-extern void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *);
-extern int transport_dump_vpd_assoc(struct t10_vpd *,
-					unsigned char *, int);
-extern int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);
-extern int transport_dump_vpd_ident_type(struct t10_vpd *,
-					unsigned char *, int);
-extern int transport_set_vpd_ident_type(struct t10_vpd *, unsigned char *);
-extern int transport_dump_vpd_ident(struct t10_vpd *,
-					unsigned char *, int);
-extern int transport_set_vpd_ident(struct t10_vpd *, unsigned char *);
-extern struct se_device *transport_add_device_to_core_hba(struct se_hba *,
-					struct se_subsystem_api *,
-					struct se_subsystem_dev *, u32,
-					void *, struct se_dev_limits *,
-					const char *, const char *);
-extern void transport_init_se_cmd(struct se_cmd *,
-					struct target_core_fabric_ops *,
-					struct se_session *, u32, int, int,
-					unsigned char *);
-void *transport_kmap_first_data_page(struct se_cmd *cmd);
-void transport_kunmap_first_data_page(struct se_cmd *cmd);
-extern int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *);
-extern int transport_handle_cdb_direct(struct se_cmd *);
-extern int transport_generic_handle_cdb_map(struct se_cmd *);
-extern int transport_generic_handle_data(struct se_cmd *);
-extern int transport_generic_handle_tmr(struct se_cmd *);
-extern bool target_stop_task(struct se_task *task, unsigned long *flags);
-extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32,
-				struct scatterlist *, u32);
-extern int transport_clear_lun_from_sessions(struct se_lun *);
-extern bool transport_wait_for_tasks(struct se_cmd *);
-extern int transport_check_aborted_status(struct se_cmd *, int);
-extern int transport_send_check_condition_and_sense(struct se_cmd *, u8, int);
-extern void transport_send_task_abort(struct se_cmd *);
-extern void transport_release_cmd(struct se_cmd *);
-extern void transport_generic_free_cmd(struct se_cmd *, int);
-extern void target_get_sess_cmd(struct se_session *, struct se_cmd *);
-extern int target_put_sess_cmd(struct se_session *, struct se_cmd *);
-extern void target_splice_sess_cmd_list(struct se_session *);
-extern void target_wait_for_sess_cmds(struct se_session *, int);
-extern void transport_generic_wait_for_cmds(struct se_cmd *, int);
-extern void transport_do_task_sg_chain(struct se_cmd *);
-extern void transport_generic_process_write(struct se_cmd *);
-extern int transport_generic_new_cmd(struct se_cmd *);
-extern int transport_generic_do_tmr(struct se_cmd *);
-/* From target_core_alua.c */
-extern int core_alua_check_nonop_delay(struct se_cmd *);
-/* From target_core_cdb.c */
-extern int transport_emulate_control_cdb(struct se_task *);
-extern void target_get_task_cdb(struct se_task *task, unsigned char *cdb);
-
-/*
- * Each se_transport_task_t can have N number of possible struct se_task's
- * for the storage transport(s) to possibly execute.
- * Used primarily for splitting up CDBs that exceed the physical storage
- * HBA's maximum sector count per task.
- */
-struct se_mem {
-	struct page	*se_page;
-	u32		se_len;
-	u32		se_off;
-	struct list_head se_list;
-} ____cacheline_aligned;
-
-/*
- * 	Each type of disk transport supported MUST have a template defined
- *	within its .h file.
- */
-struct se_subsystem_api {
-	/*
-	 * The Name. :-)
-	 */
-	char name[16];
-	/*
-	 * Transport Type.
-	 */
-	u8 transport_type;
-
-	unsigned int fua_write_emulated : 1;
-	unsigned int write_cache_emulated : 1;
-
-	/*
-	 * struct module for struct se_hba references
-	 */
-	struct module *owner;
-	/*
-	 * Used for global se_subsystem_api list_head
-	 */
-	struct list_head sub_api_list;
-	/*
-	 * attach_hba():
-	 */
-	int (*attach_hba)(struct se_hba *, u32);
-	/*
-	 * detach_hba():
-	 */
-	void (*detach_hba)(struct se_hba *);
-	/*
-	 * pmode_hba(): Used for TCM/pSCSI subsystem plugin HBA ->
-	 *		Linux/SCSI struct Scsi_Host passthrough
-	*/
-	int (*pmode_enable_hba)(struct se_hba *, unsigned long);
-	/*
-	 * allocate_virtdevice():
-	 */
-	void *(*allocate_virtdevice)(struct se_hba *, const char *);
-	/*
-	 * create_virtdevice(): Only for Virtual HBAs
-	 */
-	struct se_device *(*create_virtdevice)(struct se_hba *,
-				struct se_subsystem_dev *, void *);
-	/*
-	 * free_device():
-	 */
-	void (*free_device)(void *);
-
-	/*
-	 * transport_complete():
-	 *
-	 * Use transport_generic_complete() for majority of DAS transport
-	 * drivers.  Provided out of convenience.
-	 */
-	int (*transport_complete)(struct se_task *task);
-	struct se_task *(*alloc_task)(unsigned char *cdb);
-	/*
-	 * do_task():
-	 */
-	int (*do_task)(struct se_task *);
-	/*
-	 * Used by virtual subsystem plugins IBLOCK and FILEIO to emulate
-	 * UNMAP and WRITE_SAME_* w/ UNMAP=1 <-> Linux/Block Discard
-	 */
-	int (*do_discard)(struct se_device *, sector_t, u32);
-	/*
-	 * Used  by virtual subsystem plugins IBLOCK and FILEIO to emulate
-	 * SYNCHRONIZE_CACHE_* <-> Linux/Block blkdev_issue_flush()
-	 */
-	void (*do_sync_cache)(struct se_task *);
-	/*
-	 * free_task():
-	 */
-	void (*free_task)(struct se_task *);
-	/*
-	 * check_configfs_dev_params():
-	 */
-	ssize_t (*check_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *);
-	/*
-	 * set_configfs_dev_params():
-	 */
-	ssize_t (*set_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *,
-						const char *, ssize_t);
-	/*
-	 * show_configfs_dev_params():
-	 */
-	ssize_t (*show_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *,
-						char *);
-	/*
-	 * get_device_rev():
-	 */
-	u32 (*get_device_rev)(struct se_device *);
-	/*
-	 * get_device_type():
-	 */
-	u32 (*get_device_type)(struct se_device *);
-	/*
-	 * Get the sector_t from a subsystem backstore..
-	 */
-	sector_t (*get_blocks)(struct se_device *);
-	/*
-	 * get_sense_buffer():
-	 */
-	unsigned char *(*get_sense_buffer)(struct se_task *);
-} ____cacheline_aligned;
-
-#endif /* TARGET_CORE_TRANSPORT_H */
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 9b7c8ab..86ee272 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -128,7 +128,6 @@
 
 	if (S_ISREG(mode)) {
 		struct mqueue_inode_info *info;
-		struct task_struct *p = current;
 		unsigned long mq_bytes, mq_msg_tblsz;
 
 		inode->i_fop = &mqueue_file_operations;
@@ -159,7 +158,7 @@
 
 		spin_lock(&mq_lock);
 		if (u->mq_bytes + mq_bytes < u->mq_bytes ||
-		    u->mq_bytes + mq_bytes > task_rlimit(p, RLIMIT_MSGQUEUE)) {
+		    u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
 			spin_unlock(&mq_lock);
 			/* mqueue_evict_inode() releases info->messages */
 			ret = -EMFILE;
diff --git a/ipc/shm.c b/ipc/shm.c
index 02ecf2c..b76be5b 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -870,9 +870,7 @@
 	case SHM_LOCK:
 	case SHM_UNLOCK:
 	{
-		struct file *uninitialized_var(shm_file);
-
-		lru_add_drain_all();  /* drain pagevecs to lru lists */
+		struct file *shm_file;
 
 		shp = shm_lock_check(ns, shmid);
 		if (IS_ERR(shp)) {
@@ -895,22 +893,31 @@
 		err = security_shm_shmctl(shp, cmd);
 		if (err)
 			goto out_unlock;
-		
-		if(cmd==SHM_LOCK) {
+
+		shm_file = shp->shm_file;
+		if (is_file_hugepages(shm_file))
+			goto out_unlock;
+
+		if (cmd == SHM_LOCK) {
 			struct user_struct *user = current_user();
-			if (!is_file_hugepages(shp->shm_file)) {
-				err = shmem_lock(shp->shm_file, 1, user);
-				if (!err && !(shp->shm_perm.mode & SHM_LOCKED)){
-					shp->shm_perm.mode |= SHM_LOCKED;
-					shp->mlock_user = user;
-				}
+			err = shmem_lock(shm_file, 1, user);
+			if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
+				shp->shm_perm.mode |= SHM_LOCKED;
+				shp->mlock_user = user;
 			}
-		} else if (!is_file_hugepages(shp->shm_file)) {
-			shmem_lock(shp->shm_file, 0, shp->mlock_user);
-			shp->shm_perm.mode &= ~SHM_LOCKED;
-			shp->mlock_user = NULL;
+			goto out_unlock;
 		}
+
+		/* SHM_UNLOCK */
+		if (!(shp->shm_perm.mode & SHM_LOCKED))
+			goto out_unlock;
+		shmem_lock(shm_file, 0, shp->mlock_user);
+		shp->shm_perm.mode &= ~SHM_LOCKED;
+		shp->mlock_user = NULL;
+		get_file(shm_file);
 		shm_unlock(shp);
+		shmem_unlock_mapping(shm_file->f_mapping);
+		fput(shm_file);
 		goto out;
 	}
 	case IPC_RMID:
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index caaea6e..af1de0f 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1863,11 +1863,12 @@
 
 /**
  * audit_syscall_exit - deallocate audit context after a system call
- * @pt_regs: syscall registers
+ * @success: success value of the syscall
+ * @return_code: return value of the syscall
  *
  * Tear down after system call.  If the audit context has been marked as
  * auditable (either because of the AUDIT_RECORD_CONTEXT state from
- * filtering, or because some other part of the kernel write an audit
+ * filtering, or because some other part of the kernel wrote an audit
  * message), then write out the syscall information.  In call cases,
  * free the names stored from getname().
  */
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 95dd721..29f5b65 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1077,6 +1077,7 @@
 		/* Early boot.  kretprobe_table_locks not yet initialized. */
 		return;
 
+	INIT_HLIST_HEAD(&empty_rp);
 	hash = hash_ptr(tk, KPROBE_HASH_BITS);
 	head = &kretprobe_inst_table[hash];
 	kretprobe_table_lock(hash, &flags);
@@ -1085,7 +1086,6 @@
 			recycle_rp_inst(ri, &empty_rp);
 	}
 	kretprobe_table_unlock(hash, &flags);
-	INIT_HLIST_HEAD(&empty_rp);
 	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 1cf8890..6a768e5 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -812,7 +812,8 @@
 	unsigned int res;
 
 	res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
-	res += DIV_ROUND_UP(res * sizeof(struct bm_block), PAGE_SIZE);
+	res += DIV_ROUND_UP(res * sizeof(struct bm_block),
+			    LINKED_PAGE_DATA_SIZE);
 	return 2 * res;
 }
 
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index b0d798e..d72586f 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -129,7 +129,7 @@
  * cpupri_set - update the cpu priority setting
  * @cp: The cpupri context
  * @cpu: The target cpu
- * @pri: The priority (INVALID-RT99) to assign to this CPU
+ * @newpri: The priority (INVALID-RT99) to assign to this CPU
  *
  * Note: Assumes cpu_rq(cpu)->lock is locked
  *
@@ -200,7 +200,6 @@
 /**
  * cpupri_init - initialize the cpupri structure
  * @cp: The cpupri context
- * @bootmem: true if allocations need to use bootmem
  *
  * Returns: -ENOMEM if memory fails.
  */
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index db110b8..f1539de 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -634,10 +634,11 @@
 	int ret = 0;
 
 	/*
-	 * We skip modules that tain the kernel, especially those with different
-	 * module header (for forced load), to make sure we don't cause a crash.
+	 * We skip modules that taint the kernel, especially those with different
+	 * module headers (for forced load), to make sure we don't cause a crash.
+	 * Staging and out-of-tree GPL modules are fine.
 	 */
-	if (mod->taints)
+	if (mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP)))
 		return 0;
 	mutex_lock(&tracepoints_mutex);
 	tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
index fe84bb9..716802b 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/mpi/mpicoder.c
@@ -255,6 +255,8 @@
 	if (!n)
 		n++;		/* avoid zero length allocation */
 	p = buffer = kmalloc(n, GFP_KERNEL);
+	if (!p)
+		return NULL;
 
 	for (i = a->nlimbs - 1; i >= 0; i--) {
 		alimb = a->d[i];
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ea8c3a4..5f34bd8 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2508,6 +2508,7 @@
 {
 	struct hstate *h = hstate_vma(vma);
 	int ret = VM_FAULT_SIGBUS;
+	int anon_rmap = 0;
 	pgoff_t idx;
 	unsigned long size;
 	struct page *page;
@@ -2562,14 +2563,13 @@
 			spin_lock(&inode->i_lock);
 			inode->i_blocks += blocks_per_huge_page(h);
 			spin_unlock(&inode->i_lock);
-			page_dup_rmap(page);
 		} else {
 			lock_page(page);
 			if (unlikely(anon_vma_prepare(vma))) {
 				ret = VM_FAULT_OOM;
 				goto backout_unlocked;
 			}
-			hugepage_add_new_anon_rmap(page, vma, address);
+			anon_rmap = 1;
 		}
 	} else {
 		/*
@@ -2582,7 +2582,6 @@
 			      VM_FAULT_SET_HINDEX(h - hstates);
 			goto backout_unlocked;
 		}
-		page_dup_rmap(page);
 	}
 
 	/*
@@ -2606,6 +2605,10 @@
 	if (!huge_pte_none(huge_ptep_get(ptep)))
 		goto backout;
 
+	if (anon_rmap)
+		hugepage_add_new_anon_rmap(page, vma, address);
+	else
+		page_dup_rmap(page);
 	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
 				&& (vma->vm_flags & VM_SHARED)));
 	set_huge_pte_at(mm, address, ptep, new_pte);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c3688df..556859f 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3247,7 +3247,7 @@
 		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
 	else
 		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
-	__mem_cgroup_commit_charge(memcg, page, 1, pc, ctype);
+	__mem_cgroup_commit_charge(memcg, newpage, 1, pc, ctype);
 	return ret;
 }
 
diff --git a/mm/memory.c b/mm/memory.c
index 5e30583..fa2f04e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -878,15 +878,24 @@
 			}
 			if (likely(!non_swap_entry(entry)))
 				rss[MM_SWAPENTS]++;
-			else if (is_write_migration_entry(entry) &&
-					is_cow_mapping(vm_flags)) {
-				/*
-				 * COW mappings require pages in both parent
-				 * and child to be set to read.
-				 */
-				make_migration_entry_read(&entry);
-				pte = swp_entry_to_pte(entry);
-				set_pte_at(src_mm, addr, src_pte, pte);
+			else if (is_migration_entry(entry)) {
+				page = migration_entry_to_page(entry);
+
+				if (PageAnon(page))
+					rss[MM_ANONPAGES]++;
+				else
+					rss[MM_FILEPAGES]++;
+
+				if (is_write_migration_entry(entry) &&
+				    is_cow_mapping(vm_flags)) {
+					/*
+					 * COW mappings require pages in both
+					 * parent and child to be set to read.
+					 */
+					make_migration_entry_read(&entry);
+					pte = swp_entry_to_pte(entry);
+					set_pte_at(src_mm, addr, src_pte, pte);
+				}
 			}
 		}
 		goto out_set_pte;
@@ -1191,6 +1200,16 @@
 
 			if (!non_swap_entry(entry))
 				rss[MM_SWAPENTS]--;
+			else if (is_migration_entry(entry)) {
+				struct page *page;
+
+				page = migration_entry_to_page(entry);
+
+				if (PageAnon(page))
+					rss[MM_ANONPAGES]--;
+				else
+					rss[MM_FILEPAGES]--;
+			}
 			if (unlikely(!free_swap_and_cache(entry)))
 				print_bad_pte(vma, addr, ptent, NULL);
 		}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0027d8f..d2186ec 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5413,7 +5413,25 @@
 
 bool is_pageblock_removable_nolock(struct page *page)
 {
-	struct zone *zone = page_zone(page);
+	struct zone *zone;
+	unsigned long pfn;
+
+	/*
+	 * We have to be careful here because we are iterating over memory
+	 * sections which are not zone aware so we might end up outside of
+	 * the zone but still within the section.
+	 * We have to take care about the node as well. If the node is offline
+	 * its NODE_DATA will be NULL - see page_zone.
+	 */
+	if (!node_online(page_to_nid(page)))
+		return false;
+
+	zone = page_zone(page);
+	pfn = page_to_pfn(page);
+	if (zone->zone_start_pfn > pfn ||
+			zone->zone_start_pfn + zone->spanned_pages <= pfn)
+		return false;
+
 	return __count_immobile_pages(zone, page, 0);
 }
 
diff --git a/mm/shmem.c b/mm/shmem.c
index feead19..269d049 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -379,7 +379,7 @@
 /*
  * Pagevec may contain swap entries, so shuffle up pages before releasing.
  */
-static void shmem_pagevec_release(struct pagevec *pvec)
+static void shmem_deswap_pagevec(struct pagevec *pvec)
 {
 	int i, j;
 
@@ -389,7 +389,36 @@
 			pvec->pages[j++] = page;
 	}
 	pvec->nr = j;
-	pagevec_release(pvec);
+}
+
+/*
+ * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
+ */
+void shmem_unlock_mapping(struct address_space *mapping)
+{
+	struct pagevec pvec;
+	pgoff_t indices[PAGEVEC_SIZE];
+	pgoff_t index = 0;
+
+	pagevec_init(&pvec, 0);
+	/*
+	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
+	 */
+	while (!mapping_unevictable(mapping)) {
+		/*
+		 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
+		 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
+		 */
+		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
+					PAGEVEC_SIZE, pvec.pages, indices);
+		if (!pvec.nr)
+			break;
+		index = indices[pvec.nr - 1] + 1;
+		shmem_deswap_pagevec(&pvec);
+		check_move_unevictable_pages(pvec.pages, pvec.nr);
+		pagevec_release(&pvec);
+		cond_resched();
+	}
 }
 
 /*
@@ -440,7 +469,8 @@
 			}
 			unlock_page(page);
 		}
-		shmem_pagevec_release(&pvec);
+		shmem_deswap_pagevec(&pvec);
+		pagevec_release(&pvec);
 		mem_cgroup_uncharge_end();
 		cond_resched();
 		index++;
@@ -470,7 +500,8 @@
 			continue;
 		}
 		if (index == start && indices[0] > end) {
-			shmem_pagevec_release(&pvec);
+			shmem_deswap_pagevec(&pvec);
+			pagevec_release(&pvec);
 			break;
 		}
 		mem_cgroup_uncharge_start();
@@ -494,7 +525,8 @@
 			}
 			unlock_page(page);
 		}
-		shmem_pagevec_release(&pvec);
+		shmem_deswap_pagevec(&pvec);
+		pagevec_release(&pvec);
 		mem_cgroup_uncharge_end();
 		index++;
 	}
@@ -1068,13 +1100,6 @@
 		user_shm_unlock(inode->i_size, user);
 		info->flags &= ~VM_LOCKED;
 		mapping_clear_unevictable(file->f_mapping);
-		/*
-		 * Ensure that a racing putback_lru_page() can see
-		 * the pages of this mapping are evictable when we
-		 * skip them due to !PageLRU during the scan.
-		 */
-		smp_mb__after_clear_bit();
-		scan_mapping_unevictable_pages(file->f_mapping);
 	}
 	retval = 0;
 
@@ -2445,6 +2470,10 @@
 	return 0;
 }
 
+void shmem_unlock_mapping(struct address_space *mapping)
+{
+}
+
 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
 {
 	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2880396..c52b235 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -26,7 +26,6 @@
 #include <linux/buffer_head.h>	/* for try_to_release_page(),
 					buffer_heads_over_limit */
 #include <linux/mm_inline.h>
-#include <linux/pagevec.h>
 #include <linux/backing-dev.h>
 #include <linux/rmap.h>
 #include <linux/topology.h>
@@ -661,7 +660,7 @@
 		 * When racing with an mlock or AS_UNEVICTABLE clearing
 		 * (page is unlocked) make sure that if the other thread
 		 * does not observe our setting of PG_lru and fails
-		 * isolation/check_move_unevictable_page,
+		 * isolation/check_move_unevictable_pages,
 		 * we see PG_mlocked/AS_UNEVICTABLE cleared below and move
 		 * the page back to the evictable list.
 		 *
@@ -3499,100 +3498,61 @@
 	return 1;
 }
 
+#ifdef CONFIG_SHMEM
 /**
- * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
- * @page: page to check evictability and move to appropriate lru list
- * @zone: zone page is in
+ * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list
+ * @pages:	array of pages to check
+ * @nr_pages:	number of pages to check
  *
- * Checks a page for evictability and moves the page to the appropriate
- * zone lru list.
+ * Checks pages for evictability and moves them to the appropriate lru list.
  *
- * Restrictions: zone->lru_lock must be held, page must be on LRU and must
- * have PageUnevictable set.
+ * This function is only used for SysV IPC SHM_UNLOCK.
  */
-static void check_move_unevictable_page(struct page *page, struct zone *zone)
+void check_move_unevictable_pages(struct page **pages, int nr_pages)
 {
 	struct lruvec *lruvec;
+	struct zone *zone = NULL;
+	int pgscanned = 0;
+	int pgrescued = 0;
+	int i;
 
-	VM_BUG_ON(PageActive(page));
-retry:
-	ClearPageUnevictable(page);
-	if (page_evictable(page, NULL)) {
-		enum lru_list l = page_lru_base_type(page);
+	for (i = 0; i < nr_pages; i++) {
+		struct page *page = pages[i];
+		struct zone *pagezone;
 
-		__dec_zone_state(zone, NR_UNEVICTABLE);
-		lruvec = mem_cgroup_lru_move_lists(zone, page,
-						   LRU_UNEVICTABLE, l);
-		list_move(&page->lru, &lruvec->lists[l]);
-		__inc_zone_state(zone, NR_INACTIVE_ANON + l);
-		__count_vm_event(UNEVICTABLE_PGRESCUED);
-	} else {
-		/*
-		 * rotate unevictable list
-		 */
-		SetPageUnevictable(page);
-		lruvec = mem_cgroup_lru_move_lists(zone, page, LRU_UNEVICTABLE,
-						   LRU_UNEVICTABLE);
-		list_move(&page->lru, &lruvec->lists[LRU_UNEVICTABLE]);
-		if (page_evictable(page, NULL))
-			goto retry;
-	}
-}
-
-/**
- * scan_mapping_unevictable_pages - scan an address space for evictable pages
- * @mapping: struct address_space to scan for evictable pages
- *
- * Scan all pages in mapping.  Check unevictable pages for
- * evictability and move them to the appropriate zone lru list.
- */
-void scan_mapping_unevictable_pages(struct address_space *mapping)
-{
-	pgoff_t next = 0;
-	pgoff_t end   = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >>
-			 PAGE_CACHE_SHIFT;
-	struct zone *zone;
-	struct pagevec pvec;
-
-	if (mapping->nrpages == 0)
-		return;
-
-	pagevec_init(&pvec, 0);
-	while (next < end &&
-		pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
-		int i;
-		int pg_scanned = 0;
-
-		zone = NULL;
-
-		for (i = 0; i < pagevec_count(&pvec); i++) {
-			struct page *page = pvec.pages[i];
-			pgoff_t page_index = page->index;
-			struct zone *pagezone = page_zone(page);
-
-			pg_scanned++;
-			if (page_index > next)
-				next = page_index;
-			next++;
-
-			if (pagezone != zone) {
-				if (zone)
-					spin_unlock_irq(&zone->lru_lock);
-				zone = pagezone;
-				spin_lock_irq(&zone->lru_lock);
-			}
-
-			if (PageLRU(page) && PageUnevictable(page))
-				check_move_unevictable_page(page, zone);
+		pgscanned++;
+		pagezone = page_zone(page);
+		if (pagezone != zone) {
+			if (zone)
+				spin_unlock_irq(&zone->lru_lock);
+			zone = pagezone;
+			spin_lock_irq(&zone->lru_lock);
 		}
-		if (zone)
-			spin_unlock_irq(&zone->lru_lock);
-		pagevec_release(&pvec);
 
-		count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
+		if (!PageLRU(page) || !PageUnevictable(page))
+			continue;
+
+		if (page_evictable(page, NULL)) {
+			enum lru_list lru = page_lru_base_type(page);
+
+			VM_BUG_ON(PageActive(page));
+			ClearPageUnevictable(page);
+			__dec_zone_state(zone, NR_UNEVICTABLE);
+			lruvec = mem_cgroup_lru_move_lists(zone, page,
+						LRU_UNEVICTABLE, lru);
+			list_move(&page->lru, &lruvec->lists[lru]);
+			__inc_zone_state(zone, NR_INACTIVE_ANON + lru);
+			pgrescued++;
+		}
 	}
 
+	if (zone) {
+		__count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
+		__count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
+		spin_unlock_irq(&zone->lru_lock);
+	}
 }
+#endif /* CONFIG_SHMEM */
 
 static void warn_scan_unevictable_pages(void)
 {
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index d793001..9b0c0b8 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -5,7 +5,7 @@
 ## Copyright (c) 1998 Michael Zucchi, All Rights Reserved        ##
 ## Copyright (C) 2000, 1  Tim Waugh <twaugh@redhat.com>          ##
 ## Copyright (C) 2001  Simon Huggins                             ##
-## Copyright (C) 2005-2010  Randy Dunlap                         ##
+## Copyright (C) 2005-2012  Randy Dunlap                         ##
 ## 								 ##
 ## #define enhancements by Armin Kuster <akuster@mvista.com>	 ##
 ## Copyright (c) 2000 MontaVista Software, Inc.			 ##
@@ -1785,6 +1785,7 @@
     $prototype =~ s/__devinit +//;
     $prototype =~ s/__init +//;
     $prototype =~ s/__init_or_module +//;
+    $prototype =~ s/__must_check +//;
     $prototype =~ s/^#\s*define\s+//; #ak added
     $prototype =~ s/__attribute__\s*\(\([a-z,]*\)\)//;
 
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index d661afb..d45061d 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -99,6 +99,7 @@
 			    struct inode *inode, enum ima_hooks func, int mask)
 {
 	struct task_struct *tsk = current;
+	const struct cred *cred = current_cred();
 	int i;
 
 	if ((rule->flags & IMA_FUNC) && rule->func != func)
@@ -108,7 +109,7 @@
 	if ((rule->flags & IMA_FSMAGIC)
 	    && rule->fsmagic != inode->i_sb->s_magic)
 		return false;
-	if ((rule->flags & IMA_UID) && rule->uid != tsk->cred->uid)
+	if ((rule->flags & IMA_UID) && rule->uid != cred->uid)
 		return false;
 	for (i = 0; i < MAX_LSM_RULES; i++) {
 		int rc = 0;
diff --git a/security/keys/internal.h b/security/keys/internal.h
index c7a7cae..65647f8 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -33,6 +33,7 @@
 
 extern struct key_type key_type_dead;
 extern struct key_type key_type_user;
+extern struct key_type key_type_logon;
 
 /*****************************************************************************/
 /*
diff --git a/security/keys/key.c b/security/keys/key.c
index 4f64c72..7ada801 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -999,6 +999,7 @@
 	list_add_tail(&key_type_keyring.link, &key_types_list);
 	list_add_tail(&key_type_dead.link, &key_types_list);
 	list_add_tail(&key_type_user.link, &key_types_list);
+	list_add_tail(&key_type_logon.link, &key_types_list);
 
 	/* record the root user tracking */
 	rb_link_node(&root_key_user.node,
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
index 69ff52c..c7660a2 100644
--- a/security/keys/user_defined.c
+++ b/security/keys/user_defined.c
@@ -18,6 +18,8 @@
 #include <asm/uaccess.h>
 #include "internal.h"
 
+static int logon_vet_description(const char *desc);
+
 /*
  * user defined keys take an arbitrary string as the description and an
  * arbitrary blob of data as the payload
@@ -36,6 +38,24 @@
 EXPORT_SYMBOL_GPL(key_type_user);
 
 /*
+ * This key type is essentially the same as key_type_user, but it does
+ * not define a .read op. This is suitable for storing username and
+ * password pairs in the keyring that you do not want to be readable
+ * from userspace.
+ */
+struct key_type key_type_logon = {
+	.name			= "logon",
+	.instantiate		= user_instantiate,
+	.update			= user_update,
+	.match			= user_match,
+	.revoke			= user_revoke,
+	.destroy		= user_destroy,
+	.describe		= user_describe,
+	.vet_description	= logon_vet_description,
+};
+EXPORT_SYMBOL_GPL(key_type_logon);
+
+/*
  * instantiate a user defined key
  */
 int user_instantiate(struct key *key, const void *data, size_t datalen)
@@ -59,7 +79,7 @@
 	/* attach the data */
 	upayload->datalen = datalen;
 	memcpy(upayload->data, data, datalen);
-	rcu_assign_pointer(key->payload.data, upayload);
+	rcu_assign_keypointer(key, upayload);
 	ret = 0;
 
 error:
@@ -98,7 +118,7 @@
 	if (ret == 0) {
 		/* attach the new data, displacing the old */
 		zap = key->payload.data;
-		rcu_assign_pointer(key->payload.data, upayload);
+		rcu_assign_keypointer(key, upayload);
 		key->expiry = 0;
 	}
 
@@ -133,7 +153,7 @@
 	key_payload_reserve(key, 0);
 
 	if (upayload) {
-		rcu_assign_pointer(key->payload.data, NULL);
+		rcu_assign_keypointer(key, NULL);
 		kfree_rcu(upayload, rcu);
 	}
 }
@@ -189,3 +209,20 @@
 }
 
 EXPORT_SYMBOL_GPL(user_read);
+
+/* Vet the description for a "logon" key */
+static int logon_vet_description(const char *desc)
+{
+	char *p;
+
+	/* require a "qualified" description string */
+	p = strchr(desc, ':');
+	if (!p)
+		return -EINVAL;
+
+	/* also reject description with ':' as first char */
+	if (p == desc)
+		return -EINVAL;
+
+	return 0;
+}
diff --git a/sound/pci/hda/alc880_quirks.c b/sound/pci/hda/alc880_quirks.c
index 5b68435..501501e 100644
--- a/sound/pci/hda/alc880_quirks.c
+++ b/sound/pci/hda/alc880_quirks.c
@@ -762,16 +762,22 @@
 	/* Looks like the unsol event is incompatible with the standard
 	 * definition.  4bit tag is placed at 28 bit!
 	 */
-	switch (res >> 28) {
+	res >>= 28;
+	switch (res) {
 	case ALC_MIC_EVENT:
 		alc88x_simple_mic_automute(codec);
 		break;
 	default:
-		alc_sku_unsol_event(codec, res);
+		alc_exec_unsol_event(codec, res);
 		break;
 	}
 }
 
+static void alc880_unsol_event(struct hda_codec *codec, unsigned int res)
+{
+	alc_exec_unsol_event(codec, res >> 28);
+}
+
 static void alc880_uniwill_p53_setup(struct hda_codec *codec)
 {
 	struct alc_spec *spec = codec->spec;
@@ -800,10 +806,11 @@
 	/* Looks like the unsol event is incompatible with the standard
 	 * definition.  4bit tag is placed at 28 bit!
 	 */
-	if ((res >> 28) == ALC_DCVOL_EVENT)
+	res >>= 28;
+	if (res == ALC_DCVOL_EVENT)
 		alc880_uniwill_p53_dcvol_automute(codec);
 	else
-		alc_sku_unsol_event(codec, res);
+		alc_exec_unsol_event(codec, res);
 }
 
 /*
@@ -1677,7 +1684,7 @@
 		.channel_mode = alc880_lg_ch_modes,
 		.need_dac_fix = 1,
 		.input_mux = &alc880_lg_capture_source,
-		.unsol_event = alc_sku_unsol_event,
+		.unsol_event = alc880_unsol_event,
 		.setup = alc880_lg_setup,
 		.init_hook = alc_hp_automute,
 #ifdef CONFIG_SND_HDA_POWER_SAVE
diff --git a/sound/pci/hda/alc882_quirks.c b/sound/pci/hda/alc882_quirks.c
index bdf0ed4..bb364a5 100644
--- a/sound/pci/hda/alc882_quirks.c
+++ b/sound/pci/hda/alc882_quirks.c
@@ -730,6 +730,11 @@
 		alc889A_mb31_automute(codec);
 }
 
+static void alc882_unsol_event(struct hda_codec *codec, unsigned int res)
+{
+	alc_exec_unsol_event(codec, res >> 26);
+}
+
 /*
  * configuration and preset
  */
@@ -775,7 +780,7 @@
 			.channel_mode = alc885_mba21_ch_modes,
 			.num_channel_mode = ARRAY_SIZE(alc885_mba21_ch_modes),
 			.input_mux = &alc882_capture_source,
-			.unsol_event = alc_sku_unsol_event,
+			.unsol_event = alc882_unsol_event,
 			.setup = alc885_mba21_setup,
 			.init_hook = alc_hp_automute,
        },
@@ -791,7 +796,7 @@
 		.input_mux = &alc882_capture_source,
 		.dig_out_nid = ALC882_DIGOUT_NID,
 		.dig_in_nid = ALC882_DIGIN_NID,
-		.unsol_event = alc_sku_unsol_event,
+		.unsol_event = alc882_unsol_event,
 		.setup = alc885_mbp3_setup,
 		.init_hook = alc_hp_automute,
 	},
@@ -806,7 +811,7 @@
 		.input_mux = &mb5_capture_source,
 		.dig_out_nid = ALC882_DIGOUT_NID,
 		.dig_in_nid = ALC882_DIGIN_NID,
-		.unsol_event = alc_sku_unsol_event,
+		.unsol_event = alc882_unsol_event,
 		.setup = alc885_mb5_setup,
 		.init_hook = alc_hp_automute,
 	},
@@ -821,7 +826,7 @@
 		.input_mux = &macmini3_capture_source,
 		.dig_out_nid = ALC882_DIGOUT_NID,
 		.dig_in_nid = ALC882_DIGIN_NID,
-		.unsol_event = alc_sku_unsol_event,
+		.unsol_event = alc882_unsol_event,
 		.setup = alc885_macmini3_setup,
 		.init_hook = alc_hp_automute,
 	},
@@ -836,7 +841,7 @@
 		.input_mux = &alc889A_imac91_capture_source,
 		.dig_out_nid = ALC882_DIGOUT_NID,
 		.dig_in_nid = ALC882_DIGIN_NID,
-		.unsol_event = alc_sku_unsol_event,
+		.unsol_event = alc882_unsol_event,
 		.setup = alc885_imac91_setup,
 		.init_hook = alc_hp_automute,
 	},
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index fb35474..95dfb68 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -469,6 +469,7 @@
 	unsigned int irq_pending_warned :1;
 	unsigned int probing :1; /* codec probing phase */
 	unsigned int snoop:1;
+	unsigned int align_buffer_size:1;
 
 	/* for debugging */
 	unsigned int last_cmd[AZX_MAX_CODECS];
@@ -1690,7 +1691,7 @@
 	runtime->hw.rates = hinfo->rates;
 	snd_pcm_limit_hw_rates(runtime);
 	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
-	if (align_buffer_size)
+	if (chip->align_buffer_size)
 		/* constrain buffer sizes to be multiple of 128
 		   bytes. This is more efficient in terms of memory
 		   access but isn't required by the HDA spec and
@@ -2773,8 +2774,9 @@
 	}
 
 	/* disable buffer size rounding to 128-byte multiples if supported */
+	chip->align_buffer_size = align_buffer_size;
 	if (chip->driver_caps & AZX_DCAPS_BUFSIZE)
-		align_buffer_size = 0;
+		chip->align_buffer_size = 0;
 
 	/* allow 64bit DMA address if supported by H/W */
 	if ((gcap & ICH6_GCAP_64OK) && !pci_set_dma_mask(pci, DMA_BIT_MASK(64)))
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 8a32a69..a7a5733 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -3027,7 +3027,7 @@
 	SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400s", CXT5066_THINKPAD),
 	SND_PCI_QUIRK(0x17aa, 0x21c5, "Thinkpad Edge 13", CXT5066_THINKPAD),
 	SND_PCI_QUIRK(0x17aa, 0x21c6, "Thinkpad Edge 13", CXT5066_ASUS),
- 	SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD),
+	SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T510", CXT5066_AUTO),
 	SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520 & W520", CXT5066_AUTO),
 	SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT5066_THINKPAD),
 	SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT5066_THINKPAD),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 5e82acf..c95c8bd 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -185,7 +185,6 @@
 	unsigned int vol_in_capsrc:1; /* use capsrc volume (ADC has no vol) */
 	unsigned int parse_flags; /* passed to snd_hda_parse_pin_defcfg() */
 	unsigned int shared_mic_hp:1; /* HP/Mic-in sharing */
-	unsigned int use_jack_tbl:1; /* 1 for model=auto */
 
 	/* auto-mute control */
 	int automute_mode;
@@ -621,17 +620,10 @@
 		alc_mux_select(codec, 0, spec->int_mic_idx, false);
 }
 
-/* unsolicited event for HP jack sensing */
-static void alc_sku_unsol_event(struct hda_codec *codec, unsigned int res)
+/* handle the specified unsol action (ALC_XXX_EVENT) */
+static void alc_exec_unsol_event(struct hda_codec *codec, int action)
 {
-	struct alc_spec *spec = codec->spec;
-	if (codec->vendor_id == 0x10ec0880)
-		res >>= 28;
-	else
-		res >>= 26;
-	if (spec->use_jack_tbl)
-		res = snd_hda_jack_get_action(codec, res);
-	switch (res) {
+	switch (action) {
 	case ALC_HP_EVENT:
 		alc_hp_automute(codec);
 		break;
@@ -645,6 +637,17 @@
 	snd_hda_jack_report_sync(codec);
 }
 
+/* unsolicited event for HP jack sensing */
+static void alc_sku_unsol_event(struct hda_codec *codec, unsigned int res)
+{
+	if (codec->vendor_id == 0x10ec0880)
+		res >>= 28;
+	else
+		res >>= 26;
+	res = snd_hda_jack_get_action(codec, res);
+	alc_exec_unsol_event(codec, res);
+}
+
 /* call init functions of standard auto-mute helpers */
 static void alc_inithook(struct hda_codec *codec)
 {
@@ -1883,7 +1886,7 @@
 };
 #endif
 
-static int alc_build_controls(struct hda_codec *codec)
+static int __alc_build_controls(struct hda_codec *codec)
 {
 	struct alc_spec *spec = codec->spec;
 	struct snd_kcontrol *kctl = NULL;
@@ -2029,11 +2032,16 @@
 
 	alc_free_kctls(codec); /* no longer needed */
 
-	err = snd_hda_jack_add_kctls(codec, &spec->autocfg);
+	return 0;
+}
+
+static int alc_build_controls(struct hda_codec *codec)
+{
+	struct alc_spec *spec = codec->spec;
+	int err = __alc_build_controls(codec);
 	if (err < 0)
 		return err;
-
-	return 0;
+	return snd_hda_jack_add_kctls(codec, &spec->autocfg);
 }
 
 
@@ -3233,7 +3241,7 @@
 	int i, err, noutputs;
 
 	noutputs = cfg->line_outs;
-	if (spec->multi_ios > 0)
+	if (spec->multi_ios > 0 && cfg->line_outs < 3)
 		noutputs += spec->multi_ios;
 
 	for (i = 0; i < noutputs; i++) {
@@ -3904,7 +3912,6 @@
 static void alc_auto_init_std(struct hda_codec *codec)
 {
 	struct alc_spec *spec = codec->spec;
-	spec->use_jack_tbl = 1;
 	alc_auto_init_multi_out(codec);
 	alc_auto_init_extra_out(codec);
 	alc_auto_init_analog_input(codec);
@@ -4168,6 +4175,8 @@
 	codec->patch_ops = alc_patch_ops;
 	if (board_config == ALC_MODEL_AUTO)
 		spec->init_hook = alc_auto_init_std;
+	else
+		codec->patch_ops.build_controls = __alc_build_controls;
 #ifdef CONFIG_SND_HDA_POWER_SAVE
 	if (!spec->loopback.amplist)
 		spec->loopback.amplist = alc880_loopbacks;
@@ -4297,6 +4306,8 @@
 	codec->patch_ops = alc_patch_ops;
 	if (board_config == ALC_MODEL_AUTO)
 		spec->init_hook = alc_auto_init_std;
+	else
+		codec->patch_ops.build_controls = __alc_build_controls;
 	spec->shutup = alc_eapd_shutup;
 #ifdef CONFIG_SND_HDA_POWER_SAVE
 	if (!spec->loopback.amplist)
@@ -4691,6 +4702,8 @@
 	codec->patch_ops = alc_patch_ops;
 	if (board_config == ALC_MODEL_AUTO)
 		spec->init_hook = alc_auto_init_std;
+	else
+		codec->patch_ops.build_controls = __alc_build_controls;
 
 #ifdef CONFIG_SND_HDA_POWER_SAVE
 	if (!spec->loopback.amplist)
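
The patch_realtek.c diff above does several related things: it splits the unsolicited-event handling into a decode step (alc_sku_unsol_event, which now always consults the jack table) and a dispatch step (alc_exec_unsol_event), drops the use_jack_tbl flag, and splits control building so the jack kcontrols are only added for model=auto configurations. A standalone sketch of just the decode/dispatch split, with illustrative names:

/*
 * Standalone sketch (illustrative names, not the Realtek driver): keep the
 * "decode the raw unsolicited response" step separate from the "act on the
 * decoded event" step, so callers that already know the action can dispatch
 * directly.
 */
#include <stdio.h>

enum { HP_EVENT = 1, MIC_EVENT = 2 };

static void exec_unsol_event(int action)
{
        switch (action) {
        case HP_EVENT:
                puts("headphone automute");
                break;
        case MIC_EVENT:
                puts("mic autoswitch");
                break;
        }
}

/* generic path: extract the event tag from the raw response, then dispatch */
static void sku_unsol_event(unsigned int res, int wide_tag)
{
        res >>= wide_tag ? 28 : 26;     /* tag field position differs per codec */
        exec_unsol_event((int)res);
}

int main(void)
{
        sku_unsol_event(HP_EVENT << 26, 0);     /* generic decode + dispatch */
        exec_unsol_event(MIC_EVENT);            /* model code dispatches directly */
        return 0;
}
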
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 3556408..336cfcd 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -1608,7 +1608,7 @@
 	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x043a,
 		      "Alienware M17x", STAC_ALIENWARE_M17X),
 	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0490,
-		      "Alienware M17x", STAC_ALIENWARE_M17X),
+		      "Alienware M17x R3", STAC_DELL_EQ),
 	{} /* terminator */
 };
 
@@ -4163,13 +4163,15 @@
 	return 1;
 }
 
-static int is_nid_hp_pin(struct auto_pin_cfg *cfg, hda_nid_t nid)
+static int is_nid_out_jack_pin(struct auto_pin_cfg *cfg, hda_nid_t nid)
 {
 	int i;
 	for (i = 0; i < cfg->hp_outs; i++)
 		if (cfg->hp_pins[i] == nid)
 			return 1; /* nid is a HP-Out */
-
+	for (i = 0; i < cfg->line_outs; i++)
+		if (cfg->line_out_pins[i] == nid)
+			return 1; /* nid is a line-Out */
 	return 0; /* nid is not a HP-Out */
 };
 
@@ -4375,7 +4377,7 @@
 			continue;
 		}
 
-		if (is_nid_hp_pin(cfg, nid))
+		if (is_nid_out_jack_pin(cfg, nid))
 			continue; /* already has an unsol event */
 
 		pinctl = snd_hda_codec_read(codec, nid, 0,
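
The patch_sigmatel.c change widens the old is_nid_hp_pin() helper so that line-out pins, not only headphone pins, are skipped when extra unsolicited-event handlers are assigned. A standalone sketch of the membership test (illustrative names, fixed-size arrays for brevity):

/*
 * Standalone sketch (illustrative names): treat a pin as "already handled"
 * if it appears in either the headphone list or the line-out list, so it is
 * not given a second jack-detect handler.
 */
#include <stdio.h>

struct pin_cfg {
        int hp_pins[4];
        int hp_outs;
        int line_out_pins[4];
        int line_outs;
};

static int is_out_jack_pin(const struct pin_cfg *cfg, int nid)
{
        int i;

        for (i = 0; i < cfg->hp_outs; i++)
                if (cfg->hp_pins[i] == nid)
                        return 1;       /* headphone output */
        for (i = 0; i < cfg->line_outs; i++)
                if (cfg->line_out_pins[i] == nid)
                        return 1;       /* line output */
        return 0;                       /* neither: needs its own handler */
}

int main(void)
{
        struct pin_cfg cfg = {
                .hp_pins = { 0x0a }, .hp_outs = 1,
                .line_out_pins = { 0x0f }, .line_outs = 1,
        };

        printf("%d %d %d\n", is_out_jack_pin(&cfg, 0x0a),
               is_out_jack_pin(&cfg, 0x0f), is_out_jack_pin(&cfg, 0x12));
        return 0;
}
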
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index d7bd918..f8863eb 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -1457,5 +1457,5 @@
 module_exit(sgtl5000_exit);
 
 MODULE_DESCRIPTION("Freescale SGTL5000 ALSA SoC Codec Driver");
-MODULE_AUTHOR("Zeng Zhaoming <zhaoming.zeng@freescale.com>");
+MODULE_AUTHOR("Zeng Zhaoming <zengzm.kernel@gmail.com>");
 MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/wm8993.c b/sound/soc/codecs/wm8993.c
index 2b40c93..7c7fd92 100644
--- a/sound/soc/codecs/wm8993.c
+++ b/sound/soc/codecs/wm8993.c
@@ -444,6 +444,12 @@
 	/* Enable the FLL */
 	snd_soc_write(codec, WM8993_FLL_CONTROL_1, reg1 | WM8993_FLL_ENA);
 
+	/* Both overestimates */
+	if (Fref < 1000000)
+		msleep(3);
+	else
+		msleep(1);
+
 	dev_dbg(codec->dev, "FLL enabled at %dHz->%dHz\n", Fref, Fout);
 
 	wm8993->fll_fref = Fref;
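
The wm8993.c hunk inserts a deliberate wait after enabling the FLL so the clock has time to lock before use; the two delays are acknowledged overestimates, with the longer one used for low reference frequencies. A rough userspace sketch of the idea (nanosleep() standing in for msleep(); not the codec driver):

/*
 * Standalone sketch (illustrative, not the codec driver): after enabling a
 * PLL/FLL, wait long enough for it to lock before declaring the clock ready;
 * a low reference clock needs a longer (deliberately overestimated) delay.
 */
#define _POSIX_C_SOURCE 199309L /* for nanosleep() */
#include <stdio.h>
#include <time.h>

static void sleep_ms(long ms)
{
        struct timespec ts = { .tv_sec = ms / 1000,
                               .tv_nsec = (ms % 1000) * 1000000L };
        nanosleep(&ts, NULL);
}

static void fll_enable(unsigned int fref, unsigned int fout)
{
        /* ... write the enable bit to the FLL control register here ... */

        /* both values are overestimates of the worst-case lock time */
        if (fref < 1000000)
                sleep_ms(3);
        else
                sleep_ms(1);

        printf("FLL enabled at %uHz->%uHz\n", fref, fout);
}

int main(void)
{
        fll_enable(32768, 12288000);
        return 0;
}
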
diff --git a/sound/soc/imx/imx-pcm-dma-mx2.c b/sound/soc/imx/imx-pcm-dma-mx2.c
index aecdba9..5780c9b 100644
--- a/sound/soc/imx/imx-pcm-dma-mx2.c
+++ b/sound/soc/imx/imx-pcm-dma-mx2.c
@@ -88,11 +88,13 @@
 	iprtd->dma_data.dma_request = dma_params->dma;
 
 	/* Try to grab a DMA channel */
-	dma_cap_zero(mask);
-	dma_cap_set(DMA_SLAVE, mask);
-	iprtd->dma_chan = dma_request_channel(mask, filter, iprtd);
-	if (!iprtd->dma_chan)
-		return -EINVAL;
+	if (!iprtd->dma_chan) {
+		dma_cap_zero(mask);
+		dma_cap_set(DMA_SLAVE, mask);
+		iprtd->dma_chan = dma_request_channel(mask, filter, iprtd);
+		if (!iprtd->dma_chan)
+			return -EINVAL;
+	}
 
 	switch (params_format(params)) {
 	case SNDRV_PCM_FORMAT_S16_LE:
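
The imx-pcm-dma-mx2.c fix keeps the DMA channel across repeated hw_params() calls: a new channel is only requested when none is held yet, instead of unconditionally re-requesting (and previously leaking) one. A standalone sketch of that lazy-acquire pattern, with malloc() standing in for dma_request_channel():

/*
 * Standalone sketch (illustrative, not the driver): hw_params() can be called
 * several times per stream, so only request the (expensive, possibly scarce)
 * channel when we do not already hold one, and keep it across calls.
 */
#include <stdio.h>
#include <stdlib.h>

struct stream {
        void *dma_chan;         /* NULL until a channel is held */
};

static void *request_channel(void)
{
        return malloc(1);       /* stands in for dma_request_channel() */
}

static int hw_params(struct stream *s)
{
        if (!s->dma_chan) {
                s->dma_chan = request_channel();
                if (!s->dma_chan)
                        return -1;
        }
        /* ... configure the channel for the new parameters ... */
        return 0;
}

int main(void)
{
        struct stream s = { 0 };

        hw_params(&s);
        void *first = s.dma_chan;
        hw_params(&s);          /* second call reuses the same channel */
        printf("reused: %s\n", s.dma_chan == first ? "yes" : "no");
        free(s.dma_chan);
        return 0;
}
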
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 3986520..b5ecf6d 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -907,6 +907,10 @@
 			if (err < 0)
 				printk(KERN_ERR "asoc: failed to remove %s\n", platform->name);
 		}
+
+		/* Make sure all DAPM widgets are freed */
+		snd_soc_dapm_free(&platform->dapm);
+
 		platform->probed = 0;
 		list_del(&platform->card_list);
 		module_put(platform->dev->driver->owner);
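
The soc-core.c hunk frees the platform's DAPM widgets when the platform is removed, so nothing is left behind across a remove/re-probe cycle. A standalone sketch of the teardown ordering (illustrative names, a plain linked list standing in for the widget list):

/*
 * Standalone sketch (illustrative names): when a component is torn down,
 * walk and free every widget it still owns so nothing leaks across a
 * remove/re-probe cycle.
 */
#include <stdio.h>
#include <stdlib.h>

struct widget {
        struct widget *next;
};

struct component {
        struct widget *widgets;
        int probed;
};

static void dapm_free(struct component *c)
{
        struct widget *w = c->widgets;

        while (w) {
                struct widget *next = w->next;
                free(w);
                w = next;
        }
        c->widgets = NULL;
}

static void component_remove(struct component *c)
{
        /* ... driver remove() callback would run here ... */
        dapm_free(c);           /* make sure all widgets are freed */
        c->probed = 0;
}

int main(void)
{
        struct component c = { 0 };
        struct widget *w = calloc(1, sizeof(*w));

        c.widgets = w;
        c.probed = 1;
        component_remove(&c);
        printf("probed=%d widgets=%p\n", c.probed, (void *)c.widgets);
        return 0;
}
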
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 3ad1f59..1f55ded 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -1426,7 +1426,7 @@
 			dapm->target_bias_level = SND_SOC_BIAS_ON;
 			break;
 		case SND_SOC_DAPM_STREAM_STOP:
-			if (dapm->codec->active)
+			if (dapm->codec && dapm->codec->active)
 				dapm->target_bias_level = SND_SOC_BIAS_ON;
 			else
 				dapm->target_bias_level = SND_SOC_BIAS_STANDBY;
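
The soc-dapm.c fix guards the dapm->codec dereference: a DAPM context is not always backed by a codec, so the codec's active flag can only be consulted when the pointer is non-NULL. A standalone sketch, with illustrative names:

/*
 * Standalone sketch (illustrative): a DAPM context is not always backed by a
 * codec, so test the pointer before looking at codec state when deciding the
 * target bias level.
 */
#include <stdio.h>

enum bias { BIAS_STANDBY, BIAS_ON };

struct codec {
        int active;
};

struct dapm_context {
        struct codec *codec;    /* NULL for platform/card contexts */
};

static enum bias target_bias_on_stop(const struct dapm_context *dapm)
{
        if (dapm->codec && dapm->codec->active)
                return BIAS_ON;
        return BIAS_STANDBY;
}

int main(void)
{
        struct codec c = { .active = 1 };
        struct dapm_context with_codec = { .codec = &c };
        struct dapm_context without_codec = { .codec = NULL };

        printf("%d %d\n", target_bias_on_stop(&with_codec),
               target_bias_on_stop(&without_codec));
        return 0;
}
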
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 3c6f780..310d3dd 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -811,6 +811,8 @@
 	case 0x2C:	/* Westmere EP - Gulftown */
 	case 0x2A:	/* SNB */
 	case 0x2D:	/* SNB Xeon */
+	case 0x3A:	/* IVB */
+	case 0x3D:	/* IVB Xeon */
 		return 1;
 	case 0x2E:	/* Nehalem-EX Xeon - Beckton */
 	case 0x2F:	/* Westmere-EX Xeon - Eagleton */