Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.26

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.26: (1090 commits)
  [NET]: Fix and allocate less memory for ->priv'less netdevices
  [IPV6]: Fix dangling references on error in fib6_add().
  [NETLABEL]: Fix NULL deref in netlbl_unlabel_staticlist_gen() if ifindex not found
  [PKT_SCHED]: Fix datalen check in tcf_simp_init().
  [INET]: Uninline the __inet_inherit_port call.
  [INET]: Drop the inet_inherit_port() call.
  SCTP: Initialize partial_bytes_acked to 0, when all of the data is acked.
  [netdrvr] forcedeth: internal simplifications; changelog removal
  phylib: factor out get_phy_id from within get_phy_device
  PHY: add BCM5464 support to broadcom PHY driver
  cxgb3: Fix __must_check warning with dev_dbg.
  tc35815: Statistics cleanup
  natsemi: fix MMIO for PPC 44x platforms
  [TIPC]: Cleanup of TIPC reference table code
  [TIPC]: Optimized initialization of TIPC reference table
  [TIPC]: Remove inlining of reference table locking routines
  e1000: convert uint16_t style integers to u16
  ixgb: convert uint16_t style integers to u16
  sb1000.c: make const arrays static
  sb1000.c: stop inlining largish static functions
  ...
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index e8fb246..f7923a4 100644
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@ -167,10 +167,8 @@
 	- notes on the change from 16 bit to 32 bit user/group IDs.
 hpet.txt
 	- High Precision Event Timer Driver for Linux.
-hrtimer/
-	- info on the timer_stats debugging facility for timer (ab)use.
-hrtimers/
-	- info on the hrtimers subsystem for high-resolution kernel timers.
+timers/
+	- info on timer-related topics.
 hw_random.txt
 	- info on Linux support for random number generator in i8xx chipsets.
 hwmon/
diff --git a/Documentation/ABI/obsolete/o2cb b/Documentation/ABI/obsolete/o2cb
new file mode 100644
index 0000000..9c49d8e
--- /dev/null
+++ b/Documentation/ABI/obsolete/o2cb
@@ -0,0 +1,11 @@
+What:		/sys/o2cb symlink
+Date:		Dec 2005
+KernelVersion:	2.6.16
+Contact:	ocfs2-devel@oss.oracle.com
+Description:	This is a symlink: /sys/o2cb to /sys/fs/o2cb. The symlink will
+		be removed when new versions of ocfs2-tools which know to look
+		in /sys/fs/o2cb are sufficiently prevalent. Don't code new
+		software to look here; it should try /sys/fs/o2cb instead.
+		See Documentation/ABI/stable/o2cb for more information on usage.
+Users:		ocfs2-tools. It's sufficient to mail proposed changes to
+		ocfs2-devel@oss.oracle.com.
diff --git a/Documentation/ABI/stable/o2cb b/Documentation/ABI/stable/o2cb
new file mode 100644
index 0000000..5eb1545
--- /dev/null
+++ b/Documentation/ABI/stable/o2cb
@@ -0,0 +1,10 @@
+What:		/sys/fs/o2cb/ (was /sys/o2cb)
+Date:		Dec 2005
+KernelVersion:	2.6.16
+Contact:	ocfs2-devel@oss.oracle.com
+Description:	Ocfs2-tools looks at 'interface-revision' for versioning
+		information. Each logmask/ file controls a set of debug prints
+		and can be written into with the strings "allow", "deny", or
+		"off". Reading the file returns the current state.
+Users:		ocfs2-tools. It's sufficient to mail proposed changes to
+		ocfs2-devel@oss.oracle.com.
diff --git a/Documentation/ABI/testing/sysfs-ocfs2 b/Documentation/ABI/testing/sysfs-ocfs2
new file mode 100644
index 0000000..b7cc516
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-ocfs2
@@ -0,0 +1,89 @@
+What:		/sys/fs/ocfs2/
+Date:		April 2008
+Contact:	ocfs2-devel@oss.oracle.com
+Description:
+		The /sys/fs/ocfs2 directory contains knobs used by the
+		ocfs2-tools to interact with the filesystem.
+
+What:		/sys/fs/ocfs2/max_locking_protocol
+Date:		April 2008
+Contact:	ocfs2-devel@oss.oracle.com
+Description:
+		The /sys/fs/ocfs2/max_locking_protocol file displays the version
+		of ocfs2 locking supported by the filesystem.  This version
+		covers how ocfs2 uses distributed locking between cluster
+		nodes.
+
+		The protocol version has a major and minor number.  Two
+		cluster nodes can interoperate if they have an identical
+		major number and an overlapping minor number - thus,
+		a node with version 1.10 can interoperate with a node
+		sporting version 1.8, as long as both use the 1.8 protocol.
+
+		Reading from this file returns a single line, the major
+		number and minor number joined by a period, e.g. "1.10".
+
+		This file is read-only.  The value is compiled into the
+		driver.
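+
+		For example, a node supporting protocol version 1.10 would
+		report:
+
+		# cat /sys/fs/ocfs2/max_locking_protocol
+		1.10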
+
+What:		/sys/fs/ocfs2/loaded_cluster_plugins
+Date:		April 2008
+Contact:	ocfs2-devel@oss.oracle.com
+Description:
+		The /sys/fs/ocfs2/loaded_cluster_plugins file describes
+		the available plugins to support ocfs2 cluster operation.
+		A cluster plugin is required to use ocfs2 in a cluster.
+		There are currently two available plugins:
+
+		* 'o2cb' - The classic o2cb cluster stack that ocfs2 has
+			used since its inception.
+		* 'user' - A plugin supporting userspace cluster software
+			in conjunction with fs/dlm.
+
+		Reading from this file returns the names of all loaded
+		plugins, one per line.
+
+		This file is read-only.  Its contents may change as
+		plugins are loaded or removed.
+
+What:		/sys/fs/ocfs2/active_cluster_plugin
+Date:		April 2008
+Contact:	ocfs2-devel@oss.oracle.com
+Description:
+		The /sys/fs/ocfs2/active_cluster_plugin file displays which
+		cluster plugin is currently in use by the filesystem.
+		The active plugin will appear in the loaded_cluster_plugins
+		file as well.  Only one plugin can be used at a time.
+
+		Reading from this file returns the name of the active plugin
+		on a single line.
+
+		This file is read-only.  Which plugin is active depends on
+		the cluster stack in use.  The contents may change
+		when all filesystems are unmounted and the cluster stack
+		is changed.
+
+What:		/sys/fs/ocfs2/cluster_stack
+Date:		April 2008
+Contact:	ocfs2-devel@oss.oracle.com
+Description:
+		The /sys/fs/ocfs2/cluster_stack file contains the name
+		of the current ocfs2 cluster stack.  This value is set by
+		userspace tools when bringing the cluster stack online.
+
+		Cluster stack names are 4 characters in length.
+
+		When the 'o2cb' cluster stack is used, the 'o2cb' cluster
+		plugin is active.  All other cluster stacks use the 'user'
+		cluster plugin.
+
+		Reading from this file returns the name of the current
+		cluster stack on a single line.
+
+		Writing a new stack name to this file changes the current
+		cluster stack unless there are mounted ocfs2 filesystems.
+		If there are mounted filesystems, attempts to change the
+		stack return an error.
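+
+		For example (assuming the classic o2cb stack is desired),
+		userspace tools could select it with:
+
+		# echo o2cb > /sys/fs/ocfs2/cluster_stack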
+
+Users:
+	ocfs2-tools <ocfs2-tools-devel@oss.oracle.com>
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index 9ebd1f0..b2b6366 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -9,7 +9,7 @@
 DOCBOOKS := wanbook.xml z8530book.xml mcabook.xml videobook.xml \
 	    kernel-hacking.xml kernel-locking.xml deviceiobook.xml \
 	    procfs-guide.xml writing_usb_driver.xml networking.xml \
-	    kernel-api.xml filesystems.xml lsm.xml usb.xml \
+	    kernel-api.xml filesystems.xml lsm.xml usb.xml kgdb.xml \
 	    gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \
 	    genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
 	    mac80211.xml
diff --git a/Documentation/DocBook/kgdb.tmpl b/Documentation/DocBook/kgdb.tmpl
new file mode 100644
index 0000000..97618be
--- /dev/null
+++ b/Documentation/DocBook/kgdb.tmpl
@@ -0,0 +1,447 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+	"http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="kgdbOnLinux">
+ <bookinfo>
+  <title>Using kgdb and the kgdb Internals</title>
+
+  <authorgroup>
+   <author>
+    <firstname>Jason</firstname>
+    <surname>Wessel</surname>
+    <affiliation>
+     <address>
+      <email>jason.wessel@windriver.com</email>
+     </address>
+    </affiliation>
+   </author>
+  </authorgroup>
+
+  <authorgroup>
+   <author>
+    <firstname>Tom</firstname>
+    <surname>Rini</surname>
+    <affiliation>
+     <address>
+      <email>trini@kernel.crashing.org</email>
+     </address>
+    </affiliation>
+   </author>
+  </authorgroup>
+
+  <authorgroup>
+   <author>
+    <firstname>Amit S.</firstname>
+    <surname>Kale</surname>
+    <affiliation>
+     <address>
+      <email>amitkale@linsyssoft.com</email>
+     </address>
+    </affiliation>
+   </author>
+  </authorgroup>
+
+  <copyright>
+   <year>2008</year>
+   <holder>Wind River Systems, Inc.</holder>
+  </copyright>
+  <copyright>
+   <year>2004-2005</year>
+   <holder>MontaVista Software, Inc.</holder>
+  </copyright>
+  <copyright>
+   <year>2004</year>
+   <holder>Amit S. Kale</holder>
+  </copyright>
+
+  <legalnotice>
+   <para>
+   This file is licensed under the terms of the GNU General Public License
+   version 2. This program is licensed "as is" without any warranty of any
+   kind, whether express or implied.
+   </para>
+
+  </legalnotice>
+ </bookinfo>
+
+<toc></toc>
+  <chapter id="Introduction">
+    <title>Introduction</title>
+    <para>
+    kgdb is a source level debugger for the Linux kernel. It is used
+    along with gdb to debug a Linux kernel.  The expectation is that gdb
+    can be used to "break in" to the kernel to inspect memory, variables
+    and look through call stack information similar to what an
+    application developer would use gdb for.  It is possible to place
+    breakpoints in kernel code and perform some limited execution
+    stepping.
+    </para>
+    <para>
+    Two machines are required for using kgdb. One of these machines is a
+    development machine and the other is a test machine.  The kernel
+    to be debugged runs on the test machine. The development machine
+    runs an instance of gdb against the vmlinux file which contains
+    the symbols (not boot image such as bzImage, zImage, uImage...).
+    In gdb the developer specifies the connection parameters and
+    connects to kgdb.  Depending on which kgdb I/O modules exist in
+    the kernel for a given architecture, it may be possible to debug
+    the test machine's kernel with the development machine using an
+    rs232 or ethernet connection.
+    </para>
+  </chapter>
+  <chapter id="CompilingAKernel">
+    <title>Compiling a kernel</title>
+    <para>
+    To enable <symbol>CONFIG_KGDB</symbol>, look under the "Kernel debugging"
+    menu and then select "KGDB: kernel debugging with remote gdb".
+    </para>
+    <para>
+    Next you should choose one or more I/O drivers to interconnect the
+    debugging host and debugged target.  Early boot debugging requires a
+    KGDB I/O driver that supports early debugging and the driver must be
+    built into the kernel directly. Kgdb I/O driver configuration
+    takes place via kernel or module parameters; see the following
+    chapter.
+    </para>
+    <para>
+    The kgdb test compile options are described in the kgdb test suite chapter.
+    </para>
+
+  </chapter>
+  <chapter id="EnableKGDB">
+   <title>Enable kgdb for debugging</title>
+   <para>
+   In order to use kgdb you must activate it by passing configuration
+   information to one of the kgdb I/O drivers.  If you do not pass any
+   configuration information, kgdb will not do anything at all.  Kgdb
+   will only actively hook up to the kernel trap hooks if a kgdb I/O
+   driver is loaded and configured.  If you unconfigure a kgdb I/O
+   driver, kgdb will unregister all the kernel hook points.
+   </para>
+   <para>
+   All drivers can be reconfigured at run time, if
+   <symbol>CONFIG_SYSFS</symbol> and <symbol>CONFIG_MODULES</symbol>
+   are enabled, by echo'ing a new config string to
+   <constant>/sys/module/&lt;driver&gt;/parameters/&lt;option&gt;</constant>.
+   The driver can be unconfigured by passing an empty string.  You cannot
+   change the configuration while the debugger is attached.  Make sure
+   to detach the debugger with the <constant>detach</constant> command
+   prior to trying to unconfigure a kgdb I/O driver.
+   </para>
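+   <para>
+   For example (illustrative; this assumes kgdboc is already loaded),
+   kgdboc could be repointed at another port and then unconfigured:
+   </para>
+   <programlisting>
+   echo ttyS1,115200 &gt; /sys/module/kgdboc/parameters/kgdboc
+   echo "" &gt; /sys/module/kgdboc/parameters/kgdboc
+   </programlisting>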
+   <sect1 id="kgdbwait">
+   <title>Kernel parameter: kgdbwait</title>
+   <para>
+   The Kernel command line option <constant>kgdbwait</constant> makes
+   kgdb wait for a debugger connection during booting of a kernel.  You
+   can only use this option you compiled a kgdb I/O driver into the
+   kernel and you specified the I/O driver configuration as a kernel
+   command line option.  The kgdbwait parameter should always follow the
+   configuration parameter for the kgdb I/O driver in the kernel
+   command line else the I/O driver will not be configured prior to
+   asking the kernel to use it to wait.
+   </para>
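+   <para>
+   For example (assuming a serial console on ttyS0, as in the kgdboc
+   examples below), such a command line might look like:
+   </para>
+   <programlisting>
+   console=ttyS0,115200 kgdboc=ttyS0,115200 kgdbwait
+   </programlisting>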
+   <para>
+   The kernel will stop and wait as early as the I/O driver and
+   architecture will allow when you use this option.  If you build the
+   kgdb I/O driver as a kernel module kgdbwait will not do anything.
+   </para>
+   </sect1>
+  <sect1 id="kgdboc">
+  <title>Kernel parameter: kgdboc</title>
+  <para>
+  The name of the kgdboc driver was originally an abbreviation for
+  "kgdb over console".  Kgdboc is designed to work with a single
+  serial port. It was meant to cover the circumstance
+  where you wanted to use a serial console as your primary console as
+  well as using it to perform kernel debugging.  Of course you can
+  also use kgdboc without assigning a console to the same port.
+  </para>
+  <sect2 id="UsingKgdboc">
+  <title>Using kgdboc</title>
+  <para>
+  You can configure kgdboc via sysfs or a module or kernel boot line
+  parameter, depending on whether you build CONFIG_KGDBOC as a module
+  or into the kernel.
+  <orderedlist>
+  <listitem><para>From the module load or built-in</para>
+  <para><constant>kgdboc=&lt;tty-device&gt;,[baud]</constant></para>
+  <para>
+  For example, if your console port is typically ttyS0, you would use something like <constant>kgdboc=ttyS0,115200</constant>; on the ARM Versatile AB you would likely use <constant>kgdboc=ttyAMA0,115200</constant>.
+  </para>
+  </listitem>
+  <listitem><para>From sysfs</para>
+  <para><constant>echo ttyS0 &gt; /sys/module/kgdboc/parameters/kgdboc</constant></para>
+  </listitem>
+  </orderedlist>
+  </para>
+  <para>
+  NOTE: Kgdboc does not support interrupting the target via the
+  gdb remote protocol.  You must manually send a sysrq-g unless you
+  have a proxy that splits console output to a terminal program and
+  has a separate port for the debugger to connect to that sends the
+  sysrq-g for you.
+  </para>
+  <para>When using kgdboc with no debugger proxy, you can end up
+  connecting the debugger at one of two entry points.  If an
+  exception occurs after you have loaded kgdboc, a message should print
+  on the console stating it is waiting for the debugger.  In that case
+  you disconnect your terminal program and then connect the debugger in
+  its place.  If you want to interrupt the target system and forcibly
+  enter a debug session you have to issue a Sysrq sequence and then
+  type the letter <constant>g</constant>.  Then you disconnect the
+  terminal session and connect gdb.  Your options if you don't like
+  this are to hack gdb to send the sysrq-g for you as well as on the
+  initial connect, or to use a debugger proxy that allows an
+  unmodified gdb to do the debugging.
+  </para>
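+  <para>
+  For example (assuming the magic sysrq feature is enabled), the
+  sysrq-g can also be triggered from a shell on the target with:
+  </para>
+  <programlisting>
+  echo g &gt; /proc/sysrq-trigger
+  </programlisting>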
+  </sect2>
+  </sect1>
+  <sect1 id="kgdbcon">
+  <title>Kernel parameter: kgdbcon</title>
+  <para>
+  Kgdb supports using the gdb serial protocol to send console messages
+  to the debugger when the debugger is connected and running.  There
+  are two ways to activate this feature.
+  <orderedlist>
+  <listitem><para>Activate with the kernel command line option:</para>
+  <para><constant>kgdbcon</constant></para>
+  </listitem>
+  <listitem><para>Use sysfs before configuring an I/O driver</para>
+  <para>
+  <constant>echo 1 &gt; /sys/module/kgdb/parameters/kgdb_use_con</constant>
+  </para>
+  <para>
+  NOTE: If you do this after you configure the kgdb I/O driver, the
+  setting will not take effect until the next time the I/O driver is
+  reconfigured.
+  </para>
+  </listitem>
+  </orderedlist>
+  </para>
+  <para>
+  IMPORTANT NOTE: Using this option with kgdb over the console
+  (kgdboc) or kgdb over ethernet (kgdboe) is not supported.
+  </para>
+  </sect1>
+  </chapter>
+  <chapter id="ConnectingGDB">
+  <title>Connecting gdb</title>
+    <para>
+    If you are using kgdboc, then before you can connect gdb you need
+    to have used kgdbwait as a boot argument or issued a sysrq-g, or
+    the system you are going to debug must have already taken an
+    exception and be waiting for the debugger to attach.
+    </para>
+    <para>
+    If you are using a kgdb I/O driver other than kgdboc, you
+    should be able to connect and the target will automatically
+    respond.
+    </para>
+    <para>
+    Example (using a serial port):
+    </para>
+    <programlisting>
+    % gdb ./vmlinux
+    (gdb) set remotebaud 115200
+    (gdb) target remote /dev/ttyS0
+    </programlisting>
+    <para>
+    Example (kgdb to a terminal server on TCP port 2012):
+    </para>
+    <programlisting>
+    % gdb ./vmlinux
+    (gdb) target remote 192.168.2.2:2012
+    </programlisting>
+    <para>
+    Example (kgdb over ethernet):
+    </para>
+    <programlisting>
+    % gdb ./vmlinux
+    (gdb) target remote udp:192.168.2.2:6443
+    </programlisting>
+    <para>
+    Once connected, you can debug a kernel the way you would debug an
+    application program.
+    </para>
+    <para>
+    If you are having problems connecting or something is going
+    seriously wrong while debugging, it will most often be the case
+    that you want to enable gdb to be verbose about its target
+    communications.  You do this prior to issuing the <constant>target
+    remote</constant> command by typing in: <constant>set debug remote 1</constant>
+    </para>
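+    <para>
+    For example:
+    </para>
+    <programlisting>
+    % gdb ./vmlinux
+    (gdb) set debug remote 1
+    (gdb) target remote /dev/ttyS0
+    </programlisting>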
+  </chapter>
+  <chapter id="KGDBTestSuite">
+    <title>kgdb Test Suite</title>
+    <para>
+    When kgdb is enabled in the kernel config you can also elect to
+    enable the config parameter KGDB_TESTS.  Turning this on will
+    enable a special kgdb I/O module which is designed to test the
+    kgdb internal functions.
+    </para>
+    <para>
+    The kgdb tests are mainly intended for developers to test the kgdb
+    internals as well as a tool for developing a new kgdb architecture
+    specific implementation.  These tests are not really for end users
+    of the Linux kernel.  The primary source of documentation would be
+    to look in the drivers/misc/kgdbts.c file.
+    </para>
+    <para>
+    The kgdb test suite can also be configured at compile time to run
+    the core set of tests by setting the kernel config parameter
+    KGDB_TESTS_ON_BOOT.  This particular option is aimed at automated
+    regression testing and does not require modifying the kernel boot
+    config arguments.  If this is turned on, the kgdb test suite can
+    be disabled by specifying "kgdbts=" as a kernel boot argument.
+    </para>
+  </chapter>
+  <chapter id="CommonBackEndReq">
+  <title>KGDB Internals</title>
+  <sect1 id="kgdbArchitecture">
+    <title>Architecture Specifics</title>
+      <para>
+      Kgdb is organized into three basic components:
+      <orderedlist>
+      <listitem><para>kgdb core</para>
+      <para>
+      The kgdb core is found in kernel/kgdb.c.  It contains:
+      <itemizedlist>
+      <listitem><para>All the logic to implement the gdb serial protocol</para></listitem>
+      <listitem><para>A generic OS exception handler which includes sync'ing the processors into a stopped state on a multi-cpu system.</para></listitem>
+      <listitem><para>The API to talk to the kgdb I/O drivers</para></listitem>
+      <listitem><para>The API to make calls to the arch specific kgdb implementation</para></listitem>
+      <listitem><para>The logic to perform safe reads and writes of memory while using the debugger</para></listitem>
+      <listitem><para>A full implementation for software breakpoints unless overridden by the arch</para></listitem>
+      </itemizedlist>
+      </para>
+      </listitem>
+      <listitem><para>kgdb arch specific implementation</para>
+      <para>
+      This implementation is generally found in arch/*/kernel/kgdb.c.
+      As an example, arch/x86/kernel/kgdb.c contains the specifics to
+      implement HW breakpoints as well as the initialization to
+      dynamically register and unregister for the trap handlers on
+      this architecture.  The arch specific portion implements:
+      <itemizedlist>
+      <listitem><para>An arch specific trap catcher which
+      invokes kgdb_handle_exception() to start kgdb doing its
+      work</para></listitem>
+      <listitem><para>Translation between the gdb specific packet format and pt_regs</para></listitem>
+      <listitem><para>Registration and unregistration of architecture specific trap hooks</para></listitem>
+      <listitem><para>Any special exception handling and cleanup</para></listitem>
+      <listitem><para>NMI exception handling and cleanup</para></listitem>
+      <listitem><para>(optional) HW breakpoints</para></listitem>
+      </itemizedlist>
+      </para>
+      </listitem>
+      <listitem><para>kgdb I/O driver</para>
+      <para>
+      Each kgdb I/O driver has to provide an implementation for the following:
+      <itemizedlist>
+      <listitem><para>configuration via builtin or module</para></listitem>
+      <listitem><para>dynamic configuration and kgdb hook registration calls</para></listitem>
+      <listitem><para>read and write character interface</para></listitem>
+      <listitem><para>A cleanup handler for unconfiguring from the kgdb core</para></listitem>
+      <listitem><para>(optional) Early debug methodology</para></listitem>
+      </itemizedlist>
+      Any given kgdb I/O driver has to operate very closely with the
+      hardware and must do it in such a way that it does not enable
+      interrupts or change other parts of the system context without
+      completely restoring them. The kgdb core will repeatedly "poll"
+      a kgdb I/O driver for characters when it needs input.  The I/O
+      driver is expected to return immediately if there is no data
+      available.  Doing so allows for the future possibility to touch
+      watchdog hardware in such a way that a target system does not
+      reset when these are enabled.  (A hedged skeleton of such a
+      driver follows this list.)
+      </para>
+      </listitem>
+      </orderedlist>
+      </para>
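+      <para>
+      A hedged skeleton of the registration half of such a driver is
+      shown below; the my_hw_* routines are hypothetical hardware
+      accessors, and only the character read/write fields of
+      <constant>struct kgdb_io</constant> are filled in:
+      </para>
+      <programlisting>
+#include &lt;linux/kgdb.h&gt;
+
+static int my_read_char(void)
+{
+	/* Poll the (hypothetical) hardware and return the next byte. */
+	return my_hw_poll_rx();
+}
+
+static void my_write_char(u8 ch)
+{
+	my_hw_poll_tx(ch);	/* busy-wait transmit of a single byte */
+}
+
+static struct kgdb_io my_kgdb_io_ops = {
+	.name		= "my_kgdb_io",
+	.read_char	= my_read_char,
+	.write_char	= my_write_char,
+};
+
+static int __init my_kgdb_init(void)
+{
+	/* Hook this driver into the kgdb core. */
+	return kgdb_register_io_module(&amp;my_kgdb_io_ops);
+}
+
+static void __exit my_kgdb_exit(void)
+{
+	kgdb_unregister_io_module(&amp;my_kgdb_io_ops);
+}
+      </programlisting>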
+      <para>
+      If you are intent on adding kgdb architecture specific support
+      for a new architecture, the architecture should define
+      <constant>HAVE_ARCH_KGDB</constant> in the architecture specific
+      Kconfig file.  This will enable kgdb for the architecture, and
+      at that point you must create an architecture specific kgdb
+      implementation.
+      </para>
+      <para>
+      There are a few flags which must be set on every architecture in
+      their &lt;asm/kgdb.h&gt; file.  These are:
+      <itemizedlist>
+        <listitem>
+	  <para>
+	  NUMREGBYTES: The size in bytes of all of the registers, so
+	  that we can ensure they will all fit into a packet.
+	  </para>
+	  <para>
+	  BUFMAX: The size in bytes of the buffer GDB will read into.
+	  This must be larger than NUMREGBYTES.
+	  </para>
+	  <para>
+	  CACHE_FLUSH_IS_SAFE: Set to 1 if it is always safe to call
+	  flush_cache_range or flush_icache_range.  On some architectures,
+	  these functions may not be safe to call on SMP since we keep other
+	  CPUs in a holding pattern.
+	  </para>
+	</listitem>
+      </itemizedlist>
+      </para>
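+      <para>
+      For illustration only (actual values vary per architecture), an
+      architecture's &lt;asm/kgdb.h&gt; might contain:
+      </para>
+      <programlisting>
+#define NUMREGBYTES		64	/* size in bytes of all registers */
+#define BUFMAX			1024	/* gdb packet buffer, larger than NUMREGBYTES */
+#define CACHE_FLUSH_IS_SAFE	1	/* flush_icache_range() is safe here */
+      </programlisting>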
+      <para>
+      There are also the following functions for the common backend,
+      found in kernel/kgdb.c, that must be supplied by the
+      architecture-specific backend unless marked as (optional), in
+      which case a default function may be used if the architecture
+      does not need to provide a specific implementation.
+      </para>
+!Iinclude/linux/kgdb.h
+  </sect1>
+  <sect1 id="kgdbocDesign">
+  <title>kgdboc internals</title>
+  <para>
+  The kgdboc driver is actually a very thin driver that relies on the
+  underlying low level hardware driver having "polling hooks" to
+  which the tty driver is attached.  In the initial
+  implementation of kgdboc, the serial_core was changed to expose a
+  low level uart hook for doing polled mode reading and writing of a
+  single character while in an atomic context.  When kgdb makes an I/O
+  request to the debugger, kgdboc invokes a callback in the serial
+  core which in turn uses the callback in the uart driver.  It is
+  certainly possible to extend kgdboc to work with non-uart based
+  consoles in the future.
+  </para>
+  <para>
+  When using kgdboc with a uart, the uart driver must implement two callbacks in the <constant>struct uart_ops</constant>. Example from drivers/serial/8250.c:<programlisting>
+#ifdef CONFIG_CONSOLE_POLL
+	.poll_get_char = serial8250_get_poll_char,
+	.poll_put_char = serial8250_put_poll_char,
+#endif
+  </programlisting>
+  Any implementation specifics around creating a polling driver should
+  be guarded by <constant>#ifdef CONFIG_CONSOLE_POLL</constant>, as
+  shown above.
+  Keep in mind that polling hooks have to be implemented in such a way
+  that they can be called from an atomic context and have to restore
+  the state of the uart chip on return such that the system can return
+  to normal when the debugger detaches.  You need to be very careful
+  with any kind of lock you consider, because failing here is most
+  likely going to mean pressing the reset button.
+  </para>
+  </sect1>
+  </chapter>
+  <chapter id="credits">
+     <title>Credits</title>
+	<para>
+		The following people have contributed to this document:
+		<orderedlist>
+			<listitem><para>Amit Kale<email>amitkale@linsyssoft.com</email></para></listitem>
+			<listitem><para>Tom Rini<email>trini@kernel.crashing.org</email></para></listitem>
+		</orderedlist>
+                In March 2008 this document was completely rewritten by:
+		<itemizedlist>
+		<listitem><para>Jason Wessel<email>jason.wessel@windriver.com</email></para></listitem>
+		</itemizedlist>
+	</para>
+  </chapter>
+</book>
+
diff --git a/Documentation/debugging-via-ohci1394.txt b/Documentation/debugging-via-ohci1394.txt
index c360d4e..59a91e5 100644
--- a/Documentation/debugging-via-ohci1394.txt
+++ b/Documentation/debugging-via-ohci1394.txt
@@ -41,15 +41,19 @@
 This can be turned off by ohci1394's module parameter phys_dma=0.
 
 The alternative firewire-ohci driver in drivers/firewire uses filtered physical
-DMA, hence is not yet suitable for remote debugging.
+DMA by default, which is more secure but not suitable for remote debugging.
+Compile the driver with CONFIG_FIREWIRE_OHCI_REMOTE_DMA (Kernel hacking menu:
+Remote debugging over FireWire with firewire-ohci) to get unfiltered physical
+DMA.
 
-Because ohci1394 depends on the PCI enumeration to be completed, an
-initialization routine which runs pretty early (long before console_init()
-which makes the printk buffer appear on the console can be called) was written.
+Because ohci1394 and firewire-ohci depend on the PCI enumeration to be
+completed, an initialization routine which runs pretty early has been
+implemented for x86.  This routine runs long before console_init() can be
+called, i.e. before the printk buffer appears on the console.
 
 To activate it, enable CONFIG_PROVIDE_OHCI1394_DMA_INIT (Kernel hacking menu:
-Provide code for enabling DMA over FireWire early on boot) and pass the
-parameter "ohci1394_dma=early" to the recompiled kernel on boot.
+Remote debugging over FireWire early on boot) and pass the parameter
+"ohci1394_dma=early" to the recompiled kernel on boot.
 
 Tools
 -----
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index ee3cc8b..af0e939 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -204,7 +204,7 @@
 ---------------------------
 
 What:	i386/x86_64 bzImage symlinks
-When:	April 2008
+When:	April 2010
 
 Why:	The i386/x86_64 merge provides a symlink to the old bzImage
 	location so not yet updated user space tools, e.g. package
@@ -281,3 +281,13 @@
 	code / infrastructure should be in the kernel and not in some
 	out-of-tree driver.
 Who:	Thomas Gleixner <tglx@linutronix.de>
+
+---------------------------
+
+What:	/sys/o2cb symlink
+When:	January 2010
+Why:	/sys/fs/o2cb is the proper location for this information - /sys/o2cb
+	exists as a symlink for backwards compatibility for old versions of
+	ocfs2-tools. 2 years should be sufficient time to phase in new versions
+	which know to look in /sys/fs/o2cb.
+Who:	ocfs2-devel@oss.oracle.com
diff --git a/Documentation/filesystems/xfs.txt b/Documentation/filesystems/xfs.txt
index 74aeb14..0a1668b 100644
--- a/Documentation/filesystems/xfs.txt
+++ b/Documentation/filesystems/xfs.txt
@@ -52,16 +52,15 @@
 	and also gets the setgid bit set if it is a directory itself.
 
   ihashsize=value
-	Sets the number of hash buckets available for hashing the
-	in-memory inodes of the specified mount point.  If a value
-	of zero is used, the value selected by the default algorithm
-	will be displayed in /proc/mounts.
+	In-memory inode hashes have been removed, so this option has
+	no effect as of August 2007.  This option is deprecated.
 
   ikeep/noikeep
-	When inode clusters are emptied of inodes, keep them around
-	on the disk (ikeep) - this is the traditional XFS behaviour
-	and is still the default for now.  Using the noikeep option,
-	inode clusters are returned to the free space pool.
+	When ikeep is specified, XFS does not delete empty inode clusters
+	and keeps them around on disk. ikeep is the traditional XFS
+	behaviour. When noikeep is specified, empty inode clusters
+	are returned to the free space pool. The default is noikeep for
+	non-DMAPI mounts, while ikeep is the default when DMAPI is in use.
 
   inode64
 	Indicates that XFS is allowed to create inodes at any location
diff --git a/Documentation/i386/boot.txt b/Documentation/i386/boot.txt
index fc49b79..2eb1610 100644
--- a/Documentation/i386/boot.txt
+++ b/Documentation/i386/boot.txt
@@ -170,6 +170,8 @@
 0238/4	2.06+	cmdline_size	Maximum size of the kernel command line
 023C/4	2.07+	hardware_subarch Hardware subarchitecture
 0240/8	2.07+	hardware_subarch_data Subarchitecture-specific data
+0248/4	2.08+	payload_offset	Offset of kernel payload
+024C/4	2.08+	payload_length	Length of kernel payload
 
 (1) For backwards compatibility, if the setup_sects field contains 0, the
     real value is 4.
@@ -512,6 +514,32 @@
 
   A pointer to data that is specific to hardware subarch
 
+Field name:	payload_offset
+Type:		read
+Offset/size:	0x248/4
+Protocol:	2.08+
+
+  If non-zero, this field contains the offset from the end of the
+  real-mode code to the payload.
+
+  The payload may be compressed. The format of both the compressed and
+  uncompressed data should be determined using the standard magic
+  numbers. Currently only gzip compressed ELF is used.
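+
+  As an illustration, the payload's offset from the start of the image
+  file works out to (setup_sects + 1) * 512 + payload_offset, since the
+  real-mode code occupies the first (setup_sects + 1) 512-byte sectors.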
+
+Field name:	payload_length
+Type:		read
+Offset/size:	0x24c/4
+Protocol:	2.08+
+
+  The length of the payload.
+
+**** THE IMAGE CHECKSUM
+
+From boot protocol version 2.08 onwards the CRC-32 is calculated over
+the entire file using the characteristic polynomial 0x04C11DB7 and an
+initial remainder of 0xffffffff.  The checksum is appended to the
+file; therefore the CRC of the file up to the limit specified in the
+syssize field of the header is always 0.
 
 **** THE KERNEL COMMAND LINE
 
diff --git a/Documentation/ide/ide.txt b/Documentation/ide/ide.txt
index 818676a..486c699 100644
--- a/Documentation/ide/ide.txt
+++ b/Documentation/ide/ide.txt
@@ -71,29 +71,6 @@
 ones), for the drives/geometries attached to those interfaces, and for the IRQ
 lines being used by the interfaces (normally 14, 15 for ide0/ide1).
 
-For special cases, interfaces may be specified using kernel "command line"
-options.  For example,
-
-	ide3=0x168,0x36e,10	/* ioports 0x168-0x16f,0x36e, irq 10 */
-
-Normally the irq number need not be specified, as ide.c will probe for it:
-
-	ide3=0x168,0x36e	/* ioports 0x168-0x16f,0x36e */
-
-The standard port, and irq values are these:
-
-	ide0=0x1f0,0x3f6,14
-	ide1=0x170,0x376,15
-	ide2=0x1e8,0x3ee,11
-	ide3=0x168,0x36e,10
-
-Note that the first parameter reserves 8 contiguous ioports, whereas the
-second value denotes a single ioport. If in doubt, do a 'cat /proc/ioports'.
-
-In all probability the device uses these ports and IRQs if it is attached
-to the appropriate ide channel.  Pass the parameter for the correct ide
-channel to the kernel, as explained above.
-
 Any number of interfaces may share a single IRQ if necessary, at a slight
 performance penalty, whether on separate cards or a single VLB card.
 The IDE driver automatically detects and handles this.  However, this may
@@ -184,13 +161,6 @@
 Please pass on any feedback on any of this stuff to the maintainer,
 whose address can be found in linux/MAINTAINERS.
 
-Note that if BOTH hd.c and ide.c are configured into the kernel,
-hd.c will normally be allowed to control the primary IDE interface.
-This is useful for older hardware that may be incompatible with ide.c,
-and still allows newer hardware to run on the 2nd/3rd/4th IDE ports
-under control of ide.c.   To have ide.c also "take over" the primary
-IDE port in this situation, use the "command line" parameter:  ide0=0x1f0
-
 The IDE driver is modularized.  The high level disk/CD-ROM/tape/floppy
 drivers can always be compiled as loadable modules, the chipset drivers
 can only be compiled into the kernel, and the core code (ide.c) can be
@@ -206,7 +176,7 @@
 driver using the "options=" keyword to insmod, while replacing any ',' with
 ';'.  For example:
 
-	insmod ide.o options="ide0=serialize ide1=serialize ide2=0x1e8;0x3ee;11"
+	insmod ide.o options="hda=nodma hdb=nodma"
 
 
 ================================================================================
@@ -247,21 +217,11 @@
 			  As for VLB, it is safest to not specify it.
 			  Bigger values are safer than smaller ones.
 
- "idex=base"		: probe for an interface at the addr specified,
-			  where "base" is usually 0x1f0 or 0x170
-			  and "ctl" is assumed to be "base"+0x206
-
- "idex=base,ctl"	: specify both base and ctl
-
- "idex=base,ctl,irq"	: specify base, ctl, and irq number
-
  "idex=serialize"	: do not overlap operations on idex. Please note
 			  that you will have to specify this option for
 			  both the respective primary and secondary channel
 			  to take effect.
 
- "idex=four"		: four drives on idex and ide(x^1) share same ports
-
  "idex=reset"		: reset interface after probe
 
  "idex=ata66"		: informs the interface that it has an 80c cable
@@ -269,8 +229,6 @@
 			  ability to bit test for detection is currently
 			  unknown.
 
- "ide=reverse"		: formerly called to pci sub-system, but now local.
-
  "ide=doubler"		: probe/support IDE doublers on Amiga
 
 There may be more options than shown -- use the source, Luke!
@@ -290,6 +248,9 @@
 kernel paremeter to enable probing for VLB version of the chipset (PCI ones
 are detected automatically).
 
+You also need to use the "probe" kernel parameter for the ide-4drives
+driver (support for IDE generic chipset with four drives on one port).
+
 ================================================================================
 
 Some Terminology
diff --git a/Documentation/ide/warm-plug-howto.txt b/Documentation/ide/warm-plug-howto.txt
new file mode 100644
index 0000000..d588546
--- /dev/null
+++ b/Documentation/ide/warm-plug-howto.txt
@@ -0,0 +1,13 @@
+
+IDE warm-plug HOWTO
+===================
+
+To warm-plug devices on a port 'idex':
+
+# echo -n "1" > /sys/class/ide_port/idex/delete_devices
+
+unplug old device(s) and plug new device(s)
+
+# echo -n "1" > /sys/class/ide_port/idex/scan
+
+done
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index dafd001..256a216 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -763,11 +763,11 @@
 			Format: <io>[,<membase>[,<icn_id>[,<icn_id2>]]]
 
 	ide=		[HW] (E)IDE subsystem
-			Format: ide=nodma or ide=doubler or ide=reverse
+			Format: ide=nodma or ide=doubler
 			See Documentation/ide/ide.txt.
 
 	ide?=		[HW] (E)IDE subsystem
-			Format: ide?=noprobe or chipset specific parameters.
+			Format: ide?=ata66 or chipset specific parameters.
 			See Documentation/ide/ide.txt.
 
 	idebus=		[HW] (E)IDE subsystem - VLB/PCI bus speed
@@ -812,6 +812,19 @@
 
 	inttest=	[IA64]
 
+	iommu=		[x86]
+		off
+		force
+		noforce
+		biomerge
+		panic
+		nopanic
+		merge
+		nomerge
+		forcesac
+		soft
+
+
 	intel_iommu=	[DMAR] Intel IOMMU driver (DMAR) option
 		off
 			Disable intel iommu driver.
@@ -928,6 +941,11 @@
 	kstack=N	[X86-32,X86-64] Print N words from the kernel stack
 			in oops dumps.
 
+	kgdboc=		[HW] kgdb over consoles.
+			Requires a tty driver that supports console polling.
+			(only serial supported for now)
+			Format: <serial_device>[,baud]
+
 	l2cr=		[PPC]
 
 	lapic		[X86-32,APIC] Enable the local APIC even if BIOS
@@ -1134,6 +1152,11 @@
 			         or
 			         memmap=0x10000$0x18690000
 
+	memtest=	[KNL,X86_64] Enable memtest
+			Format: <integer>
+			range: 0-4 : pattern number
+			default: 0 (disabled)
+
 	meye.*=		[HW] Set MotionEye Camera parameters
 			See Documentation/video4linux/meye.txt.
 
@@ -1339,6 +1362,10 @@
 
 	nowb		[ARM]
 
+	nptcg=		[IA64] Override max number of concurrent global TLB
+			purges which is reported from either PAL_VM_SUMMARY or
+			SAL PALO.
+
 	numa_zonelist_order= [KNL, BOOT] Select zonelist order for NUMA.
 			one of ['zone', 'node', 'default'] can be specified
 			This can be set from sysctl after boot.
diff --git a/Documentation/s390/s390dbf.txt b/Documentation/s390/s390dbf.txt
index 0eb7c58..e054209 100644
--- a/Documentation/s390/s390dbf.txt
+++ b/Documentation/s390/s390dbf.txt
@@ -115,6 +115,27 @@
 Description:  Allocates memory for a debug log     
               Must not be called within an interrupt handler 
 
+----------------------------------------------------------------------------
+debug_info_t *debug_register_mode(char *name, int pages, int nr_areas,
+				  int buf_size, mode_t mode, uid_t uid,
+				  gid_t gid);
+
+Parameter:    name:	   Name of debug log (e.g. used for debugfs entry)
+	      pages:	   Number of pages, which will be allocated per area
+	      nr_areas:    Number of debug areas
+	      buf_size:    Size of data area in each debug entry
+	      mode:	   File mode for debugfs files. E.g. S_IRWXUGO
+	      uid:	   User ID for debugfs files. Currently only 0 is
+			   supported.
+	      gid:	   Group ID for debugfs files. Currently only 0 is
+			   supported.
+
+Return Value: Handle for generated debug area
+	      NULL if register failed
+
+Description:  Allocates memory for a debug log
+	      Must not be called within an interrupt handler
+
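+Example:
+
+/* Illustrative call: 2 pages per area, 1 area, 16-byte data entries;
+ * file mode, uid and gid as described above (uid/gid must be 0)    */
+debug_info_t *id = debug_register_mode("mydbf", 2, 1, 16,
+				       S_IRWXUGO, 0, 0);
+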
 ---------------------------------------------------------------------------
 void debug_unregister (debug_info_t * id);
 
diff --git a/Documentation/scsi/st.txt b/Documentation/scsi/st.txt
index b7be95b..4075260 100644
--- a/Documentation/scsi/st.txt
+++ b/Documentation/scsi/st.txt
@@ -2,7 +2,7 @@
 The driver is currently maintained by Kai Mäkisara (email
 Kai.Makisara@kolumbus.fi)
 
-Last modified: Mon Mar  7 21:14:44 2005 by kai.makisara
+Last modified: Sun Feb 24 21:59:07 2008 by kai.makisara
 
 
 BASICS
@@ -133,6 +133,11 @@
 file 'dev' contains the device numbers corresponding to this device. The links
 'device' and 'driver' point to the SCSI device and driver entries.
 
+Each directory also contains the entry 'options' which shows the currently
+enabled driver and mode options. The value in the file is a bit mask where the
+bit definitions are the same as those used with MTSETDRVBUFFER in setting the
+options.
+
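+For example (illustrative device name), the current mask could be read with:
+
+# cat /sys/class/scsi_tape/st0/options
+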
 A link named 'tape' is made from the SCSI device directory to the class
 directory corresponding to the mode 0 auto-rewind device (e.g., st0). 
 
@@ -372,6 +377,11 @@
 	     MT_ST_SYSV sets the SYSV semantics (mode)
 	     MT_ST_NOWAIT enables immediate mode (i.e., don't wait for
 	        the command to finish) for some commands (e.g., rewind)
+	     MT_ST_SILI enables setting the SILI bit in SCSI commands when
+		reading in variable block mode to enhance performance when
+		reading blocks shorter than the byte count; set this only
+		if you are sure that the drive supports SILI and the HBA
+		correctly returns transfer residuals
 	     MT_ST_DEBUGGING debugging (global; debugging must be
 		compiled into the driver)
 	MT_ST_SETBOOLEANS
diff --git a/Documentation/hrtimers/highres.txt b/Documentation/timers/highres.txt
similarity index 100%
rename from Documentation/hrtimers/highres.txt
rename to Documentation/timers/highres.txt
diff --git a/Documentation/hrtimers/hrtimers.txt b/Documentation/timers/hrtimers.txt
similarity index 100%
rename from Documentation/hrtimers/hrtimers.txt
rename to Documentation/timers/hrtimers.txt
diff --git a/Documentation/hrtimer/timer_stats.txt b/Documentation/timers/timer_stats.txt
similarity index 100%
rename from Documentation/hrtimer/timer_stats.txt
rename to Documentation/timers/timer_stats.txt
diff --git a/Documentation/x86/pat.txt b/Documentation/x86/pat.txt
new file mode 100644
index 0000000..17965f9
--- /dev/null
+++ b/Documentation/x86/pat.txt
@@ -0,0 +1,100 @@
+
+PAT (Page Attribute Table)
+
+x86 Page Attribute Table (PAT) allows for setting the memory attribute at
+page level granularity. PAT is complementary to the MTRR settings, which allow
+setting memory types over physical address ranges. However, PAT is
+more flexible than MTRR due to its capability to set attributes at page level
+and also due to the fact that there are no hardware limitations on the number
+of such attribute settings allowed. This added flexibility comes with
+guidelines for avoiding memory type aliasing of the same physical memory
+through multiple virtual addresses.
+
+PAT allows for different types of memory attributes. The most commonly used
+ones that will be supported at this time are Write-back, Uncached,
+Write-combined and Uncached Minus.
+
+There are many different APIs in the kernel that allow setting of memory
+attributes at the page level. In order to avoid aliasing, these interfaces
+should be used thoughtfully. Below is a table of interfaces available,
+their intended usage and their memory attribute relationships. Internally,
+these APIs use a reserve_memtype()/free_memtype() interface on the physical
+address range to avoid any aliasing.
+
+
+-------------------------------------------------------------------
+API                    |    RAM   |  ACPI,...  |  Reserved/Holes  |
+-----------------------|----------|------------|------------------|
+                       |          |            |                  |
+ioremap                |    --    |    UC      |       UC         |
+                       |          |            |                  |
+ioremap_cache          |    --    |    WB      |       WB         |
+                       |          |            |                  |
+ioremap_nocache        |    --    |    UC      |       UC         |
+                       |          |            |                  |
+ioremap_wc             |    --    |    --      |       WC         |
+                       |          |            |                  |
+set_memory_uc          |    UC    |    --      |       --         |
+ set_memory_wb         |          |            |                  |
+                       |          |            |                  |
+set_memory_wc          |    WC    |    --      |       --         |
+ set_memory_wb         |          |            |                  |
+                       |          |            |                  |
+pci sysfs resource     |    --    |    --      |       UC         |
+                       |          |            |                  |
+pci sysfs resource_wc  |    --    |    --      |       WC         |
+ is IORESOURCE_PREFETCH|          |            |                  |
+                       |          |            |                  |
+pci proc               |    --    |    --      |       UC         |
+ !PCIIOC_WRITE_COMBINE |          |            |                  |
+                       |          |            |                  |
+pci proc               |    --    |    --      |       WC         |
+ PCIIOC_WRITE_COMBINE  |          |            |                  |
+                       |          |            |                  |
+/dev/mem               |    --    |    UC      |       UC         |
+ read-write            |          |            |                  |
+                       |          |            |                  |
+/dev/mem               |    --    |    UC      |       UC         |
+ mmap SYNC flag        |          |            |                  |
+                       |          |            |                  |
+/dev/mem               |    --    |  WB/WC/UC  |    WB/WC/UC      |
+ mmap !SYNC flag       |          |(from exist-|  (from exist-    |
+ and                   |          |  ing alias)|    ing alias)    |
+ any alias to this area|          |            |                  |
+                       |          |            |                  |
+/dev/mem               |    --    |    WB      |       WB         |
+ mmap !SYNC flag       |          |            |                  |
+ no alias to this area |          |            |                  |
+ and                   |          |            |                  |
+ MTRR says WB          |          |            |                  |
+                       |          |            |                  |
+/dev/mem               |    --    |    --      |    UC_MINUS      |
+ mmap !SYNC flag       |          |            |                  |
+ no alias to this area |          |            |                  |
+ and                   |          |            |                  |
+ MTRR says !WB         |          |            |                  |
+                       |          |            |                  |
+-------------------------------------------------------------------
+
+Notes:
+
+"--" in the above table means "not suggested usage for the API". Some of the
+--'s are strictly enforced by the kernel. Some others are not really enforced
+today, but may be enforced in the future.
+
+For ioremap and pci access through /sys or /proc - the actual type returned
+can be more restrictive, in case of any existing aliasing for that address.
+For example: if there is an existing uncached mapping, a new ioremap_wc can
+return an uncached mapping in place of the requested write-combined one.
+
+set_memory_[uc|wc] and set_memory_wb should be used in pairs: a driver should
+first make a region uc or wc and switch it back to wb after use.
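+
+A minimal sketch of such a pairing (vaddr and nrpages are illustrative
+names describing an existing kernel mapping of RAM):
+
+	int err = set_memory_wc(vaddr, nrpages); /* make range write-combined */
+	if (!err) {
+		/* ... use the region ... */
+		set_memory_wb(vaddr, nrpages);   /* switch back to write-back */
+	}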
+
+Over time writes to /proc/mtrr will be deprecated in favor of using PAT based
+interfaces. Users writing to /proc/mtrr are suggested to use above interfaces.
+
+Drivers should use ioremap_[uc|wc] to access PCI BARs with [uc|wc] access
+types.
+
+Drivers should use set_memory_[uc|wc] to set access type for RAM ranges.
+
diff --git a/Documentation/x86_64/boot-options.txt b/Documentation/x86_64/boot-options.txt
index 34abae4..b0c7b6c 100644
--- a/Documentation/x86_64/boot-options.txt
+++ b/Documentation/x86_64/boot-options.txt
@@ -307,3 +307,8 @@
 			stuck (default)
 
 Miscellaneous
+
+	nogbpages
+		Do not use GB pages for kernel direct mappings.
+	gbpages
+		Use GB pages for kernel direct mappings.
diff --git a/MAINTAINERS b/MAINTAINERS
index 22d688c..45b86ab 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2310,6 +2310,12 @@
 L:	kexec@lists.infradead.org
 S:	Maintained
 
+KGDB
+P:	Jason Wessel
+M:	jason.wessel@windriver.com
+L:	kgdb-bugreport@lists.sourceforge.net
+S:	Maintained
+
 KPROBES
 P:	Ananth N Mavinakayanahalli
 M:	ananth@in.ibm.com
@@ -2937,6 +2943,7 @@
 M:	joel.becker@oracle.com
 L:	ocfs2-devel@oss.oracle.com
 W:	http://oss.oracle.com/projects/ocfs2/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mfasheh/ocfs2.git
 S:	Supported
 
 OMNIKEY CARDMAN 4000 DRIVER
diff --git a/arch/alpha/kernel/Makefile b/arch/alpha/kernel/Makefile
index dccf052..ac706c1 100644
--- a/arch/alpha/kernel/Makefile
+++ b/arch/alpha/kernel/Makefile
@@ -7,7 +7,7 @@
 EXTRA_CFLAGS	:= -Werror -Wno-sign-compare
 
 obj-y    := entry.o traps.o process.o init_task.o osf_sys.o irq.o \
-	    irq_alpha.o signal.o setup.o ptrace.o time.o semaphore.o \
+	    irq_alpha.o signal.o setup.o ptrace.o time.o \
 	    alpha_ksyms.o systbls.o err_common.o io.o
 
 obj-$(CONFIG_VGA_HOSE)	+= console.o
diff --git a/arch/alpha/kernel/alpha_ksyms.c b/arch/alpha/kernel/alpha_ksyms.c
index e9762a3..d96e742 100644
--- a/arch/alpha/kernel/alpha_ksyms.c
+++ b/arch/alpha/kernel/alpha_ksyms.c
@@ -77,15 +77,6 @@
 EXPORT_SYMBOL(__strncpy_from_user);
 EXPORT_SYMBOL(__strnlen_user);
 
-/* Semaphore helper functions.  */
-EXPORT_SYMBOL(__down_failed);
-EXPORT_SYMBOL(__down_failed_interruptible);
-EXPORT_SYMBOL(__up_wakeup);
-EXPORT_SYMBOL(down);
-EXPORT_SYMBOL(down_interruptible);
-EXPORT_SYMBOL(down_trylock);
-EXPORT_SYMBOL(up);
-
 /* 
  * SMP-specific symbols.
  */
diff --git a/arch/alpha/kernel/semaphore.c b/arch/alpha/kernel/semaphore.c
deleted file mode 100644
index 8d2982a..0000000
--- a/arch/alpha/kernel/semaphore.c
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
- * Alpha semaphore implementation.
- *
- * (C) Copyright 1996 Linus Torvalds
- * (C) Copyright 1999, 2000 Richard Henderson
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-
-/*
- * This is basically the PPC semaphore scheme ported to use
- * the Alpha ll/sc sequences, so see the PPC code for
- * credits.
- */
-
-/*
- * Atomically update sem->count.
- * This does the equivalent of the following:
- *
- *	old_count = sem->count;
- *	tmp = MAX(old_count, 0) + incr;
- *	sem->count = tmp;
- *	return old_count;
- */
-static inline int __sem_update_count(struct semaphore *sem, int incr)
-{
-	long old_count, tmp = 0;
-
-	__asm__ __volatile__(
-	"1:	ldl_l	%0,%2\n"
-	"	cmovgt	%0,%0,%1\n"
-	"	addl	%1,%3,%1\n"
-	"	stl_c	%1,%2\n"
-	"	beq	%1,2f\n"
-	"	mb\n"
-	".subsection 2\n"
-	"2:	br	1b\n"
-	".previous"
-	: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
-	: "Ir" (incr), "1" (tmp), "m" (sem->count));
-
-	return old_count;
-}
-
-/*
- * Perform the "down" function.  Return zero for semaphore acquired,
- * return negative for signalled out of the function.
- *
- * If called from down, the return is ignored and the wait loop is
- * not interruptible.  This means that a task waiting on a semaphore
- * using "down()" cannot be killed until someone does an "up()" on
- * the semaphore.
- *
- * If called from down_interruptible, the return value gets checked
- * upon return.  If the return value is negative then the task continues
- * with the negative value in the return register (it can be tested by
- * the caller).
- *
- * Either form may be used in conjunction with "up()".
- */
-
-void __sched
-__down_failed(struct semaphore *sem)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	printk("%s(%d): down failed(%p)\n",
-	       tsk->comm, task_pid_nr(tsk), sem);
-#endif
-
-	tsk->state = TASK_UNINTERRUPTIBLE;
-	wmb();
-	add_wait_queue_exclusive(&sem->wait, &wait);
-
-	/*
-	 * Try to get the semaphore.  If the count is > 0, then we've
-	 * got the semaphore; we decrement count and exit the loop.
-	 * If the count is 0 or negative, we set it to -1, indicating
-	 * that we are asleep, and then sleep.
-	 */
-	while (__sem_update_count(sem, -1) <= 0) {
-		schedule();
-		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-	}
-	remove_wait_queue(&sem->wait, &wait);
-	tsk->state = TASK_RUNNING;
-
-	/*
-	 * If there are any more sleepers, wake one of them up so
-	 * that it can either get the semaphore, or set count to -1
-	 * indicating that there are still processes sleeping.
-	 */
-	wake_up(&sem->wait);
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	printk("%s(%d): down acquired(%p)\n",
-	       tsk->comm, task_pid_nr(tsk), sem);
-#endif
-}
-
-int __sched
-__down_failed_interruptible(struct semaphore *sem)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	long ret = 0;
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	printk("%s(%d): down failed(%p)\n",
-	       tsk->comm, task_pid_nr(tsk), sem);
-#endif
-
-	tsk->state = TASK_INTERRUPTIBLE;
-	wmb();
-	add_wait_queue_exclusive(&sem->wait, &wait);
-
-	while (__sem_update_count(sem, -1) <= 0) {
-		if (signal_pending(current)) {
-			/*
-			 * A signal is pending - give up trying.
-			 * Set sem->count to 0 if it is negative,
-			 * since we are no longer sleeping.
-			 */
-			__sem_update_count(sem, 0);
-			ret = -EINTR;
-			break;
-		}
-		schedule();
-		set_task_state(tsk, TASK_INTERRUPTIBLE);
-	}
-
-	remove_wait_queue(&sem->wait, &wait);
-	tsk->state = TASK_RUNNING;
-	wake_up(&sem->wait);
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	printk("%s(%d): down %s(%p)\n",
-	       current->comm, task_pid_nr(current),
-	       (ret < 0 ? "interrupted" : "acquired"), sem);
-#endif
-	return ret;
-}
-
-void
-__up_wakeup(struct semaphore *sem)
-{
-	/*
-	 * Note that we incremented count in up() before we came here,
-	 * but that was ineffective since the result was <= 0, and
-	 * any negative value of count is equivalent to 0.
-	 * This ends up setting count to 1, unless count is now > 0
-	 * (i.e. because some other cpu has called up() in the meantime),
-	 * in which case we just increment count.
-	 */
-	__sem_update_count(sem, 1);
-	wake_up(&sem->wait);
-}
-
-void __sched
-down(struct semaphore *sem)
-{
-#ifdef WAITQUEUE_DEBUG
-	CHECK_MAGIC(sem->__magic);
-#endif
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	printk("%s(%d): down(%p) <count=%d> from %p\n",
-	       current->comm, task_pid_nr(current), sem,
-	       atomic_read(&sem->count), __builtin_return_address(0));
-#endif
-	__down(sem);
-}
-
-int __sched
-down_interruptible(struct semaphore *sem)
-{
-#ifdef WAITQUEUE_DEBUG
-	CHECK_MAGIC(sem->__magic);
-#endif
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	printk("%s(%d): down(%p) <count=%d> from %p\n",
-	       current->comm, task_pid_nr(current), sem,
-	       atomic_read(&sem->count), __builtin_return_address(0));
-#endif
-	return __down_interruptible(sem);
-}
-
-int
-down_trylock(struct semaphore *sem)
-{
-	int ret;
-
-#ifdef WAITQUEUE_DEBUG
-	CHECK_MAGIC(sem->__magic);
-#endif
-
-	ret = __down_trylock(sem);
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	printk("%s(%d): down_trylock %s from %p\n",
-	       current->comm, task_pid_nr(current),
-	       ret ? "failed" : "acquired",
-	       __builtin_return_address(0));
-#endif
-
-	return ret;
-}
-
-void
-up(struct semaphore *sem)
-{
-#ifdef WAITQUEUE_DEBUG
-	CHECK_MAGIC(sem->__magic);
-#endif
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	printk("%s(%d): up(%p) <count=%d> from %p\n",
-	       current->comm, task_pid_nr(current), sem,
-	       atomic_read(&sem->count), __builtin_return_address(0));
-#endif
-	__up(sem);
-}
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 00d44c6..6235f72 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -7,7 +7,7 @@
 # Object file lists.
 
 obj-y		:= compat.o entry-armv.o entry-common.o irq.o \
-		   process.o ptrace.o semaphore.o setup.o signal.o \
+		   process.o ptrace.o setup.o signal.o \
 		   sys_arm.o stacktrace.o time.o traps.o
 
 obj-$(CONFIG_ISA_DMA_API)	+= dma.o
diff --git a/arch/arm/kernel/semaphore.c b/arch/arm/kernel/semaphore.c
deleted file mode 100644
index 981fe5c..0000000
--- a/arch/arm/kernel/semaphore.c
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- *  ARM semaphore implementation, taken from
- *
- *  i386 semaphore implementation.
- *
- *  (C) Copyright 1999 Linus Torvalds
- *
- *  Modified for ARM by Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-
-#include <asm/semaphore.h>
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to acquire the semaphore, while the "sleeping"
- * variable is a count of such acquires.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * "sleeping" and the contention routine ordering is
- * protected by the semaphore spinlock.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-
-/*
- * Logic:
- *  - only on a boundary condition do we need to care. When we go
- *    from a negative count to a non-negative, we wake people up.
- *  - when we go from a non-negative count to a negative do we
- *    (a) synchronize with the "sleeper" count and (b) make sure
- *    that we're on the wakeup list before we synchronize so that
- *    we cannot lose wakeup events.
- */
-
-void __up(struct semaphore *sem)
-{
-	wake_up(&sem->wait);
-}
-
-static DEFINE_SPINLOCK(semaphore_lock);
-
-void __sched __down(struct semaphore * sem)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	tsk->state = TASK_UNINTERRUPTIBLE;
-	add_wait_queue_exclusive(&sem->wait, &wait);
-
-	spin_lock_irq(&semaphore_lock);
-	sem->sleepers++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock.
-		 */
-		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irq(&semaphore_lock);
-
-		schedule();
-		tsk->state = TASK_UNINTERRUPTIBLE;
-		spin_lock_irq(&semaphore_lock);
-	}
-	spin_unlock_irq(&semaphore_lock);
-	remove_wait_queue(&sem->wait, &wait);
-	tsk->state = TASK_RUNNING;
-	wake_up(&sem->wait);
-}
-
-int __sched __down_interruptible(struct semaphore * sem)
-{
-	int retval = 0;
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	tsk->state = TASK_INTERRUPTIBLE;
-	add_wait_queue_exclusive(&sem->wait, &wait);
-
-	spin_lock_irq(&semaphore_lock);
-	sem->sleepers ++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * With signals pending, this turns into
-		 * the trylock failure case - we won't be
-		 * sleeping, and we can't get the lock as
-		 * it has contention. Just correct the count
-		 * and exit.
-		 */
-		if (signal_pending(current)) {
-			retval = -EINTR;
-			sem->sleepers = 0;
-			atomic_add(sleepers, &sem->count);
-			break;
-		}
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock. The
-		 * "-1" is because we're still hoping to get
-		 * the lock.
-		 */
-		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irq(&semaphore_lock);
-
-		schedule();
-		tsk->state = TASK_INTERRUPTIBLE;
-		spin_lock_irq(&semaphore_lock);
-	}
-	spin_unlock_irq(&semaphore_lock);
-	tsk->state = TASK_RUNNING;
-	remove_wait_queue(&sem->wait, &wait);
-	wake_up(&sem->wait);
-	return retval;
-}
-
-/*
- * Trylock failed - make sure we correct for
- * having decremented the count.
- *
- * We could have done the trylock with a
- * single "cmpxchg" without failure cases,
- * but then it wouldn't work on a 386.
- */
-int __down_trylock(struct semaphore * sem)
-{
-	int sleepers;
-	unsigned long flags;
-
-	spin_lock_irqsave(&semaphore_lock, flags);
-	sleepers = sem->sleepers + 1;
-	sem->sleepers = 0;
-
-	/*
-	 * Add "everybody else" and us into it. They aren't
-	 * playing, because we own the spinlock.
-	 */
-	if (!atomic_add_negative(sleepers, &sem->count))
-		wake_up(&sem->wait);
-
-	spin_unlock_irqrestore(&semaphore_lock, flags);
-	return 1;
-}
-
-/*
- * The semaphore operations have a special calling sequence that
- * allow us to do a simpler in-line version of them. These routines
- * need to convert that sequence back into the C sequence when
- * there is contention on the semaphore.
- *
- * ip contains the semaphore pointer on entry. Save the C-clobbered
- * registers (r0 to r3 and lr), but not ip, as we use it as a return
- * value in some cases..
- * To remain AAPCS compliant (64-bit stack align) we save r4 as well.
- */
-asm("	.section .sched.text,\"ax\",%progbits	\n\
-	.align	5				\n\
-	.globl	__down_failed			\n\
-__down_failed:					\n\
-	stmfd	sp!, {r0 - r4, lr}		\n\
-	mov	r0, ip				\n\
-	bl	__down				\n\
-	ldmfd	sp!, {r0 - r4, pc}		\n\
-						\n\
-	.align	5				\n\
-	.globl	__down_interruptible_failed	\n\
-__down_interruptible_failed:			\n\
-	stmfd	sp!, {r0 - r4, lr}		\n\
-	mov	r0, ip				\n\
-	bl	__down_interruptible		\n\
-	mov	ip, r0				\n\
-	ldmfd	sp!, {r0 - r4, pc}		\n\
-						\n\
-	.align	5				\n\
-	.globl	__down_trylock_failed		\n\
-__down_trylock_failed:				\n\
-	stmfd	sp!, {r0 - r4, lr}		\n\
-	mov	r0, ip				\n\
-	bl	__down_trylock			\n\
-	mov	ip, r0				\n\
-	ldmfd	sp!, {r0 - r4, pc}		\n\
-						\n\
-	.align	5				\n\
-	.globl	__up_wakeup			\n\
-__up_wakeup:					\n\
-	stmfd	sp!, {r0 - r4, lr}		\n\
-	mov	r0, ip				\n\
-	bl	__up				\n\
-	ldmfd	sp!, {r0 - r4, pc}		\n\
-	");
-
-EXPORT_SYMBOL(__down_failed);
-EXPORT_SYMBOL(__down_interruptible_failed);
-EXPORT_SYMBOL(__down_trylock_failed);
-EXPORT_SYMBOL(__up_wakeup);
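
The "two-way counter" comments above are terse, so it is worth spelling
out the trick: every contender has already decremented count once in the
inline fast path, and under the lock one waiter folds all the other
sleepers back into count with atomic_add_negative(sleepers - 1, &count),
keeping exactly one -1 for itself. A self-contained userspace model of a
single contention round (an illustration only, not kernel code):

	#include <stdatomic.h>
	#include <stdio.h>

	int main(void)
	{
		atomic_int count = 1;		/* one token free */
		int sleepers;

		atomic_fetch_sub(&count, 1);	/* task A: 1 -> 0, acquires */
		atomic_fetch_sub(&count, 1);	/* task B: 0 -> -1, must sleep */

		/* task B inside __down(), holding the semaphore lock: */
		sleepers = 1;
		/* fold "everybody else" (sleepers - 1 == 0) back into count;
		 * atomic_add_negative() is modelled by testing the new value */
		if (atomic_fetch_add(&count, sleepers - 1) + (sleepers - 1) >= 0)
			puts("acquired");
		else
			puts("still contended, sleep");	/* printed: A holds the token */
		return 0;
	}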
diff --git a/arch/avr32/kernel/Makefile b/arch/avr32/kernel/Makefile
index e4b6d12..18229d0 100644
--- a/arch/avr32/kernel/Makefile
+++ b/arch/avr32/kernel/Makefile
@@ -6,7 +6,7 @@
 
 obj-$(CONFIG_SUBARCH_AVR32B)	+= entry-avr32b.o
 obj-y				+= syscall_table.o syscall-stubs.o irq.o
-obj-y				+= setup.o traps.o semaphore.o ocd.o ptrace.o
+obj-y				+= setup.o traps.o ocd.o ptrace.o
 obj-y				+= signal.o sys_avr32.o process.o time.o
 obj-y				+= init_task.o switch_to.o cpu.o
 obj-$(CONFIG_MODULES)		+= module.o avr32_ksyms.o
diff --git a/arch/avr32/kernel/semaphore.c b/arch/avr32/kernel/semaphore.c
deleted file mode 100644
index 1e2705a..0000000
--- a/arch/avr32/kernel/semaphore.c
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * AVR32 semaphore implementation.
- *
- * Copyright (C) 2004-2006 Atmel Corporation
- *
- * Based on linux/arch/i386/kernel/semaphore.c
- *  Copyright (C) 1999 Linus Torvalds
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/module.h>
-
-#include <asm/semaphore.h>
-#include <asm/atomic.h>
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to acquire the semaphore, while the "sleeping"
- * variable is a count of such acquires.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation).
- *
- * "sleeping" and the contention routine ordering is protected
- * by the spinlock in the semaphore's waitqueue head.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-
-/*
- * Logic:
- *  - only on a boundary condition do we need to care. When we go
- *    from a negative count to a non-negative, we wake people up.
- *  - when we go from a non-negative count to a negative do we
- *    (a) synchronize with the "sleeper" count and (b) make sure
- *    that we're on the wakeup list before we synchronize so that
- *    we cannot lose wakeup events.
- */
-
-void __up(struct semaphore *sem)
-{
-	wake_up(&sem->wait);
-}
-EXPORT_SYMBOL(__up);
-
-void __sched __down(struct semaphore *sem)
-{
-	struct task_struct *tsk = current;
-        DECLARE_WAITQUEUE(wait, tsk);
-        unsigned long flags;
-
-        tsk->state = TASK_UNINTERRUPTIBLE;
-        spin_lock_irqsave(&sem->wait.lock, flags);
-        add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-        sem->sleepers++;
-        for (;;) {
-                int sleepers = sem->sleepers;
-
-                /*
-                 * Add "everybody else" into it. They aren't
-                 * playing, because we own the spinlock in
-                 * the wait_queue_head.
-                 */
-                if (atomic_add_return(sleepers - 1, &sem->count) >= 0) {
-                        sem->sleepers = 0;
-                        break;
-                }
-                sem->sleepers = 1;      /* us - see -1 above */
-                spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-                schedule();
-
-                spin_lock_irqsave(&sem->wait.lock, flags);
-                tsk->state = TASK_UNINTERRUPTIBLE;
-        }
-        remove_wait_queue_locked(&sem->wait, &wait);
-        wake_up_locked(&sem->wait);
-        spin_unlock_irqrestore(&sem->wait.lock, flags);
-        tsk->state = TASK_RUNNING;
-}
-EXPORT_SYMBOL(__down);
-
-int __sched __down_interruptible(struct semaphore *sem)
-{
-	int retval = 0;
-	struct task_struct *tsk = current;
-        DECLARE_WAITQUEUE(wait, tsk);
-        unsigned long flags;
-
-        tsk->state = TASK_INTERRUPTIBLE;
-        spin_lock_irqsave(&sem->wait.lock, flags);
-        add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-        sem->sleepers++;
-        for (;;) {
-                int sleepers = sem->sleepers;
-
-		/*
-		 * With signals pending, this turns into the trylock
-		 * failure case - we won't be sleeping, and we can't
-		 * get the lock as it has contention. Just correct the
-		 * count and exit.
-		 */
-		if (signal_pending(current)) {
-			retval = -EINTR;
-			sem->sleepers = 0;
-			atomic_add(sleepers, &sem->count);
-			break;
-		}
-
-                /*
-                 * Add "everybody else" into it. They aren't
-                 * playing, because we own the spinlock in
-                 * the wait_queue_head.
-                 */
-                if (atomic_add_return(sleepers - 1, &sem->count) >= 0) {
-                        sem->sleepers = 0;
-                        break;
-                }
-                sem->sleepers = 1;      /* us - see -1 above */
-                spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-                schedule();
-
-                spin_lock_irqsave(&sem->wait.lock, flags);
-                tsk->state = TASK_INTERRUPTIBLE;
-        }
-        remove_wait_queue_locked(&sem->wait, &wait);
-        wake_up_locked(&sem->wait);
-        spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-        tsk->state = TASK_RUNNING;
-	return retval;
-}
-EXPORT_SYMBOL(__down_interruptible);
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index 589c6ac..2dd1f30 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -31,10 +31,6 @@
 	bool
 	default y
 
-config SEMAPHORE_SLEEPERS
-	bool
-	default y
-
 config GENERIC_FIND_NEXT_BIT
 	bool
 	default y
diff --git a/arch/blackfin/kernel/bfin_ksyms.c b/arch/blackfin/kernel/bfin_ksyms.c
index 0bfbb26..053edff 100644
--- a/arch/blackfin/kernel/bfin_ksyms.c
+++ b/arch/blackfin/kernel/bfin_ksyms.c
@@ -42,11 +42,6 @@
 
 EXPORT_SYMBOL(kernel_thread);
 
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_trylock);
-EXPORT_SYMBOL(__down_interruptible);
-
 EXPORT_SYMBOL(is_in_rom);
 EXPORT_SYMBOL(bfin_return_from_exception);
 
diff --git a/arch/cris/kernel/Makefile b/arch/cris/kernel/Makefile
index c8e8ea5..ee7bcd4 100644
--- a/arch/cris/kernel/Makefile
+++ b/arch/cris/kernel/Makefile
@@ -5,8 +5,7 @@
 
 extra-y	:= vmlinux.lds
 
-obj-y   := process.o traps.o irq.o ptrace.o setup.o \
-	   time.o sys_cris.o semaphore.o
+obj-y   := process.o traps.o irq.o ptrace.o setup.o time.o sys_cris.o
 
 obj-$(CONFIG_MODULES)    += crisksyms.o
 obj-$(CONFIG_MODULES)	 += module.o
diff --git a/arch/cris/kernel/crisksyms.c b/arch/cris/kernel/crisksyms.c
index 62f0e75..7ac000f 100644
--- a/arch/cris/kernel/crisksyms.c
+++ b/arch/cris/kernel/crisksyms.c
@@ -9,7 +9,6 @@
 #include <linux/string.h>
 #include <linux/tty.h>
 
-#include <asm/semaphore.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/checksum.h>
@@ -49,12 +48,6 @@
 EXPORT_SYMBOL(__ioremap);
 EXPORT_SYMBOL(iounmap);
 
-/* Semaphore functions */
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__down_trylock);
-
 /* Userspace access functions */
 EXPORT_SYMBOL(__copy_user_zeroing);
 EXPORT_SYMBOL(__copy_user);
diff --git a/arch/cris/kernel/semaphore.c b/arch/cris/kernel/semaphore.c
deleted file mode 100644
index f137a439..0000000
--- a/arch/cris/kernel/semaphore.c
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- *  Generic semaphore code. Buyer beware. Do your own
- * specific changes in <asm/semaphore-helper.h>
- */
-
-#include <linux/sched.h>
-#include <asm/semaphore-helper.h>
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to sleep, while the "waking" variable is
- * incremented when the "up()" code goes to wake up waiting
- * processes.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation).
- *
- * waking_non_zero() (from asm/semaphore.h) must execute
- * atomically.
- *
- * When __up() is called, the count was negative before
- * incrementing it, and we need to wake up somebody.
- *
- * This routine adds one to the count of processes that need to
- * wake up and exit.  ALL waiting processes actually wake up but
- * only the one that gets to the "waking" field first will gate
- * through and acquire the semaphore.  The others will go back
- * to sleep.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-void __up(struct semaphore *sem)
-{
-	wake_one_more(sem);
-	wake_up(&sem->wait);
-}
-
-/*
- * Perform the "down" function.  Return zero for semaphore acquired,
- * return negative for signalled out of the function.
- *
- * If called from __down, the return is ignored and the wait loop is
- * not interruptible.  This means that a task waiting on a semaphore
- * using "down()" cannot be killed until someone does an "up()" on
- * the semaphore.
- *
- * If called from __down_interruptible, the return value gets checked
- * upon return.  If the return value is negative then the task continues
- * with the negative value in the return register (it can be tested by
- * the caller).
- *
- * Either form may be used in conjunction with "up()".
- *
- */
-
-#define DOWN_VAR				\
-	struct task_struct *tsk = current;	\
-	wait_queue_t wait;			\
-	init_waitqueue_entry(&wait, tsk);
-
-#define DOWN_HEAD(task_state)						\
-									\
-									\
-	tsk->state = (task_state);					\
-	add_wait_queue(&sem->wait, &wait);				\
-									\
-	/*								\
-	 * Ok, we're set up.  sem->count is known to be less than zero	\
-	 * so we must wait.						\
-	 *								\
-	 * We can let go the lock for purposes of waiting.		\
-	 * We re-acquire it after awaking so as to protect		\
-	 * all semaphore operations.					\
-	 *								\
-	 * If "up()" is called before we call waking_non_zero() then	\
-	 * we will catch it right away.  If it is called later then	\
-	 * we will have to go through a wakeup cycle to catch it.	\
-	 *								\
-	 * Multiple waiters contend for the semaphore lock to see	\
-	 * who gets to gate through and who has to wait some more.	\
-	 */								\
-	for (;;) {
-
-#define DOWN_TAIL(task_state)			\
-		tsk->state = (task_state);	\
-	}					\
-	tsk->state = TASK_RUNNING;		\
-	remove_wait_queue(&sem->wait, &wait);
-
-void __sched __down(struct semaphore * sem)
-{
-	DOWN_VAR
-	DOWN_HEAD(TASK_UNINTERRUPTIBLE)
-	if (waking_non_zero(sem))
-		break;
-	schedule();
-	DOWN_TAIL(TASK_UNINTERRUPTIBLE)
-}
-
-int __sched __down_interruptible(struct semaphore * sem)
-{
-	int ret = 0;
-	DOWN_VAR
-	DOWN_HEAD(TASK_INTERRUPTIBLE)
-
-	ret = waking_non_zero_interruptible(sem, tsk);
-	if (ret)
-	{
-		if (ret == 1)
-			/* ret != 0 only if we get interrupted -arca */
-			ret = 0;
-		break;
-	}
-	schedule();
-	DOWN_TAIL(TASK_INTERRUPTIBLE)
-	return ret;
-}
-
-int __down_trylock(struct semaphore * sem)
-{
-	return waking_non_zero_trylock(sem);
-}
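
CRIS (like H8300 further down) pushed the real work into
<asm/semaphore-helper.h>. The "waking" gate described in the comment
above amounts to a helper of roughly this shape -- a sketch assuming a
global semaphore_wake_lock serializes it, as the non-RMW variants did:

	static inline int waking_non_zero(struct semaphore *sem)
	{
		unsigned long flags;
		int ret = 0;

		spin_lock_irqsave(&semaphore_wake_lock, flags);
		if (sem->waking > 0) {	/* up() granted a token */
			sem->waking--;	/* consume it and gate through */
			ret = 1;
		}
		spin_unlock_irqrestore(&semaphore_wake_lock, flags);
		return ret;		/* 0 means: lost the race, sleep again */
	}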
diff --git a/arch/frv/kernel/Makefile b/arch/frv/kernel/Makefile
index e8f73ed..c36f70b 100644
--- a/arch/frv/kernel/Makefile
+++ b/arch/frv/kernel/Makefile
@@ -9,7 +9,7 @@
 
 obj-y := $(heads-y) entry.o entry-table.o break.o switch_to.o kernel_thread.o \
 	 kernel_execve.o process.o traps.o ptrace.o signal.o dma.o \
-	 sys_frv.o time.o semaphore.o setup.o frv_ksyms.o \
+	 sys_frv.o time.o setup.o frv_ksyms.o \
 	 debug-stub.o irq.o sleep.o uaccess.o
 
 obj-$(CONFIG_GDBSTUB)		+= gdb-stub.o gdb-io.o
diff --git a/arch/frv/kernel/frv_ksyms.c b/arch/frv/kernel/frv_ksyms.c
index f772704..0316b3c 100644
--- a/arch/frv/kernel/frv_ksyms.c
+++ b/arch/frv/kernel/frv_ksyms.c
@@ -12,7 +12,6 @@
 #include <asm/pgalloc.h>
 #include <asm/irq.h>
 #include <asm/io.h>
-#include <asm/semaphore.h>
 #include <asm/checksum.h>
 #include <asm/hardirq.h>
 #include <asm/cacheflush.h>
diff --git a/arch/frv/kernel/semaphore.c b/arch/frv/kernel/semaphore.c
deleted file mode 100644
index 7ee3a14..0000000
--- a/arch/frv/kernel/semaphore.c
+++ /dev/null
@@ -1,155 +0,0 @@
-/* semaphore.c: FR-V semaphores
- *
- * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- * - Derived from lib/rwsem-spinlock.c
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/sched.h>
-#include <linux/module.h>
-#include <asm/semaphore.h>
-
-struct sem_waiter {
-	struct list_head	list;
-	struct task_struct	*task;
-};
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-void semtrace(struct semaphore *sem, const char *str)
-{
-	if (sem->debug)
-		printk("[%d] %s({%d,%d})\n",
-		       current->pid,
-		       str,
-		       sem->counter,
-		       list_empty(&sem->wait_list) ? 0 : 1);
-}
-#else
-#define semtrace(SEM,STR) do { } while(0)
-#endif
-
-/*
- * wait for a token to be granted from a semaphore
- * - entered with lock held and interrupts disabled
- */
-void __down(struct semaphore *sem, unsigned long flags)
-{
-	struct task_struct *tsk = current;
-	struct sem_waiter waiter;
-
-	semtrace(sem, "Entering __down");
-
-	/* set up my own style of waitqueue */
-	waiter.task = tsk;
-	get_task_struct(tsk);
-
-	list_add_tail(&waiter.list, &sem->wait_list);
-
-	/* we don't need to touch the semaphore struct anymore */
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-	/* wait to be given the semaphore */
-	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-
-	for (;;) {
-		if (list_empty(&waiter.list))
-			break;
-		schedule();
-		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-	}
-
-	tsk->state = TASK_RUNNING;
-	semtrace(sem, "Leaving __down");
-}
-
-EXPORT_SYMBOL(__down);
-
-/*
- * interruptibly wait for a token to be granted from a semaphore
- * - entered with lock held and interrupts disabled
- */
-int __down_interruptible(struct semaphore *sem, unsigned long flags)
-{
-	struct task_struct *tsk = current;
-	struct sem_waiter waiter;
-	int ret;
-
-	semtrace(sem,"Entering __down_interruptible");
-
-	/* set up my own style of waitqueue */
-	waiter.task = tsk;
-	get_task_struct(tsk);
-
-	list_add_tail(&waiter.list, &sem->wait_list);
-
-	/* we don't need to touch the semaphore struct anymore */
-	set_task_state(tsk, TASK_INTERRUPTIBLE);
-
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-	/* wait to be given the semaphore */
-	ret = 0;
-	for (;;) {
-		if (list_empty(&waiter.list))
-			break;
-		if (unlikely(signal_pending(current)))
-			goto interrupted;
-		schedule();
-		set_task_state(tsk, TASK_INTERRUPTIBLE);
-	}
-
- out:
-	tsk->state = TASK_RUNNING;
-	semtrace(sem, "Leaving __down_interruptible");
-	return ret;
-
- interrupted:
-	spin_lock_irqsave(&sem->wait_lock, flags);
-
-	if (!list_empty(&waiter.list)) {
-		list_del(&waiter.list);
-		ret = -EINTR;
-	}
-
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
-	if (ret == -EINTR)
-		put_task_struct(current);
-	goto out;
-}
-
-EXPORT_SYMBOL(__down_interruptible);
-
-/*
- * release a single token back to a semaphore
- * - entered with lock held and interrupts disabled
- */
-void __up(struct semaphore *sem)
-{
-	struct task_struct *tsk;
-	struct sem_waiter *waiter;
-
-	semtrace(sem,"Entering __up");
-
-	/* grant the token to the process at the front of the queue */
-	waiter = list_entry(sem->wait_list.next, struct sem_waiter, list);
-
-	/* We must be careful not to touch 'waiter' after we set ->task = NULL.
-	 * It is allocated on the waiter's stack and may become invalid at
-	 * any time after that point (due to a wakeup from another source).
-	 */
-	list_del_init(&waiter->list);
-	tsk = waiter->task;
-	mb();
-	waiter->task = NULL;
-	wake_up_process(tsk);
-	put_task_struct(tsk);
-
-	semtrace(sem,"Leaving __up");
-}
-
-EXPORT_SYMBOL(__up);
diff --git a/arch/h8300/kernel/Makefile b/arch/h8300/kernel/Makefile
index 874f6ae..6c248c3 100644
--- a/arch/h8300/kernel/Makefile
+++ b/arch/h8300/kernel/Makefile
@@ -5,7 +5,7 @@
 extra-y := vmlinux.lds
 
 obj-y := process.o traps.o ptrace.o irq.o \
-	 sys_h8300.o time.o semaphore.o signal.o \
+	 sys_h8300.o time.o signal.o \
          setup.o gpio.o init_task.o syscalls.o \
 	 entry.o
 
diff --git a/arch/h8300/kernel/h8300_ksyms.c b/arch/h8300/kernel/h8300_ksyms.c
index d1b1526..6866bd9 100644
--- a/arch/h8300/kernel/h8300_ksyms.c
+++ b/arch/h8300/kernel/h8300_ksyms.c
@@ -12,7 +12,6 @@
 #include <asm/pgalloc.h>
 #include <asm/irq.h>
 #include <asm/io.h>
-#include <asm/semaphore.h>
 #include <asm/checksum.h>
 #include <asm/current.h>
 #include <asm/gpio.h>
diff --git a/arch/h8300/kernel/semaphore.c b/arch/h8300/kernel/semaphore.c
deleted file mode 100644
index d12cbbf..0000000
--- a/arch/h8300/kernel/semaphore.c
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- *  Generic semaphore code. Buyer beware. Do your own
- * specific changes in <asm/semaphore-helper.h>
- */
-
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <asm/semaphore-helper.h>
-
-#ifndef CONFIG_RMW_INSNS
-spinlock_t semaphore_wake_lock;
-#endif
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to sleep, while the "waking" variable is
- * incremented when the "up()" code goes to wake up waiting
- * processes.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation).
- *
- * waking_non_zero() (from asm/semaphore.h) must execute
- * atomically.
- *
- * When __up() is called, the count was negative before
- * incrementing it, and we need to wake up somebody.
- *
- * This routine adds one to the count of processes that need to
- * wake up and exit.  ALL waiting processes actually wake up but
- * only the one that gets to the "waking" field first will gate
- * through and acquire the semaphore.  The others will go back
- * to sleep.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-void __up(struct semaphore *sem)
-{
-	wake_one_more(sem);
-	wake_up(&sem->wait);
-}
-
-/*
- * Perform the "down" function.  Return zero for semaphore acquired,
- * return negative for signalled out of the function.
- *
- * If called from __down, the return is ignored and the wait loop is
- * not interruptible.  This means that a task waiting on a semaphore
- * using "down()" cannot be killed until someone does an "up()" on
- * the semaphore.
- *
- * If called from __down_interruptible, the return value gets checked
- * upon return.  If the return value is negative then the task continues
- * with the negative value in the return register (it can be tested by
- * the caller).
- *
- * Either form may be used in conjunction with "up()".
- *
- */
-
-
-#define DOWN_HEAD(task_state)						\
-									\
-									\
-	current->state = (task_state);					\
-	add_wait_queue(&sem->wait, &wait);				\
-									\
-	/*								\
-	 * Ok, we're set up.  sem->count is known to be less than zero	\
-	 * so we must wait.						\
-	 *								\
-	 * We can let go the lock for purposes of waiting.		\
-	 * We re-acquire it after awaking so as to protect		\
-	 * all semaphore operations.					\
-	 *								\
-	 * If "up()" is called before we call waking_non_zero() then	\
-	 * we will catch it right away.  If it is called later then	\
-	 * we will have to go through a wakeup cycle to catch it.	\
-	 *								\
-	 * Multiple waiters contend for the semaphore lock to see	\
-	 * who gets to gate through and who has to wait some more.	\
-	 */								\
-	for (;;) {
-
-#define DOWN_TAIL(task_state)			\
-		current->state = (task_state);	\
-	}					\
-	current->state = TASK_RUNNING;		\
-	remove_wait_queue(&sem->wait, &wait);
-
-void __sched __down(struct semaphore * sem)
-{
-	DECLARE_WAITQUEUE(wait, current);
-
-	DOWN_HEAD(TASK_UNINTERRUPTIBLE)
-	if (waking_non_zero(sem))
-		break;
-	schedule();
-	DOWN_TAIL(TASK_UNINTERRUPTIBLE)
-}
-
-int __sched __down_interruptible(struct semaphore * sem)
-{
-	DECLARE_WAITQUEUE(wait, current);
-	int ret = 0;
-
-	DOWN_HEAD(TASK_INTERRUPTIBLE)
-
-	ret = waking_non_zero_interruptible(sem, current);
-	if (ret)
-	{
-		if (ret == 1)
-			/* ret != 0 only if we get interrupted -arca */
-			ret = 0;
-		break;
-	}
-	schedule();
-	DOWN_TAIL(TASK_INTERRUPTIBLE)
-	return ret;
-}
-
-int __down_trylock(struct semaphore * sem)
-{
-	return waking_non_zero_trylock(sem);
-}
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 8fa3faf..ed21737 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -283,6 +283,17 @@
 	default "17" if HUGETLB_PAGE
 	default "11"
 
+config VIRT_CPU_ACCOUNTING
+	bool "Deterministic task and CPU time accounting"
+	default n
+	help
+	  Select this option to enable more accurate task and CPU time
+	  accounting.  This is done by reading a CPU counter on each
+	  kernel entry and exit and on transitions within the kernel
+	  between system, softirq and hardirq state, so there is a
+	  small performance impact.
+	  If in doubt, say N here.
+
 config SMP
 	bool "Symmetric multi-processing support"
 	help
@@ -611,6 +622,9 @@
 	bool
 	default y
 
+config IOMMU_HELPER
+	def_bool (IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_GENERIC)
+
 source "arch/ia64/hp/sim/Kconfig"
 
 source "arch/ia64/Kconfig.debug"
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 523eae6..9409de5 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -35,6 +35,7 @@
 #include <linux/nodemask.h>
 #include <linux/bitops.h>         /* hweight64() */
 #include <linux/crash_dump.h>
+#include <linux/iommu-helper.h>
 
 #include <asm/delay.h>		/* ia64_get_itc() */
 #include <asm/io.h>
@@ -460,6 +461,13 @@
 	return order;
 }
 
+static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
+				 unsigned int bitshiftcnt)
+{
+	return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
+		+ bitshiftcnt;
+}
+
 /**
  * sba_search_bitmap - find free space in IO PDIR resource bitmap
  * @ioc: IO MMU structure which owns the pdir we are interested in.
@@ -471,15 +479,25 @@
  * Cool perf optimization: search for log2(size) bits at a time.
  */
 static SBA_INLINE unsigned long
-sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint)
+sba_search_bitmap(struct ioc *ioc, struct device *dev,
+		  unsigned long bits_wanted, int use_hint)
 {
 	unsigned long *res_ptr;
 	unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
-	unsigned long flags, pide = ~0UL;
+	unsigned long flags, pide = ~0UL, tpide;
+	unsigned long boundary_size;
+	unsigned long shift;
+	int ret;
 
 	ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);
 	ASSERT(res_ptr < res_end);
 
+	boundary_size = (unsigned long long)dma_get_seg_boundary(dev) + 1;
+	boundary_size = ALIGN(boundary_size, 1ULL << iovp_shift) >> iovp_shift;
+
+	BUG_ON(ioc->ibase & ~iovp_mask);
+	shift = ioc->ibase >> iovp_shift;
+
 	spin_lock_irqsave(&ioc->res_lock, flags);
 
 	/* Allow caller to force a search through the entire resource space */
@@ -504,9 +522,7 @@
 			if (likely(*res_ptr != ~0UL)) {
 				bitshiftcnt = ffz(*res_ptr);
 				*res_ptr |= (1UL << bitshiftcnt);
-				pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
-				pide <<= 3;	/* convert to bit address */
-				pide += bitshiftcnt;
+				pide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
 				ioc->res_bitshift = bitshiftcnt + bits_wanted;
 				goto found_it;
 			}
@@ -535,11 +551,13 @@
 			DBG_RES("    %p %lx %lx\n", res_ptr, mask, *res_ptr);
 			ASSERT(0 != mask);
 			for (; mask ; mask <<= o, bitshiftcnt += o) {
-				if(0 == ((*res_ptr) & mask)) {
+				tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
+				ret = iommu_is_span_boundary(tpide, bits_wanted,
+							     shift,
+							     boundary_size);
+				if ((0 == ((*res_ptr) & mask)) && !ret) {
 					*res_ptr |= mask;     /* mark resources busy! */
-					pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
-					pide <<= 3;	/* convert to bit address */
-					pide += bitshiftcnt;
+					pide = tpide;
 					ioc->res_bitshift = bitshiftcnt + bits_wanted;
 					goto found_it;
 				}
@@ -560,6 +578,11 @@
 		end = res_end - qwords;
 
 		for (; res_ptr < end; res_ptr++) {
+			tpide = ptr_to_pide(ioc, res_ptr, 0);
+			ret = iommu_is_span_boundary(tpide, bits_wanted,
+						     shift, boundary_size);
+			if (ret)
+				goto next_ptr;
 			for (i = 0 ; i < qwords ; i++) {
 				if (res_ptr[i] != 0)
 					goto next_ptr;
@@ -572,8 +595,7 @@
 				res_ptr[i] = ~0UL;
 			res_ptr[i] |= RESMAP_MASK(bits);
 
-			pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
-			pide <<= 3;	/* convert to bit address */
+			pide = tpide;
 			res_ptr += qwords;
 			ioc->res_bitshift = bits;
 			goto found_it;
@@ -605,7 +627,7 @@
  * resource bit map.
  */
 static int
-sba_alloc_range(struct ioc *ioc, size_t size)
+sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
 {
 	unsigned int pages_needed = size >> iovp_shift;
 #ifdef PDIR_SEARCH_TIMING
@@ -622,9 +644,9 @@
 	/*
 	** "seek and ye shall find"...praying never hurts either...
 	*/
-	pide = sba_search_bitmap(ioc, pages_needed, 1);
+	pide = sba_search_bitmap(ioc, dev, pages_needed, 1);
 	if (unlikely(pide >= (ioc->res_size << 3))) {
-		pide = sba_search_bitmap(ioc, pages_needed, 0);
+		pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
 		if (unlikely(pide >= (ioc->res_size << 3))) {
 #if DELAYED_RESOURCE_CNT > 0
 			unsigned long flags;
@@ -653,7 +675,7 @@
 			}
 			spin_unlock_irqrestore(&ioc->saved_lock, flags);
 
-			pide = sba_search_bitmap(ioc, pages_needed, 0);
+			pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
 			if (unlikely(pide >= (ioc->res_size << 3)))
 				panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
 				      ioc->ioc_hpa);
@@ -936,7 +958,7 @@
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
 
-	pide = sba_alloc_range(ioc, size);
+	pide = sba_alloc_range(ioc, dev, size);
 
 	iovp = (dma_addr_t) pide << iovp_shift;
 
@@ -1373,7 +1395,7 @@
 		dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask;
 		ASSERT(dma_len <= DMA_CHUNK_SIZE);
 		dma_sg->dma_address = (dma_addr_t) (PIDE_FLAG
-			| (sba_alloc_range(ioc, dma_len) << iovp_shift)
+			| (sba_alloc_range(ioc, dev, dma_len) << iovp_shift)
 			| dma_offset);
 		n_mappings++;
 	}
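
The new dev argument threads the device's DMA segment boundary into the
bitmap search so that no allocated IOVA range straddles a boundary_size
multiple. The check added above comes from the new iommu-helper library;
it is roughly:

	/* reject a candidate range [index, index + nr) that would cross a
	 * boundary_size-aligned bus address; shift re-bases bitmap indices
	 * onto bus addresses (here ioc->ibase >> iovp_shift) */
	static inline int iommu_is_span_boundary(unsigned int index,
						 unsigned int nr,
						 unsigned long shift,
						 unsigned long boundary_size)
	{
		BUG_ON(!is_power_of_2(boundary_size));
		shift = (shift + index) & (boundary_size - 1);
		return shift + nr > boundary_size;
	}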
diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c
index 7661bb0..3a078ad 100644
--- a/arch/ia64/hp/sim/simscsi.c
+++ b/arch/ia64/hp/sim/simscsi.c
@@ -201,22 +201,6 @@
 	simscsi_sg_readwrite(sc, mode, offset);
 }
 
-static void simscsi_fillresult(struct scsi_cmnd *sc, char *buf, unsigned len)
-{
-
-	int i;
-	unsigned thislen;
-	struct scatterlist *slp;
-
-	scsi_for_each_sg(sc, slp, scsi_sg_count(sc), i) {
-		if (!len)
-			break;
-		thislen = min(len, slp->length);
-		memcpy(sg_virt(slp), buf, thislen);
-		len -= thislen;
-	}
-}
-
 static int
 simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 {
@@ -258,7 +242,7 @@
 			buf[6] = 0;	/* reserved */
 			buf[7] = 0;	/* various flags */
 			memcpy(buf + 8, "HP      SIMULATED DISK  0.00",  28);
-			simscsi_fillresult(sc, buf, 36);
+			scsi_sg_copy_from_buffer(sc, buf, 36);
 			sc->result = GOOD;
 			break;
 
@@ -306,14 +290,15 @@
 			buf[5] = 0;
 			buf[6] = 2;
 			buf[7] = 0;
-			simscsi_fillresult(sc, buf, 8);
+			scsi_sg_copy_from_buffer(sc, buf, 8);
 			sc->result = GOOD;
 			break;
 
 		      case MODE_SENSE:
 		      case MODE_SENSE_10:
 			/* sd.c uses this to determine whether disk does write-caching. */
-			simscsi_fillresult(sc, (char *)empty_zero_page, scsi_bufflen(sc));
+			scsi_sg_copy_from_buffer(sc, (char *)empty_zero_page,
+						 PAGE_SIZE);
 			sc->result = GOOD;
 			break;
 
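
Besides removing duplicated code, the conversion appears to fix a latent
bug: simscsi_fillresult() never advanced buf, so a result spanning more
than one scatterlist element would have repeated the start of the buffer.
The generic helper keeps a running offset; its effect is roughly this
sketch:

	static void fill_sg_from_buf(struct scsi_cmnd *sc, char *buf,
				     unsigned int len)
	{
		struct scatterlist *sg;
		unsigned int n;
		int i;

		scsi_for_each_sg(sc, sg, scsi_sg_count(sc), i) {
			if (!len)
				break;
			n = min(len, sg->length);
			memcpy(sg_virt(sg), buf, n);
			buf += n;	/* the advance the old loop lacked */
			len -= n;
		}
	}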
diff --git a/arch/ia64/ia32/elfcore32.h b/arch/ia64/ia32/elfcore32.h
index 446c9aa..9a3abf5 100644
--- a/arch/ia64/ia32/elfcore32.h
+++ b/arch/ia64/ia32/elfcore32.h
@@ -30,7 +30,19 @@
 	int	si_errno;			/* errno */
 };
 
-#define jiffies_to_timeval(a,b) do { (b)->tv_usec = 0; (b)->tv_sec = (a)/HZ; }while(0)
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+/*
+ * Hacks are here since types between compat_timeval (= pair of s32) and
+ * ia64-native timeval (= pair of s64) are not compatible, so
+ * arch/ia64/ia32/../../../fs/binfmt_elf.c gets compiler warnings on the
+ * use of cputime_to_timeval(), which is usually an alias of jiffies_to_timeval().
+ */
+#define cputime_to_timeval(a,b) \
+	do { (b)->tv_usec = 0; (b)->tv_sec = (a)/NSEC_PER_SEC; } while(0)
+#else
+#define jiffies_to_timeval(a,b) \
+	do { (b)->tv_usec = 0; (b)->tv_sec = (a)/HZ; } while(0)
+#endif
 
 struct elf_prstatus
 {
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index b1bf51fe..7e028ce 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -38,6 +38,7 @@
 #include <linux/eventpoll.h>
 #include <linux/personality.h>
 #include <linux/ptrace.h>
+#include <linux/regset.h>
 #include <linux/stat.h>
 #include <linux/ipc.h>
 #include <linux/capability.h>
@@ -2387,16 +2388,45 @@
 	return -ESRCH;
 }
 
+static void set_tls_desc(struct task_struct *p, int idx,
+		const struct ia32_user_desc *info, int n)
+{
+	struct thread_struct *t = &p->thread;
+	struct desc_struct *desc = &t->tls_array[idx - GDT_ENTRY_TLS_MIN];
+	int cpu;
+
+	/*
+	 * We must not get preempted while modifying the TLS.
+	 */
+	cpu = get_cpu();
+
+	while (n-- > 0) {
+		if (LDT_empty(info)) {
+			desc->a = 0;
+			desc->b = 0;
+		} else {
+			desc->a = LDT_entry_a(info);
+			desc->b = LDT_entry_b(info);
+		}
+
+		++info;
+		++desc;
+	}
+
+	if (t == &current->thread)
+		load_TLS(t, cpu);
+
+	put_cpu();
+}
+
 /*
  * Set a given TLS descriptor:
  */
 asmlinkage int
 sys32_set_thread_area (struct ia32_user_desc __user *u_info)
 {
-	struct thread_struct *t = &current->thread;
 	struct ia32_user_desc info;
-	struct desc_struct *desc;
-	int cpu, idx;
+	int idx;
 
 	if (copy_from_user(&info, u_info, sizeof(info)))
 		return -EFAULT;
@@ -2416,18 +2446,7 @@
 	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
 		return -EINVAL;
 
-	desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;
-
-	cpu = smp_processor_id();
-
-	if (LDT_empty(&info)) {
-		desc->a = 0;
-		desc->b = 0;
-	} else {
-		desc->a = LDT_entry_a(&info);
-		desc->b = LDT_entry_b(&info);
-	}
-	load_TLS(t, cpu);
+	set_tls_desc(current, idx, &info, 1);
 	return 0;
 }
 
@@ -2451,6 +2470,20 @@
 #define GET_PRESENT(desc)	(((desc)->b >> 15) & 1)
 #define GET_USEABLE(desc)	(((desc)->b >> 20) & 1)
 
+static void fill_user_desc(struct ia32_user_desc *info, int idx,
+		const struct desc_struct *desc)
+{
+	info->entry_number = idx;
+	info->base_addr = GET_BASE(desc);
+	info->limit = GET_LIMIT(desc);
+	info->seg_32bit = GET_32BIT(desc);
+	info->contents = GET_CONTENTS(desc);
+	info->read_exec_only = !GET_WRITABLE(desc);
+	info->limit_in_pages = GET_LIMIT_PAGES(desc);
+	info->seg_not_present = !GET_PRESENT(desc);
+	info->useable = GET_USEABLE(desc);
+}
+
 asmlinkage int
 sys32_get_thread_area (struct ia32_user_desc __user *u_info)
 {
@@ -2464,22 +2497,588 @@
 		return -EINVAL;
 
 	desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
-
-	info.entry_number = idx;
-	info.base_addr = GET_BASE(desc);
-	info.limit = GET_LIMIT(desc);
-	info.seg_32bit = GET_32BIT(desc);
-	info.contents = GET_CONTENTS(desc);
-	info.read_exec_only = !GET_WRITABLE(desc);
-	info.limit_in_pages = GET_LIMIT_PAGES(desc);
-	info.seg_not_present = !GET_PRESENT(desc);
-	info.useable = GET_USEABLE(desc);
+	fill_user_desc(&info, idx, desc);
 
 	if (copy_to_user(u_info, &info, sizeof(info)))
 		return -EFAULT;
 	return 0;
 }
 
+struct regset_get {
+	void *kbuf;
+	void __user *ubuf;
+};
+
+struct regset_set {
+	const void *kbuf;
+	const void __user *ubuf;
+};
+
+struct regset_getset {
+	struct task_struct *target;
+	const struct user_regset *regset;
+	union {
+		struct regset_get get;
+		struct regset_set set;
+	} u;
+	unsigned int pos;
+	unsigned int count;
+	int ret;
+};
+
+static void getfpreg(struct task_struct *task, int regno, int *val)
+{
+	switch (regno / sizeof(int)) {
+	case 0:
+		*val = task->thread.fcr & 0xffff;
+		break;
+	case 1:
+		*val = task->thread.fsr & 0xffff;
+		break;
+	case 2:
+		*val = (task->thread.fsr>>16) & 0xffff;
+		break;
+	case 3:
+		*val = task->thread.fir;
+		break;
+	case 4:
+		*val = (task->thread.fir>>32) & 0xffff;
+		break;
+	case 5:
+		*val = task->thread.fdr;
+		break;
+	case 6:
+		*val = (task->thread.fdr >> 32) & 0xffff;
+		break;
+	}
+}
+
+static void setfpreg(struct task_struct *task, int regno, int val)
+{
+	switch (regno / sizeof(int)) {
+	case 0:
+		task->thread.fcr = (task->thread.fcr & (~0x1f3f))
+			| (val & 0x1f3f);
+		break;
+	case 1:
+		task->thread.fsr = (task->thread.fsr & (~0xffff)) | val;
+		break;
+	case 2:
+		task->thread.fsr = (task->thread.fsr & (~0xffff0000))
+			| (val << 16);
+		break;
+	case 3:
+		task->thread.fir = (task->thread.fir & (~0xffffffff)) | val;
+		break;
+	case 5:
+		task->thread.fdr = (task->thread.fdr & (~0xffffffff)) | val;
+		break;
+	}
+}
+
+static void access_fpreg_ia32(int regno, void *reg,
+		struct pt_regs *pt, struct switch_stack *sw,
+		int tos, int write)
+{
+	void *f;
+
+	if ((regno += tos) >= 8)
+		regno -= 8;
+	if (regno < 4)
+		f = &pt->f8 + regno;
+	else if (regno <= 7)
+		f = &sw->f12 + (regno - 4);
+	else {
+		printk(KERN_ERR "regno must be less than 8\n");
+		return;
+	}
+
+	if (write)
+		memcpy(f, reg, sizeof(struct _fpreg_ia32));
+	else
+		memcpy(reg, f, sizeof(struct _fpreg_ia32));
+}
+
+static void do_fpregs_get(struct unw_frame_info *info, void *arg)
+{
+	struct regset_getset *dst = arg;
+	struct task_struct *task = dst->target;
+	struct pt_regs *pt;
+	int start, end, tos;
+	char buf[80];
+
+	if (dst->count == 0 || unw_unwind_to_user(info) < 0)
+		return;
+	if (dst->pos < 7 * sizeof(int)) {
+		end = min((dst->pos + dst->count),
+			(unsigned int)(7 * sizeof(int)));
+		for (start = dst->pos; start < end; start += sizeof(int))
+			getfpreg(task, start, (int *)(buf + start));
+		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
+				&dst->u.get.kbuf, &dst->u.get.ubuf, buf,
+				0, 7 * sizeof(int));
+		if (dst->ret || dst->count == 0)
+			return;
+	}
+	if (dst->pos < sizeof(struct ia32_user_i387_struct)) {
+		pt = task_pt_regs(task);
+		tos = (task->thread.fsr >> 11) & 7;
+		end = min(dst->pos + dst->count,
+			(unsigned int)(sizeof(struct ia32_user_i387_struct)));
+		start = (dst->pos - 7 * sizeof(int)) /
+			sizeof(struct _fpreg_ia32);
+		end = (end - 7 * sizeof(int)) / sizeof(struct _fpreg_ia32);
+		for (; start < end; start++)
+			access_fpreg_ia32(start,
+				(struct _fpreg_ia32 *)buf + start,
+				pt, info->sw, tos, 0);
+		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
+				&dst->u.get.kbuf, &dst->u.get.ubuf,
+				buf, 7 * sizeof(int),
+				sizeof(struct ia32_user_i387_struct));
+		if (dst->ret || dst->count == 0)
+			return;
+	}
+}
+
+static void do_fpregs_set(struct unw_frame_info *info, void *arg)
+{
+	struct regset_getset *dst = arg;
+	struct task_struct *task = dst->target;
+	struct pt_regs *pt;
+	char buf[80];
+	int end, start, tos;
+
+	if (dst->count == 0 || unw_unwind_to_user(info) < 0)
+		return;
+
+	if (dst->pos < 7 * sizeof(int)) {
+		start = dst->pos;
+		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
+				&dst->u.set.kbuf, &dst->u.set.ubuf, buf,
+				0, 7 * sizeof(int));
+		if (dst->ret)
+			return;
+		for (; start < dst->pos; start += sizeof(int))
+			setfpreg(task, start, *((int *)(buf + start)));
+		if (dst->count == 0)
+			return;
+	}
+	if (dst->pos < sizeof(struct ia32_user_i387_struct)) {
+		start = (dst->pos - 7 * sizeof(int)) /
+			sizeof(struct _fpreg_ia32);
+		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
+				&dst->u.set.kbuf, &dst->u.set.ubuf,
+				buf, 7 * sizeof(int),
+				sizeof(struct ia32_user_i387_struct));
+		if (dst->ret)
+			return;
+		pt = task_pt_regs(task);
+		tos = (task->thread.fsr >> 11) & 7;
+		end = (dst->pos - 7 * sizeof(int)) / sizeof(struct _fpreg_ia32);
+		for (; start < end; start++)
+			access_fpreg_ia32(start,
+				(struct _fpreg_ia32 *)buf + start,
+				pt, info->sw, tos, 1);
+		if (dst->count == 0)
+			return;
+	}
+}
+
+#define OFFSET(member) ((int)(offsetof(struct ia32_user_fxsr_struct, member)))
+static void getfpxreg(struct task_struct *task, int start, int end, char *buf)
+{
+	int min_val;
+
+	min_val = min(end, OFFSET(fop));
+	while (start < min_val) {
+		if (start == OFFSET(cwd))
+			*((short *)buf) = task->thread.fcr & 0xffff;
+		else if (start == OFFSET(swd))
+			*((short *)buf) = task->thread.fsr & 0xffff;
+		else if (start == OFFSET(twd))
+			*((short *)buf) = (task->thread.fsr>>16) & 0xffff;
+		buf += 2;
+		start += 2;
+	}
+	/* skip fop element */
+	if (start == OFFSET(fop)) {
+		start += 2;
+		buf += 2;
+	}
+	while (start < end) {
+		if (start == OFFSET(fip))
+			*((int *)buf) = task->thread.fir;
+		else if (start == OFFSET(fcs))
+			*((int *)buf) = (task->thread.fir>>32) & 0xffff;
+		else if (start == OFFSET(foo))
+			*((int *)buf) = task->thread.fdr;
+		else if (start == OFFSET(fos))
+			*((int *)buf) = (task->thread.fdr>>32) & 0xffff;
+		else if (start == OFFSET(mxcsr))
+			*((int *)buf) = ((task->thread.fcr>>32) & 0xff80)
+					 | ((task->thread.fsr>>32) & 0x3f);
+		buf += 4;
+		start += 4;
+	}
+}
+
+static void setfpxreg(struct task_struct *task, int start, int end, char *buf)
+{
+	int min_val, num32;
+	short num;
+	unsigned long num64;
+
+	min_val = min(end, OFFSET(fop));
+	while (start < min_val) {
+		num = *((short *)buf);
+		if (start == OFFSET(cwd)) {
+			task->thread.fcr = (task->thread.fcr & (~0x1f3f))
+						| (num & 0x1f3f);
+		} else if (start == OFFSET(swd)) {
+			task->thread.fsr = (task->thread.fsr & (~0xffff)) | num;
+		} else if (start == OFFSET(twd)) {
+			task->thread.fsr = (task->thread.fsr & (~0xffff0000))
+				| (((int)num) << 16);
+		}
+		buf += 2;
+		start += 2;
+	}
+	/* skip fop element */
+	if (start == OFFSET(fop)) {
+		start += 2;
+		buf += 2;
+	}
+	while (start < end) {
+		num32 = *((int *)buf);
+		if (start == OFFSET(fip))
+			task->thread.fir = (task->thread.fir & (~0xffffffff))
+						 | num32;
+		else if (start == OFFSET(foo))
+			task->thread.fdr = (task->thread.fdr & (~0xffffffff))
+						 | num32;
+		else if (start == OFFSET(mxcsr)) {
+			num64 = num32 & 0xff10;
+			task->thread.fcr = (task->thread.fcr &
+				(~0xff1000000000UL)) | (num64<<32);
+			num64 = num32 & 0x3f;
+			task->thread.fsr = (task->thread.fsr &
+				(~0x3f00000000UL)) | (num64<<32);
+		}
+		buf += 4;
+		start += 4;
+	}
+}
+
+static void do_fpxregs_get(struct unw_frame_info *info, void *arg)
+{
+	struct regset_getset *dst = arg;
+	struct task_struct *task = dst->target;
+	struct pt_regs *pt;
+	char buf[128];
+	int start, end, tos;
+
+	if (dst->count == 0 || unw_unwind_to_user(info) < 0)
+		return;
+	if (dst->pos < OFFSET(st_space[0])) {
+		end = min(dst->pos + dst->count, (unsigned int)32);
+		getfpxreg(task, dst->pos, end, buf);
+		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
+				&dst->u.get.kbuf, &dst->u.get.ubuf, buf,
+				0, OFFSET(st_space[0]));
+		if (dst->ret || dst->count == 0)
+			return;
+	}
+	if (dst->pos < OFFSET(xmm_space[0])) {
+		pt = task_pt_regs(task);
+		tos = (task->thread.fsr >> 11) & 7;
+		end = min(dst->pos + dst->count,
+				(unsigned int)OFFSET(xmm_space[0]));
+		start = (dst->pos - OFFSET(st_space[0])) / 16;
+		end = (end - OFFSET(st_space[0])) / 16;
+		for (; start < end; start++)
+			access_fpreg_ia32(start, buf + 16 * start, pt,
+						info->sw, tos, 0);
+		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
+				&dst->u.get.kbuf, &dst->u.get.ubuf,
+				buf, OFFSET(st_space[0]), OFFSET(xmm_space[0]));
+		if (dst->ret || dst->count == 0)
+			return;
+	}
+	if (dst->pos < OFFSET(padding[0]))
+		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
+				&dst->u.get.kbuf, &dst->u.get.ubuf,
+				&info->sw->f16, OFFSET(xmm_space[0]),
+				OFFSET(padding[0]));
+}
+
+static void do_fpxregs_set(struct unw_frame_info *info, void *arg)
+{
+	struct regset_getset *dst = arg;
+	struct task_struct *task = dst->target;
+	char buf[128];
+	int start, end;
+
+	if (dst->count == 0 || unw_unwind_to_user(info) < 0)
+		return;
+
+	if (dst->pos < OFFSET(st_space[0])) {
+		start = dst->pos;
+		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
+				&dst->u.set.kbuf, &dst->u.set.ubuf,
+				buf, 0, OFFSET(st_space[0]));
+		if (dst->ret)
+			return;
+		setfpxreg(task, start, dst->pos, buf);
+		if (dst->count == 0)
+			return;
+	}
+	if (dst->pos < OFFSET(xmm_space[0])) {
+		struct pt_regs *pt;
+		int tos;
+		pt = task_pt_regs(task);
+		tos = (task->thread.fsr >> 11) & 7;
+		start = (dst->pos - OFFSET(st_space[0])) / 16;
+		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
+				&dst->u.set.kbuf, &dst->u.set.ubuf,
+				buf, OFFSET(st_space[0]), OFFSET(xmm_space[0]));
+		if (dst->ret)
+			return;
+		end = (dst->pos - OFFSET(st_space[0])) / 16;
+		for (; start < end; start++)
+			access_fpreg_ia32(start, buf + 16 * start, pt, info->sw,
+						 tos, 1);
+		if (dst->count == 0)
+			return;
+	}
+	if (dst->pos < OFFSET(padding[0]))
+		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
+				&dst->u.set.kbuf, &dst->u.set.ubuf,
+				&info->sw->f16, OFFSET(xmm_space[0]),
+				 OFFSET(padding[0]));
+}
+#undef OFFSET
+
+static int do_regset_call(void (*call)(struct unw_frame_info *, void *),
+		struct task_struct *target,
+		const struct user_regset *regset,
+		unsigned int pos, unsigned int count,
+		const void *kbuf, const void __user *ubuf)
+{
+	struct regset_getset info = { .target = target, .regset = regset,
+		.pos = pos, .count = count,
+		.u.set = { .kbuf = kbuf, .ubuf = ubuf },
+		.ret = 0 };
+
+	if (target == current)
+		unw_init_running(call, &info);
+	else {
+		struct unw_frame_info ufi;
+		memset(&ufi, 0, sizeof(ufi));
+		unw_init_from_blocked_task(&ufi, target);
+		(*call)(&ufi, &info);
+	}
+
+	return info.ret;
+}
+
+static int ia32_fpregs_get(struct task_struct *target,
+		const struct user_regset *regset,
+		unsigned int pos, unsigned int count,
+		void *kbuf, void __user *ubuf)
+{
+	return do_regset_call(do_fpregs_get, target, regset, pos, count,
+		kbuf, ubuf);
+}
+
+static int ia32_fpregs_set(struct task_struct *target,
+		const struct user_regset *regset,
+		unsigned int pos, unsigned int count,
+		const void *kbuf, const void __user *ubuf)
+{
+	return do_regset_call(do_fpregs_set, target, regset, pos, count,
+		kbuf, ubuf);
+}
+
+static int ia32_fpxregs_get(struct task_struct *target,
+		const struct user_regset *regset,
+		unsigned int pos, unsigned int count,
+		void *kbuf, void __user *ubuf)
+{
+	return do_regset_call(do_fpxregs_get, target, regset, pos, count,
+		kbuf, ubuf);
+}
+
+static int ia32_fpxregs_set(struct task_struct *target,
+		const struct user_regset *regset,
+		unsigned int pos, unsigned int count,
+		const void *kbuf, const void __user *ubuf)
+{
+	return do_regset_call(do_fpxregs_set, target, regset, pos, count,
+		kbuf, ubuf);
+}
+
+static int ia32_genregs_get(struct task_struct *target,
+		const struct user_regset *regset,
+		unsigned int pos, unsigned int count,
+		void *kbuf, void __user *ubuf)
+{
+	if (kbuf) {
+		u32 *kp = kbuf;
+		while (count > 0) {
+			*kp++ = getreg(target, pos);
+			pos += 4;
+			count -= 4;
+		}
+	} else {
+		u32 __user *up = ubuf;
+		while (count > 0) {
+			if (__put_user(getreg(target, pos), up++))
+				return -EFAULT;
+			pos += 4;
+			count -= 4;
+		}
+	}
+	return 0;
+}
+
+static int ia32_genregs_set(struct task_struct *target,
+		const struct user_regset *regset,
+		unsigned int pos, unsigned int count,
+		const void *kbuf, const void __user *ubuf)
+{
+	int ret = 0;
+
+	if (kbuf) {
+		const u32 *kp = kbuf;
+		while (!ret && count > 0) {
+			putreg(target, pos, *kp++);
+			pos += 4;
+			count -= 4;
+		}
+	} else {
+		const u32 __user *up = ubuf;
+		u32 val;
+		while (!ret && count > 0) {
+			ret = __get_user(val, up++);
+			if (!ret)
+				putreg(target, pos, val);
+			pos += 4;
+			count -= 4;
+		}
+	}
+	return ret;
+}
+
+static int ia32_tls_active(struct task_struct *target,
+		const struct user_regset *regset)
+{
+	struct thread_struct *t = &target->thread;
+	int n = GDT_ENTRY_TLS_ENTRIES;
+	while (n > 0 && desc_empty(&t->tls_array[n -1]))
+		--n;
+	return n;
+}
+
+static int ia32_tls_get(struct task_struct *target,
+		const struct user_regset *regset, unsigned int pos,
+		unsigned int count, void *kbuf, void __user *ubuf)
+{
+	const struct desc_struct *tls;
+
+	if (pos > GDT_ENTRY_TLS_ENTRIES * sizeof(struct ia32_user_desc) ||
+			(pos % sizeof(struct ia32_user_desc)) != 0 ||
+			(count % sizeof(struct ia32_user_desc)) != 0)
+		return -EINVAL;
+
+	pos /= sizeof(struct ia32_user_desc);
+	count /= sizeof(struct ia32_user_desc);
+
+	tls = &target->thread.tls_array[pos];
+
+	if (kbuf) {
+		struct ia32_user_desc *info = kbuf;
+		while (count-- > 0)
+			fill_user_desc(info++, GDT_ENTRY_TLS_MIN + pos++,
+					tls++);
+	} else {
+		struct ia32_user_desc __user *u_info = ubuf;
+		while (count-- > 0) {
+			struct ia32_user_desc info;
+			fill_user_desc(&info, GDT_ENTRY_TLS_MIN + pos++, tls++);
+			if (__copy_to_user(u_info++, &info, sizeof(info)))
+				return -EFAULT;
+		}
+	}
+
+	return 0;
+}
+
+static int ia32_tls_set(struct task_struct *target,
+		const struct user_regset *regset, unsigned int pos,
+		unsigned int count, const void *kbuf, const void __user *ubuf)
+{
+	struct ia32_user_desc infobuf[GDT_ENTRY_TLS_ENTRIES];
+	const struct ia32_user_desc *info;
+
+	if (pos > GDT_ENTRY_TLS_ENTRIES * sizeof(struct ia32_user_desc) ||
+			(pos % sizeof(struct ia32_user_desc)) != 0 ||
+			(count % sizeof(struct ia32_user_desc)) != 0)
+		return -EINVAL;
+
+	if (kbuf)
+		info = kbuf;
+	else if (__copy_from_user(infobuf, ubuf, count))
+		return -EFAULT;
+	else
+		info = infobuf;
+
+	set_tls_desc(target,
+		GDT_ENTRY_TLS_MIN + (pos / sizeof(struct ia32_user_desc)),
+		info, count / sizeof(struct ia32_user_desc));
+
+	return 0;
+}
+
+/*
+ * This should match arch/i386/kernel/ptrace.c:native_regsets.
+ * XXX ioperm? vm86?
+ */
+static const struct user_regset ia32_regsets[] = {
+	{
+		.core_note_type = NT_PRSTATUS,
+		.n = sizeof(struct user_regs_struct32)/4,
+		.size = 4, .align = 4,
+		.get = ia32_genregs_get, .set = ia32_genregs_set
+	},
+	{
+		.core_note_type = NT_PRFPREG,
+		.n = sizeof(struct ia32_user_i387_struct) / 4,
+		.size = 4, .align = 4,
+		.get = ia32_fpregs_get, .set = ia32_fpregs_set
+	},
+	{
+		.core_note_type = NT_PRXFPREG,
+		.n = sizeof(struct ia32_user_fxsr_struct) / 4,
+		.size = 4, .align = 4,
+		.get = ia32_fpxregs_get, .set = ia32_fpxregs_set
+	},
+	{
+		.core_note_type = NT_386_TLS,
+		.n = GDT_ENTRY_TLS_ENTRIES,
+		.bias = GDT_ENTRY_TLS_MIN,
+		.size = sizeof(struct ia32_user_desc),
+		.align = sizeof(struct ia32_user_desc),
+		.active = ia32_tls_active,
+		.get = ia32_tls_get, .set = ia32_tls_set,
+	},
+};
+
+const struct user_regset_view user_ia32_view = {
+	.name = "i386", .e_machine = EM_386,
+	.regsets = ia32_regsets, .n = ARRAY_SIZE(ia32_regsets)
+};
+
 long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high, 
 			__u32 len_low, __u32 len_high, int advice)
 { 
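
These regsets plug the ia32 state into the generic user_regset framework,
so ptrace and ELF core dumps no longer open-code the per-arch formats. A
sketch of how a consumer drives the view (task_user_regset_view() and the
->get() hook are the real API; the error handling is an assumption):

	static int dump_ia32_gregs(struct task_struct *task, u32 *out)
	{
		const struct user_regset_view *view = task_user_regset_view(task);
		const struct user_regset *rs = &view->regsets[0]; /* NT_PRSTATUS */

		/* kernel-buffer path: pos 0, the whole register block */
		return rs->get(task, rs, 0, rs->n * rs->size, out, NULL);
	}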
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 33e5a59..13fd10e 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -6,7 +6,7 @@
 
 obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o	\
 	 irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o		\
-	 salinfo.o semaphore.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \
+	 salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \
 	 unwind.o mca.o mca_asm.o topology.o
 
 obj-$(CONFIG_IA64_BRL_EMU)	+= brl_emu.o
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 78f28d8..c7467f8 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -423,6 +423,7 @@
 #define pxm_bit_set(bit)	(set_bit(bit,(void *)pxm_flag))
 #define pxm_bit_test(bit)	(test_bit(bit,(void *)pxm_flag))
 static struct acpi_table_slit __initdata *slit_table;
+cpumask_t early_cpu_possible_map = CPU_MASK_NONE;
 
 static int get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
 {
@@ -482,6 +483,7 @@
 	    (pa->apic_id << 8) | (pa->local_sapic_eid);
 	/* nid should be overridden as logical node id later */
 	node_cpuid[srat_num_cpus].nid = pxm;
+	cpu_set(srat_num_cpus, early_cpu_possible_map);
 	srat_num_cpus++;
 }
 
@@ -559,7 +561,7 @@
 	}
 
 	/* set logical node id in cpu structure */
-	for (i = 0; i < srat_num_cpus; i++)
+	for_each_possible_early_cpu(i)
 		node_cpuid[i].nid = pxm_to_node(node_cpuid[i].nid);
 
 	printk(KERN_INFO "Number of logical nodes in system = %d\n",
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index 0aebc6f..230a6f9 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -7,6 +7,7 @@
 #define ASM_OFFSETS_C 1
 
 #include <linux/sched.h>
+#include <linux/pid.h>
 #include <linux/clocksource.h>
 
 #include <asm-ia64/processor.h>
@@ -34,17 +35,29 @@
 	DEFINE(SIGFRAME_SIZE, sizeof (struct sigframe));
 	DEFINE(UNW_FRAME_INFO_SIZE, sizeof (struct unw_frame_info));
 
+	BUILD_BUG_ON(sizeof(struct upid) != 32);
+	DEFINE(IA64_UPID_SHIFT, 5);
+
 	BLANK();
 
 	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
 	DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
 	DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	DEFINE(TI_AC_STAMP, offsetof(struct thread_info, ac_stamp));
+	DEFINE(TI_AC_LEAVE, offsetof(struct thread_info, ac_leave));
+	DEFINE(TI_AC_STIME, offsetof(struct thread_info, ac_stime));
+	DEFINE(TI_AC_UTIME, offsetof(struct thread_info, ac_utime));
+#endif
 
 	BLANK();
 
 	DEFINE(IA64_TASK_BLOCKED_OFFSET,offsetof (struct task_struct, blocked));
 	DEFINE(IA64_TASK_CLEAR_CHILD_TID_OFFSET,offsetof (struct task_struct, clear_child_tid));
 	DEFINE(IA64_TASK_GROUP_LEADER_OFFSET, offsetof (struct task_struct, group_leader));
+	DEFINE(IA64_TASK_TGIDLINK_OFFSET, offsetof (struct task_struct, pids[PIDTYPE_PID].pid));
+	DEFINE(IA64_PID_LEVEL_OFFSET, offsetof (struct pid, level));
+	DEFINE(IA64_PID_UPID_OFFSET, offsetof (struct pid, numbers[0]));
 	DEFINE(IA64_TASK_PENDING_OFFSET,offsetof (struct task_struct, pending));
 	DEFINE(IA64_TASK_PID_OFFSET, offsetof (struct task_struct, pid));
 	DEFINE(IA64_TASK_REAL_PARENT_OFFSET, offsetof (struct task_struct, real_parent));
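
The TI_AC_* offsets above exist so the assembly entry paths can do the
bookkeeping that CONFIG_VIRT_CPU_ACCOUNTING enables: snapshot the cycle
counter at every kernel entry/exit and charge the elapsed cycles to
system or user time. Conceptually, the per-transition step in C (the
real work is done in assembly; ia64_get_itc() reads the ITC cycle
counter):

	static inline void account_kernel_time(struct thread_info *ti)
	{
		unsigned long now = ia64_get_itc();

		ti->ac_stime += now - ti->ac_stamp;	/* charge to system time */
		ti->ac_stamp = now;			/* restart the interval  */
	}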
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index fbe742a..90ef338 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -24,6 +24,7 @@
 static atomic_t kdump_cpu_frozen;
 atomic_t kdump_in_progress;
 static int kdump_on_init = 1;
+static int kdump_on_fatal_mca = 1;
 
 static inline Elf64_Word
 *append_elf_note(Elf64_Word *buf, char *name, unsigned type, void *data,
@@ -118,6 +119,7 @@
 static void
 machine_kdump_on_init(void)
 {
+	crash_save_vmcoreinfo();
 	local_irq_disable();
 	kexec_disable_iosapic();
 	machine_kexec(ia64_kimage);
@@ -148,7 +150,7 @@
 	struct ia64_mca_notify_die *nd;
 	struct die_args *args = data;
 
-	if (!kdump_on_init)
+	if (!kdump_on_init && !kdump_on_fatal_mca)
 		return NOTIFY_DONE;
 
 	if (!ia64_kimage) {
@@ -173,32 +175,38 @@
 		return NOTIFY_DONE;
 
 	switch (val) {
-		case DIE_INIT_MONARCH_PROCESS:
+	case DIE_INIT_MONARCH_PROCESS:
+		if (kdump_on_init) {
 			atomic_set(&kdump_in_progress, 1);
 			*(nd->monarch_cpu) = -1;
-			break;
-		case DIE_INIT_MONARCH_LEAVE:
+		}
+		break;
+	case DIE_INIT_MONARCH_LEAVE:
+		if (kdump_on_init)
 			machine_kdump_on_init();
-			break;
-		case DIE_INIT_SLAVE_LEAVE:
-			if (atomic_read(&kdump_in_progress))
-				unw_init_running(kdump_cpu_freeze, NULL);
-			break;
-		case DIE_MCA_RENDZVOUS_LEAVE:
-			if (atomic_read(&kdump_in_progress))
-				unw_init_running(kdump_cpu_freeze, NULL);
-			break;
-		case DIE_MCA_MONARCH_LEAVE:
-		     /* die_register->signr indicate if MCA is recoverable */
-			if (!args->signr)
-				machine_kdump_on_init();
-			break;
+		break;
+	case DIE_INIT_SLAVE_LEAVE:
+		if (atomic_read(&kdump_in_progress))
+			unw_init_running(kdump_cpu_freeze, NULL);
+		break;
+	case DIE_MCA_RENDZVOUS_LEAVE:
+		if (atomic_read(&kdump_in_progress))
+			unw_init_running(kdump_cpu_freeze, NULL);
+		break;
+	case DIE_MCA_MONARCH_LEAVE:
+		/* die_register->signr indicate if MCA is recoverable */
+		if (kdump_on_fatal_mca && !args->signr) {
+			atomic_set(&kdump_in_progress, 1);
+			*(nd->monarch_cpu) = -1;
+			machine_kdump_on_init();
+		}
+		break;
 	}
 	return NOTIFY_DONE;
 }
 
 #ifdef CONFIG_SYSCTL
-static ctl_table kdump_on_init_table[] = {
+static ctl_table kdump_ctl_table[] = {
 	{
 		.ctl_name = CTL_UNNUMBERED,
 		.procname = "kdump_on_init",
@@ -207,6 +215,14 @@
 		.mode = 0644,
 		.proc_handler = &proc_dointvec,
 	},
+	{
+		.ctl_name = CTL_UNNUMBERED,
+		.procname = "kdump_on_fatal_mca",
+		.data = &kdump_on_fatal_mca,
+		.maxlen = sizeof(int),
+		.mode = 0644,
+		.proc_handler = &proc_dointvec,
+	},
 	{ .ctl_name = 0 }
 };
 
@@ -215,7 +231,7 @@
 	  .ctl_name = CTL_KERN,
 	  .procname = "kernel",
 	  .mode = 0555,
-	  .child = kdump_on_init_table,
+	  .child = kdump_ctl_table,
 	},
 	{ .ctl_name = 0 }
 };
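
With the table above registered, the new knob appears beside kdump_on_init as /proc/sys/kernel/kdump_on_fatal_mca and takes a plain boolean integer. A user-space sketch of turning it off at run time, assuming procfs is mounted at /proc (error handling trimmed to the essentials):

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/kernel/kdump_on_fatal_mca", "w");

		if (!f)
			return 1;
		fputs("0\n", f);	/* 0 = don't kdump on a fatal MCA */
		return fclose(f) ? 1 : 0;
	}
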
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 728d724..d45f215 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -37,6 +37,7 @@
 #include <asm/pgtable.h>
 #include <asm/processor.h>
 #include <asm/mca.h>
+#include <asm/tlbflush.h>
 
 #define EFI_DEBUG	0
 
@@ -403,6 +404,41 @@
 	return NULL;
 }
 
+
+static u8 __init palo_checksum(u8 *buffer, u32 length)
+{
+	u8 sum = 0;
+	u8 *end = buffer + length;
+
+	while (buffer < end)
+		sum = (u8) (sum + *(buffer++));
+
+	return sum;
+}
+
+/*
+ * Parse and handle the PALO table, which is published at:
+ * http://www.dig64.org/home/DIG64_PALO_R1_0.pdf
+ */
+static void __init handle_palo(unsigned long palo_phys)
+{
+	struct palo_table *palo = __va(palo_phys);
+	u8  checksum;
+
+	if (strncmp(palo->signature, PALO_SIG, sizeof(PALO_SIG) - 1)) {
+		printk(KERN_INFO "PALO signature incorrect.\n");
+		return;
+	}
+
+	checksum = palo_checksum((u8 *)palo, palo->length);
+	if (checksum) {
+		printk(KERN_INFO "PALO checksum incorrect.\n");
+		return;
+	}
+
+	setup_ptcg_sem(palo->max_tlb_purges, NPTCG_FROM_PALO);
+}
+
 void
 efi_map_pal_code (void)
 {
@@ -432,6 +468,7 @@
 	u64 efi_desc_size;
 	char *cp, vendor[100] = "unknown";
 	int i;
+	unsigned long palo_phys;
 
 	/*
 	 * It's too early to be able to use the standard kernel command line
@@ -496,6 +533,8 @@
 	efi.hcdp       = EFI_INVALID_TABLE_ADDR;
 	efi.uga        = EFI_INVALID_TABLE_ADDR;
 
+	palo_phys      = EFI_INVALID_TABLE_ADDR;
+
 	for (i = 0; i < (int) efi.systab->nr_tables; i++) {
 		if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) {
 			efi.mps = config_tables[i].table;
@@ -515,10 +554,17 @@
 		} else if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) {
 			efi.hcdp = config_tables[i].table;
 			printk(" HCDP=0x%lx", config_tables[i].table);
+		} else if (efi_guidcmp(config_tables[i].guid,
+			 PROCESSOR_ABSTRACTION_LAYER_OVERWRITE_GUID) == 0) {
+			palo_phys = config_tables[i].table;
+			printk(" PALO=0x%lx", config_tables[i].table);
 		}
 	}
 	printk("\n");
 
+	if (palo_phys != EFI_INVALID_TABLE_ADDR)
+		handle_palo(palo_phys);
+
 	runtime = __va(efi.systab->runtime);
 	efi.get_time = phys_get_time;
 	efi.set_time = phys_set_time;
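
handle_palo() relies on the table's checksum byte being chosen so that the byte-wise sum of the entire table is zero mod 256, which is why a valid table makes palo_checksum() return 0. A stand-alone illustration of that convention (a hypothetical 8-byte table, not the real PALO layout):

	#include <stdio.h>

	static unsigned char sum_bytes(const unsigned char *p, unsigned int len)
	{
		unsigned char sum = 0;

		while (len--)
			sum = (unsigned char)(sum + *p++);
		return sum;
	}

	int main(void)
	{
		unsigned char table[8] = { 'P', 'A', 'L', 'O', 1, 2, 3, 0 };

		table[7] = (unsigned char)(0u - sum_bytes(table, 7));	/* fix up checksum byte */
		printf("valid: %d\n", sum_bytes(table, 8) == 0);	/* prints "valid: 1" */
		return 0;
	}
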
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 3c331c4..b0be4a2 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -710,6 +710,16 @@
 (pUStk)	cmp.eq.unc p6,p0=r0,r0		// p6 <- pUStk
 #endif
 .work_processed_syscall:
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	adds r2=PT(LOADRS)+16,r12
+(pUStk)	mov.m r22=ar.itc			// fetch time at leave
+	adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
+	;;
+(p6)	ld4 r31=[r18]				// load current_thread_info()->flags
+	ld8 r19=[r2],PT(B6)-PT(LOADRS)		// load ar.rsc value for "loadrs"
+	adds r3=PT(AR_BSPSTORE)+16,r12		// deferred
+	;;
+#else
 	adds r2=PT(LOADRS)+16,r12
 	adds r3=PT(AR_BSPSTORE)+16,r12
 	adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
@@ -718,6 +728,7 @@
 	ld8 r19=[r2],PT(B6)-PT(LOADRS)		// load ar.rsc value for "loadrs"
 	nop.i 0
 	;;
+#endif
 	mov r16=ar.bsp				// M2  get existing backing store pointer
 	ld8 r18=[r2],PT(R9)-PT(B6)		// load b6
 (p6)	and r15=TIF_WORK_MASK,r31		// any work other than TIF_SYSCALL_TRACE?
@@ -737,12 +748,21 @@
 
 	ld8 r29=[r2],16		// M0|1 load cr.ipsr
 	ld8 r28=[r3],16		// M0|1 load cr.iip
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+(pUStk) add r14=TI_AC_LEAVE+IA64_TASK_SIZE,r13
+	;;
+	ld8 r30=[r2],16		// M0|1 load cr.ifs
+	ld8 r25=[r3],16		// M0|1 load ar.unat
+(pUStk) add r15=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
+	;;
+#else
 	mov r22=r0		// A    clear r22
 	;;
 	ld8 r30=[r2],16		// M0|1 load cr.ifs
 	ld8 r25=[r3],16		// M0|1 load ar.unat
 (pUStk) add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
 	;;
+#endif
 	ld8 r26=[r2],PT(B0)-PT(AR_PFS)	// M0|1 load ar.pfs
 (pKStk)	mov r22=psr			// M2   read PSR now that interrupts are disabled
 	nop 0
@@ -759,7 +779,11 @@
 	ld8.fill r1=[r3],16			// M0|1 load r1
 (pUStk) mov r17=1				// A
 	;;
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+(pUStk) st1 [r15]=r17				// M2|3
+#else
 (pUStk) st1 [r14]=r17				// M2|3
+#endif
 	ld8.fill r13=[r3],16			// M0|1
 	mov f8=f0				// F    clear f8
 	;;
@@ -775,12 +799,22 @@
 	shr.u r18=r19,16		// I0|1 get byte size of existing "dirty" partition
 	cover				// B    add current frame into dirty partition & set cr.ifs
 	;;
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	mov r19=ar.bsp			// M2   get new backing store pointer
+	st8 [r14]=r22			// M	save time at leave
+	mov f10=f0			// F    clear f10
+
+	mov r22=r0			// A	clear r22
+	movl r14=__kernel_syscall_via_epc // X
+	;;
+#else
 	mov r19=ar.bsp			// M2   get new backing store pointer
 	mov f10=f0			// F    clear f10
 
 	nop.m 0
 	movl r14=__kernel_syscall_via_epc // X
 	;;
+#endif
 	mov.m ar.csd=r0			// M2   clear ar.csd
 	mov.m ar.ccv=r0			// M2   clear ar.ccv
 	mov b7=r14			// I0   clear b7 (hint with __kernel_syscall_via_epc)
@@ -913,10 +947,18 @@
 	adds r16=PT(CR_IPSR)+16,r12
 	adds r17=PT(CR_IIP)+16,r12
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	.pred.rel.mutex pUStk,pKStk
+(pKStk)	mov r22=psr		// M2 read PSR now that interrupts are disabled
+(pUStk)	mov.m r22=ar.itc	// M  fetch time at leave
+	nop.i 0
+	;;
+#else
 (pKStk)	mov r22=psr		// M2 read PSR now that interrupts are disabled
 	nop.i 0
 	nop.i 0
 	;;
+#endif
 	ld8 r29=[r16],16	// load cr.ipsr
 	ld8 r28=[r17],16	// load cr.iip
 	;;
@@ -938,15 +980,37 @@
 	;;
 	ld8.fill r12=[r16],16
 	ld8.fill r13=[r17],16
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+(pUStk)	adds r3=TI_AC_LEAVE+IA64_TASK_SIZE,r18
+#else
 (pUStk)	adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18
+#endif
 	;;
 	ld8 r20=[r16],16	// ar.fpsr
 	ld8.fill r15=[r17],16
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+(pUStk)	adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18	// deferred
+#endif
 	;;
 	ld8.fill r14=[r16],16
 	ld8.fill r2=[r17]
 (pUStk)	mov r17=1
 	;;
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	//  mmi_ :  ld8 st1 shr;;         mmi_ : st8 st1 shr;;
+	//  mib  :  mov add br        ->  mib  : ld8 add br
+	//  bbb_ :  br  nop cover;;       mbb_ : mov br  cover;;
+	//
+	//  nothing requires bsp in r16 if the (pKStk) branch is taken.
+(pUStk)	st8 [r3]=r22		// save time at leave
+(pUStk)	st1 [r18]=r17		// restore current->thread.on_ustack
+	shr.u r18=r19,16	// get byte size of existing "dirty" partition
+	;;
+	ld8.fill r3=[r16]	// deferred
+	LOAD_PHYS_STACK_REG_SIZE(r17)
+(pKStk)	br.cond.dpnt skip_rbs_switch
+	mov r16=ar.bsp		// get existing backing store pointer
+#else
 	ld8.fill r3=[r16]
 (pUStk)	st1 [r18]=r17		// restore current->thread.on_ustack
 	shr.u r18=r19,16	// get byte size of existing "dirty" partition
@@ -954,6 +1018,7 @@
 	mov r16=ar.bsp		// get existing backing store pointer
 	LOAD_PHYS_STACK_REG_SIZE(r17)
 (pKStk)	br.cond.dpnt skip_rbs_switch
+#endif
 
 	/*
 	 * Restore user backing store.
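
Stripped of the bank-register constraints and bundle scheduling, the accounting blocks above perform simple bookkeeping on three thread_info timestamps. A C paraphrase of the update, using the TI_AC_* field names from the diff (a sketch, not the real implementation):

	/*
	 * ac_stamp: time of the last accounting check inside the kernel;
	 * ac_leave: time the kernel was last left for user mode;
	 * now:      a freshly read ar.itc value at the kernel boundary.
	 */
	static void account_boundary(struct thread_info *ti, unsigned long now)
	{
		ti->ac_stime += ti->ac_leave - ti->ac_stamp;	/* kernel time up to the last leave */
		ti->ac_utime += now - ti->ac_leave;		/* user time since the last leave */
		ti->ac_stamp  = now;				/* restart the measurement */
	}
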
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index 4484197..c1625c7 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -61,13 +61,29 @@
 	.prologue
 	.altrp b6
 	.body
+	add r17=IA64_TASK_GROUP_LEADER_OFFSET,r16
+	;;
+	ld8 r17=[r17]				// r17 = current->group_leader
 	add r9=TI_FLAGS+IA64_TASK_SIZE,r16
 	;;
 	ld4 r9=[r9]
-	add r8=IA64_TASK_TGID_OFFSET,r16
+	add r17=IA64_TASK_TGIDLINK_OFFSET,r17
 	;;
 	and r9=TIF_ALLWORK_MASK,r9
-	ld4 r8=[r8]				// r8 = current->tgid
+	ld8 r17=[r17]				// r17 = current->group_leader->pids[PIDTYPE_PID].pid
+	;;
+	add r8=IA64_PID_LEVEL_OFFSET,r17
+	;;
+	ld4 r8=[r8]				// r8 = pid->level
+	add r17=IA64_PID_UPID_OFFSET,r17	// r17 = &pid->numbers[0]
+	;;
+	shl r8=r8,IA64_UPID_SHIFT
+	;;
+	add r17=r17,r8				// r17 = &pid->numbers[pid->level]
+	;;
+	ld4 r8=[r17]				// r8 = pid->numbers[pid->level].nr
+	;;
+	mov r17=0
 	;;
 	cmp.ne p8,p0=0,r9
 (p8)	br.spnt.many fsys_fallback_syscall
@@ -126,15 +142,25 @@
 	.altrp b6
 	.body
 	add r9=TI_FLAGS+IA64_TASK_SIZE,r16
+	add r17=IA64_TASK_TGIDLINK_OFFSET,r16
 	;;
 	ld4 r9=[r9]
 	tnat.z p6,p7=r32		// check argument register for being NaT
+	ld8 r17=[r17]				// r17 = current->pids[PIDTYPE_PID].pid
 	;;
 	and r9=TIF_ALLWORK_MASK,r9
-	add r8=IA64_TASK_PID_OFFSET,r16
+	add r8=IA64_PID_LEVEL_OFFSET,r17
 	add r18=IA64_TASK_CLEAR_CHILD_TID_OFFSET,r16
 	;;
-	ld4 r8=[r8]
+	ld4 r8=[r8]				// r8 = pid->level
+	add r17=IA64_PID_UPID_OFFSET,r17	// r17 = &pid->numbers[0]
+	;;
+	shl r8=r8,IA64_UPID_SHIFT
+	;;
+	add r17=r17,r8				// r17 = &pid->numbers[pid->level]
+	;;
+	ld4 r8=[r17]				// r8 = pid->numbers[pid->level].nr
+	;;
 	cmp.ne p8,p0=0,r9
 	mov r17=-1
 	;;
@@ -210,27 +236,25 @@
 	// Note that instructions are optimized for McKinley. McKinley can
 	// process two bundles simultaneously and therefore we continuously
 	// try to feed the CPU two bundles and then a stop.
-	//
-	// Additional note that code has changed a lot. Optimization is TBD.
-	// Comments begin with "?" are maybe outdated.
-	tnat.nz p6,p0 = r31	// ? branch deferred to fit later bundle
-	mov pr = r30,0xc000	// Set predicates according to function
+
 	add r2 = TI_FLAGS+IA64_TASK_SIZE,r16
+	tnat.nz p6,p0 = r31		// guard against NaT argument
+(p6)	br.cond.spnt.few .fail_einval
 	movl r20 = fsyscall_gtod_data // load fsyscall gettimeofday data address
 	;;
+	ld4 r2 = [r2]			// process work pending flags
 	movl r29 = itc_jitter_data	// itc_jitter
 	add r22 = IA64_GTOD_WALL_TIME_OFFSET,r20	// wall_time
-	ld4 r2 = [r2]		// process work pending flags
-	;;
-(p15)	add r22 = IA64_GTOD_MONO_TIME_OFFSET,r20	// monotonic_time
 	add r21 = IA64_CLKSRC_MMIO_OFFSET,r20
-	add r19 = IA64_ITC_LASTCYCLE_OFFSET,r29
-	and r2 = TIF_ALLWORK_MASK,r2
-(p6)    br.cond.spnt.few .fail_einval	// ? deferred branch
+	mov pr = r30,0xc000	// Set predicates according to function
 	;;
-	add r26 = IA64_CLKSRC_CYCLE_LAST_OFFSET,r20 // clksrc_cycle_last
+	and r2 = TIF_ALLWORK_MASK,r2
+	add r19 = IA64_ITC_LASTCYCLE_OFFSET,r29
+(p15)	add r22 = IA64_GTOD_MONO_TIME_OFFSET,r20	// monotonic_time
+	;;
+	add r26 = IA64_CLKSRC_CYCLE_LAST_OFFSET,r20	// clksrc_cycle_last
 	cmp.ne p6, p0 = 0, r2	// Fallback if work is scheduled
-(p6)    br.cond.spnt.many fsys_fallback_syscall
+(p6)	br.cond.spnt.many fsys_fallback_syscall
 	;;
 	// Begin critical section
 .time_redo:
@@ -258,7 +282,6 @@
 (p8)	mov r2 = ar.itc		// CPU_TIMER. 36 clocks latency!!!
 (p9)	ld8 r2 = [r30]		// MMIO_TIMER. Could also have latency issues..
 (p13)	ld8 r25 = [r19]		// get itc_lastcycle value
-	;;		// ? could be removed by moving the last add upward
 	ld8 r9 = [r22],IA64_TIMESPEC_TV_NSEC_OFFSET	// tv_sec
 	;;
 	ld8 r8 = [r22],-IA64_TIMESPEC_TV_NSEC_OFFSET	// tv_nsec
@@ -285,13 +308,12 @@
 EX(.fail_efault, probe.w.fault r31, 3)
 	xmpy.l f8 = f8,f7	// nsec_per_cyc*(counter-last_counter)
 	;;
-	// ? simulate tbit.nz.or p7,p0 = r28,0
 	getf.sig r2 = f8
 	mf
 	;;
 	ld4 r10 = [r20]		// gtod_lock.sequence
 	shr.u r2 = r2,r23	// shift by factor
-	;;		// ? overloaded 3 bundles!
+	;;
 	add r8 = r8,r2		// Add xtime.nsecs
 	cmp4.ne p7,p0 = r28,r10
 (p7)	br.cond.dpnt.few .time_redo	// sequence number changed, redo
@@ -319,9 +341,9 @@
 EX(.fail_efault, probe.w.fault r23, 3)	// This also costs 5 cycles
 (p14)	xmpy.hu f8 = f8, f7		// xmpy has 5 cycles latency so use it
 	;;
-	mov r8 = r0
 (p14)	getf.sig r2 = f8
 	;;
+	mov r8 = r0
 (p14)	shr.u r21 = r2, 4
 	;;
 EX(.fail_efault, st8 [r31] = r9)
@@ -660,7 +682,11 @@
 	nop.i 0
 	;;
 	mov ar.rsc=0				// M2   set enforced lazy mode, pl 0, LE, loadrs=0
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	mov.m r30=ar.itc			// M    get cycle for accounting
+#else
 	nop.m 0
+#endif
 	nop.i 0
 	;;
 	mov r23=ar.bspstore			// M2 (12 cyc) save ar.bspstore
@@ -682,6 +708,28 @@
 	cmp.ne pKStk,pUStk=r0,r0		// A    set pKStk <- 0, pUStk <- 1
 	br.call.sptk.many b7=ia64_syscall_setup	// B
 	;;
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	// mov.m r30=ar.itc is called in advance
+	add r16=TI_AC_STAMP+IA64_TASK_SIZE,r2
+	add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r2
+	;;
+	ld8 r18=[r16],TI_AC_STIME-TI_AC_STAMP	// time at last check in kernel
+	ld8 r19=[r17],TI_AC_UTIME-TI_AC_LEAVE	// time at leaving the kernel
+	;;
+	ld8 r20=[r16],TI_AC_STAMP-TI_AC_STIME	// cumulated stime
+	ld8 r21=[r17]				// cumulated utime
+	sub r22=r19,r18				// stime accrued before leaving the kernel
+	;;
+	st8 [r16]=r30,TI_AC_STIME-TI_AC_STAMP	// update stamp
+	sub r18=r30,r19				// elapsed time in user mode
+	;;
+	add r20=r20,r22				// sum stime
+	add r21=r21,r18				// sum utime
+	;;
+	st8 [r16]=r20				// update stime
+	st8 [r17]=r21				// update utime
+	;;
+#endif
 	mov ar.rsc=0x3				// M2   set eager mode, pl 0, LE, loadrs=0
 	mov rp=r14				// I0   set the real return addr
 	and r3=_TIF_SYSCALL_TRACEAUDIT,r3	// A
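
The extra loads in the getpid and set_tid_address fast paths above replace the cached task->tgid with a walk through the struct pid introduced by pid namespaces: the number returned is the one recorded at the pid's deepest namespace level. In C, the getpid path now computes roughly the following (a sketch following the offsets used in the assembly):

	static pid_t fsys_getpid_sketch(struct task_struct *task)
	{
		/* current->group_leader->pids[PIDTYPE_PID].pid, as in the asm */
		struct pid *pid = task->group_leader->pids[PIDTYPE_PID].pid;

		/*
		 * numbers[] is indexed by namespace level; IA64_UPID_SHIFT
		 * turns that index into a byte offset in the assembly.
		 */
		return pid->numbers[pid->level].nr;
	}
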
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index d3a41d5..ddeab4e 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -1002,6 +1002,26 @@
 	br.ret.sptk.many rp
 END(sched_clock)
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+GLOBAL_ENTRY(cycle_to_cputime)
+	alloc r16=ar.pfs,1,0,0,0
+	addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
+	;;
+	ldf8 f8=[r8]
+	;;
+	setf.sig f9=r32
+	;;
+	xmpy.lu f10=f9,f8	// calculate low 64 bits of 128-bit product	(4 cyc)
+	xmpy.hu f11=f9,f8	// calculate high 64 bits of 128-bit product
+	;;
+	getf.sig r8=f10		//						(5 cyc)
+	getf.sig r9=f11
+	;;
+	shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT
+	br.ret.sptk.many rp
+END(cycle_to_cputime)
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+
 GLOBAL_ENTRY(start_kernel_thread)
 	.prologue
 	.save rp, r0				// this is the end of the call-chain
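
cycle_to_cputime() multiplies the cycle count by the per-CPU nsec_per_cyc fixed-point factor, forming the full 128-bit product with the xmpy.lu/xmpy.hu pair and then extracting a 64-bit window with shrp. A C sketch of the same arithmetic, assuming a compiler that provides __uint128_t and using IA64_NSEC_PER_CYC_SHIFT as in the assembly:

	static unsigned long cycle_to_cputime_sketch(unsigned long nsec_per_cyc,
						     unsigned long cycles)
	{
		/* 64x64 -> 128-bit product, then drop the fixed-point scale */
		__uint128_t prod = (__uint128_t)cycles * nsec_per_cyc;

		return (unsigned long)(prod >> IA64_NSEC_PER_CYC_SHIFT);
	}
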
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 8e7193d..6da1f20 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -19,12 +19,6 @@
 EXPORT_SYMBOL(ip_fast_csum);		/* hand-coded assembly */
 EXPORT_SYMBOL(csum_ipv6_magic);
 
-#include <asm/semaphore.h>
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__down_trylock);
-EXPORT_SYMBOL(__up);
-
 #include <asm/page.h>
 EXPORT_SYMBOL(clear_page);
 
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index d8be23f..5538471 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -472,7 +472,7 @@
 			static unsigned char count;
 			static long last_time;
 
-			if (jiffies - last_time > 5*HZ)
+			if (time_after(jiffies, last_time + 5 * HZ))
 				count = 0;
 			if (++count < 5) {
 				last_time = jiffies;
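
The switch from "jiffies - last_time > 5*HZ" to time_after() matters at counter wrap: time_after() compares via a signed difference, so the ordering stays correct when jiffies rolls over. A minimal stand-alone illustration of the idiom with 32-bit counters (my_time_after is a local stand-in, not the kernel macro):

	#include <stdio.h>

	#define my_time_after(a, b)	((int)((b) - (a)) < 0)	/* signed-difference compare */

	int main(void)
	{
		unsigned int last = 0xfffffff0u;	/* "jiffies" just before wrap */
		unsigned int now  = 0x00000010u;	/* "jiffies" just after wrap  */

		/* prints 1: now is after last, although now < last numerically */
		printf("%d\n", my_time_after(now, last));
		return 0;
	}
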
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index 34f44d8..6678c49 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -805,8 +805,13 @@
 
 (p8)	adds r28=16,r28				// A    switch cr.iip to next bundle
 (p9)	adds r8=1,r8				// A    increment ei to next slot
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	;;
+	mov b6=r30				// I0   setup syscall handler branch reg early
+#else
 	nop.i 0
 	;;
+#endif
 
 	mov.m r25=ar.unat			// M2 (5 cyc)
 	dep r29=r8,r29,41,2			// I0   insert new ei into cr.ipsr
@@ -817,7 +822,11 @@
 	//
 ///////////////////////////////////////////////////////////////////////
 	st1 [r16]=r0				// M2|3 clear current->thread.on_ustack flag
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	mov.m r30=ar.itc			// M    get cycle for accounting
+#else
 	mov b6=r30				// I0   setup syscall handler branch reg early
+#endif
 	cmp.eq pKStk,pUStk=r0,r17		// A    were we on kernel stacks already?
 
 	and r9=_TIF_SYSCALL_TRACEAUDIT,r9	// A    mask trace or audit
@@ -829,6 +838,30 @@
 	cmp.eq p14,p0=r9,r0			// A    are syscalls being traced/audited?
 	br.call.sptk.many b7=ia64_syscall_setup	// B
 1:
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	// mov.m r30=ar.itc is called in advance, and r13 is current
+	add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13	// A
+	add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13	// A
+(pKStk)	br.cond.spnt .skip_accounting		// B	unlikely skip
+	;;
+	ld8 r18=[r16],TI_AC_STIME-TI_AC_STAMP	// M  get last stamp
+	ld8 r19=[r17],TI_AC_UTIME-TI_AC_LEAVE	// M  time at leave
+	;;
+	ld8 r20=[r16],TI_AC_STAMP-TI_AC_STIME	// M  cumulated stime
+	ld8 r21=[r17]				// M  cumulated utime
+	sub r22=r19,r18				// A  stime before leave
+	;;
+	st8 [r16]=r30,TI_AC_STIME-TI_AC_STAMP	// M  update stamp
+	sub r18=r30,r19				// A  elapsed time in user
+	;;
+	add r20=r20,r22				// A  sum stime
+	add r21=r21,r18				// A  sum utime
+	;;
+	st8 [r16]=r20				// M  update stime
+	st8 [r17]=r21				// M  update utime
+	;;
+.skip_accounting:
+#endif
 	mov ar.rsc=0x3				// M2   set eager mode, pl 0, LE, loadrs=0
 	nop 0
 	bsw.1					// B (6 cyc) regs are saved, switch to bank 1
@@ -928,6 +961,7 @@
 	 *	- r27: saved ar.rsc
 	 *	- r28: saved cr.iip
 	 *	- r29: saved cr.ipsr
+	 *	- r30: ar.itc for accounting (don't touch)
 	 *	- r31: saved pr
 	 *	-  b0: original contents (to be saved)
 	 * On exit:
@@ -1090,6 +1124,41 @@
 	DBG_FAULT(16)
 	FAULT(16)
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	/*
+	 * There is no particular reason for this code to be here, other than
+	 * that there happens to be space here that would go unused otherwise.
+	 * If this fault ever gets "unreserved", simply move the following
+	 * code to a more suitable spot...
+	 *
+	 * account_sys_enter is called from SAVE_MIN* macros if accounting is
+	 * enabled and if the macro is entered from user mode.
+	 */
+ENTRY(account_sys_enter)
+	// mov.m r20=ar.itc is called in advance, and r13 is current
+	add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13
+	add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13
+	;;
+	ld8 r18=[r16],TI_AC_STIME-TI_AC_STAMP	// time at last check in kernel
+	ld8 r19=[r17],TI_AC_UTIME-TI_AC_LEAVE	// time at leaving the kernel
+	;;
+	ld8 r23=[r16],TI_AC_STAMP-TI_AC_STIME	// cumulated stime
+	ld8 r21=[r17]				// cumulated utime
+	sub r22=r19,r18				// stime before leave kernel
+	;;
+	st8 [r16]=r20,TI_AC_STIME-TI_AC_STAMP	// update stamp
+	sub r18=r20,r19				// elapsed time in user mode
+	;;
+	add r23=r23,r22				// sum stime
+	add r21=r21,r18				// sum utime
+	;;
+	st8 [r16]=r23				// update stime
+	st8 [r17]=r21				// update utime
+	;;
+	br.ret.sptk.many rp
+END(account_sys_enter)
+#endif
+
 	.org ia64_ivt+0x4400
 /////////////////////////////////////////////////////////////////////////////////////////
 // 0x4400 Entry 17 (size 64 bundles) Reserved
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 8d9a446..233434f 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -78,6 +78,20 @@
   { u, u, u },  			/* 1F */
 };
 
+/* Insert a long-branch (brl) instruction */
+static void __kprobes set_brl_inst(void *from, void *to)
+{
+	s64 rel = ((s64) to - (s64) from) >> 4;
+	bundle_t *brl;
+	brl = (bundle_t *) ((u64) from & ~0xf);
+	brl->quad0.template = 0x05;	/* [MLX](stop) */
+	brl->quad0.slot0 = NOP_M_INST;	/* nop.m 0x0 */
+	brl->quad0.slot1_p0 = ((rel >> 20) & 0x7fffffffff) << 2;
+	brl->quad1.slot1_p1 = (((rel >> 20) & 0x7fffffffff) << 2) >> (64 - 46);
+	/* brl.cond.sptk.many.clr rel<<4 (qp=0) */
+	brl->quad1.slot2 = BRL_INST(rel >> 59, rel & 0xfffff);
+}
+
 /*
  * In this function we check to see if the instruction
  * is IP relative instruction and update the kprobe
@@ -496,6 +510,77 @@
 	regs->b0 = ((struct fnptr *)kretprobe_trampoline)->ip;
 }
 
+/* Check whether the instruction in the given slot is a break instruction */
+static int __kprobes __is_ia64_break_inst(bundle_t *bundle, uint slot)
+{
+	unsigned int major_opcode;
+	unsigned int template = bundle->quad0.template;
+	unsigned long kprobe_inst;
+
+	/* Move to slot 2 if the bundle is of MLX type and the kprobe slot is 1 */
+	if (slot == 1 && bundle_encoding[template][1] == L)
+		slot++;
+
+	/* Get Kprobe probe instruction at given slot*/
+	get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode);
+
+	/* For a break instruction:
+	 * bits 37:40 (major opcode) must be zero
+	 * bits 27:32 (X6) must be zero
+	 * bits 32:35 (X3) must be zero
+	 */
+	if (major_opcode || ((kprobe_inst >> 27) & 0x1FF)) {
+		/* Not a break instruction */
+		return 0;
+	}
+
+	/* Is a break instruction */
+	return 1;
+}
+
+/*
+ * Check whether the target bundle modifies the IP or may trigger an
+ * exception. If so, it is not boostable.
+ */
+static int __kprobes can_boost(bundle_t *bundle, uint slot,
+			       unsigned long bundle_addr)
+{
+	unsigned int template = bundle->quad0.template;
+
+	do {
+		if (search_exception_tables(bundle_addr + slot) ||
+		    __is_ia64_break_inst(bundle, slot))
+			return 0;	/* exception may occur in this bundle */
+	} while ((++slot) < 3);
+	template &= 0x1e;
+	if (template >= 0x10 /* including B unit */ ||
+	    template == 0x04 /* including X unit */ ||
+	    template == 0x06) /* undefined */
+		return 0;
+
+	return 1;
+}
+
+/* Prepare the long-jump bundle and disable other boosters if needed */
+static void __kprobes prepare_booster(struct kprobe *p)
+{
+	unsigned long addr = (unsigned long)p->addr & ~0xFULL;
+	unsigned int slot = (unsigned long)p->addr & 0xf;
+	struct kprobe *other_kp;
+
+	if (can_boost(&p->ainsn.insn[0].bundle, slot, addr)) {
+		set_brl_inst(&p->ainsn.insn[1].bundle, (bundle_t *)addr + 1);
+		p->ainsn.inst_flag |= INST_FLAG_BOOSTABLE;
+	}
+
+	/* disable boosters in previous slots */
+	for (; addr < (unsigned long)p->addr; addr++) {
+		other_kp = get_kprobe((void *)addr);
+		if (other_kp)
+			other_kp->ainsn.inst_flag &= ~INST_FLAG_BOOSTABLE;
+	}
+}
+
 int __kprobes arch_prepare_kprobe(struct kprobe *p)
 {
 	unsigned long addr = (unsigned long) p->addr;
@@ -530,6 +615,8 @@
 
 	prepare_break_inst(template, slot, major_opcode, kprobe_inst, p, qp);
 
+	prepare_booster(p);
+
 	return 0;
 }
 
@@ -543,7 +630,9 @@
 	src = &p->opcode.bundle;
 
 	flush_icache_range((unsigned long)p->ainsn.insn,
-			(unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t));
+			   (unsigned long)p->ainsn.insn +
+			   sizeof(kprobe_opcode_t) * MAX_INSN_SIZE);
+
 	switch (p->ainsn.slot) {
 		case 0:
 			dest->quad0.slot0 = src->quad0.slot0;
@@ -584,13 +673,13 @@
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
 	mutex_lock(&kprobe_mutex);
-	free_insn_slot(p->ainsn.insn, 0);
+	free_insn_slot(p->ainsn.insn, p->ainsn.inst_flag & INST_FLAG_BOOSTABLE);
 	mutex_unlock(&kprobe_mutex);
 }
 /*
  * We are resuming execution after a single step fault, so the pt_regs
  * structure reflects the register state after we executed the instruction
- * located in the kprobe (p->ainsn.insn.bundle).  We still need to adjust
+ * located in the kprobe (p->ainsn.insn->bundle).  We still need to adjust
  * the ip to point back to the original stack address. To set the IP address
  * to original stack address, handle the case where we need to fixup the
  * relative IP address and/or fixup branch register.
@@ -607,7 +696,7 @@
 	if (slot == 1 && bundle_encoding[template][1] == L)
 		slot = 2;
 
-	if (p->ainsn.inst_flag) {
+	if (p->ainsn.inst_flag & ~INST_FLAG_BOOSTABLE) {
 
 		if (p->ainsn.inst_flag & INST_FLAG_FIX_RELATIVE_IP_ADDR) {
 			/* Fix relative IP address */
@@ -686,33 +775,12 @@
 static int __kprobes is_ia64_break_inst(struct pt_regs *regs)
 {
 	unsigned int slot = ia64_psr(regs)->ri;
-	unsigned int template, major_opcode;
-	unsigned long kprobe_inst;
 	unsigned long *kprobe_addr = (unsigned long *)regs->cr_iip;
 	bundle_t bundle;
 
 	memcpy(&bundle, kprobe_addr, sizeof(bundle_t));
-	template = bundle.quad0.template;
 
-	/* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */
-	if (slot == 1 && bundle_encoding[template][1] == L)
-		slot++;
-
-	/* Get Kprobe probe instruction at given slot*/
-	get_kprobe_inst(&bundle, slot, &kprobe_inst, &major_opcode);
-
-	/* For break instruction,
-	 * Bits 37:40 Major opcode to be zero
-	 * Bits 27:32 X6 to be zero
-	 * Bits 32:35 X3 to be zero
-	 */
-	if (major_opcode || ((kprobe_inst >> 27) & 0x1FF) ) {
-		/* Not a break instruction */
-		return 0;
-	}
-
-	/* Is a break instruction */
-	return 1;
+	return __is_ia64_break_inst(&bundle, slot);
 }
 
 static int __kprobes pre_kprobes_handler(struct die_args *args)
@@ -802,6 +870,19 @@
 		return 1;
 
 ss_probe:
+#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM)
+	if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) {
+		/* Boost up -- we can execute copied instructions directly */
+		ia64_psr(regs)->ri = p->ainsn.slot;
+		regs->cr_iip = (unsigned long)&p->ainsn.insn->bundle & ~0xFULL;
+		/* turn single stepping off */
+		ia64_psr(regs)->ss = 0;
+
+		reset_current_kprobe();
+		preempt_enable_no_resched();
+		return 1;
+	}
+#endif
 	prepare_ss(p, regs);
 	kcb->kprobe_status = KPROBE_HIT_SS;
 	return 1;
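
The booster works because set_brl_inst() encodes an IP-relative displacement in 16-byte bundle units: the byte distance is shifted right by 4 on encode, and execution resumes at bundle_address + (rel << 4), i.e. the bundle following the copied instruction. A small C sketch of that round trip (illustrative only; it ignores the slot-field packing shown in the diff):

	static unsigned long brl_round_trip(unsigned long from, unsigned long to)
	{
		unsigned long bundle = from & ~0xfUL;		/* brl occupies a whole bundle */
		long rel = ((long)to - (long)bundle) >> 4;	/* encode: bundle units */

		return bundle + ((unsigned long)rel << 4);	/* decode: == to & ~0xf */
	}
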
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 6c18221..e51bced 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -69,6 +69,7 @@
  * 2007-04-27 Russ Anderson <rja@sgi.com>
  *	      Support multiple cpus going through OS_MCA in the same event.
  */
+#include <linux/jiffies.h>
 #include <linux/types.h>
 #include <linux/init.h>
 #include <linux/sched.h>
@@ -97,6 +98,7 @@
 
 #include <asm/irq.h>
 #include <asm/hw_irq.h>
+#include <asm/tlb.h>
 
 #include "mca_drv.h"
 #include "entry.h"
@@ -112,6 +114,7 @@
 DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
 DEFINE_PER_CPU(u64, ia64_mca_pal_pte);	    /* PTE to map PAL code */
 DEFINE_PER_CPU(u64, ia64_mca_pal_base);    /* vaddr PAL code granule */
+DEFINE_PER_CPU(u64, ia64_mca_tr_reload);   /* Flag for TR reload */
 
 unsigned long __per_cpu_mca[NR_CPUS];
 
@@ -293,7 +296,8 @@
 	if (mlogbuf_finished)
 		return;
 
-	if (mlogbuf_timestamp && (mlogbuf_timestamp + 30*HZ > jiffies)) {
+	if (mlogbuf_timestamp &&
+			time_before(jiffies, mlogbuf_timestamp + 30 * HZ)) {
 		printk(KERN_ERR "INIT: mlogbuf_dump is interrupted by INIT "
 			" and the system seems to be messed up.\n");
 		ia64_mlogbuf_finish(0);
@@ -1182,6 +1186,49 @@
 	return;
 }
 
+/*
+ * mca_insert_tr
+ *
+ * Re-insert the dynamic translation registers after an MCA, switching
+ * the region id (rid) around the insert when it differs from the
+ * current one.
+ * iord: 1: itr, 2: dtr
+ */
+static void mca_insert_tr(u64 iord)
+{
+	int i;
+	u64 old_rr;
+	struct ia64_tr_entry *p;
+	unsigned long psr;
+	int cpu = smp_processor_id();
+
+	psr = ia64_clear_ic();
+	for (i = IA64_TR_ALLOC_BASE; i < IA64_TR_ALLOC_MAX; i++) {
+		p = &__per_cpu_idtrs[cpu][iord-1][i];
+		if (p->pte & 0x1) {
+			old_rr = ia64_get_rr(p->ifa);
+			if (old_rr != p->rr) {
+				ia64_set_rr(p->ifa, p->rr);
+				ia64_srlz_d();
+			}
+			ia64_ptr(iord, p->ifa, p->itir >> 2);
+			ia64_srlz_i();
+			if (iord & 0x1) {
+				ia64_itr(0x1, i, p->ifa, p->pte, p->itir >> 2);
+				ia64_srlz_i();
+			}
+			if (iord & 0x2) {
+				ia64_itr(0x2, i, p->ifa, p->pte, p->itir >> 2);
+				ia64_srlz_i();
+			}
+			if (old_rr != p->rr) {
+				ia64_set_rr(p->ifa, old_rr);
+				ia64_srlz_d();
+			}
+		}
+	}
+	ia64_set_psr(psr);
+}
+
 /*
  * ia64_mca_handler
  *
@@ -1266,16 +1313,17 @@
 	} else {
 		/* Dump buffered message to console */
 		ia64_mlogbuf_finish(1);
-#ifdef CONFIG_KEXEC
-		atomic_set(&kdump_in_progress, 1);
-		monarch_cpu = -1;
-#endif
 	}
+
+	if (__get_cpu_var(ia64_mca_tr_reload)) {
+		mca_insert_tr(0x1); /* Reload dynamic itrs */
+		mca_insert_tr(0x2); /* Reload dynamic dtrs */
+	}
+
 	if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover)
 			== NOTIFY_STOP)
 		ia64_mca_spin(__func__);
 
-
 	if (atomic_dec_return(&mca_count) > 0) {
 		int i;
 
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index 8bc7d25..a06d465 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -219,8 +219,13 @@
 	mov r20=IA64_TR_CURRENT_STACK
 	;;
 	itr.d dtr[r20]=r16
+	GET_THIS_PADDR(r2, ia64_mca_tr_reload)
+	mov r18 = 1
 	;;
 	srlz.d
+	;;
+	st8 [r2]=r18
+	;;
 
 done_tlb_purge_and_reload:
 
diff --git a/arch/ia64/kernel/minstate.h b/arch/ia64/kernel/minstate.h
index c9ac8ba..7c548ac 100644
--- a/arch/ia64/kernel/minstate.h
+++ b/arch/ia64/kernel/minstate.h
@@ -3,6 +3,18 @@
 
 #include "entry.h"
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+/* read ar.itc in advance, and use it before leaving bank 0 */
+#define ACCOUNT_GET_STAMP				\
+(pUStk) mov.m r20=ar.itc;
+#define ACCOUNT_SYS_ENTER				\
+(pUStk) br.call.spnt rp=account_sys_enter		\
+	;;
+#else
+#define ACCOUNT_GET_STAMP
+#define ACCOUNT_SYS_ENTER
+#endif
+
 /*
  * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
  * the minimum state necessary that allows us to turn psr.ic back
@@ -122,11 +134,13 @@
 	;;											\
 .mem.offset 0,0; st8.spill [r16]=r2,16;								\
 .mem.offset 8,0; st8.spill [r17]=r3,16;								\
+	ACCOUNT_GET_STAMP									\
 	adds r2=IA64_PT_REGS_R16_OFFSET,r1;							\
 	;;											\
 	EXTRA;											\
 	movl r1=__gp;		/* establish kernel global pointer */				\
 	;;											\
+	ACCOUNT_SYS_ENTER									\
 	bsw.1;			/* switch back to bank 1 (must be last in insn group) */	\
 	;;
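
Defining ACCOUNT_GET_STAMP/ACCOUNT_SYS_ENTER as empty when CONFIG_VIRT_CPU_ACCOUNTING is off keeps DO_SAVE_MIN itself free of #ifdefs. The same conditional-hook pattern in miniature (CONFIG_FEATURE_X and feature_hook() are illustrative names, not kernel symbols):

	#ifdef CONFIG_FEATURE_X
	#define FEATURE_HOOK()	feature_hook()		/* real work when the option is on */
	#else
	#define FEATURE_HOOK()	do { } while (0)	/* compiles away entirely */
	#endif
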
 
diff --git a/arch/ia64/kernel/numa.c b/arch/ia64/kernel/numa.c
index a78b45f..c93420c 100644
--- a/arch/ia64/kernel/numa.c
+++ b/arch/ia64/kernel/numa.c
@@ -73,7 +73,7 @@
 	for(node=0; node < MAX_NUMNODES; node++)
 		cpus_clear(node_to_cpu_mask[node]);
 
-	for(cpu = 0; cpu < NR_CPUS; ++cpu) {
+	for_each_possible_early_cpu(cpu) {
 		node = -1;
 		for (i = 0; i < NR_CPUS; ++i)
 			if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) {
diff --git a/arch/ia64/kernel/patch.c b/arch/ia64/kernel/patch.c
index 2cb9425..e0dca87 100644
--- a/arch/ia64/kernel/patch.c
+++ b/arch/ia64/kernel/patch.c
@@ -135,10 +135,10 @@
 
 	while (offp < (s32 *) end) {
 		wp = (u64 *) ia64_imva((char *) offp + *offp);
-		wp[0] = 0x0000000100000000UL; /* nop.m 0; nop.i 0; nop.i 0 */
-		wp[1] = 0x0004000000000200UL;
-		wp[2] = 0x0000000100000011UL; /* nop.m 0; nop.i 0; br.ret.sptk.many b6 */
-		wp[3] = 0x0084006880000200UL;
+		wp[0] = 0x0000000100000011UL; /* nop.m 0; nop.i 0; br.ret.sptk.many b6 */
+		wp[1] = 0x0084006880000200UL;
+		wp[2] = 0x0000000100000000UL; /* nop.m 0; nop.i 0; nop.i 0 */
+		wp[3] = 0x0004000000000200UL;
 		ia64_fc(wp); ia64_fc(wp + 2);
 		++offp;
 	}
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index a2aabfd..d1d24f4 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -4204,10 +4204,10 @@
 	do_each_thread (g, t) {
 		if (t->thread.pfm_context == ctx) {
 			ret = 0;
-			break;
+			goto out;
 		}
 	} while_each_thread (g, t);
-
+out:
 	read_unlock(&tasklist_lock);
 
 	DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx));
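
The break-to-goto change is needed because do_each_thread()/while_each_thread() expand to a nested loop: a bare break only leaves the inner do/while, and the scan silently resumes with the next thread group. A reduced stand-alone illustration of the control flow (not the real macro expansion):

	#include <stdio.h>

	int main(void)
	{
		int hit = -1;

		for (int g = 0; g < 4; g++)		/* thread groups */
			for (int t = 0; t < 4; t++)	/* threads within a group */
				if (g == 1 && t == 2) {
					hit = g * 4 + t;
					goto out;	/* "break" would only leave the inner loop */
				}
	out:
		printf("%d\n", hit);	/* prints 6: the walk stopped at the match */
		return 0;
	}
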
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 49937a3..a5ea817 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -625,21 +625,6 @@
 	do_dump_task_fpu(current, info, arg);
 }
 
-int
-dump_task_regs(struct task_struct *task, elf_gregset_t *regs)
-{
-	struct unw_frame_info tcore_info;
-
-	if (current == task) {
-		unw_init_running(do_copy_regs, regs);
-	} else {
-		memset(&tcore_info, 0, sizeof(tcore_info));
-		unw_init_from_blocked_task(&tcore_info, task);
-		do_copy_task_regs(task, &tcore_info, regs);
-	}
-	return 1;
-}
-
 void
 ia64_elf_core_copy_regs (struct pt_regs *pt, elf_gregset_t dst)
 {
@@ -647,21 +632,6 @@
 }
 
 int
-dump_task_fpu (struct task_struct *task, elf_fpregset_t *dst)
-{
-	struct unw_frame_info tcore_info;
-
-	if (current == task) {
-		unw_init_running(do_dump_fpu, dst);
-	} else {
-		memset(&tcore_info, 0, sizeof(tcore_info));
-		unw_init_from_blocked_task(&tcore_info, task);
-		do_dump_task_fpu(task, &tcore_info, dst);
-	}
-	return 1;
-}
-
-int
 dump_fpu (struct pt_regs *pt, elf_fpregset_t dst)
 {
 	unw_init_running(do_dump_fpu, dst);
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index ab784ec..2a9943b 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -3,6 +3,9 @@
  *
  * Copyright (C) 1999-2005 Hewlett-Packard Co
  *	David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 2006 Intel Co
+ *  2006-08-12	- IA64 Native Utrace implementation support added by
+ *	Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
  *
  * Derived from the x86 and Alpha versions.
  */
@@ -17,6 +20,8 @@
 #include <linux/security.h>
 #include <linux/audit.h>
 #include <linux/signal.h>
+#include <linux/regset.h>
+#include <linux/elf.h>
 
 #include <asm/pgtable.h>
 #include <asm/processor.h>
@@ -740,25 +745,6 @@
 	psr->dfh = 1;
 }
 
-static int
-access_fr (struct unw_frame_info *info, int regnum, int hi,
-	   unsigned long *data, int write_access)
-{
-	struct ia64_fpreg fpval;
-	int ret;
-
-	ret = unw_get_fr(info, regnum, &fpval);
-	if (ret < 0)
-		return ret;
-
-	if (write_access) {
-		fpval.u.bits[hi] = *data;
-		ret = unw_set_fr(info, regnum, fpval);
-	} else
-		*data = fpval.u.bits[hi];
-	return ret;
-}
-
 /*
  * Change the machine-state of CHILD such that it will return via the normal
  * kernel exit-path, rather than the syscall-exit path.
@@ -860,309 +846,7 @@
 
 static int
 access_uarea (struct task_struct *child, unsigned long addr,
-	      unsigned long *data, int write_access)
-{
-	unsigned long *ptr, regnum, urbs_end, cfm;
-	struct switch_stack *sw;
-	struct pt_regs *pt;
-#	define pt_reg_addr(pt, reg)	((void *)			    \
-					 ((unsigned long) (pt)		    \
-					  + offsetof(struct pt_regs, reg)))
-
-
-	pt = task_pt_regs(child);
-	sw = (struct switch_stack *) (child->thread.ksp + 16);
-
-	if ((addr & 0x7) != 0) {
-		dprintk("ptrace: unaligned register address 0x%lx\n", addr);
-		return -1;
-	}
-
-	if (addr < PT_F127 + 16) {
-		/* accessing fph */
-		if (write_access)
-			ia64_sync_fph(child);
-		else
-			ia64_flush_fph(child);
-		ptr = (unsigned long *)
-			((unsigned long) &child->thread.fph + addr);
-	} else if ((addr >= PT_F10) && (addr < PT_F11 + 16)) {
-		/* scratch registers untouched by kernel (saved in pt_regs) */
-		ptr = pt_reg_addr(pt, f10) + (addr - PT_F10);
-	} else if (addr >= PT_F12 && addr < PT_F15 + 16) {
-		/*
-		 * Scratch registers untouched by kernel (saved in
-		 * switch_stack).
-		 */
-		ptr = (unsigned long *) ((long) sw
-					 + (addr - PT_NAT_BITS - 32));
-	} else if (addr < PT_AR_LC + 8) {
-		/* preserved state: */
-		struct unw_frame_info info;
-		char nat = 0;
-		int ret;
-
-		unw_init_from_blocked_task(&info, child);
-		if (unw_unwind_to_user(&info) < 0)
-			return -1;
-
-		switch (addr) {
-		      case PT_NAT_BITS:
-			return access_nat_bits(child, pt, &info,
-					       data, write_access);
-
-		      case PT_R4: case PT_R5: case PT_R6: case PT_R7:
-			if (write_access) {
-				/* read NaT bit first: */
-				unsigned long dummy;
-
-				ret = unw_get_gr(&info, (addr - PT_R4)/8 + 4,
-						 &dummy, &nat);
-				if (ret < 0)
-					return ret;
-			}
-			return unw_access_gr(&info, (addr - PT_R4)/8 + 4, data,
-					     &nat, write_access);
-
-		      case PT_B1: case PT_B2: case PT_B3:
-		      case PT_B4: case PT_B5:
-			return unw_access_br(&info, (addr - PT_B1)/8 + 1, data,
-					     write_access);
-
-		      case PT_AR_EC:
-			return unw_access_ar(&info, UNW_AR_EC, data,
-					     write_access);
-
-		      case PT_AR_LC:
-			return unw_access_ar(&info, UNW_AR_LC, data,
-					     write_access);
-
-		      default:
-			if (addr >= PT_F2 && addr < PT_F5 + 16)
-				return access_fr(&info, (addr - PT_F2)/16 + 2,
-						 (addr & 8) != 0, data,
-						 write_access);
-			else if (addr >= PT_F16 && addr < PT_F31 + 16)
-				return access_fr(&info,
-						 (addr - PT_F16)/16 + 16,
-						 (addr & 8) != 0,
-						 data, write_access);
-			else {
-				dprintk("ptrace: rejecting access to register "
-					"address 0x%lx\n", addr);
-				return -1;
-			}
-		}
-	} else if (addr < PT_F9+16) {
-		/* scratch state */
-		switch (addr) {
-		      case PT_AR_BSP:
-			/*
-			 * By convention, we use PT_AR_BSP to refer to
-			 * the end of the user-level backing store.
-			 * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof)
-			 * to get the real value of ar.bsp at the time
-			 * the kernel was entered.
-			 *
-			 * Furthermore, when changing the contents of
-			 * PT_AR_BSP (or PT_CFM) while the task is
-			 * blocked in a system call, convert the state
-			 * so that the non-system-call exit
-			 * path is used.  This ensures that the proper
-			 * state will be picked up when resuming
-			 * execution.  However, it *also* means that
-			 * once we write PT_AR_BSP/PT_CFM, it won't be
-			 * possible to modify the syscall arguments of
-			 * the pending system call any longer.  This
-			 * shouldn't be an issue because modifying
-			 * PT_AR_BSP/PT_CFM generally implies that
-			 * we're either abandoning the pending system
-			 * call or that we defer it's re-execution
-			 * (e.g., due to GDB doing an inferior
-			 * function call).
-			 */
-			urbs_end = ia64_get_user_rbs_end(child, pt, &cfm);
-			if (write_access) {
-				if (*data != urbs_end) {
-					if (in_syscall(pt))
-						convert_to_non_syscall(child,
-								       pt,
-								       cfm);
-					/*
-					 * Simulate user-level write
-					 * of ar.bsp:
-					 */
-					pt->loadrs = 0;
-					pt->ar_bspstore = *data;
-				}
-			} else
-				*data = urbs_end;
-			return 0;
-
-		      case PT_CFM:
-			urbs_end = ia64_get_user_rbs_end(child, pt, &cfm);
-			if (write_access) {
-				if (((cfm ^ *data) & PFM_MASK) != 0) {
-					if (in_syscall(pt))
-						convert_to_non_syscall(child,
-								       pt,
-								       cfm);
-					pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK)
-						      | (*data & PFM_MASK));
-				}
-			} else
-				*data = cfm;
-			return 0;
-
-		      case PT_CR_IPSR:
-			if (write_access) {
-				unsigned long tmp = *data;
-				/* psr.ri==3 is a reserved value: SDM 2:25 */
-				if ((tmp & IA64_PSR_RI) == IA64_PSR_RI)
-					tmp &= ~IA64_PSR_RI;
-				pt->cr_ipsr = ((tmp & IPSR_MASK)
-					       | (pt->cr_ipsr & ~IPSR_MASK));
-			} else
-				*data = (pt->cr_ipsr & IPSR_MASK);
-			return 0;
-
-		      case PT_AR_RSC:
-			if (write_access)
-				pt->ar_rsc = *data | (3 << 2); /* force PL3 */
-			else
-				*data = pt->ar_rsc;
-			return 0;
-
-		      case PT_AR_RNAT:
-			ptr = pt_reg_addr(pt, ar_rnat);
-			break;
-		      case PT_R1:
-			ptr = pt_reg_addr(pt, r1);
-			break;
-		      case PT_R2:  case PT_R3:
-			ptr = pt_reg_addr(pt, r2) + (addr - PT_R2);
-			break;
-		      case PT_R8:  case PT_R9:  case PT_R10: case PT_R11:
-			ptr = pt_reg_addr(pt, r8) + (addr - PT_R8);
-			break;
-		      case PT_R12: case PT_R13:
-			ptr = pt_reg_addr(pt, r12) + (addr - PT_R12);
-			break;
-		      case PT_R14:
-			ptr = pt_reg_addr(pt, r14);
-			break;
-		      case PT_R15:
-			ptr = pt_reg_addr(pt, r15);
-			break;
-		      case PT_R16: case PT_R17: case PT_R18: case PT_R19:
-		      case PT_R20: case PT_R21: case PT_R22: case PT_R23:
-		      case PT_R24: case PT_R25: case PT_R26: case PT_R27:
-		      case PT_R28: case PT_R29: case PT_R30: case PT_R31:
-			ptr = pt_reg_addr(pt, r16) + (addr - PT_R16);
-			break;
-		      case PT_B0:
-			ptr = pt_reg_addr(pt, b0);
-			break;
-		      case PT_B6:
-			ptr = pt_reg_addr(pt, b6);
-			break;
-		      case PT_B7:
-			ptr = pt_reg_addr(pt, b7);
-			break;
-		      case PT_F6:  case PT_F6+8: case PT_F7: case PT_F7+8:
-		      case PT_F8:  case PT_F8+8: case PT_F9: case PT_F9+8:
-			ptr = pt_reg_addr(pt, f6) + (addr - PT_F6);
-			break;
-		      case PT_AR_BSPSTORE:
-			ptr = pt_reg_addr(pt, ar_bspstore);
-			break;
-		      case PT_AR_UNAT:
-			ptr = pt_reg_addr(pt, ar_unat);
-			break;
-		      case PT_AR_PFS:
-			ptr = pt_reg_addr(pt, ar_pfs);
-			break;
-		      case PT_AR_CCV:
-			ptr = pt_reg_addr(pt, ar_ccv);
-			break;
-		      case PT_AR_FPSR:
-			ptr = pt_reg_addr(pt, ar_fpsr);
-			break;
-		      case PT_CR_IIP:
-			ptr = pt_reg_addr(pt, cr_iip);
-			break;
-		      case PT_PR:
-			ptr = pt_reg_addr(pt, pr);
-			break;
-			/* scratch register */
-
-		      default:
-			/* disallow accessing anything else... */
-			dprintk("ptrace: rejecting access to register "
-				"address 0x%lx\n", addr);
-			return -1;
-		}
-	} else if (addr <= PT_AR_SSD) {
-		ptr = pt_reg_addr(pt, ar_csd) + (addr - PT_AR_CSD);
-	} else {
-		/* access debug registers */
-
-		if (addr >= PT_IBR) {
-			regnum = (addr - PT_IBR) >> 3;
-			ptr = &child->thread.ibr[0];
-		} else {
-			regnum = (addr - PT_DBR) >> 3;
-			ptr = &child->thread.dbr[0];
-		}
-
-		if (regnum >= 8) {
-			dprintk("ptrace: rejecting access to register "
-				"address 0x%lx\n", addr);
-			return -1;
-		}
-#ifdef CONFIG_PERFMON
-		/*
-		 * Check if debug registers are used by perfmon. This
-		 * test must be done once we know that we can do the
-		 * operation, i.e. the arguments are all valid, but
-		 * before we start modifying the state.
-		 *
-		 * Perfmon needs to keep a count of how many processes
-		 * are trying to modify the debug registers for system
-		 * wide monitoring sessions.
-		 *
-		 * We also include read access here, because they may
-		 * cause the PMU-installed debug register state
-		 * (dbr[], ibr[]) to be reset. The two arrays are also
-		 * used by perfmon, but we do not use
-		 * IA64_THREAD_DBG_VALID. The registers are restored
-		 * by the PMU context switch code.
-		 */
-		if (pfm_use_debug_registers(child)) return -1;
-#endif
-
-		if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
-			child->thread.flags |= IA64_THREAD_DBG_VALID;
-			memset(child->thread.dbr, 0,
-			       sizeof(child->thread.dbr));
-			memset(child->thread.ibr, 0,
-			       sizeof(child->thread.ibr));
-		}
-
-		ptr += regnum;
-
-		if ((regnum & 1) && write_access) {
-			/* don't let the user set kernel-level breakpoints: */
-			*ptr = *data & ~(7UL << 56);
-			return 0;
-		}
-	}
-	if (write_access)
-		*ptr = *data;
-	else
-		*data = *ptr;
-	return 0;
-}
+	      unsigned long *data, int write_access);
 
 static long
 ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
@@ -1626,3 +1310,892 @@
 	if (test_thread_flag(TIF_RESTORE_RSE))
 		ia64_sync_krbs();
 }
+
+/* Utrace implementation starts here */
+struct regset_get {
+	void *kbuf;
+	void __user *ubuf;
+};
+
+struct regset_set {
+	const void *kbuf;
+	const void __user *ubuf;
+};
+
+struct regset_getset {
+	struct task_struct *target;
+	const struct user_regset *regset;
+	union {
+		struct regset_get get;
+		struct regset_set set;
+	} u;
+	unsigned int pos;
+	unsigned int count;
+	int ret;
+};
+
+static int
+access_elf_gpreg(struct task_struct *target, struct unw_frame_info *info,
+		unsigned long addr, unsigned long *data, int write_access)
+{
+	struct pt_regs *pt;
+	unsigned long *ptr = NULL;
+	int ret;
+	char nat = 0;
+
+	pt = task_pt_regs(target);
+	switch (addr) {
+	case ELF_GR_OFFSET(1):
+		ptr = &pt->r1;
+		break;
+	case ELF_GR_OFFSET(2):
+	case ELF_GR_OFFSET(3):
+		ptr = (void *)&pt->r2 + (addr - ELF_GR_OFFSET(2));
+		break;
+	case ELF_GR_OFFSET(4) ... ELF_GR_OFFSET(7):
+		if (write_access) {
+			/* read NaT bit first: */
+			unsigned long dummy;
+
+			ret = unw_get_gr(info, addr/8, &dummy, &nat);
+			if (ret < 0)
+				return ret;
+		}
+		return unw_access_gr(info, addr/8, data, &nat, write_access);
+	case ELF_GR_OFFSET(8) ... ELF_GR_OFFSET(11):
+		ptr = (void *)&pt->r8 + addr - ELF_GR_OFFSET(8);
+		break;
+	case ELF_GR_OFFSET(12):
+	case ELF_GR_OFFSET(13):
+		ptr = (void *)&pt->r12 + addr - ELF_GR_OFFSET(12);
+		break;
+	case ELF_GR_OFFSET(14):
+		ptr = &pt->r14;
+		break;
+	case ELF_GR_OFFSET(15):
+		ptr = &pt->r15;
+	}
+	if (write_access)
+		*ptr = *data;
+	else
+		*data = *ptr;
+	return 0;
+}
+
+static int
+access_elf_breg(struct task_struct *target, struct unw_frame_info *info,
+		unsigned long addr, unsigned long *data, int write_access)
+{
+	struct pt_regs *pt;
+	unsigned long *ptr = NULL;
+
+	pt = task_pt_regs(target);
+	switch (addr) {
+	case ELF_BR_OFFSET(0):
+		ptr = &pt->b0;
+		break;
+	case ELF_BR_OFFSET(1) ... ELF_BR_OFFSET(5):
+		return unw_access_br(info, (addr - ELF_BR_OFFSET(0))/8,
+				     data, write_access);
+	case ELF_BR_OFFSET(6):
+		ptr = &pt->b6;
+		break;
+	case ELF_BR_OFFSET(7):
+		ptr = &pt->b7;
+	}
+	if (write_access)
+		*ptr = *data;
+	else
+		*data = *ptr;
+	return 0;
+}
+
+static int
+access_elf_areg(struct task_struct *target, struct unw_frame_info *info,
+		unsigned long addr, unsigned long *data, int write_access)
+{
+	struct pt_regs *pt;
+	unsigned long cfm, urbs_end;
+	unsigned long *ptr = NULL;
+
+	pt = task_pt_regs(target);
+	if (addr >= ELF_AR_RSC_OFFSET && addr <= ELF_AR_SSD_OFFSET) {
+		switch (addr) {
+		case ELF_AR_RSC_OFFSET:
+			/* force PL3 */
+			if (write_access)
+				pt->ar_rsc = *data | (3 << 2);
+			else
+				*data = pt->ar_rsc;
+			return 0;
+		case ELF_AR_BSP_OFFSET:
+			/*
+			 * By convention, we use PT_AR_BSP to refer to
+			 * the end of the user-level backing store.
+			 * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof)
+			 * to get the real value of ar.bsp at the time
+			 * the kernel was entered.
+			 *
+			 * Furthermore, when changing the contents of
+			 * PT_AR_BSP (or PT_CFM) while the task is
+			 * blocked in a system call, convert the state
+			 * so that the non-system-call exit
+			 * path is used.  This ensures that the proper
+			 * state will be picked up when resuming
+			 * execution.  However, it *also* means that
+			 * once we write PT_AR_BSP/PT_CFM, it won't be
+			 * possible to modify the syscall arguments of
+			 * the pending system call any longer.  This
+			 * shouldn't be an issue because modifying
+			 * PT_AR_BSP/PT_CFM generally implies that
+			 * we're either abandoning the pending system
+			 * call or that we defer its re-execution
+			 * (e.g., due to GDB doing an inferior
+			 * function call).
+			 */
+			urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
+			if (write_access) {
+				if (*data != urbs_end) {
+					if (in_syscall(pt))
+						convert_to_non_syscall(target,
+								       pt,
+								       cfm);
+					/*
+					 * Simulate user-level write
+					 * of ar.bsp:
+					 */
+					pt->loadrs = 0;
+					pt->ar_bspstore = *data;
+				}
+			} else
+				*data = urbs_end;
+			return 0;
+		case ELF_AR_BSPSTORE_OFFSET:
+			ptr = &pt->ar_bspstore;
+			break;
+		case ELF_AR_RNAT_OFFSET:
+			ptr = &pt->ar_rnat;
+			break;
+		case ELF_AR_CCV_OFFSET:
+			ptr = &pt->ar_ccv;
+			break;
+		case ELF_AR_UNAT_OFFSET:
+			ptr = &pt->ar_unat;
+			break;
+		case ELF_AR_FPSR_OFFSET:
+			ptr = &pt->ar_fpsr;
+			break;
+		case ELF_AR_PFS_OFFSET:
+			ptr = &pt->ar_pfs;
+			break;
+		case ELF_AR_LC_OFFSET:
+			return unw_access_ar(info, UNW_AR_LC, data,
+					     write_access);
+		case ELF_AR_EC_OFFSET:
+			return unw_access_ar(info, UNW_AR_EC, data,
+					     write_access);
+		case ELF_AR_CSD_OFFSET:
+			ptr = &pt->ar_csd;
+			break;
+		case ELF_AR_SSD_OFFSET:
+			ptr = &pt->ar_ssd;
+		}
+	} else if (addr >= ELF_CR_IIP_OFFSET && addr <= ELF_CR_IPSR_OFFSET) {
+		switch (addr) {
+		case ELF_CR_IIP_OFFSET:
+			ptr = &pt->cr_iip;
+			break;
+		case ELF_CFM_OFFSET:
+			urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
+			if (write_access) {
+				if (((cfm ^ *data) & PFM_MASK) != 0) {
+					if (in_syscall(pt))
+						convert_to_non_syscall(target,
+								       pt,
+								       cfm);
+					pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK)
+						      | (*data & PFM_MASK));
+				}
+			} else
+				*data = cfm;
+			return 0;
+		case ELF_CR_IPSR_OFFSET:
+			if (write_access) {
+				unsigned long tmp = *data;
+				/* psr.ri==3 is a reserved value: SDM 2:25 */
+				if ((tmp & IA64_PSR_RI) == IA64_PSR_RI)
+					tmp &= ~IA64_PSR_RI;
+				pt->cr_ipsr = ((tmp & IPSR_MASK)
+					       | (pt->cr_ipsr & ~IPSR_MASK));
+			} else
+				*data = (pt->cr_ipsr & IPSR_MASK);
+			return 0;
+		}
+	} else if (addr == ELF_NAT_OFFSET)
+		return access_nat_bits(target, pt, info,
+				       data, write_access);
+	else if (addr == ELF_PR_OFFSET)
+		ptr = &pt->pr;
+	else
+		return -1;
+
+	if (write_access)
+		*ptr = *data;
+	else
+		*data = *ptr;
+
+	return 0;
+}
+
+static int
+access_elf_reg(struct task_struct *target, struct unw_frame_info *info,
+		unsigned long addr, unsigned long *data, int write_access)
+{
+	if (addr >= ELF_GR_OFFSET(1) && addr <= ELF_GR_OFFSET(15))
+		return access_elf_gpreg(target, info, addr, data, write_access);
+	else if (addr >= ELF_BR_OFFSET(0) && addr <= ELF_BR_OFFSET(7))
+		return access_elf_breg(target, info, addr, data, write_access);
+	else
+		return access_elf_areg(target, info, addr, data, write_access);
+}
+
+void do_gpregs_get(struct unw_frame_info *info, void *arg)
+{
+	struct pt_regs *pt;
+	struct regset_getset *dst = arg;
+	elf_greg_t tmp[16];
+	unsigned int i, index, min_copy;
+
+	if (unw_unwind_to_user(info) < 0)
+		return;
+
+	/*
+	 * coredump format:
+	 *      r0-r31
+	 *      NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
+	 *      predicate registers (p0-p63)
+	 *      b0-b7
+	 *      ip cfm user-mask
+	 *      ar.rsc ar.bsp ar.bspstore ar.rnat
+	 *      ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
+	 */
+
+
+	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
+		dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
+						      &dst->u.get.kbuf,
+						      &dst->u.get.ubuf,
+						      0, ELF_GR_OFFSET(1));
+		if (dst->ret || dst->count == 0)
+			return;
+	}
+
+	/* gr1 - gr15 */
+	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
+		index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
+		min_copy = ELF_GR_OFFSET(16) > (dst->pos + dst->count) ?
+			 (dst->pos + dst->count) : ELF_GR_OFFSET(16);
+		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
+				index++)
+			if (access_elf_reg(dst->target, info, i,
+						&tmp[index], 0) < 0) {
+				dst->ret = -EIO;
+				return;
+			}
+		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
+				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
+				ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
+		if (dst->ret || dst->count == 0)
+			return;
+	}
+
+	/* r16-r31 */
+	if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
+		pt = task_pt_regs(dst->target);
+		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
+				&dst->u.get.kbuf, &dst->u.get.ubuf, &pt->r16,
+				ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
+		if (dst->ret || dst->count == 0)
+			return;
+	}
+
+	/* nat, pr, b0 - b7 */
+	if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
+		index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
+		min_copy = ELF_CR_IIP_OFFSET > (dst->pos + dst->count) ?
+			 (dst->pos + dst->count) : ELF_CR_IIP_OFFSET;
+		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
+				index++)
+			if (access_elf_reg(dst->target, info, i,
+						&tmp[index], 0) < 0) {
+				dst->ret = -EIO;
+				return;
+			}
+		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
+				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
+				ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
+		if (dst->ret || dst->count == 0)
+			return;
+	}
+
+	/* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
+	 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
+	 */
+	if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
+		index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
+		min_copy = ELF_AR_END_OFFSET > (dst->pos + dst->count) ?
+			 (dst->pos + dst->count) : ELF_AR_END_OFFSET;
+		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
+				index++)
+			if (access_elf_reg(dst->target, info, i,
+						&tmp[index], 0) < 0) {
+				dst->ret = -EIO;
+				return;
+			}
+		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
+				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
+				ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
+	}
+}
+
+void do_gpregs_set(struct unw_frame_info *info, void *arg)
+{
+	struct pt_regs *pt;
+	struct regset_getset *dst = arg;
+	elf_greg_t tmp[16];
+	unsigned int i, index;
+
+	if (unw_unwind_to_user(info) < 0)
+		return;
+
+	/* Skip r0 */
+	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
+		dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
+						       &dst->u.set.kbuf,
+						       &dst->u.set.ubuf,
+						       0, ELF_GR_OFFSET(1));
+		if (dst->ret || dst->count == 0)
+			return;
+	}
+
+	/* gr1-gr15 */
+	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
+		i = dst->pos;
+		index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
+		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
+				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
+				ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
+		if (dst->ret)
+			return;
+		for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
+			if (access_elf_reg(dst->target, info, i,
+						&tmp[index], 1) < 0) {
+				dst->ret = -EIO;
+				return;
+			}
+		if (dst->count == 0)
+			return;
+	}
+
+	/* gr16-gr31 */
+	if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
+		pt = task_pt_regs(dst->target);
+		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
+				&dst->u.set.kbuf, &dst->u.set.ubuf, &pt->r16,
+				ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
+		if (dst->ret || dst->count == 0)
+			return;
+	}
+
+	/* nat, pr, b0 - b7 */
+	if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
+		i = dst->pos;
+		index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
+		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
+				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
+				ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
+		if (dst->ret)
+			return;
+		for (; i < dst->pos; i += sizeof(elf_greg_t), index++)
+			if (access_elf_reg(dst->target, info, i,
+						&tmp[index], 1) < 0) {
+				dst->ret = -EIO;
+				return;
+			}
+		if (dst->count == 0)
+			return;
+	}
+
+	/* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
+	 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
+	 */
+	if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
+		i = dst->pos;
+		index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
+		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
+				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
+				ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
+		if (dst->ret)
+			return;
+		for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
+			if (access_elf_reg(dst->target, info, i,
+						&tmp[index], 1) < 0) {
+				dst->ret = -EIO;
+				return;
+			}
+	}
+}
+
+#define ELF_FP_OFFSET(i)	((i) * sizeof(elf_fpreg_t))
+
+void do_fpregs_get(struct unw_frame_info *info, void *arg)
+{
+	struct regset_getset *dst = arg;
+	struct task_struct *task = dst->target;
+	elf_fpreg_t tmp[30];
+	int index, min_copy, i;
+
+	if (unw_unwind_to_user(info) < 0)
+		return;
+
+	/* Skip pos 0 and 1 */
+	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
+		dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
+						      &dst->u.get.kbuf,
+						      &dst->u.get.ubuf,
+						      0, ELF_FP_OFFSET(2));
+		if (dst->count == 0 || dst->ret)
+			return;
+	}
+
+	/* fr2-fr31 */
+	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
+		index = (dst->pos - ELF_FP_OFFSET(2)) / sizeof(elf_fpreg_t);
+
+		min_copy = min(((unsigned int)ELF_FP_OFFSET(32)),
+				dst->pos + dst->count);
+		for (i = dst->pos; i < min_copy; i += sizeof(elf_fpreg_t),
+				index++)
+			if (unw_get_fr(info, i / sizeof(elf_fpreg_t),
+					 &tmp[index])) {
+				dst->ret = -EIO;
+				return;
+			}
+		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
+				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
+				ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
+		if (dst->count == 0 || dst->ret)
+			return;
+	}
+
+	/* fph */
+	if (dst->count > 0) {
+		ia64_flush_fph(dst->target);
+		if (task->thread.flags & IA64_THREAD_FPH_VALID)
+			dst->ret = user_regset_copyout(
+				&dst->pos, &dst->count,
+				&dst->u.get.kbuf, &dst->u.get.ubuf,
+				&dst->target->thread.fph,
+				ELF_FP_OFFSET(32), -1);
+		else
+			/* Zero fill instead.  */
+			dst->ret = user_regset_copyout_zero(
+				&dst->pos, &dst->count,
+				&dst->u.get.kbuf, &dst->u.get.ubuf,
+				ELF_FP_OFFSET(32), -1);
+	}
+}
+
+void do_fpregs_set(struct unw_frame_info *info, void *arg)
+{
+	struct regset_getset *dst = arg;
+	elf_fpreg_t fpreg, tmp[30];
+	int index, start, end;
+
+	if (unw_unwind_to_user(info) < 0)
+		return;
+
+	/* Skip pos 0 and 1 */
+	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
+		dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
+						       &dst->u.set.kbuf,
+						       &dst->u.set.ubuf,
+						       0, ELF_FP_OFFSET(2));
+		if (dst->count == 0 || dst->ret)
+			return;
+	}
+
+	/* fr2-fr31 */
+	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
+		start = dst->pos;
+		end = min(((unsigned int)ELF_FP_OFFSET(32)),
+			 dst->pos + dst->count);
+		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
+				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
+				ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
+		if (dst->ret)
+			return;
+
+		if (start & 0xF) { /* only write high part */
+			if (unw_get_fr(info, start / sizeof(elf_fpreg_t),
+					 &fpreg)) {
+				dst->ret = -EIO;
+				return;
+			}
+			tmp[start / sizeof(elf_fpreg_t) - 2].u.bits[0]
+				= fpreg.u.bits[0];
+			start &= ~0xFUL;
+		}
+		if (end & 0xF) { /* only write low part */
+			if (unw_get_fr(info, end / sizeof(elf_fpreg_t),
+					&fpreg)) {
+				dst->ret = -EIO;
+				return;
+			}
+			tmp[end / sizeof(elf_fpreg_t) - 2].u.bits[1]
+				= fpreg.u.bits[1];
+			end = (end + 0xF) & ~0xFUL;
+		}
+
+		for ( ;	start < end ; start += sizeof(elf_fpreg_t)) {
+			index = start / sizeof(elf_fpreg_t);
+			if (unw_set_fr(info, index, tmp[index - 2])) {
+				dst->ret = -EIO;
+				return;
+			}
+		}
+		if (dst->ret || dst->count == 0)
+			return;
+	}
+
+	/* fph */
+	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(128)) {
+		ia64_sync_fph(dst->target);
+		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
+						&dst->u.set.kbuf,
+						&dst->u.set.ubuf,
+						&dst->target->thread.fph,
+						ELF_FP_OFFSET(32), -1);
+	}
+}
+
+static int
+do_regset_call(void (*call)(struct unw_frame_info *, void *),
+	       struct task_struct *target,
+	       const struct user_regset *regset,
+	       unsigned int pos, unsigned int count,
+	       const void *kbuf, const void __user *ubuf)
+{
+	struct regset_getset info = { .target = target, .regset = regset,
+				 .pos = pos, .count = count,
+				 .u.set = { .kbuf = kbuf, .ubuf = ubuf },
+				 .ret = 0 };
+
+	if (target == current)
+		unw_init_running(call, &info);
+	else {
+		struct unw_frame_info ufi;
+		memset(&ufi, 0, sizeof(ufi));
+		unw_init_from_blocked_task(&ufi, target);
+		(*call)(&ufi, &info);
+	}
+
+	return info.ret;
+}
+
+static int
+gpregs_get(struct task_struct *target,
+	   const struct user_regset *regset,
+	   unsigned int pos, unsigned int count,
+	   void *kbuf, void __user *ubuf)
+{
+	return do_regset_call(do_gpregs_get, target, regset, pos, count,
+		kbuf, ubuf);
+}
+
+static int gpregs_set(struct task_struct *target,
+		const struct user_regset *regset,
+		unsigned int pos, unsigned int count,
+		const void *kbuf, const void __user *ubuf)
+{
+	return do_regset_call(do_gpregs_set, target, regset, pos, count,
+		kbuf, ubuf);
+}
+
+static void do_gpregs_writeback(struct unw_frame_info *info, void *arg)
+{
+	do_sync_rbs(info, ia64_sync_user_rbs);
+}
+
+/*
+ * This is called to write back the register backing store.
+ * ptrace does this before it stops, so that a tracer reading the user
+ * memory after the thread stops will get the current register data.
+ */
+static int
+gpregs_writeback(struct task_struct *target,
+		 const struct user_regset *regset,
+		 int now)
+{
+	if (test_and_set_tsk_thread_flag(target, TIF_RESTORE_RSE))
+		return 0;
+	tsk_set_notify_resume(target);
+	return do_regset_call(do_gpregs_writeback, target, regset, 0, 0,
+		NULL, NULL);
+}
+
+static int
+fpregs_active(struct task_struct *target, const struct user_regset *regset)
+{
+	return (target->thread.flags & IA64_THREAD_FPH_VALID) ? 128 : 32;
+}
+
+static int fpregs_get(struct task_struct *target,
+		const struct user_regset *regset,
+		unsigned int pos, unsigned int count,
+		void *kbuf, void __user *ubuf)
+{
+	return do_regset_call(do_fpregs_get, target, regset, pos, count,
+		kbuf, ubuf);
+}
+
+static int fpregs_set(struct task_struct *target,
+		const struct user_regset *regset,
+		unsigned int pos, unsigned int count,
+		const void *kbuf, const void __user *ubuf)
+{
+	return do_regset_call(do_fpregs_set, target, regset, pos, count,
+		kbuf, ubuf);
+}
+
+static int
+access_uarea(struct task_struct *child, unsigned long addr,
+	      unsigned long *data, int write_access)
+{
+	unsigned int pos = -1; /* an invalid value */
+	int ret;
+	unsigned long *ptr, regnum;
+
+	if ((addr & 0x7) != 0) {
+		dprintk("ptrace: unaligned register address 0x%lx\n", addr);
+		return -1;
+	}
+	if ((addr >= PT_NAT_BITS + 8 && addr < PT_F2) ||
+		(addr >= PT_R7 + 8 && addr < PT_B1) ||
+		(addr >= PT_AR_LC + 8 && addr < PT_CR_IPSR) ||
+		(addr >= PT_AR_SSD + 8 && addr < PT_DBR)) {
+		dprintk("ptrace: rejecting access to register "
+					"address 0x%lx\n", addr);
+		return -1;
+	}
+
+	switch (addr) {
+	case PT_F32 ... (PT_F127 + 15):
+		pos = addr - PT_F32 + ELF_FP_OFFSET(32);
+		break;
+	case PT_F2 ... (PT_F5 + 15):
+		pos = addr - PT_F2 + ELF_FP_OFFSET(2);
+		break;
+	case PT_F10 ... (PT_F31 + 15):
+		pos = addr - PT_F10 + ELF_FP_OFFSET(10);
+		break;
+	case PT_F6 ... (PT_F9 + 15):
+		pos = addr - PT_F6 + ELF_FP_OFFSET(6);
+		break;
+	}
+
+	if (pos != -1) {
+		if (write_access)
+			ret = fpregs_set(child, NULL, pos,
+				sizeof(unsigned long), data, NULL);
+		else
+			ret = fpregs_get(child, NULL, pos,
+				sizeof(unsigned long), data, NULL);
+		if (ret != 0)
+			return -1;
+		return 0;
+	}
+
+	switch (addr) {
+	case PT_NAT_BITS:
+		pos = ELF_NAT_OFFSET;
+		break;
+	case PT_R4 ... PT_R7:
+		pos = addr - PT_R4 + ELF_GR_OFFSET(4);
+		break;
+	case PT_B1 ... PT_B5:
+		pos = addr - PT_B1 + ELF_BR_OFFSET(1);
+		break;
+	case PT_AR_EC:
+		pos = ELF_AR_EC_OFFSET;
+		break;
+	case PT_AR_LC:
+		pos = ELF_AR_LC_OFFSET;
+		break;
+	case PT_CR_IPSR:
+		pos = ELF_CR_IPSR_OFFSET;
+		break;
+	case PT_CR_IIP:
+		pos = ELF_CR_IIP_OFFSET;
+		break;
+	case PT_CFM:
+		pos = ELF_CFM_OFFSET;
+		break;
+	case PT_AR_UNAT:
+		pos = ELF_AR_UNAT_OFFSET;
+		break;
+	case PT_AR_PFS:
+		pos = ELF_AR_PFS_OFFSET;
+		break;
+	case PT_AR_RSC:
+		pos = ELF_AR_RSC_OFFSET;
+		break;
+	case PT_AR_RNAT:
+		pos = ELF_AR_RNAT_OFFSET;
+		break;
+	case PT_AR_BSPSTORE:
+		pos = ELF_AR_BSPSTORE_OFFSET;
+		break;
+	case PT_PR:
+		pos = ELF_PR_OFFSET;
+		break;
+	case PT_B6:
+		pos = ELF_BR_OFFSET(6);
+		break;
+	case PT_AR_BSP:
+		pos = ELF_AR_BSP_OFFSET;
+		break;
+	case PT_R1 ... PT_R3:
+		pos = addr - PT_R1 + ELF_GR_OFFSET(1);
+		break;
+	case PT_R12 ... PT_R15:
+		pos = addr - PT_R12 + ELF_GR_OFFSET(12);
+		break;
+	case PT_R8 ... PT_R11:
+		pos = addr - PT_R8 + ELF_GR_OFFSET(8);
+		break;
+	case PT_R16 ... PT_R31:
+		pos = addr - PT_R16 + ELF_GR_OFFSET(16);
+		break;
+	case PT_AR_CCV:
+		pos = ELF_AR_CCV_OFFSET;
+		break;
+	case PT_AR_FPSR:
+		pos = ELF_AR_FPSR_OFFSET;
+		break;
+	case PT_B0:
+		pos = ELF_BR_OFFSET(0);
+		break;
+	case PT_B7:
+		pos = ELF_BR_OFFSET(7);
+		break;
+	case PT_AR_CSD:
+		pos = ELF_AR_CSD_OFFSET;
+		break;
+	case PT_AR_SSD:
+		pos = ELF_AR_SSD_OFFSET;
+		break;
+	}
+
+	if (pos != -1) {
+		if (write_access)
+			ret = gpregs_set(child, NULL, pos,
+				sizeof(unsigned long), data, NULL);
+		else
+			ret = gpregs_get(child, NULL, pos,
+				sizeof(unsigned long), data, NULL);
+		if (ret != 0)
+			return -1;
+		return 0;
+	}
+
+	/* access debug registers */
+	if (addr >= PT_IBR) {
+		regnum = (addr - PT_IBR) >> 3;
+		ptr = &child->thread.ibr[0];
+	} else {
+		regnum = (addr - PT_DBR) >> 3;
+		ptr = &child->thread.dbr[0];
+	}
+
+	if (regnum >= 8) {
+		dprintk("ptrace: rejecting access to register "
+				"address 0x%lx\n", addr);
+		return -1;
+	}
+#ifdef CONFIG_PERFMON
+	/*
+	 * Check if debug registers are used by perfmon. This
+	 * test must be done once we know that we can do the
+	 * operation, i.e. the arguments are all valid, but
+	 * before we start modifying the state.
+	 *
+	 * Perfmon needs to keep a count of how many processes
+	 * are trying to modify the debug registers for system
+	 * wide monitoring sessions.
+	 *
+	 * We also include read access here, because they may
+	 * cause the PMU-installed debug register state
+	 * (dbr[], ibr[]) to be reset. The two arrays are also
+	 * used by perfmon, but we do not use
+	 * IA64_THREAD_DBG_VALID. The registers are restored
+	 * by the PMU context switch code.
+	 */
+	if (pfm_use_debug_registers(child))
+		return -1;
+#endif
+
+	if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
+		child->thread.flags |= IA64_THREAD_DBG_VALID;
+		memset(child->thread.dbr, 0,
+				sizeof(child->thread.dbr));
+		memset(child->thread.ibr, 0,
+				sizeof(child->thread.ibr));
+	}
+
+	ptr += regnum;
+
+	if ((regnum & 1) && write_access) {
+		/* don't let the user set kernel-level breakpoints: */
+		*ptr = *data & ~(7UL << 56);
+		return 0;
+	}
+	if (write_access)
+		*ptr = *data;
+	else
+		*data = *ptr;
+	return 0;
+}
+
+static const struct user_regset native_regsets[] = {
+	{
+		.core_note_type = NT_PRSTATUS,
+		.n = ELF_NGREG,
+		.size = sizeof(elf_greg_t), .align = sizeof(elf_greg_t),
+		.get = gpregs_get, .set = gpregs_set,
+		.writeback = gpregs_writeback
+	},
+	{
+		.core_note_type = NT_PRFPREG,
+		.n = ELF_NFPREG,
+		.size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t),
+		.get = fpregs_get, .set = fpregs_set, .active = fpregs_active
+	},
+};
+
+static const struct user_regset_view user_ia64_view = {
+	.name = "ia64",
+	.e_machine = EM_IA_64,
+	.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *tsk)
+{
+#ifdef CONFIG_IA32_SUPPORT
+	extern const struct user_regset_view user_ia32_view;
+	if (IS_IA32_PROCESS(task_pt_regs(tsk)))
+		return &user_ia32_view;
+#endif
+	return &user_ia64_view;
+}
diff --git a/arch/ia64/kernel/semaphore.c b/arch/ia64/kernel/semaphore.c
deleted file mode 100644
index 2724ef3..0000000
--- a/arch/ia64/kernel/semaphore.c
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * IA-64 semaphore implementation (derived from x86 version).
- *
- * Copyright (C) 1999-2000, 2002 Hewlett-Packard Co
- *	David Mosberger-Tang <davidm@hpl.hp.com>
- */
-
-/*
- * Semaphores are implemented using a two-way counter: The "count"
- * variable is decremented for each process that tries to acquire the
- * semaphore, while the "sleepers" variable is a count of such
- * acquires.
- *
- * Notably, the inline "up()" and "down()" functions can efficiently
- * test if they need to do any extra work (up needs to do something
- * only if count was negative before the increment operation.
- *
- * "sleeping" and the contention routine ordering is protected
- * by the spinlock in the semaphore's waitqueue head.
- *
- * Note that these functions are only called when there is contention
- * on the lock, and as such all this is the "non-critical" part of the
- * whole semaphore business. The critical part is the inline stuff in
- * <asm/semaphore.h> where we want to avoid any extra jumps and calls.
- */
-#include <linux/sched.h>
-#include <linux/init.h>
-
-#include <asm/errno.h>
-#include <asm/semaphore.h>
-
-/*
- * Logic:
- *  - Only on a boundary condition do we need to care. When we go
- *    from a negative count to a non-negative, we wake people up.
- *  - When we go from a non-negative count to a negative do we
- *    (a) synchronize with the "sleepers" count and (b) make sure
- *    that we're on the wakeup list before we synchronize so that
- *    we cannot lose wakeup events.
- */
-
-void
-__up (struct semaphore *sem)
-{
-	wake_up(&sem->wait);
-}
-
-void __sched __down (struct semaphore *sem)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	unsigned long flags;
-
-	tsk->state = TASK_UNINTERRUPTIBLE;
-	spin_lock_irqsave(&sem->wait.lock, flags);
-	add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-	sem->sleepers++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock in
-		 * the wait_queue_head.
-		 */
-		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-		schedule();
-
-		spin_lock_irqsave(&sem->wait.lock, flags);
-		tsk->state = TASK_UNINTERRUPTIBLE;
-	}
-	remove_wait_queue_locked(&sem->wait, &wait);
-	wake_up_locked(&sem->wait);
-	spin_unlock_irqrestore(&sem->wait.lock, flags);
-	tsk->state = TASK_RUNNING;
-}
-
-int __sched __down_interruptible (struct semaphore * sem)
-{
-	int retval = 0;
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	unsigned long flags;
-
-	tsk->state = TASK_INTERRUPTIBLE;
-	spin_lock_irqsave(&sem->wait.lock, flags);
-	add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-	sem->sleepers ++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * With signals pending, this turns into
-		 * the trylock failure case - we won't be
-		 * sleeping, and we* can't get the lock as
-		 * it has contention. Just correct the count
-		 * and exit.
-		 */
-		if (signal_pending(current)) {
-			retval = -EINTR;
-			sem->sleepers = 0;
-			atomic_add(sleepers, &sem->count);
-			break;
-		}
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock in
-		 * wait_queue_head. The "-1" is because we're
-		 * still hoping to get the semaphore.
-		 */
-		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-		schedule();
-
-		spin_lock_irqsave(&sem->wait.lock, flags);
-		tsk->state = TASK_INTERRUPTIBLE;
-	}
-	remove_wait_queue_locked(&sem->wait, &wait);
-	wake_up_locked(&sem->wait);
-	spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-	tsk->state = TASK_RUNNING;
-	return retval;
-}
-
-/*
- * Trylock failed - make sure we correct for having decremented the
- * count.
- */
-int
-__down_trylock (struct semaphore *sem)
-{
-	unsigned long flags;
-	int sleepers;
-
-	spin_lock_irqsave(&sem->wait.lock, flags);
-	sleepers = sem->sleepers + 1;
-	sem->sleepers = 0;
-
-	/*
-	 * Add "everybody else" and us into it. They aren't
-	 * playing, because we own the spinlock in the
-	 * wait_queue_head.
-	 */
-	if (!atomic_add_negative(sleepers, &sem->count)) {
-		wake_up_locked(&sem->wait);
-	}
-
-	spin_unlock_irqrestore(&sem->wait.lock, flags);
-	return 1;
-}
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 4aa9eae..5015ca1 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -59,6 +59,7 @@
 #include <asm/setup.h>
 #include <asm/smp.h>
 #include <asm/system.h>
+#include <asm/tlbflush.h>
 #include <asm/unistd.h>
 #include <asm/hpsim.h>
 
@@ -176,6 +177,29 @@
 	return 0;
 }
 
+/*
+ * Similar to "filter_rsvd_memory()", but the reserved memory ranges
+ * are not filtered out.
+ */
+int __init
+filter_memory(unsigned long start, unsigned long end, void *arg)
+{
+	void (*func)(unsigned long, unsigned long, int);
+
+#if IGNORE_PFN0
+	if (start == PAGE_OFFSET) {
+		printk(KERN_WARNING "warning: skipping physical page 0\n");
+		start += PAGE_SIZE;
+		if (start >= end)
+			return 0;
+	}
+#endif
+	func = arg;
+	if (start < end)
+		call_pernode_memory(__pa(start), end - start, func);
+	return 0;
+}
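
A usage sketch for the new helper: further down in this same merge, both arch/ia64/mm/contig.c and arch/ia64/mm/discontig.c wire it up as the EFI memory-map walk callback, passing the per-node registration routine through the arg pointer:

    /* filter_memory() forwards each usable range (reserved ranges
     * included) to register_active_ranges() via call_pernode_memory().
     */
    efi_memmap_walk(filter_memory, register_active_ranges);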
+
 static void __init
 sort_regions (struct rsvd_region *rsvd_region, int max)
 {
@@ -493,6 +517,8 @@
 	acpi_table_init();
 # ifdef CONFIG_ACPI_NUMA
 	acpi_numa_init();
+	per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ?
+		32 : cpus_weight(early_cpu_possible_map)), additional_cpus);
 # endif
 #else
 # ifdef CONFIG_SMP
@@ -946,9 +972,10 @@
 #endif
 
 	/* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */
-	if (ia64_pal_vm_summary(NULL, &vmi) == 0)
+	if (ia64_pal_vm_summary(NULL, &vmi) == 0) {
 		max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
-	else {
+		setup_ptcg_sem(vmi.pal_vm_info_2_s.max_purges, NPTCG_FROM_PAL);
+	} else {
 		printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
 		max_ctx = (1U << 15) - 1;	/* use architected minimum */
 	}
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 4e446aa..9a9d4c4 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -213,6 +213,19 @@
  * Called with preemption disabled.
  */
 static inline void
+send_IPI_mask(cpumask_t mask, int op)
+{
+	unsigned int cpu;
+
+	for_each_cpu_mask(cpu, mask) {
+		send_IPI_single(cpu, op);
+	}
+}
+
+/*
+ * Called with preemption disabled.
+ */
+static inline void
 send_IPI_all (int op)
 {
 	int i;
@@ -401,6 +414,75 @@
 }
 EXPORT_SYMBOL(smp_call_function_single);
 
+/**
+ * smp_call_function_mask(): Run a function on a set of other CPUs.
+ * @mask:	The set of cpus to run on.  Must not include the current cpu.
+ * @func:	The function to run. This must be fast and non-blocking.
+ * @info:	An arbitrary pointer to pass to the function.
+ * @wait:	If true, wait (atomically) until function
+ *		has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+int smp_call_function_mask(cpumask_t mask,
+			   void (*func)(void *), void *info,
+			   int wait)
+{
+	struct call_data_struct data;
+	cpumask_t allbutself;
+	int cpus;
+
+	spin_lock(&call_lock);
+	allbutself = cpu_online_map;
+	cpu_clear(smp_processor_id(), allbutself);
+
+	cpus_and(mask, mask, allbutself);
+	cpus = cpus_weight(mask);
+	if (!cpus) {
+		spin_unlock(&call_lock);
+		return 0;
+	}
+
+	/* Can deadlock when called with interrupts disabled */
+	WARN_ON(irqs_disabled());
+
+	data.func = func;
+	data.info = info;
+	atomic_set(&data.started, 0);
+	data.wait = wait;
+	if (wait)
+		atomic_set(&data.finished, 0);
+
+	call_data = &data;
+	mb(); /* ensure store to call_data precedes setting of IPI_CALL_FUNC */
+
+	/* Send a message to other CPUs */
+	if (cpus_equal(mask, allbutself))
+		send_IPI_allbutself(IPI_CALL_FUNC);
+	else
+		send_IPI_mask(mask, IPI_CALL_FUNC);
+
+	/* Wait for response */
+	while (atomic_read(&data.started) != cpus)
+		cpu_relax();
+
+	if (wait)
+		while (atomic_read(&data.finished) != cpus)
+			cpu_relax();
+	call_data = NULL;
+
+	spin_unlock(&call_lock);
+	return 0;
+}
+EXPORT_SYMBOL(smp_call_function_mask);
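
A minimal caller sketch for the interface documented above; the callback and function names are illustrative, not part of this patch. Note that the implementation spin-waits, so wait=1 is safe with preemption disabled, but interrupts must be enabled:

    /* Hypothetical callback; must be fast and non-blocking. */
    static void drain_local_queue(void *info)
    {
    	/* runs in IPI context on each cpu in the mask */
    }

    static int drain_remote_queues(void)
    {
    	cpumask_t mask = cpu_online_map;
    	int ret;

    	preempt_disable();			/* stabilize smp_processor_id() */
    	cpu_clear(smp_processor_id(), mask);	/* must not include current cpu */
    	ret = smp_call_function_mask(mask, drain_local_queue, NULL, 1);
    	preempt_enable();
    	return ret;				/* 0 on success */
    }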
+
 /*
  * this function sends a 'generic call function' IPI to all other CPUs
  * in the system.
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 32ee597..16483be 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -400,9 +400,9 @@
 	/* Setup the per cpu irq handling data structures */
 	__setup_vector_irq(cpuid);
 	cpu_set(cpuid, cpu_online_map);
-	unlock_ipi_calllock();
 	per_cpu(cpu_state, cpuid) = CPU_ONLINE;
 	spin_unlock(&vector_lock);
+	unlock_ipi_calllock();
 
 	smp_setup_percpu_timer();
 
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 17fda52..48e15a5 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -59,6 +59,84 @@
 };
 static struct clocksource *itc_clocksource;
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+
+#include <linux/kernel_stat.h>
+
+extern cputime_t cycle_to_cputime(u64 cyc);
+
+/*
+ * Called from the context switch with interrupts disabled, to charge all
+ * accumulated times to the current process, and to prepare accounting on
+ * the next process.
+ */
+void ia64_account_on_switch(struct task_struct *prev, struct task_struct *next)
+{
+	struct thread_info *pi = task_thread_info(prev);
+	struct thread_info *ni = task_thread_info(next);
+	cputime_t delta_stime, delta_utime;
+	__u64 now;
+
+	now = ia64_get_itc();
+
+	delta_stime = cycle_to_cputime(pi->ac_stime + (now - pi->ac_stamp));
+	account_system_time(prev, 0, delta_stime);
+	account_system_time_scaled(prev, delta_stime);
+
+	if (pi->ac_utime) {
+		delta_utime = cycle_to_cputime(pi->ac_utime);
+		account_user_time(prev, delta_utime);
+		account_user_time_scaled(prev, delta_utime);
+	}
+
+	pi->ac_stamp = ni->ac_stamp = now;
+	ni->ac_stime = ni->ac_utime = 0;
+}
+
+/*
+ * Account time for a transition between system, hard irq or soft irq state.
+ * Note that this function is called with interrupts enabled.
+ */
+void account_system_vtime(struct task_struct *tsk)
+{
+	struct thread_info *ti = task_thread_info(tsk);
+	unsigned long flags;
+	cputime_t delta_stime;
+	__u64 now;
+
+	local_irq_save(flags);
+
+	now = ia64_get_itc();
+
+	delta_stime = cycle_to_cputime(ti->ac_stime + (now - ti->ac_stamp));
+	account_system_time(tsk, 0, delta_stime);
+	account_system_time_scaled(tsk, delta_stime);
+	ti->ac_stime = 0;
+
+	ti->ac_stamp = now;
+
+	local_irq_restore(flags);
+}
+
+/*
+ * Called from the timer interrupt handler to charge accumulated user time
+ * to the current process.  Must be called with interrupts disabled.
+ */
+void account_process_tick(struct task_struct *p, int user_tick)
+{
+	struct thread_info *ti = task_thread_info(p);
+	cputime_t delta_utime;
+
+	if (ti->ac_utime) {
+		delta_utime = cycle_to_cputime(ti->ac_utime);
+		account_user_time(p, delta_utime);
+		account_user_time_scaled(p, delta_utime);
+		ti->ac_utime = 0;
+	}
+}
+
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+
 static irqreturn_t
 timer_interrupt (int irq, void *dev_id)
 {
diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c
index 6903361..ff0e7c1 100644
--- a/arch/ia64/kernel/unaligned.c
+++ b/arch/ia64/kernel/unaligned.c
@@ -13,6 +13,7 @@
  * 2001/08/13	Correct size of extended floats (float_fsz) from 16 to 10 bytes.
  * 2001/01/17	Add support emulation of unaligned kernel accesses.
  */
+#include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/tty.h>
@@ -1290,7 +1291,7 @@
 {
 	static unsigned long count, last_time;
 
-	if (jiffies - last_time > 5*HZ)
+	if (time_after(jiffies, last_time + 5 * HZ))
 		count = 0;
 	if (count < 5) {
 		last_time = jiffies;
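
This conversion recurs throughout the merge (see also the xpc changes below): comparing raw jiffies values breaks when the counter wraps, while the time_after() family from <linux/jiffies.h> does a wrap-safe signed comparison. A minimal sketch of the pattern, with a hypothetical deadline variable:

    #include <linux/jiffies.h>

    static unsigned long my_deadline;	/* hypothetical, set elsewhere */

    static int my_deadline_passed(void)
    {
    	/* wrap-safe; a raw "jiffies >= my_deadline" test is not */
    	return time_after_eq(jiffies, my_deadline);
    }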
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 344f64e..798bf98 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -45,8 +45,6 @@
 
 	printk(KERN_INFO "Mem-info:\n");
 	show_free_areas();
-	printk(KERN_INFO "Free swap:       %6ldkB\n",
-	       nr_swap_pages<<(PAGE_SHIFT-10));
 	printk(KERN_INFO "Node memory in pages:\n");
 	for_each_online_pgdat(pgdat) {
 		unsigned long present;
@@ -255,7 +253,7 @@
 	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-	efi_memmap_walk(register_active_ranges, NULL);
+	efi_memmap_walk(filter_memory, register_active_ranges);
 	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
 	if (max_gap < LARGE_GAP) {
 		vmem_map = (struct page *) 0;
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index ee5e68b..544dc42 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -104,7 +104,7 @@
 {
 	int cpu, n = 0;
 
-	for (cpu = 0; cpu < NR_CPUS; cpu++)
+	for_each_possible_early_cpu(cpu)
 		if (node == node_cpuid[cpu].nid)
 			n++;
 
@@ -124,6 +124,7 @@
 	pernodesize += node * L1_CACHE_BYTES;
 	pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
 	pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
+	pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
 	pernodesize = PAGE_ALIGN(pernodesize);
 	return pernodesize;
 }
@@ -142,7 +143,7 @@
 #ifdef CONFIG_SMP
 	int cpu;
 
-	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+	for_each_possible_early_cpu(cpu) {
 		if (node == node_cpuid[cpu].nid) {
 			memcpy(__va(cpu_data), __phys_per_cpu_start,
 			       __per_cpu_end - __per_cpu_start);
@@ -345,7 +346,7 @@
 
 #ifdef CONFIG_SMP
 	/* Set the node_data pointer for each per-cpu struct */
-	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+	for_each_possible_early_cpu(cpu) {
 		node = node_cpuid[cpu].nid;
 		per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data;
 	}
@@ -444,7 +445,7 @@
 			mem_data[node].min_pfn = ~0UL;
 		}
 
-	efi_memmap_walk(register_active_ranges, NULL);
+	efi_memmap_walk(filter_memory, register_active_ranges);
 
 	/*
 	 * Initialize the boot memory maps in reverse order since that's
@@ -493,13 +494,9 @@
 	int cpu;
 	static int first_time = 1;
 
-
-	if (smp_processor_id() != 0)
-		return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
-
 	if (first_time) {
 		first_time = 0;
-		for (cpu = 0; cpu < NR_CPUS; cpu++)
+		for_each_possible_early_cpu(cpu)
 			per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
 	}
 
@@ -522,8 +519,6 @@
 
 	printk(KERN_INFO "Mem-info:\n");
 	show_free_areas();
-	printk(KERN_INFO "Free swap:       %6ldkB\n",
-	       nr_swap_pages<<(PAGE_SHIFT-10));
 	printk(KERN_INFO "Node memory in pages:\n");
 	for_each_online_pgdat(pgdat) {
 		unsigned long present;
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index a4ca657..5c1de53 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -58,7 +58,6 @@
 {
 	unsigned long addr;
 	struct page *page;
-	unsigned long order;
 
 	page = pte_page(pte);
 	addr = (unsigned long) page_address(page);
@@ -66,12 +65,7 @@
 	if (test_bit(PG_arch_1, &page->flags))
 		return;				/* i-cache is already coherent with d-cache */
 
-	if (PageCompound(page)) {
-		order = compound_order(page);
-		flush_icache_range(addr, addr + (1UL << order << PAGE_SHIFT));
-	}
-	else
-		flush_icache_range(addr, addr + PAGE_SIZE);
+	flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
 	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
 }
 
@@ -553,12 +547,10 @@
 #endif /* CONFIG_VIRTUAL_MEM_MAP */
 
 int __init
-register_active_ranges(u64 start, u64 end, void *arg)
+register_active_ranges(u64 start, u64 len, int nid)
 {
-	int nid = paddr_to_nid(__pa(start));
+	u64 end = start + len;
 
-	if (nid < 0)
-		nid = 0;
 #ifdef CONFIG_KEXEC
 	if (start > crashk_res.start && start < crashk_res.end)
 		start = crashk_res.end;
diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c
index 7807fc5..b73bf18 100644
--- a/arch/ia64/mm/numa.c
+++ b/arch/ia64/mm/numa.c
@@ -27,7 +27,9 @@
  */
 int num_node_memblks;
 struct node_memblk_s node_memblk[NR_NODE_MEMBLKS];
-struct node_cpuid_s node_cpuid[NR_CPUS];
+struct node_cpuid_s node_cpuid[NR_CPUS] =
+	{ [0 ... NR_CPUS-1] = { .phys_id = 0, .nid = NUMA_NO_NODE } };
+
 /*
  * This is a matrix with "distances" between nodes, they should be
  * proportional to the memory access latency ratios.
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index 655da24..d52ec4e 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -11,6 +11,9 @@
  * Rohit Seth <rohit.seth@intel.com>
  * Ken Chen <kenneth.w.chen@intel.com>
  * Christophe de Dinechin <ddd@hp.com>: Avoid ptc.e on memory allocation
+ * Copyright (C) 2007 Intel Corp
+ *	Fenghua Yu <fenghua.yu@intel.com>
+ *	Add multiple ptc.g/ptc.ga instruction support in global tlb purge.
  */
 #include <linux/module.h>
 #include <linux/init.h>
@@ -26,6 +29,9 @@
 #include <asm/pal.h>
 #include <asm/tlbflush.h>
 #include <asm/dma.h>
+#include <asm/processor.h>
+#include <asm/sal.h>
+#include <asm/tlb.h>
 
 static struct {
 	unsigned long mask;	/* mask of supported purge page-sizes */
@@ -39,6 +45,10 @@
 };
 
 DEFINE_PER_CPU(u8, ia64_need_tlb_flush);
+DEFINE_PER_CPU(u8, ia64_tr_num);  /* Number of TR slots on the current processor */
+DEFINE_PER_CPU(u8, ia64_tr_used); /* Max slot number used by the kernel */
+
+struct ia64_tr_entry __per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX];
 
 /*
  * Initializes the ia64_ctx.bitmap array based on max_ctx+1.
@@ -84,14 +94,140 @@
 	local_flush_tlb_all();
 }
 
+/*
+ * Implement "spinaphores" ... like counting semaphores, but they
+ * spin instead of sleeping.  If there are ever any other users for
+ * this primitive it can be moved up to a spinaphore.h header.
+ */
+struct spinaphore {
+	atomic_t	cur;
+};
+
+static inline void spinaphore_init(struct spinaphore *ss, int val)
+{
+	atomic_set(&ss->cur, val);
+}
+
+static inline void down_spin(struct spinaphore *ss)
+{
+	while (unlikely(!atomic_add_unless(&ss->cur, -1, 0)))
+		while (atomic_read(&ss->cur) == 0)
+			cpu_relax();
+}
+
+static inline void up_spin(struct spinaphore *ss)
+{
+	atomic_add(1, &ss->cur);
+}
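
A minimal usage sketch for this primitive, assuming a hypothetical limit of four concurrent entrants (ptcg_sem below is the real user):

    static struct spinaphore demo_sem;

    static void demo_init(void)
    {
    	spinaphore_init(&demo_sem, 4);	/* allow 4 cpus in at once */
    }

    static void demo_enter(void)
    {
    	down_spin(&demo_sem);	/* busy-wait until a slot frees up */
    	/* ... at most 4 cpus execute this region concurrently ... */
    	up_spin(&demo_sem);
    }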
+
+static struct spinaphore ptcg_sem;
+static u16 nptcg = 1;
+static int need_ptcg_sem = 1;
+static int toolatetochangeptcgsem = 0;
+
+/*
+ * The kernel parameter "nptcg=" overrides the maximum number of concurrent
+ * global TLB purges, which is otherwise reported by either PAL or the SAL
+ * PALO table.
+ *
+ * No sanity checking is done on the nptcg value; it is the user's
+ * responsibility to supply a value valid for the platform, otherwise the
+ * kernel may hang in some cases.
+ */
+static int __init
+set_nptcg(char *str)
+{
+	int value = 0;
+
+	get_option(&str, &value);
+	setup_ptcg_sem(value, NPTCG_FROM_KERNEL_PARAMETER);
+
+	return 1;
+}
+
+__setup("nptcg=", set_nptcg);
+
+/*
+ * Maximum number of simultaneous ptc.g purges in the system can
+ * be defined by PAL_VM_SUMMARY (in which case we should take
+ * the smallest value for any cpu in the system) or by the PAL
+ * override table (in which case we should ignore the value from
+ * PAL_VM_SUMMARY).
+ *
+ * Kernel parameter "nptcg=" overrides maximum number of simultanesous ptc.g
+ * purges defined in either PAL_VM_SUMMARY or PAL override table. In this case,
+ * we should ignore the value from either PAL_VM_SUMMARY or PAL override table.
+ *
+ * Complicating the logic here is the fact that num_possible_cpus()
+ * isn't fully set up until we start bringing cpus online.
+ */
+void
+setup_ptcg_sem(int max_purges, int nptcg_from)
+{
+	static int kp_override;
+	static int palo_override;
+	static int firstcpu = 1;
+
+	if (toolatetochangeptcgsem) {
+		BUG_ON(max_purges < nptcg);
+		return;
+	}
+
+	if (nptcg_from == NPTCG_FROM_KERNEL_PARAMETER) {
+		kp_override = 1;
+		nptcg = max_purges;
+		goto resetsema;
+	}
+	if (kp_override) {
+		need_ptcg_sem = num_possible_cpus() > nptcg;
+		return;
+	}
+
+	if (nptcg_from == NPTCG_FROM_PALO) {
+		palo_override = 1;
+
+		/* In PALO, max_purges == 0 really does mean zero purges */
+		if (max_purges == 0)
+			panic("Whoa! Platform does not support global TLB purges.\n");
+		nptcg = max_purges;
+		if (nptcg == PALO_MAX_TLB_PURGES) {
+			need_ptcg_sem = 0;
+			return;
+		}
+		goto resetsema;
+	}
+	if (palo_override) {
+		if (nptcg != PALO_MAX_TLB_PURGES)
+			need_ptcg_sem = (num_possible_cpus() > nptcg);
+		return;
+	}
+
+	/* In PAL_VM_SUMMARY max_purges == 0 actually means 1 */
+	if (max_purges == 0)
+		max_purges = 1;
+
+	if (firstcpu) {
+		nptcg = max_purges;
+		firstcpu = 0;
+	}
+	if (max_purges < nptcg)
+		nptcg = max_purges;
+	if (nptcg == PAL_MAX_PURGES) {
+		need_ptcg_sem = 0;
+		return;
+	} else
+		need_ptcg_sem = (num_possible_cpus() > nptcg);
+
+resetsema:
+	spinaphore_init(&ptcg_sem, max_purges);
+}
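
A worked scenario may help with the precedence logic described above (all values hypothetical):

    /* Hypothetical sequence of calls and the resulting nptcg value:
     *
     *   setup_ptcg_sem(4, NPTCG_FROM_PAL);    boot cpu    -> nptcg = 4
     *   setup_ptcg_sem(2, NPTCG_FROM_PAL);    second cpu  -> nptcg = 2
     *   setup_ptcg_sem(8, NPTCG_FROM_PALO);   PALO table  -> nptcg = 8,
     *                                         PAL values ignored from now on
     *
     * A "nptcg=" kernel parameter, parsed before any of these calls,
     * would override both sources.
     */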
+
 void
 ia64_global_tlb_purge (struct mm_struct *mm, unsigned long start,
 		       unsigned long end, unsigned long nbits)
 {
-	static DEFINE_SPINLOCK(ptcg_lock);
-
 	struct mm_struct *active_mm = current->active_mm;
 
+	toolatetochangeptcgsem = 1;
+
 	if (mm != active_mm) {
 		/* Restore region IDs for mm */
 		if (mm && active_mm) {
@@ -102,19 +238,20 @@
 		}
 	}
 
-	/* HW requires global serialization of ptc.ga.  */
-	spin_lock(&ptcg_lock);
-	{
-		do {
-			/*
-			 * Flush ALAT entries also.
-			 */
-			ia64_ptcga(start, (nbits<<2));
-			ia64_srlz_i();
-			start += (1UL << nbits);
-		} while (start < end);
-	}
-	spin_unlock(&ptcg_lock);
+	if (need_ptcg_sem)
+		down_spin(&ptcg_sem);
+
+	do {
+		/*
+		 * Flush ALAT entries also.
+		 */
+		ia64_ptcga(start, (nbits << 2));
+		ia64_srlz_i();
+		start += (1UL << nbits);
+	} while (start < end);
+
+	if (need_ptcg_sem)
+		up_spin(&ptcg_sem);
 
         if (mm != active_mm) {
                 activate_context(active_mm);
@@ -190,6 +327,9 @@
 	ia64_ptce_info_t uninitialized_var(ptce_info); /* GCC be quiet */
 	unsigned long tr_pgbits;
 	long status;
+	pal_vm_info_1_u_t vm_info_1;
+	pal_vm_info_2_u_t vm_info_2;
+	int cpu = smp_processor_id();
 
 	if ((status = ia64_pal_vm_page_size(&tr_pgbits, &purge.mask)) != 0) {
 		printk(KERN_ERR "PAL_VM_PAGE_SIZE failed with status=%ld; "
@@ -206,4 +346,191 @@
 	local_cpu_data->ptce_stride[1] = ptce_info.stride[1];
 
 	local_flush_tlb_all();	/* nuke left overs from bootstrapping... */
+	status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2);
+
+	if (status) {
+		printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
+		per_cpu(ia64_tr_num, cpu) = 8;
+		return;
+	}
+	per_cpu(ia64_tr_num, cpu) = vm_info_1.pal_vm_info_1_s.max_itr_entry+1;
+	if (per_cpu(ia64_tr_num, cpu) >
+				(vm_info_1.pal_vm_info_1_s.max_dtr_entry+1))
+		per_cpu(ia64_tr_num, cpu) =
+				vm_info_1.pal_vm_info_1_s.max_dtr_entry+1;
+	if (per_cpu(ia64_tr_num, cpu) > IA64_TR_ALLOC_MAX) {
+		per_cpu(ia64_tr_num, cpu) = IA64_TR_ALLOC_MAX;
+		printk(KERN_DEBUG "TR register number exceeds IA64_TR_ALLOC_MAX!"
+			"IA64_TR_ALLOC_MAX should be extended\n");
+	}
 }
+
+/*
+ * is_tr_overlap
+ *
+ * Check overlap with inserted TRs.
+ */
+static int is_tr_overlap(struct ia64_tr_entry *p, u64 va, u64 log_size)
+{
+	u64 tr_log_size;
+	u64 tr_end;
+	u64 va_rr = ia64_get_rr(va);
+	u64 va_rid = RR_TO_RID(va_rr);
+	u64 va_end = va + (1<<log_size) - 1;
+
+	if (va_rid != RR_TO_RID(p->rr))
+		return 0;
+	tr_log_size = (p->itir & 0xff) >> 2;
+	tr_end = p->ifa + (1<<tr_log_size) - 1;
+
+	if (va > tr_end || p->ifa > va_end)
+		return 0;
+	return 1;
+
+}
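
A worked example of the interval check, using hypothetical addresses: an existing 64KB entry against a new 16KB request that falls inside it (same region ID assumed):

    u64 ifa    = 0xa000000000000000UL;	/* existing p->ifa         */
    u64 tr_end = ifa + (1UL << 16) - 1;	/* tr_log_size = 16 (64KB) */
    u64 va     = 0xa000000000008000UL;	/* new request             */
    u64 va_end = va + (1UL << 14) - 1;	/* log_size = 14 (16KB)    */

    /* Neither va > tr_end nor ifa > va_end holds, so the ranges
     * intersect and is_tr_overlap() returns 1.
     */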
+
+/*
+ * ia64_insert_tr in virtual mode. Allocate a TR slot
+ *
+ * target_mask : 0x1 : itr, 0x2 : dtr, 0x3 : idtr
+ *
+ * va	: virtual address.
+ * pte	: pte entry to be inserted.
+ * log_size: range to be covered.
+ *
+ * Return value: <0  : error number.
+ *		 >=0 : slot number allocated for the TR.
+ * Must be called with preemption disabled.
+ */
+int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
+{
+	int i, r;
+	unsigned long psr;
+	struct ia64_tr_entry *p;
+	int cpu = smp_processor_id();
+
+	r = -EINVAL;
+	/* Check overlap with existing TR entries */
+	if (target_mask & 0x1) {
+		p = &__per_cpu_idtrs[cpu][0][0];
+		for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
+								i++, p++) {
+			if (p->pte & 0x1)
+				if (is_tr_overlap(p, va, log_size)) {
+					printk(KERN_DEBUG "Overlapped Entry"
+						"Inserted for TR Reigster!!\n");
+					goto out;
+			}
+		}
+	}
+	if (target_mask & 0x2) {
+		p = &__per_cpu_idtrs[cpu][1][0];
+		for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
+								i++, p++) {
+			if (p->pte & 0x1)
+				if (is_tr_overlap(p, va, log_size)) {
+					printk(KERN_DEBUG "Overlapped Entry"
+						"Inserted for TR Reigster!!\n");
+					goto out;
+				}
+		}
+	}
+
+	for (i = IA64_TR_ALLOC_BASE; i < per_cpu(ia64_tr_num, cpu); i++) {
+		switch (target_mask & 0x3) {
+		case 1:
+			if (!(__per_cpu_idtrs[cpu][0][i].pte & 0x1))
+				goto found;
+			continue;
+		case 2:
+			if (!(__per_cpu_idtrs[cpu][1][i].pte & 0x1))
+				goto found;
+			continue;
+		case 3:
+			if (!(__per_cpu_idtrs[cpu][0][i].pte & 0x1) &&
+				!(__per_cpu_idtrs[cpu][1][i].pte & 0x1))
+				goto found;
+			continue;
+		default:
+			r = -EINVAL;
+			goto out;
+		}
+	}
+found:
+	if (i >= per_cpu(ia64_tr_num, cpu))
+		return -EBUSY;
+
+	/* Record TR info for MCA handler use! */
+	if (i > per_cpu(ia64_tr_used, cpu))
+		per_cpu(ia64_tr_used, cpu) = i;
+
+	psr = ia64_clear_ic();
+	if (target_mask & 0x1) {
+		ia64_itr(0x1, i, va, pte, log_size);
+		ia64_srlz_i();
+		p = &__per_cpu_idtrs[cpu][0][i];
+		p->ifa = va;
+		p->pte = pte;
+		p->itir = log_size << 2;
+		p->rr = ia64_get_rr(va);
+	}
+	if (target_mask & 0x2) {
+		ia64_itr(0x2, i, va, pte, log_size);
+		ia64_srlz_i();
+		p = &__per_cpu_idtrs[cpu][1][i];
+		p->ifa = va;
+		p->pte = pte;
+		p->itir = log_size << 2;
+		p->rr = ia64_get_rr(va);
+	}
+	ia64_set_psr(psr);
+	r = i;
+out:
+	return r;
+}
+EXPORT_SYMBOL_GPL(ia64_itr_entry);
+
+/*
+ * ia64_purge_tr
+ *
+ * target_mask: 0x1 : purge itr, 0x2 : purge dtr, 0x3 : purge idtr.
+ * slot: slot number to be freed.
+ *
+ * Must be called with preemption disabled.
+ */
+void ia64_ptr_entry(u64 target_mask, int slot)
+{
+	int cpu = smp_processor_id();
+	int i;
+	struct ia64_tr_entry *p;
+
+	if (slot < IA64_TR_ALLOC_BASE || slot >= per_cpu(ia64_tr_num, cpu))
+		return;
+
+	if (target_mask & 0x1) {
+		p = &__per_cpu_idtrs[cpu][0][slot];
+		if ((p->pte&0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) {
+			p->pte = 0;
+			ia64_ptr(0x1, p->ifa, p->itir>>2);
+			ia64_srlz_i();
+		}
+	}
+
+	if (target_mask & 0x2) {
+		p = &__per_cpu_idtrs[cpu][1][slot];
+		if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) {
+			p->pte = 0;
+			ia64_ptr(0x2, p->ifa, p->itir>>2);
+			ia64_srlz_i();
+		}
+	}
+
+	for (i = per_cpu(ia64_tr_used, cpu); i >= IA64_TR_ALLOC_BASE; i--) {
+		if ((__per_cpu_idtrs[cpu][0][i].pte & 0x1) ||
+				(__per_cpu_idtrs[cpu][1][i].pte & 0x1))
+			break;
+	}
+	per_cpu(ia64_tr_used, cpu) = i;
+}
+EXPORT_SYMBOL_GPL(ia64_ptr_entry);
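
A minimal caller sketch for the pair exported above. The function name, virtual address, pte and size are placeholders; a real caller builds the pte for its own mapping and picks a log_size matching the range it needs pinned:

    static int pin_mapping(u64 va, u64 pte)
    {
    	int slot;

    	preempt_disable();		/* both calls require it */
    	slot = ia64_itr_entry(0x2, va, pte, PAGE_SHIFT);  /* dtr only */
    	preempt_enable();
    	if (slot < 0)
    		return slot;		/* -EINVAL or -EBUSY */

    	/* ... use the pinned translation ... */

    	preempt_disable();
    	ia64_ptr_entry(0x2, slot);	/* purge and free the slot */
    	preempt_enable();
    	return 0;
    }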
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c
index 81785b7..9e0b164 100644
--- a/arch/ia64/sn/kernel/xpc_main.c
+++ b/arch/ia64/sn/kernel/xpc_main.c
@@ -199,7 +199,7 @@
 	struct xpc_partition *part = (struct xpc_partition *) data;
 
 
-	DBUG_ON(jiffies < part->disengage_request_timeout);
+	DBUG_ON(time_before(jiffies, part->disengage_request_timeout));
 
 	(void) xpc_partition_disengaged(part);
 
@@ -230,7 +230,7 @@
 {
 	xpc_vars->heartbeat++;
 
-	if (jiffies >= xpc_hb_check_timeout) {
+	if (time_after_eq(jiffies, xpc_hb_check_timeout)) {
 		wake_up_interruptible(&xpc_act_IRQ_wq);
 	}
 
@@ -270,7 +270,7 @@
 
 
 		/* checking of remote heartbeats is skewed by IRQ handling */
-		if (jiffies >= xpc_hb_check_timeout) {
+		if (time_after_eq(jiffies, xpc_hb_check_timeout)) {
 			dev_dbg(xpc_part, "checking remote heartbeats\n");
 			xpc_check_remote_hb();
 
@@ -305,7 +305,7 @@
 		/* wait for IRQ or timeout */
 		(void) wait_event_interruptible(xpc_act_IRQ_wq,
 			    (last_IRQ_count < atomic_read(&xpc_act_IRQ_rcvd) ||
-					jiffies >= xpc_hb_check_timeout ||
+					time_after_eq(jiffies, xpc_hb_check_timeout) ||
 						(volatile int) xpc_exiting));
 	}
 
diff --git a/arch/ia64/sn/kernel/xpc_partition.c b/arch/ia64/sn/kernel/xpc_partition.c
index 7ba4032..9e97c26 100644
--- a/arch/ia64/sn/kernel/xpc_partition.c
+++ b/arch/ia64/sn/kernel/xpc_partition.c
@@ -877,7 +877,7 @@
 	disengaged = (xpc_partition_engaged(1UL << partid) == 0);
 	if (part->disengage_request_timeout) {
 		if (!disengaged) {
-			if (jiffies < part->disengage_request_timeout) {
+			if (time_before(jiffies, part->disengage_request_timeout)) {
 				/* timelimit hasn't been reached yet */
 				return 0;
 			}
diff --git a/arch/m32r/kernel/Makefile b/arch/m32r/kernel/Makefile
index e97e26e..09200d4 100644
--- a/arch/m32r/kernel/Makefile
+++ b/arch/m32r/kernel/Makefile
@@ -5,7 +5,7 @@
 extra-y	:= head.o init_task.o vmlinux.lds
 
 obj-y	:= process.o entry.o traps.o align.o irq.o setup.o time.o \
-	m32r_ksyms.o sys_m32r.o semaphore.o signal.o ptrace.o
+	m32r_ksyms.o sys_m32r.o signal.o ptrace.o
 
 obj-$(CONFIG_SMP)		+= smp.o smpboot.o
 obj-$(CONFIG_MODULES)		+= module.o
diff --git a/arch/m32r/kernel/m32r_ksyms.c b/arch/m32r/kernel/m32r_ksyms.c
index 41a4c95..e6709fe 100644
--- a/arch/m32r/kernel/m32r_ksyms.c
+++ b/arch/m32r/kernel/m32r_ksyms.c
@@ -7,7 +7,6 @@
 #include <linux/interrupt.h>
 #include <linux/string.h>
 
-#include <asm/semaphore.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/checksum.h>
@@ -22,10 +21,6 @@
 EXPORT_SYMBOL(__ioremap);
 EXPORT_SYMBOL(iounmap);
 EXPORT_SYMBOL(kernel_thread);
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down_trylock);
 
 /* Networking helper routines. */
 /* Delay loops */
diff --git a/arch/m32r/kernel/semaphore.c b/arch/m32r/kernel/semaphore.c
deleted file mode 100644
index 940c2d3..0000000
--- a/arch/m32r/kernel/semaphore.c
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- *  linux/arch/m32r/semaphore.c
- *    orig : i386 2.6.4
- *
- *  M32R semaphore implementation.
- *
- *	Copyright (c) 2002 - 2004 Hitoshi Yamamoto
- */
-
-/*
- * i386 semaphore implementation.
- *
- * (C) Copyright 1999 Linus Torvalds
- *
- * Portions Copyright 1999 Red Hat, Inc.
- *
- *	This program is free software; you can redistribute it and/or
- *	modify it under the terms of the GNU General Public License
- *	as published by the Free Software Foundation; either version
- *	2 of the License, or (at your option) any later version.
- *
- * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
- */
-#include <linux/sched.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <asm/semaphore.h>
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to acquire the semaphore, while the "sleeping"
- * variable is a count of such acquires.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * "sleeping" and the contention routine ordering is protected
- * by the spinlock in the semaphore's waitqueue head.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-
-/*
- * Logic:
- *  - only on a boundary condition do we need to care. When we go
- *    from a negative count to a non-negative, we wake people up.
- *  - when we go from a non-negative count to a negative do we
- *    (a) synchronize with the "sleeper" count and (b) make sure
- *    that we're on the wakeup list before we synchronize so that
- *    we cannot lose wakeup events.
- */
-
-asmlinkage void __up(struct semaphore *sem)
-{
-	wake_up(&sem->wait);
-}
-
-asmlinkage void __sched __down(struct semaphore * sem)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	unsigned long flags;
-
-	tsk->state = TASK_UNINTERRUPTIBLE;
-	spin_lock_irqsave(&sem->wait.lock, flags);
-	add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-	sem->sleepers++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock in
-		 * the wait_queue_head.
-		 */
-		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-		schedule();
-
-		spin_lock_irqsave(&sem->wait.lock, flags);
-		tsk->state = TASK_UNINTERRUPTIBLE;
-	}
-	remove_wait_queue_locked(&sem->wait, &wait);
-	wake_up_locked(&sem->wait);
-	spin_unlock_irqrestore(&sem->wait.lock, flags);
-	tsk->state = TASK_RUNNING;
-}
-
-asmlinkage int __sched __down_interruptible(struct semaphore * sem)
-{
-	int retval = 0;
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	unsigned long flags;
-
-	tsk->state = TASK_INTERRUPTIBLE;
-	spin_lock_irqsave(&sem->wait.lock, flags);
-	add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-	sem->sleepers++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * With signals pending, this turns into
-		 * the trylock failure case - we won't be
-		 * sleeping, and we* can't get the lock as
-		 * it has contention. Just correct the count
-		 * and exit.
-		 */
-		if (signal_pending(current)) {
-			retval = -EINTR;
-			sem->sleepers = 0;
-			atomic_add(sleepers, &sem->count);
-			break;
-		}
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock in
-		 * wait_queue_head. The "-1" is because we're
-		 * still hoping to get the semaphore.
-		 */
-		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-		schedule();
-
-		spin_lock_irqsave(&sem->wait.lock, flags);
-		tsk->state = TASK_INTERRUPTIBLE;
-	}
-	remove_wait_queue_locked(&sem->wait, &wait);
-	wake_up_locked(&sem->wait);
-	spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-	tsk->state = TASK_RUNNING;
-	return retval;
-}
-
-/*
- * Trylock failed - make sure we correct for
- * having decremented the count.
- *
- * We could have done the trylock with a
- * single "cmpxchg" without failure cases,
- * but then it wouldn't work on a 386.
- */
-asmlinkage int __down_trylock(struct semaphore * sem)
-{
-	int sleepers;
-	unsigned long flags;
-
-	spin_lock_irqsave(&sem->wait.lock, flags);
-	sleepers = sem->sleepers + 1;
-	sem->sleepers = 0;
-
-	/*
-	 * Add "everybody else" and us into it. They aren't
-	 * playing, because we own the spinlock in the
-	 * wait_queue_head.
-	 */
-	if (!atomic_add_negative(sleepers, &sem->count)) {
-		wake_up_locked(&sem->wait);
-	}
-
-	spin_unlock_irqrestore(&sem->wait.lock, flags);
-	return 1;
-}
diff --git a/arch/m68k/kernel/Makefile b/arch/m68k/kernel/Makefile
index a806208..7a62a71 100644
--- a/arch/m68k/kernel/Makefile
+++ b/arch/m68k/kernel/Makefile
@@ -10,7 +10,7 @@
 extra-y	+= vmlinux.lds
 
 obj-y	:= entry.o process.o traps.o ints.o signal.o ptrace.o module.o \
-	   sys_m68k.o time.o semaphore.o setup.o m68k_ksyms.o devres.o
+	   sys_m68k.o time.o setup.o m68k_ksyms.o devres.o
 
 devres-y = ../../../kernel/irq/devres.o
 
diff --git a/arch/m68k/kernel/m68k_ksyms.c b/arch/m68k/kernel/m68k_ksyms.c
index 6fc69c7..d900e77 100644
--- a/arch/m68k/kernel/m68k_ksyms.c
+++ b/arch/m68k/kernel/m68k_ksyms.c
@@ -1,5 +1,4 @@
 #include <linux/module.h>
-#include <asm/semaphore.h>
 
 asmlinkage long long __ashldi3 (long long, int);
 asmlinkage long long __ashrdi3 (long long, int);
@@ -15,8 +14,3 @@
 EXPORT_SYMBOL(__lshrdi3);
 EXPORT_SYMBOL(__muldi3);
 
-EXPORT_SYMBOL(__down_failed);
-EXPORT_SYMBOL(__down_failed_interruptible);
-EXPORT_SYMBOL(__down_failed_trylock);
-EXPORT_SYMBOL(__up_wakeup);
-
diff --git a/arch/m68k/kernel/semaphore.c b/arch/m68k/kernel/semaphore.c
deleted file mode 100644
index d12cbbf..0000000
--- a/arch/m68k/kernel/semaphore.c
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- *  Generic semaphore code. Buyer beware. Do your own
- * specific changes in <asm/semaphore-helper.h>
- */
-
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <asm/semaphore-helper.h>
-
-#ifndef CONFIG_RMW_INSNS
-spinlock_t semaphore_wake_lock;
-#endif
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to sleep, while the "waking" variable is
- * incremented when the "up()" code goes to wake up waiting
- * processes.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * waking_non_zero() (from asm/semaphore.h) must execute
- * atomically.
- *
- * When __up() is called, the count was negative before
- * incrementing it, and we need to wake up somebody.
- *
- * This routine adds one to the count of processes that need to
- * wake up and exit.  ALL waiting processes actually wake up but
- * only the one that gets to the "waking" field first will gate
- * through and acquire the semaphore.  The others will go back
- * to sleep.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-void __up(struct semaphore *sem)
-{
-	wake_one_more(sem);
-	wake_up(&sem->wait);
-}
-
-/*
- * Perform the "down" function.  Return zero for semaphore acquired,
- * return negative for signalled out of the function.
- *
- * If called from __down, the return is ignored and the wait loop is
- * not interruptible.  This means that a task waiting on a semaphore
- * using "down()" cannot be killed until someone does an "up()" on
- * the semaphore.
- *
- * If called from __down_interruptible, the return value gets checked
- * upon return.  If the return value is negative then the task continues
- * with the negative value in the return register (it can be tested by
- * the caller).
- *
- * Either form may be used in conjunction with "up()".
- *
- */
-
-
-#define DOWN_HEAD(task_state)						\
-									\
-									\
-	current->state = (task_state);					\
-	add_wait_queue(&sem->wait, &wait);				\
-									\
-	/*								\
-	 * Ok, we're set up.  sem->count is known to be less than zero	\
-	 * so we must wait.						\
-	 *								\
-	 * We can let go the lock for purposes of waiting.		\
-	 * We re-acquire it after awaking so as to protect		\
-	 * all semaphore operations.					\
-	 *								\
-	 * If "up()" is called before we call waking_non_zero() then	\
-	 * we will catch it right away.  If it is called later then	\
-	 * we will have to go through a wakeup cycle to catch it.	\
-	 *								\
-	 * Multiple waiters contend for the semaphore lock to see	\
-	 * who gets to gate through and who has to wait some more.	\
-	 */								\
-	for (;;) {
-
-#define DOWN_TAIL(task_state)			\
-		current->state = (task_state);	\
-	}					\
-	current->state = TASK_RUNNING;		\
-	remove_wait_queue(&sem->wait, &wait);
-
-void __sched __down(struct semaphore * sem)
-{
-	DECLARE_WAITQUEUE(wait, current);
-
-	DOWN_HEAD(TASK_UNINTERRUPTIBLE)
-	if (waking_non_zero(sem))
-		break;
-	schedule();
-	DOWN_TAIL(TASK_UNINTERRUPTIBLE)
-}
-
-int __sched __down_interruptible(struct semaphore * sem)
-{
-	DECLARE_WAITQUEUE(wait, current);
-	int ret = 0;
-
-	DOWN_HEAD(TASK_INTERRUPTIBLE)
-
-	ret = waking_non_zero_interruptible(sem, current);
-	if (ret)
-	{
-		if (ret == 1)
-			/* ret != 0 only if we get interrupted -arca */
-			ret = 0;
-		break;
-	}
-	schedule();
-	DOWN_TAIL(TASK_INTERRUPTIBLE)
-	return ret;
-}
-
-int __down_trylock(struct semaphore * sem)
-{
-	return waking_non_zero_trylock(sem);
-}
diff --git a/arch/m68k/lib/Makefile b/arch/m68k/lib/Makefile
index 6bbf19f..a18af09 100644
--- a/arch/m68k/lib/Makefile
+++ b/arch/m68k/lib/Makefile
@@ -5,4 +5,4 @@
 EXTRA_AFLAGS := -traditional
 
 lib-y	:= ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
-	   checksum.o string.o semaphore.o uaccess.o
+	   checksum.o string.o uaccess.o
diff --git a/arch/m68k/lib/semaphore.S b/arch/m68k/lib/semaphore.S
deleted file mode 100644
index 0215624c..0000000
--- a/arch/m68k/lib/semaphore.S
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- *  linux/arch/m68k/lib/semaphore.S
- *
- *  Copyright (C) 1996  Linus Torvalds
- *
- *  m68k version by Andreas Schwab
- */
-
-#include <linux/linkage.h>
-#include <asm/semaphore.h>
-
-/*
- * The semaphore operations have a special calling sequence that
- * allow us to do a simpler in-line version of them. These routines
- * need to convert that sequence back into the C sequence when
- * there is contention on the semaphore.
- */
-ENTRY(__down_failed)
-	moveml %a0/%d0/%d1,-(%sp)
-	movel %a1,-(%sp)
-	jbsr __down
-	movel (%sp)+,%a1
-	moveml (%sp)+,%a0/%d0/%d1
-	rts
-
-ENTRY(__down_failed_interruptible)
-	movel %a0,-(%sp)
-	movel %d1,-(%sp)
-	movel %a1,-(%sp)
-	jbsr __down_interruptible
-	movel (%sp)+,%a1
-	movel (%sp)+,%d1
-	movel (%sp)+,%a0
-	rts
-
-ENTRY(__down_failed_trylock)
-	movel %a0,-(%sp)
-	movel %d1,-(%sp)
-	movel %a1,-(%sp)
-	jbsr __down_trylock
-	movel (%sp)+,%a1
-	movel (%sp)+,%d1
-	movel (%sp)+,%a0
-	rts
-
-ENTRY(__up_wakeup)
-	moveml %a0/%d0/%d1,-(%sp)
-	movel %a1,-(%sp)
-	jbsr __up
-	movel (%sp)+,%a1
-	moveml (%sp)+,%a0/%d0/%d1
-	rts
-
diff --git a/arch/m68knommu/kernel/Makefile b/arch/m68knommu/kernel/Makefile
index 1524b39..f0eab3d 100644
--- a/arch/m68knommu/kernel/Makefile
+++ b/arch/m68knommu/kernel/Makefile
@@ -5,7 +5,7 @@
 extra-y := vmlinux.lds
 
 obj-y += dma.o entry.o init_task.o irq.o m68k_ksyms.o process.o ptrace.o \
-	 semaphore.o setup.o signal.o syscalltable.o sys_m68k.o time.o traps.o
+	 setup.o signal.o syscalltable.o sys_m68k.o time.o traps.o
 
 obj-$(CONFIG_MODULES)	+= module.o
 obj-$(CONFIG_COMEMPCI)	+= comempci.o
diff --git a/arch/m68knommu/kernel/m68k_ksyms.c b/arch/m68knommu/kernel/m68k_ksyms.c
index 53fad14..39fe0a7 100644
--- a/arch/m68knommu/kernel/m68k_ksyms.c
+++ b/arch/m68knommu/kernel/m68k_ksyms.c
@@ -13,7 +13,6 @@
 #include <asm/pgalloc.h>
 #include <asm/irq.h>
 #include <asm/io.h>
-#include <asm/semaphore.h>
 #include <asm/checksum.h>
 #include <asm/current.h>
 
@@ -39,11 +38,6 @@
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(memset);
 
-EXPORT_SYMBOL(__down_failed);
-EXPORT_SYMBOL(__down_failed_interruptible);
-EXPORT_SYMBOL(__down_failed_trylock);
-EXPORT_SYMBOL(__up_wakeup);
-
 /*
  * libgcc functions - functions that are used internally by the
  * compiler...  (prototypes are not correct though, but that
diff --git a/arch/m68knommu/kernel/semaphore.c b/arch/m68knommu/kernel/semaphore.c
deleted file mode 100644
index bce2bc7..0000000
--- a/arch/m68knommu/kernel/semaphore.c
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- *  Generic semaphore code. Buyer beware. Do your own
- * specific changes in <asm/semaphore-helper.h>
- */
-
-#include <linux/sched.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <asm/semaphore-helper.h>
-
-#ifndef CONFIG_RMW_INSNS
-spinlock_t semaphore_wake_lock;
-#endif
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to sleep, while the "waking" variable is
- * incremented when the "up()" code goes to wake up waiting
- * processes.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * waking_non_zero() (from asm/semaphore.h) must execute
- * atomically.
- *
- * When __up() is called, the count was negative before
- * incrementing it, and we need to wake up somebody.
- *
- * This routine adds one to the count of processes that need to
- * wake up and exit.  ALL waiting processes actually wake up but
- * only the one that gets to the "waking" field first will gate
- * through and acquire the semaphore.  The others will go back
- * to sleep.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-void __up(struct semaphore *sem)
-{
-	wake_one_more(sem);
-	wake_up(&sem->wait);
-}
-
-/*
- * Perform the "down" function.  Return zero for semaphore acquired,
- * return negative for signalled out of the function.
- *
- * If called from __down, the return is ignored and the wait loop is
- * not interruptible.  This means that a task waiting on a semaphore
- * using "down()" cannot be killed until someone does an "up()" on
- * the semaphore.
- *
- * If called from __down_interruptible, the return value gets checked
- * upon return.  If the return value is negative then the task continues
- * with the negative value in the return register (it can be tested by
- * the caller).
- *
- * Either form may be used in conjunction with "up()".
- *
- */
-
-
-#define DOWN_HEAD(task_state)						\
-									\
-									\
-	current->state = (task_state);					\
-	add_wait_queue(&sem->wait, &wait);				\
-									\
-	/*								\
-	 * Ok, we're set up.  sem->count is known to be less than zero	\
-	 * so we must wait.						\
-	 *								\
-	 * We can let go the lock for purposes of waiting.		\
-	 * We re-acquire it after awaking so as to protect		\
-	 * all semaphore operations.					\
-	 *								\
-	 * If "up()" is called before we call waking_non_zero() then	\
-	 * we will catch it right away.  If it is called later then	\
-	 * we will have to go through a wakeup cycle to catch it.	\
-	 *								\
-	 * Multiple waiters contend for the semaphore lock to see	\
-	 * who gets to gate through and who has to wait some more.	\
-	 */								\
-	for (;;) {
-
-#define DOWN_TAIL(task_state)			\
-		current->state = (task_state);	\
-	}					\
-	current->state = TASK_RUNNING;		\
-	remove_wait_queue(&sem->wait, &wait);
-
-void __sched __down(struct semaphore * sem)
-{
-	DECLARE_WAITQUEUE(wait, current);
-
-	DOWN_HEAD(TASK_UNINTERRUPTIBLE)
-	if (waking_non_zero(sem))
-		break;
-	schedule();
-	DOWN_TAIL(TASK_UNINTERRUPTIBLE)
-}
-
-int __sched __down_interruptible(struct semaphore * sem)
-{
-	DECLARE_WAITQUEUE(wait, current);
-	int ret = 0;
-
-	DOWN_HEAD(TASK_INTERRUPTIBLE)
-
-	ret = waking_non_zero_interruptible(sem, current);
-	if (ret)
-	{
-		if (ret == 1)
-			/* ret != 0 only if we get interrupted -arca */
-			ret = 0;
-		break;
-	}
-	schedule();
-	DOWN_TAIL(TASK_INTERRUPTIBLE)
-	return ret;
-}
-
-int __down_trylock(struct semaphore * sem)
-{
-	return waking_non_zero_trylock(sem);
-}
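
The count/waking scheme described in the comments above disappears with this
file, so a compact illustration is worth keeping.  Below is a hedged
user-space sketch of the gate: POSIX threads stand in for the kernel wait
queue, the mutex replaces the lock-free fast path the real code was built
around, and gate_sem/gate_down/gate_up are hypothetical names, not the
kernel API.

#include <pthread.h>

struct gate_sem {
	int count;			/* negative: -count tasks are sleeping */
	int waking;			/* wakeups granted, not yet consumed */
	pthread_mutex_t lock;
	pthread_cond_t wait;
};

static void gate_down(struct gate_sem *s)
{
	pthread_mutex_lock(&s->lock);
	if (--s->count < 0) {
		/* Contended: everyone wakes on up(), but only the first
		 * to consume a "waking" token gates through. */
		while (s->waking == 0)
			pthread_cond_wait(&s->wait, &s->lock);
		s->waking--;
	}
	pthread_mutex_unlock(&s->lock);
}

static void gate_up(struct gate_sem *s)
{
	pthread_mutex_lock(&s->lock);
	if (s->count++ < 0) {
		s->waking++;
		pthread_cond_broadcast(&s->wait);
	}
	pthread_mutex_unlock(&s->lock);
}
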
diff --git a/arch/m68knommu/lib/Makefile b/arch/m68knommu/lib/Makefile
index e051a79..d94d709 100644
--- a/arch/m68knommu/lib/Makefile
+++ b/arch/m68knommu/lib/Makefile
@@ -4,4 +4,4 @@
 
 lib-y	:= ashldi3.o ashrdi3.o lshrdi3.o \
 	   muldi3.o mulsi3.o divsi3.o udivsi3.o modsi3.o umodsi3.o \
-	   checksum.o semaphore.o memcpy.o memset.o delay.o
+	   checksum.o memcpy.o memset.o delay.o
diff --git a/arch/m68knommu/lib/semaphore.S b/arch/m68knommu/lib/semaphore.S
deleted file mode 100644
index 87c7460..0000000
--- a/arch/m68knommu/lib/semaphore.S
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- *  linux/arch/m68k/lib/semaphore.S
- *
- *  Copyright (C) 1996  Linus Torvalds
- *
- *  m68k version by Andreas Schwab
- *
- *  MAR/1999 -- modified to support ColdFire (gerg@snapgear.com)
- */
-
-#include <linux/linkage.h>
-#include <asm/semaphore.h>
-
-/*
- * "down_failed" is called with the eventual return address
- * in %a0, and the address of the semaphore in %a1. We need
- * to increment the number of waiters on the semaphore,
- * call "__down()", and then eventually return to try again.
- */
-ENTRY(__down_failed)
-#ifdef CONFIG_COLDFIRE
-	subl #12,%sp
-	moveml %a0/%d0/%d1,(%sp)
-#else
-	moveml %a0/%d0/%d1,-(%sp)
-#endif
-	movel %a1,-(%sp)
-	jbsr __down
-	movel (%sp)+,%a1
-	movel (%sp)+,%d0
-	movel (%sp)+,%d1
-	rts
-
-ENTRY(__down_failed_interruptible)
-	movel %a0,-(%sp)
-	movel %d1,-(%sp)
-	movel %a1,-(%sp)
-	jbsr __down_interruptible
-	movel (%sp)+,%a1
-	movel (%sp)+,%d1
-	rts
-
-ENTRY(__up_wakeup)
-#ifdef CONFIG_COLDFIRE
-	subl #12,%sp
-	moveml %a0/%d0/%d1,(%sp)
-#else
-	moveml %a0/%d0/%d1,-(%sp)
-#endif
-	movel %a1,-(%sp)
-	jbsr __up
-	movel (%sp)+,%a1
-	movel (%sp)+,%d0
-	movel (%sp)+,%d1
-	rts
-
-ENTRY(__down_failed_trylock)
-	movel %a0,-(%sp)
-	movel %d1,-(%sp)
-	movel %a1,-(%sp)
-	jbsr __down_trylock
-	movel (%sp)+,%a1
-	movel (%sp)+,%d1
-	movel (%sp)+,%a0
-	rts
-
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 9e78e1a..6fcdb6f 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -5,7 +5,7 @@
 extra-y		:= head.o init_task.o vmlinux.lds
 
 obj-y		+= cpu-probe.o branch.o entry.o genex.o irq.o process.o \
-		   ptrace.o reset.o semaphore.o setup.o signal.o syscall.o \
+		   ptrace.o reset.o setup.o signal.o syscall.o \
 		   time.o topology.o traps.o unaligned.o
 
 obj-$(CONFIG_CEVT_BCM1480)	+= cevt-bcm1480.o
diff --git a/arch/mips/kernel/semaphore.c b/arch/mips/kernel/semaphore.c
deleted file mode 100644
index 1265358..0000000
--- a/arch/mips/kernel/semaphore.c
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * MIPS-specific semaphore code.
- *
- * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
- * Copyright (C) 2004 Ralf Baechle <ralf@linux-mips.org>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * April 2001 - Reworked by Paul Mackerras <paulus@samba.org>
- * to eliminate the SMP races in the old version between the updates
- * of `count' and `waking'.  Now we use negative `count' values to
- * indicate that some process(es) are waiting for the semaphore.
- */
-
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <asm/atomic.h>
-#include <asm/cpu-features.h>
-#include <asm/errno.h>
-#include <asm/semaphore.h>
-#include <asm/war.h>
-/*
- * Atomically update sem->count.
- * This does the equivalent of the following:
- *
- *	old_count = sem->count;
- *	tmp = MAX(old_count, 0) + incr;
- *	sem->count = tmp;
- *	return old_count;
- *
- * On machines without lld/scd we need a spinlock to make the manipulation of
- * sem->count and sem->waking atomic.  Scalability isn't an issue because
- * this lock is only used on UP, where it is just an empty variable.
- */
-static inline int __sem_update_count(struct semaphore *sem, int incr)
-{
-	int old_count, tmp;
-
-	if (cpu_has_llsc && R10000_LLSC_WAR) {
-		__asm__ __volatile__(
-		"	.set	mips3					\n"
-		"1:	ll	%0, %2		# __sem_update_count	\n"
-		"	sra	%1, %0, 31				\n"
-		"	not	%1					\n"
-		"	and	%1, %0, %1				\n"
-		"	addu	%1, %1, %3				\n"
-		"	sc	%1, %2					\n"
-		"	beqzl	%1, 1b					\n"
-		"	.set	mips0					\n"
-		: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
-		: "r" (incr), "m" (sem->count));
-	} else if (cpu_has_llsc) {
-		__asm__ __volatile__(
-		"	.set	mips3					\n"
-		"1:	ll	%0, %2		# __sem_update_count	\n"
-		"	sra	%1, %0, 31				\n"
-		"	not	%1					\n"
-		"	and	%1, %0, %1				\n"
-		"	addu	%1, %1, %3				\n"
-		"	sc	%1, %2					\n"
-		"	beqz	%1, 1b					\n"
-		"	.set	mips0					\n"
-		: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
-		: "r" (incr), "m" (sem->count));
-	} else {
-		static DEFINE_SPINLOCK(semaphore_lock);
-		unsigned long flags;
-
-		spin_lock_irqsave(&semaphore_lock, flags);
-		old_count = atomic_read(&sem->count);
-		tmp = max_t(int, old_count, 0) + incr;
-		atomic_set(&sem->count, tmp);
-		spin_unlock_irqrestore(&semaphore_lock, flags);
-	}
-
-	return old_count;
-}
-
-void __up(struct semaphore *sem)
-{
-	/*
-	 * Note that we incremented count in up() before we came here,
-	 * but that was ineffective since the result was <= 0, and
-	 * any negative value of count is equivalent to 0.
-	 * This ends up setting count to 1, unless count is now > 0
-	 * (i.e. because some other cpu has called up() in the meantime),
-	 * in which case we just increment count.
-	 */
-	__sem_update_count(sem, 1);
-	wake_up(&sem->wait);
-}
-
-EXPORT_SYMBOL(__up);
-
-/*
- * Note that when we come in to __down or __down_interruptible,
- * we have already decremented count, but that decrement was
- * ineffective since the result was < 0, and any negative value
- * of count is equivalent to 0.
- * Thus it is only when we decrement count from some value > 0
- * that we have actually got the semaphore.
- */
-void __sched __down(struct semaphore *sem)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-
-	__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-	add_wait_queue_exclusive(&sem->wait, &wait);
-
-	/*
-	 * Try to get the semaphore.  If the count is > 0, then we've
-	 * got the semaphore; we decrement count and exit the loop.
-	 * If the count is 0 or negative, we set it to -1, indicating
-	 * that we are asleep, and then sleep.
-	 */
-	while (__sem_update_count(sem, -1) <= 0) {
-		schedule();
-		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-	}
-	remove_wait_queue(&sem->wait, &wait);
-	__set_task_state(tsk, TASK_RUNNING);
-
-	/*
-	 * If there are any more sleepers, wake one of them up so
-	 * that it can either get the semaphore, or set count to -1
-	 * indicating that there are still processes sleeping.
-	 */
-	wake_up(&sem->wait);
-}
-
-EXPORT_SYMBOL(__down);
-
-int __sched __down_interruptible(struct semaphore * sem)
-{
-	int retval = 0;
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-
-	__set_task_state(tsk, TASK_INTERRUPTIBLE);
-	add_wait_queue_exclusive(&sem->wait, &wait);
-
-	while (__sem_update_count(sem, -1) <= 0) {
-		if (signal_pending(current)) {
-			/*
-			 * A signal is pending - give up trying.
-			 * Set sem->count to 0 if it is negative,
-			 * since we are no longer sleeping.
-			 */
-			__sem_update_count(sem, 0);
-			retval = -EINTR;
-			break;
-		}
-		schedule();
-		set_task_state(tsk, TASK_INTERRUPTIBLE);
-	}
-	remove_wait_queue(&sem->wait, &wait);
-	__set_task_state(tsk, TASK_RUNNING);
-
-	wake_up(&sem->wait);
-	return retval;
-}
-
-EXPORT_SYMBOL(__down_interruptible);
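
The primitive above is the heart of the MIPS (and PowerPC) variant: one
ll/sc sequence computes max(count, 0) + incr, so a decrement only "takes"
when it starts from a positive count.  The same semantics in portable C11
atomics, as a sketch only (sem_update_count is a hypothetical name;
compare-and-swap stands in for ll/sc):

#include <stdatomic.h>

/* old = count; count = max(old, 0) + incr; return old; */
static int sem_update_count(atomic_int *count, int incr)
{
	int old = atomic_load(count);
	int new;

	do {
		new = (old > 0 ? old : 0) + incr;
	} while (!atomic_compare_exchange_weak(count, &old, new));

	return old;
}

With this, __down() reduces to retrying sem_update_count(&sem->count, -1)
until it returns a positive old value, sleeping between attempts.
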
diff --git a/arch/mn10300/kernel/Makefile b/arch/mn10300/kernel/Makefile
index ef07c95..23f2ab6 100644
--- a/arch/mn10300/kernel/Makefile
+++ b/arch/mn10300/kernel/Makefile
@@ -3,7 +3,7 @@
 #
 extra-y := head.o init_task.o vmlinux.lds
 
-obj-y   := process.o semaphore.o signal.o entry.o fpu.o traps.o irq.o \
+obj-y   := process.o signal.o entry.o fpu.o traps.o irq.o \
 	   ptrace.o setup.o time.o sys_mn10300.o io.o kthread.o \
 	   switch_to.o mn10300_ksyms.o kernel_execve.o
 
diff --git a/arch/mn10300/kernel/semaphore.c b/arch/mn10300/kernel/semaphore.c
deleted file mode 100644
index 9153c40..0000000
--- a/arch/mn10300/kernel/semaphore.c
+++ /dev/null
@@ -1,149 +0,0 @@
-/* MN10300 Semaphore implementation
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-#include <linux/sched.h>
-#include <linux/module.h>
-#include <asm/semaphore.h>
-
-struct sem_waiter {
-	struct list_head	list;
-	struct task_struct	*task;
-};
-
-#if SEMAPHORE_DEBUG
-void semtrace(struct semaphore *sem, const char *str)
-{
-	if (sem->debug)
-		printk(KERN_DEBUG "[%d] %s({%d,%d})\n",
-		       current->pid,
-		       str,
-		       atomic_read(&sem->count),
-		       list_empty(&sem->wait_list) ? 0 : 1);
-}
-#else
-#define semtrace(SEM, STR) do { } while (0)
-#endif
-
-/*
- * wait for a token to be granted from a semaphore
- * - entered with lock held and interrupts disabled
- */
-void __down(struct semaphore *sem, unsigned long flags)
-{
-	struct task_struct *tsk = current;
-	struct sem_waiter waiter;
-
-	semtrace(sem, "Entering __down");
-
-	/* set up my own style of waitqueue */
-	waiter.task = tsk;
-	get_task_struct(tsk);
-
-	list_add_tail(&waiter.list, &sem->wait_list);
-
-	/* we don't need to touch the semaphore struct anymore */
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-	/* wait to be given the semaphore */
-	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-
-	for (;;) {
-		if (!waiter.task)
-			break;
-		schedule();
-		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-	}
-
-	tsk->state = TASK_RUNNING;
-	semtrace(sem, "Leaving __down");
-}
-EXPORT_SYMBOL(__down);
-
-/*
- * interruptibly wait for a token to be granted from a semaphore
- * - entered with lock held and interrupts disabled
- */
-int __down_interruptible(struct semaphore *sem, unsigned long flags)
-{
-	struct task_struct *tsk = current;
-	struct sem_waiter waiter;
-	int ret;
-
-	semtrace(sem, "Entering __down_interruptible");
-
-	/* set up my own style of waitqueue */
-	waiter.task = tsk;
-	get_task_struct(tsk);
-
-	list_add_tail(&waiter.list, &sem->wait_list);
-
-	/* we don't need to touch the semaphore struct anymore */
-	set_task_state(tsk, TASK_INTERRUPTIBLE);
-
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-	/* wait to be given the semaphore */
-	ret = 0;
-	for (;;) {
-		if (!waiter.task)
-			break;
-		if (unlikely(signal_pending(current)))
-			goto interrupted;
-		schedule();
-		set_task_state(tsk, TASK_INTERRUPTIBLE);
-	}
-
- out:
-	tsk->state = TASK_RUNNING;
-	semtrace(sem, "Leaving __down_interruptible");
-	return ret;
-
- interrupted:
-	spin_lock_irqsave(&sem->wait_lock, flags);
-	list_del(&waiter.list);
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-	ret = 0;
-	if (!waiter.task) {
-		put_task_struct(current);
-		ret = -EINTR;
-	}
-	goto out;
-}
-EXPORT_SYMBOL(__down_interruptible);
-
-/*
- * release a single token back to a semaphore
- * - entered with lock held and interrupts disabled
- */
-void __up(struct semaphore *sem)
-{
-	struct task_struct *tsk;
-	struct sem_waiter *waiter;
-
-	semtrace(sem, "Entering __up");
-
-	/* grant the token to the process at the front of the queue */
-	waiter = list_entry(sem->wait_list.next, struct sem_waiter, list);
-
-	/* We must be careful not to touch 'waiter' after we set ->task = NULL.
- * It is allocated on the waiter's stack and may become invalid at
-	 * any time after that point (due to a wakeup from another source).
-	 */
-	list_del_init(&waiter->list);
-	tsk = waiter->task;
-	smp_mb();
-	waiter->task = NULL;
-	wake_up_process(tsk);
-	put_task_struct(tsk);
-
-	semtrace(sem, "Leaving __up");
-}
-EXPORT_SYMBOL(__up);
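
The comment in __up() above carries the one subtle rule of this design: the
waiter record lives on the sleeper's stack, so the waker must copy the task
pointer out, publish NULL with a barrier, and never touch the record again.
A minimal C11 rendering of that hand-off (grant_token and task_ref are
hypothetical stand-ins, not kernel symbols):

#include <stdatomic.h>

struct task_ref;			/* opaque stand-in for task_struct */

struct waiter {
	_Atomic(struct task_ref *) task;
};

static struct task_ref *grant_token(struct waiter *w)
{
	struct task_ref *t = atomic_load(&w->task);

	/* Release: the list unlink must be visible before the sleeper
	 * can observe NULL and let its stack frame die. */
	atomic_store_explicit(&w->task, NULL, memory_order_release);

	/* 'w' may already be invalid here -- use only the copy. */
	return t;
}
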
diff --git a/arch/parisc/kernel/Makefile b/arch/parisc/kernel/Makefile
index 27827bc..1f6585a 100644
--- a/arch/parisc/kernel/Makefile
+++ b/arch/parisc/kernel/Makefile
@@ -9,7 +9,7 @@
 
 obj-y	     	:= cache.o pacache.o setup.o traps.o time.o irq.o \
 		   pa7300lc.o syscall.o entry.o sys_parisc.o firmware.o \
-		   ptrace.o hardware.o inventory.o drivers.o semaphore.o \
+		   ptrace.o hardware.o inventory.o drivers.o \
 		   signal.o hpmc.o real2.o parisc_ksyms.o unaligned.o \
 		   process.o processor.o pdc_cons.o pdc_chassis.o unwind.o \
 		   topology.o
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
index 7aca704..5b7fc4a 100644
--- a/arch/parisc/kernel/parisc_ksyms.c
+++ b/arch/parisc/kernel/parisc_ksyms.c
@@ -69,11 +69,6 @@
 EXPORT_SYMBOL(memcpy_fromio);
 EXPORT_SYMBOL(memset_io);
 
-#include <asm/semaphore.h>
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__down);
-
 extern void $$divI(void);
 extern void $$divU(void);
 extern void $$remI(void);
diff --git a/arch/parisc/kernel/semaphore.c b/arch/parisc/kernel/semaphore.c
deleted file mode 100644
index ee806bc..0000000
--- a/arch/parisc/kernel/semaphore.c
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Semaphore implementation Copyright (c) 2001 Matthew Wilcox, Hewlett-Packard
- */
-
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-
-/*
- * Semaphores are complex as we wish to avoid using two variables.
- * `count' has multiple roles, depending on its value.  If it is positive
- * or zero, there are no waiters.  The functions here will never be
- * called; see <asm/semaphore.h>
- *
- * When count is -1 it indicates there is at least one task waiting
- * for the semaphore.
- *
- * When count is less than that, there are '- count - 1' wakeups
- * pending, i.e. if it has value -3, there are 2 wakeups pending.
- *
- * Note that these functions are only called when there is contention
- * on the lock, and as such all this is the "non-critical" part of the
- * whole semaphore business. The critical part is the inline stuff in
- * <asm/semaphore.h> where we want to avoid any extra jumps and calls.
- */
-void __up(struct semaphore *sem)
-{
-	sem->count--;
-	wake_up(&sem->wait);
-}
-
-#define wakers(count) (-1 - count)
-
-#define DOWN_HEAD							\
-	int ret = 0;							\
-	DECLARE_WAITQUEUE(wait, current);				\
-									\
-	/* Note that someone is waiting */				\
-	if (sem->count == 0)						\
-		sem->count = -1;					\
-									\
-	/* protected by the sentry still -- use unlocked version */	\
-	wait.flags = WQ_FLAG_EXCLUSIVE;					\
-	__add_wait_queue_tail(&sem->wait, &wait);			\
- lost_race:								\
-	spin_unlock_irq(&sem->sentry);					\
-
-#define DOWN_TAIL							\
-	spin_lock_irq(&sem->sentry);					\
-	if (wakers(sem->count) == 0 && ret == 0)			\
-		goto lost_race;	/* Someone stole our wakeup */		\
-	__remove_wait_queue(&sem->wait, &wait);				\
-	current->state = TASK_RUNNING;					\
-	if (!waitqueue_active(&sem->wait) && (sem->count < 0))		\
-		sem->count = wakers(sem->count);
-
-#define UPDATE_COUNT							\
-	sem->count += (sem->count < 0) ? 1 : -1;
-
-
-void __sched __down(struct semaphore * sem)
-{
-	DOWN_HEAD
-
-	for(;;) {
-		set_task_state(current, TASK_UNINTERRUPTIBLE);
-		/* we can _read_ this without the sentry */
-		if (sem->count != -1)
-			break;
- 		schedule();
- 	}
-
-	DOWN_TAIL
-	UPDATE_COUNT
-}
-
-int __sched __down_interruptible(struct semaphore * sem)
-{
-	DOWN_HEAD
-
-	for(;;) {
-		set_task_state(current, TASK_INTERRUPTIBLE);
-		/* we can _read_ this without the sentry */
-		if (sem->count != -1)
-			break;
-
-		if (signal_pending(current)) {
-			ret = -EINTR;
-			break;
-		}
-		schedule();
-	}
-
-	DOWN_TAIL
-
-	if (!ret) {
-		UPDATE_COUNT
-	}
-
-	return ret;
-}
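
The count encoding above ('- count - 1' wakeups pending) is compact enough
to verify mechanically; this throwaway program, assuming the same wakers()
definition, does so:

#include <assert.h>

#define wakers(count) (-1 - (count))

int main(void)
{
	assert(wakers(-1) == 0);	/* waiters exist, no wakeup pending */
	assert(wakers(-3) == 2);	/* two wakeups pending, as documented */
	return 0;
}
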
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index c1baf9d..b9dbfff 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -12,7 +12,7 @@
 CFLAGS_btext.o		+= -fPIC
 endif
 
-obj-y				:= semaphore.o cputable.o ptrace.o syscalls.o \
+obj-y				:= cputable.o ptrace.o syscalls.o \
 				   irq.o align.o signal_32.o pmc.o vdso.o \
 				   init_task.o process.o systbl.o idle.o \
 				   signal.o
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 9c98424..65d14e6 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -15,7 +15,6 @@
 #include <linux/bitops.h>
 
 #include <asm/page.h>
-#include <asm/semaphore.h>
 #include <asm/processor.h>
 #include <asm/cacheflush.h>
 #include <asm/uaccess.h>
diff --git a/arch/powerpc/kernel/semaphore.c b/arch/powerpc/kernel/semaphore.c
deleted file mode 100644
index 2f8c3c9..0000000
--- a/arch/powerpc/kernel/semaphore.c
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * PowerPC-specific semaphore code.
- *
- * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * April 2001 - Reworked by Paul Mackerras <paulus@samba.org>
- * to eliminate the SMP races in the old version between the updates
- * of `count' and `waking'.  Now we use negative `count' values to
- * indicate that some process(es) are waiting for the semaphore.
- */
-
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/module.h>
-
-#include <asm/atomic.h>
-#include <asm/semaphore.h>
-#include <asm/errno.h>
-
-/*
- * Atomically update sem->count.
- * This does the equivalent of the following:
- *
- *	old_count = sem->count;
- *	tmp = MAX(old_count, 0) + incr;
- *	sem->count = tmp;
- *	return old_count;
- */
-static inline int __sem_update_count(struct semaphore *sem, int incr)
-{
-	int old_count, tmp;
-
-	__asm__ __volatile__("\n"
-"1:	lwarx	%0,0,%3\n"
-"	srawi	%1,%0,31\n"
-"	andc	%1,%0,%1\n"
-"	add	%1,%1,%4\n"
-	PPC405_ERR77(0,%3)
-"	stwcx.	%1,0,%3\n"
-"	bne	1b"
-	: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
-	: "r" (&sem->count), "r" (incr), "m" (sem->count)
-	: "cc");
-
-	return old_count;
-}
-
-void __up(struct semaphore *sem)
-{
-	/*
-	 * Note that we incremented count in up() before we came here,
-	 * but that was ineffective since the result was <= 0, and
-	 * any negative value of count is equivalent to 0.
-	 * This ends up setting count to 1, unless count is now > 0
-	 * (i.e. because some other cpu has called up() in the meantime),
-	 * in which case we just increment count.
-	 */
-	__sem_update_count(sem, 1);
-	wake_up(&sem->wait);
-}
-EXPORT_SYMBOL(__up);
-
-/*
- * Note that when we come in to __down or __down_interruptible,
- * we have already decremented count, but that decrement was
- * ineffective since the result was < 0, and any negative value
- * of count is equivalent to 0.
- * Thus it is only when we decrement count from some value > 0
- * that we have actually got the semaphore.
- */
-void __sched __down(struct semaphore *sem)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-
-	__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-	add_wait_queue_exclusive(&sem->wait, &wait);
-
-	/*
-	 * Try to get the semaphore.  If the count is > 0, then we've
-	 * got the semaphore; we decrement count and exit the loop.
-	 * If the count is 0 or negative, we set it to -1, indicating
-	 * that we are asleep, and then sleep.
-	 */
-	while (__sem_update_count(sem, -1) <= 0) {
-		schedule();
-		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-	}
-	remove_wait_queue(&sem->wait, &wait);
-	__set_task_state(tsk, TASK_RUNNING);
-
-	/*
-	 * If there are any more sleepers, wake one of them up so
-	 * that it can either get the semaphore, or set count to -1
-	 * indicating that there are still processes sleeping.
-	 */
-	wake_up(&sem->wait);
-}
-EXPORT_SYMBOL(__down);
-
-int __sched __down_interruptible(struct semaphore * sem)
-{
-	int retval = 0;
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-
-	__set_task_state(tsk, TASK_INTERRUPTIBLE);
-	add_wait_queue_exclusive(&sem->wait, &wait);
-
-	while (__sem_update_count(sem, -1) <= 0) {
-		if (signal_pending(current)) {
-			/*
-			 * A signal is pending - give up trying.
-			 * Set sem->count to 0 if it is negative,
-			 * since we are no longer sleeping.
-			 */
-			__sem_update_count(sem, 0);
-			retval = -EINTR;
-			break;
-		}
-		schedule();
-		set_task_state(tsk, TASK_INTERRUPTIBLE);
-	}
-	remove_wait_queue(&sem->wait, &wait);
-	__set_task_state(tsk, TASK_RUNNING);
-
-	wake_up(&sem->wait);
-	return retval;
-}
-EXPORT_SYMBOL(__down_interruptible);
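
Every one of these deleted __down_interruptible() implementations served
the same caller-visible contract, which the generic semaphore replacing
them preserves.  A kernel-flavored sketch of the call side (do_work and its
critical section are illustrative, not from this patch):

#include <linux/errno.h>
#include <linux/semaphore.h>

static int do_work(struct semaphore *sem)
{
	if (down_interruptible(sem))
		return -EINTR;		/* a signal arrived while sleeping */

	/* ... critical section ... */

	up(sem);
	return 0;
}
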
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index cd870a8..06d918d 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -10,9 +10,6 @@
 #include <linux/reboot.h>
 #include <linux/delay.h>
 #include <linux/initrd.h>
-#if defined(CONFIG_IDE) || defined(CONFIG_IDE_MODULE)
-#include <linux/ide.h>
-#endif
 #include <linux/tty.h>
 #include <linux/bootmem.h>
 #include <linux/seq_file.h>
@@ -51,11 +48,6 @@
 
 extern void bootx_init(unsigned long r4, unsigned long phys);
 
-#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)
-struct ide_machdep_calls ppc_ide_md;
-EXPORT_SYMBOL(ppc_ide_md);
-#endif
-
 int boot_cpuid;
 EXPORT_SYMBOL_GPL(boot_cpuid);
 int boot_cpuid_phys;
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index 1c58db9..bcf50d7 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -1144,28 +1144,6 @@
 {
 	struct device_node* nd;
 
-#ifdef CONFIG_BLK_DEV_IDE
-	struct pci_dev *dev = NULL;
-
-	/* OF fails to initialize IDE controllers on macs
-	 * (and maybe other machines)
-	 *
-	 * Ideally, this should be moved to the IDE layer, but we need
-	 * to check specifically with Andre Hedrick how to do it cleanly
- * since the common IDE code seems to care about the fact that the
-	 * BIOS may have disabled a controller.
-	 *
-	 * -- BenH
-	 */
-	for_each_pci_dev(dev) {
-		if ((dev->class >> 16) != PCI_BASE_CLASS_STORAGE)
-			continue;
-		if (pci_enable_device(dev))
-			printk(KERN_WARNING
-			       "pci: Failed to enable %s\n", pci_name(dev));
-	}
-#endif /* CONFIG_BLK_DEV_IDE */
-
 	for_each_node_by_name(nd, "firewire") {
 		if (nd->parent && (of_device_is_compatible(nd, "pci106b,18") ||
 				   of_device_is_compatible(nd, "pci106b,30") ||
diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h
index b3abaaf..3362e78 100644
--- a/arch/powerpc/platforms/powermac/pmac.h
+++ b/arch/powerpc/platforms/powermac/pmac.h
@@ -2,7 +2,6 @@
 #define __PMAC_H__
 
 #include <linux/pci.h>
-#include <linux/ide.h>
 #include <linux/irq.h>
 
 /*
@@ -35,10 +34,6 @@
 
 extern void pmac_setup_smp(void);
 
-extern unsigned long pmac_ide_get_base(int index);
-extern void pmac_ide_init_hwif_ports(hw_regs_t *hw,
-	unsigned long data_port, unsigned long ctrl_port, int *irq);
-
 extern int pmac_nvram_init(void);
 extern void pmac_pic_init(void);
 
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 36ff1b6..2693fc3 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -574,14 +574,6 @@
 	ISA_DMA_THRESHOLD = ~0L;
 	DMA_MODE_READ = 1;
 	DMA_MODE_WRITE = 2;
-
-#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)
-#ifdef CONFIG_BLK_DEV_IDE_PMAC
-        ppc_ide_md.ide_init_hwif	= pmac_ide_init_hwif_ports;
-        ppc_ide_md.default_io_base	= pmac_ide_get_base;
-#endif /* CONFIG_BLK_DEV_IDE_PMAC */
-#endif /* defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE) */
-
 #endif /* CONFIG_PPC32 */
 
 #ifdef CONFIG_PMAC_SMU
diff --git a/arch/ppc/configs/sandpoint_defconfig b/arch/ppc/configs/sandpoint_defconfig
index fb493a6..9525e341 100644
--- a/arch/ppc/configs/sandpoint_defconfig
+++ b/arch/ppc/configs/sandpoint_defconfig
@@ -189,7 +189,7 @@
 #
 # IDE chipset support/bugfixes
 #
-CONFIG_IDE_GENERIC=y
+CONFIG_BLK_DEV_SL82C105=y
 # CONFIG_BLK_DEV_IDEPCI is not set
 # CONFIG_BLK_DEV_IDEDMA is not set
 # CONFIG_IDEDMA_AUTO is not set
diff --git a/arch/ppc/kernel/ppc_ksyms.c b/arch/ppc/kernel/ppc_ksyms.c
index c353502..2ba659f 100644
--- a/arch/ppc/kernel/ppc_ksyms.c
+++ b/arch/ppc/kernel/ppc_ksyms.c
@@ -12,7 +12,6 @@
 #include <linux/irq.h>
 #include <linux/pci.h>
 #include <linux/delay.h>
-#include <linux/ide.h>
 #include <linux/pm.h>
 #include <linux/bitops.h>
 
@@ -124,10 +123,6 @@
 EXPORT_SYMBOL(iounmap);
 EXPORT_SYMBOL(ioremap_bot);	/* aka VMALLOC_END */
 
-#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)
-EXPORT_SYMBOL(ppc_ide_md);
-#endif
-
 #ifdef CONFIG_PCI
 EXPORT_SYMBOL(isa_io_base);
 EXPORT_SYMBOL(isa_mem_base);
diff --git a/arch/ppc/kernel/semaphore.c b/arch/ppc/kernel/semaphore.c
deleted file mode 100644
index 2fe429b..0000000
--- a/arch/ppc/kernel/semaphore.c
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * PowerPC-specific semaphore code.
- *
- * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * April 2001 - Reworked by Paul Mackerras <paulus@samba.org>
- * to eliminate the SMP races in the old version between the updates
- * of `count' and `waking'.  Now we use negative `count' values to
- * indicate that some process(es) are waiting for the semaphore.
- */
-
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <asm/atomic.h>
-#include <asm/semaphore.h>
-#include <asm/errno.h>
-
-/*
- * Atomically update sem->count.
- * This does the equivalent of the following:
- *
- *	old_count = sem->count;
- *	tmp = MAX(old_count, 0) + incr;
- *	sem->count = tmp;
- *	return old_count;
- */
-static inline int __sem_update_count(struct semaphore *sem, int incr)
-{
-	int old_count, tmp;
-
-	__asm__ __volatile__("\n"
-"1:	lwarx	%0,0,%3\n"
-"	srawi	%1,%0,31\n"
-"	andc	%1,%0,%1\n"
-"	add	%1,%1,%4\n"
-	PPC405_ERR77(0,%3)
-"	stwcx.	%1,0,%3\n"
-"	bne	1b"
-	: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
-	: "r" (&sem->count), "r" (incr), "m" (sem->count)
-	: "cc");
-
-	return old_count;
-}
-
-void __up(struct semaphore *sem)
-{
-	/*
-	 * Note that we incremented count in up() before we came here,
-	 * but that was ineffective since the result was <= 0, and
-	 * any negative value of count is equivalent to 0.
-	 * This ends up setting count to 1, unless count is now > 0
-	 * (i.e. because some other cpu has called up() in the meantime),
-	 * in which case we just increment count.
-	 */
-	__sem_update_count(sem, 1);
-	wake_up(&sem->wait);
-}
-
-/*
- * Note that when we come in to __down or __down_interruptible,
- * we have already decremented count, but that decrement was
- * ineffective since the result was < 0, and any negative value
- * of count is equivalent to 0.
- * Thus it is only when we decrement count from some value > 0
- * that we have actually got the semaphore.
- */
-void __sched __down(struct semaphore *sem)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-
-	tsk->state = TASK_UNINTERRUPTIBLE;
-	add_wait_queue_exclusive(&sem->wait, &wait);
-	smp_wmb();
-
-	/*
-	 * Try to get the semaphore.  If the count is > 0, then we've
-	 * got the semaphore; we decrement count and exit the loop.
-	 * If the count is 0 or negative, we set it to -1, indicating
-	 * that we are asleep, and then sleep.
-	 */
-	while (__sem_update_count(sem, -1) <= 0) {
-		schedule();
-		tsk->state = TASK_UNINTERRUPTIBLE;
-	}
-	remove_wait_queue(&sem->wait, &wait);
-	tsk->state = TASK_RUNNING;
-
-	/*
-	 * If there are any more sleepers, wake one of them up so
-	 * that it can either get the semaphore, or set count to -1
-	 * indicating that there are still processes sleeping.
-	 */
-	wake_up(&sem->wait);
-}
-
-int __sched __down_interruptible(struct semaphore * sem)
-{
-	int retval = 0;
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-
-	tsk->state = TASK_INTERRUPTIBLE;
-	add_wait_queue_exclusive(&sem->wait, &wait);
-	smp_wmb();
-
-	while (__sem_update_count(sem, -1) <= 0) {
-		if (signal_pending(current)) {
-			/*
-			 * A signal is pending - give up trying.
-			 * Set sem->count to 0 if it is negative,
-			 * since we are no longer sleeping.
-			 */
-			__sem_update_count(sem, 0);
-			retval = -EINTR;
-			break;
-		}
-		schedule();
-		tsk->state = TASK_INTERRUPTIBLE;
-	}
-	tsk->state = TASK_RUNNING;
-	remove_wait_queue(&sem->wait, &wait);
-	wake_up(&sem->wait);
-	return retval;
-}
diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c
index 2940559..bfddfde 100644
--- a/arch/ppc/kernel/setup.c
+++ b/arch/ppc/kernel/setup.c
@@ -10,7 +10,6 @@
 #include <linux/reboot.h>
 #include <linux/delay.h>
 #include <linux/initrd.h>
-#include <linux/ide.h>
 #include <linux/screen_info.h>
 #include <linux/bootmem.h>
 #include <linux/seq_file.h>
@@ -57,7 +56,6 @@
 extern void power4_idle(void);
 
 extern boot_infos_t *boot_infos;
-struct ide_machdep_calls ppc_ide_md;
 
 /* Used with the BI_MEMSIZE bootinfo parameter to store the memory
    size value reported by the boot loader. */
diff --git a/arch/ppc/platforms/4xx/bamboo.c b/arch/ppc/platforms/4xx/bamboo.c
index 017623c..01f20f4 100644
--- a/arch/ppc/platforms/4xx/bamboo.c
+++ b/arch/ppc/platforms/4xx/bamboo.c
@@ -22,7 +22,6 @@
 #include <linux/blkdev.h>
 #include <linux/console.h>
 #include <linux/delay.h>
-#include <linux/ide.h>
 #include <linux/initrd.h>
 #include <linux/seq_file.h>
 #include <linux/root_dev.h>
diff --git a/arch/ppc/platforms/4xx/ebony.c b/arch/ppc/platforms/4xx/ebony.c
index 453643a..8027a36 100644
--- a/arch/ppc/platforms/4xx/ebony.c
+++ b/arch/ppc/platforms/4xx/ebony.c
@@ -25,7 +25,6 @@
 #include <linux/blkdev.h>
 #include <linux/console.h>
 #include <linux/delay.h>
-#include <linux/ide.h>
 #include <linux/initrd.h>
 #include <linux/seq_file.h>
 #include <linux/root_dev.h>
diff --git a/arch/ppc/platforms/4xx/luan.c b/arch/ppc/platforms/4xx/luan.c
index b79ebb8..f6d8c2e 100644
--- a/arch/ppc/platforms/4xx/luan.c
+++ b/arch/ppc/platforms/4xx/luan.c
@@ -23,7 +23,6 @@
 #include <linux/blkdev.h>
 #include <linux/console.h>
 #include <linux/delay.h>
-#include <linux/ide.h>
 #include <linux/initrd.h>
 #include <linux/seq_file.h>
 #include <linux/root_dev.h>
diff --git a/arch/ppc/platforms/4xx/ocotea.c b/arch/ppc/platforms/4xx/ocotea.c
index 28a712c..308386e 100644
--- a/arch/ppc/platforms/4xx/ocotea.c
+++ b/arch/ppc/platforms/4xx/ocotea.c
@@ -23,7 +23,6 @@
 #include <linux/blkdev.h>
 #include <linux/console.h>
 #include <linux/delay.h>
-#include <linux/ide.h>
 #include <linux/initrd.h>
 #include <linux/seq_file.h>
 #include <linux/root_dev.h>
diff --git a/arch/ppc/platforms/4xx/taishan.c b/arch/ppc/platforms/4xx/taishan.c
index f6a0c66..1156942 100644
--- a/arch/ppc/platforms/4xx/taishan.c
+++ b/arch/ppc/platforms/4xx/taishan.c
@@ -23,7 +23,6 @@
 #include <linux/blkdev.h>
 #include <linux/console.h>
 #include <linux/delay.h>
-#include <linux/ide.h>
 #include <linux/initrd.h>
 #include <linux/seq_file.h>
 #include <linux/root_dev.h>
diff --git a/arch/ppc/platforms/4xx/yucca.c b/arch/ppc/platforms/4xx/yucca.c
index 66a44ff..f6cfd44 100644
--- a/arch/ppc/platforms/4xx/yucca.c
+++ b/arch/ppc/platforms/4xx/yucca.c
@@ -24,7 +24,6 @@
 #include <linux/blkdev.h>
 #include <linux/console.h>
 #include <linux/delay.h>
-#include <linux/ide.h>
 #include <linux/initrd.h>
 #include <linux/seq_file.h>
 #include <linux/root_dev.h>
diff --git a/arch/ppc/platforms/chestnut.c b/arch/ppc/platforms/chestnut.c
index dcd6070..27c140f 100644
--- a/arch/ppc/platforms/chestnut.c
+++ b/arch/ppc/platforms/chestnut.c
@@ -22,7 +22,6 @@
 #include <linux/initrd.h>
 #include <linux/delay.h>
 #include <linux/seq_file.h>
-#include <linux/ide.h>
 #include <linux/serial.h>
 #include <linux/serial_core.h>
 #include <linux/serial_8250.h>
diff --git a/arch/ppc/platforms/cpci690.c b/arch/ppc/platforms/cpci690.c
index e78bccf..07f672d 100644
--- a/arch/ppc/platforms/cpci690.c
+++ b/arch/ppc/platforms/cpci690.c
@@ -10,7 +10,6 @@
  */
 #include <linux/delay.h>
 #include <linux/pci.h>
-#include <linux/ide.h>
 #include <linux/irq.h>
 #include <linux/fs.h>
 #include <linux/seq_file.h>
diff --git a/arch/ppc/platforms/ev64260.c b/arch/ppc/platforms/ev64260.c
index c1f77e1..f522b31 100644
--- a/arch/ppc/platforms/ev64260.c
+++ b/arch/ppc/platforms/ev64260.c
@@ -23,7 +23,6 @@
 
 #include <linux/delay.h>
 #include <linux/pci.h>
-#include <linux/ide.h>
 #include <linux/irq.h>
 #include <linux/fs.h>
 #include <linux/seq_file.h>
diff --git a/arch/ppc/platforms/hdpu.c b/arch/ppc/platforms/hdpu.c
index ca5de13..904b518 100644
--- a/arch/ppc/platforms/hdpu.c
+++ b/arch/ppc/platforms/hdpu.c
@@ -16,7 +16,6 @@
 #include <linux/pci.h>
 #include <linux/delay.h>
 #include <linux/irq.h>
-#include <linux/ide.h>
 #include <linux/seq_file.h>
 #include <linux/platform_device.h>
 
@@ -604,41 +603,6 @@
 	}
 }
 
-#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)
-static void
-hdpu_ide_request_region(ide_ioreg_t from, unsigned int extent, const char *name)
-{
-	request_region(from, extent, name);
-	return;
-}
-
-static void hdpu_ide_release_region(ide_ioreg_t from, unsigned int extent)
-{
-	release_region(from, extent);
-	return;
-}
-
-static void __init
-hdpu_ide_pci_init_hwif_ports(hw_regs_t * hw, ide_ioreg_t data_port,
-			     ide_ioreg_t ctrl_port, int *irq)
-{
-	struct pci_dev *dev;
-
-	pci_for_each_dev(dev) {
-		if (((dev->class >> 8) == PCI_CLASS_STORAGE_IDE) ||
-		    ((dev->class >> 8) == PCI_CLASS_STORAGE_RAID)) {
-			hw->irq = dev->irq;
-
-			if (irq != NULL) {
-				*irq = dev->irq;
-			}
-		}
-	}
-
-	return;
-}
-#endif
-
 void hdpu_heartbeat(void)
 {
 	if (mv64x60_read(&bh, MV64x60_GPP_VALUE) & (1 << 5))
diff --git a/arch/ppc/platforms/lopec.c b/arch/ppc/platforms/lopec.c
index b947c77..1e3aa6e 100644
--- a/arch/ppc/platforms/lopec.c
+++ b/arch/ppc/platforms/lopec.c
@@ -15,7 +15,6 @@
 #include <linux/pci_ids.h>
 #include <linux/ioport.h>
 #include <linux/init.h>
-#include <linux/ide.h>
 #include <linux/seq_file.h>
 #include <linux/initrd.h>
 #include <linux/console.h>
@@ -168,85 +167,6 @@
 	lopec_halt();
 }
 
-#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)
-int lopec_ide_ports_known = 0;
-static unsigned long lopec_ide_regbase[MAX_HWIFS];
-static unsigned long lopec_ide_ctl_regbase[MAX_HWIFS];
-static unsigned long lopec_idedma_regbase;
-
-static void
-lopec_ide_probe(void)
-{
-	struct pci_dev *dev = pci_get_device(PCI_VENDOR_ID_WINBOND,
-					      PCI_DEVICE_ID_WINBOND_82C105,
-					      NULL);
-	lopec_ide_ports_known = 1;
-
-	if (dev) {
-		lopec_ide_regbase[0] = dev->resource[0].start;
-		lopec_ide_regbase[1] = dev->resource[2].start;
-		lopec_ide_ctl_regbase[0] = dev->resource[1].start;
-		lopec_ide_ctl_regbase[1] = dev->resource[3].start;
-		lopec_idedma_regbase = dev->resource[4].start;
-		pci_dev_put(dev);
-	}
-}
-
-static int
-lopec_ide_default_irq(unsigned long base)
-{
-	if (lopec_ide_ports_known == 0)
-		lopec_ide_probe();
-
-	if (base == lopec_ide_regbase[0])
-		return 14;
-	else if (base == lopec_ide_regbase[1])
-		return 15;
-	else
-		return 0;
-}
-
-static unsigned long
-lopec_ide_default_io_base(int index)
-{
-	if (lopec_ide_ports_known == 0)
-		lopec_ide_probe();
-	return lopec_ide_regbase[index];
-}
-
-static void __init
-lopec_ide_init_hwif_ports(hw_regs_t *hw, unsigned long data,
-			  unsigned long ctl, int *irq)
-{
-	unsigned long reg = data;
-	uint alt_status_base;
-	int i;
-
-	for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++)
-		hw->io_ports[i] = reg++;
-
-	if (data == lopec_ide_regbase[0]) {
-		alt_status_base = lopec_ide_ctl_regbase[0] + 2;
-		hw->irq = 14;
-	} else if (data == lopec_ide_regbase[1]) {
-		alt_status_base = lopec_ide_ctl_regbase[1] + 2;
-		hw->irq = 15;
-	} else {
-		alt_status_base = 0;
-		hw->irq = 0;
-	}
-
-	if (ctl)
-		hw->io_ports[IDE_CONTROL_OFFSET] = ctl;
-	else
-		hw->io_ports[IDE_CONTROL_OFFSET] = alt_status_base;
-
-	if (irq != NULL)
-		*irq = hw->irq;
-
-}
-#endif /* BLK_DEV_IDE */
-
 static void __init
 lopec_init_IRQ(void)
 {
@@ -384,11 +304,6 @@
 	ppc_md.nvram_read_val = todc_direct_read_val;
 	ppc_md.nvram_write_val = todc_direct_write_val;
 
-#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)
-	ppc_ide_md.default_irq = lopec_ide_default_irq;
-	ppc_ide_md.default_io_base = lopec_ide_default_io_base;
-	ppc_ide_md.ide_init_hwif = lopec_ide_init_hwif_ports;
-#endif
 #ifdef CONFIG_SERIAL_TEXT_DEBUG
 	ppc_md.progress = gen550_progress;
 #endif
diff --git a/arch/ppc/platforms/mvme5100.c b/arch/ppc/platforms/mvme5100.c
index bb8d4a4..053b54a 100644
--- a/arch/ppc/platforms/mvme5100.c
+++ b/arch/ppc/platforms/mvme5100.c
@@ -17,7 +17,6 @@
 #include <linux/initrd.h>
 #include <linux/console.h>
 #include <linux/delay.h>
-#include <linux/ide.h>
 #include <linux/seq_file.h>
 #include <linux/kdev_t.h>
 #include <linux/root_dev.h>
diff --git a/arch/ppc/platforms/powerpmc250.c b/arch/ppc/platforms/powerpmc250.c
index 4d46650..162dc85 100644
--- a/arch/ppc/platforms/powerpmc250.c
+++ b/arch/ppc/platforms/powerpmc250.c
@@ -25,7 +25,6 @@
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/seq_file.h>
-#include <linux/ide.h>
 #include <linux/root_dev.h>
 
 #include <asm/byteorder.h>
diff --git a/arch/ppc/platforms/pplus.c b/arch/ppc/platforms/pplus.c
index 8a1788c..cbcac85 100644
--- a/arch/ppc/platforms/pplus.c
+++ b/arch/ppc/platforms/pplus.c
@@ -19,7 +19,6 @@
 #include <linux/ioport.h>
 #include <linux/console.h>
 #include <linux/pci.h>
-#include <linux/ide.h>
 #include <linux/seq_file.h>
 #include <linux/root_dev.h>
 
@@ -668,57 +667,6 @@
 		ppc_md.progress("init_irq: exit", 0);
 }
 
-#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)
-/*
- * IDE stuff.
- */
-static int pplus_ide_default_irq(unsigned long base)
-{
-	switch (base) {
-	case 0x1f0:
-		return 14;
-	case 0x170:
-		return 15;
-	default:
-		return 0;
-	}
-}
-
-static unsigned long pplus_ide_default_io_base(int index)
-{
-	switch (index) {
-	case 0:
-		return 0x1f0;
-	case 1:
-		return 0x170;
-	default:
-		return 0;
-	}
-}
-
-static void __init
-pplus_ide_init_hwif_ports(hw_regs_t * hw, unsigned long data_port,
-			  unsigned long ctrl_port, int *irq)
-{
-	unsigned long reg = data_port;
-	int i;
-
-	for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
-		hw->io_ports[i] = reg;
-		reg += 1;
-	}
-
-	if (ctrl_port)
-		hw->io_ports[IDE_CONTROL_OFFSET] = ctrl_port;
-	else
-		hw->io_ports[IDE_CONTROL_OFFSET] =
-		    hw->io_ports[IDE_DATA_OFFSET] + 0x206;
-
-	if (irq != NULL)
-		*irq = pplus_ide_default_irq(data_port);
-}
-#endif
-
 #ifdef CONFIG_SMP
 /* PowerPlus (MTX) support */
 static int __init smp_pplus_probe(void)
@@ -884,12 +832,6 @@
 	ppc_md.find_end_of_memory = pplus_find_end_of_memory;
 	ppc_md.setup_io_mappings = pplus_map_io;
 
-#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)
-	ppc_ide_md.default_irq = pplus_ide_default_irq;
-	ppc_ide_md.default_io_base = pplus_ide_default_io_base;
-	ppc_ide_md.ide_init_hwif = pplus_ide_init_hwif_ports;
-#endif
-
 #ifdef CONFIG_SERIAL_TEXT_DEBUG
 	ppc_md.progress = gen550_progress;
 #endif				/* CONFIG_SERIAL_TEXT_DEBUG */
diff --git a/arch/ppc/platforms/prep_setup.c b/arch/ppc/platforms/prep_setup.c
index 3844985..465b658 100644
--- a/arch/ppc/platforms/prep_setup.c
+++ b/arch/ppc/platforms/prep_setup.c
@@ -33,7 +33,6 @@
 #include <linux/console.h>
 #include <linux/timex.h>
 #include <linux/pci.h>
-#include <linux/ide.h>
 #include <linux/seq_file.h>
 #include <linux/root_dev.h>
 
@@ -894,38 +893,6 @@
 		i8259_init(MPC10X_MAPA_PCI_INTACK_ADDR, 0);
 }
 
-#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)
-/*
- * IDE stuff.
- */
-static int
-prep_ide_default_irq(unsigned long base)
-{
-	switch (base) {
-		case 0x1f0: return 13;
-		case 0x170: return 13;
-		case 0x1e8: return 11;
-		case 0x168: return 10;
-		case 0xfff0: return 14;		/* MCP(N)750 ide0 */
-		case 0xffe0: return 15;		/* MCP(N)750 ide1 */
-		default: return 0;
-	}
-}
-
-static unsigned long
-prep_ide_default_io_base(int index)
-{
-	switch (index) {
-		case 0: return 0x1f0;
-		case 1: return 0x170;
-		case 2: return 0x1e8;
-		case 3: return 0x168;
-		default:
-			return 0;
-	}
-}
-#endif
-
 #ifdef CONFIG_SMP
 /* PReP (MTX) support */
 static int __init
@@ -1070,11 +1037,6 @@
 
 	ppc_md.setup_io_mappings = prep_map_io;
 
-#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)
-	ppc_ide_md.default_irq = prep_ide_default_irq;
-	ppc_ide_md.default_io_base = prep_ide_default_io_base;
-#endif
-
 #ifdef CONFIG_SMP
 	smp_ops			 = &prep_smp_ops;
 #endif /* CONFIG_SMP */
diff --git a/arch/ppc/platforms/prpmc750.c b/arch/ppc/platforms/prpmc750.c
index fcab513..93bd593 100644
--- a/arch/ppc/platforms/prpmc750.c
+++ b/arch/ppc/platforms/prpmc750.c
@@ -22,7 +22,6 @@
 #include <linux/console.h>
 #include <linux/delay.h>
 #include <linux/seq_file.h>
-#include <linux/ide.h>
 #include <linux/root_dev.h>
 #include <linux/slab.h>
 #include <linux/serial_reg.h>
diff --git a/arch/ppc/platforms/prpmc800.c b/arch/ppc/platforms/prpmc800.c
index f4ade5c..5bcda7f 100644
--- a/arch/ppc/platforms/prpmc800.c
+++ b/arch/ppc/platforms/prpmc800.c
@@ -20,7 +20,6 @@
 #include <linux/console.h>
 #include <linux/delay.h>
 #include <linux/seq_file.h>
-#include <linux/ide.h>
 #include <linux/root_dev.h>
 #include <linux/harrier_defs.h>
 
diff --git a/arch/ppc/platforms/radstone_ppc7d.c b/arch/ppc/platforms/radstone_ppc7d.c
index 44d4398..179b4a9 100644
--- a/arch/ppc/platforms/radstone_ppc7d.c
+++ b/arch/ppc/platforms/radstone_ppc7d.c
@@ -29,7 +29,6 @@
 #include <linux/initrd.h>
 #include <linux/console.h>
 #include <linux/delay.h>
-#include <linux/ide.h>
 #include <linux/seq_file.h>
 #include <linux/root_dev.h>
 #include <linux/serial.h>
diff --git a/arch/ppc/platforms/residual.c b/arch/ppc/platforms/residual.c
index c991160..18495e7 100644
--- a/arch/ppc/platforms/residual.c
+++ b/arch/ppc/platforms/residual.c
@@ -38,7 +38,6 @@
 #include <linux/init.h>
 #include <linux/ioport.h>
 #include <linux/pci.h>
-#include <linux/ide.h>
 
 #include <asm/sections.h>
 #include <asm/mmu.h>
diff --git a/arch/ppc/platforms/sandpoint.c b/arch/ppc/platforms/sandpoint.c
index 3352fae..b4897bd 100644
--- a/arch/ppc/platforms/sandpoint.c
+++ b/arch/ppc/platforms/sandpoint.c
@@ -71,7 +71,6 @@
 #include <linux/initrd.h>
 #include <linux/console.h>
 #include <linux/delay.h>
-#include <linux/ide.h>
 #include <linux/seq_file.h>
 #include <linux/root_dev.h>
 #include <linux/serial.h>
@@ -559,93 +558,6 @@
 	return 0;
 }
 
-#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)
-/*
- * IDE support.
- */
-static int		sandpoint_ide_ports_known = 0;
-static unsigned long	sandpoint_ide_regbase[MAX_HWIFS];
-static unsigned long	sandpoint_ide_ctl_regbase[MAX_HWIFS];
-static unsigned long	sandpoint_idedma_regbase;
-
-static void
-sandpoint_ide_probe(void)
-{
-	struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_WINBOND,
-			PCI_DEVICE_ID_WINBOND_82C105, NULL);
-
-	if (pdev) {
-		sandpoint_ide_regbase[0]=pdev->resource[0].start;
-		sandpoint_ide_regbase[1]=pdev->resource[2].start;
-		sandpoint_ide_ctl_regbase[0]=pdev->resource[1].start;
-		sandpoint_ide_ctl_regbase[1]=pdev->resource[3].start;
-		sandpoint_idedma_regbase=pdev->resource[4].start;
-		pci_dev_put(pdev);
-	}
-
-	sandpoint_ide_ports_known = 1;
-}
-
-static int
-sandpoint_ide_default_irq(unsigned long base)
-{
-	if (sandpoint_ide_ports_known == 0)
-		sandpoint_ide_probe();
-
-	if (base == sandpoint_ide_regbase[0])
-		return SANDPOINT_IDE_INT0;
-	else if (base == sandpoint_ide_regbase[1])
-		return SANDPOINT_IDE_INT1;
-	else
-		return 0;
-}
-
-static unsigned long
-sandpoint_ide_default_io_base(int index)
-{
-	if (sandpoint_ide_ports_known == 0)
-		sandpoint_ide_probe();
-
-	return sandpoint_ide_regbase[index];
-}
-
-static void __init
-sandpoint_ide_init_hwif_ports(hw_regs_t *hw, unsigned long data_port,
-		unsigned long ctrl_port, int *irq)
-{
-	unsigned long reg = data_port;
-	uint	alt_status_base;
-	int	i;
-
-	for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
-		hw->io_ports[i] = reg++;
-	}
-
-	if (data_port == sandpoint_ide_regbase[0]) {
-		alt_status_base = sandpoint_ide_ctl_regbase[0] + 2;
-		hw->irq = 14;
-	}
-	else if (data_port == sandpoint_ide_regbase[1]) {
-		alt_status_base = sandpoint_ide_ctl_regbase[1] + 2;
-		hw->irq = 15;
-	}
-	else {
-		alt_status_base = 0;
-		hw->irq = 0;
-	}
-
-	if (ctrl_port) {
-		hw->io_ports[IDE_CONTROL_OFFSET] = ctrl_port;
-	} else {
-		hw->io_ports[IDE_CONTROL_OFFSET] = alt_status_base;
-	}
-
-	if (irq != NULL) {
-		*irq = hw->irq;
-	}
-}
-#endif
-
 /*
  * Set BAT 3 to map 0xf8000000 to end of physical memory space 1-to-1.
  */
@@ -736,10 +648,4 @@
 #ifdef CONFIG_SERIAL_TEXT_DEBUG
 	ppc_md.progress = gen550_progress;
 #endif
-
-#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)
-	ppc_ide_md.default_irq = sandpoint_ide_default_irq;
-	ppc_ide_md.default_io_base = sandpoint_ide_default_io_base;
-	ppc_ide_md.ide_init_hwif = sandpoint_ide_init_hwif_ports;
-#endif
 }
diff --git a/arch/ppc/platforms/sandpoint.h b/arch/ppc/platforms/sandpoint.h
index 3b64e64..ed83759 100644
--- a/arch/ppc/platforms/sandpoint.h
+++ b/arch/ppc/platforms/sandpoint.h
@@ -28,9 +28,6 @@
  */
 #define SANDPOINT_IDE_INT0		23	/* EPIC 7 */
 #define SANDPOINT_IDE_INT1		24	/* EPIC 8 */
-#else
-#define SANDPOINT_IDE_INT0		14	/* 8259 Test */
-#define SANDPOINT_IDE_INT1		15	/* 8259 Test */
 #endif
 
 /*
diff --git a/arch/ppc/platforms/spruce.c b/arch/ppc/platforms/spruce.c
index f4de50b..a344134 100644
--- a/arch/ppc/platforms/spruce.c
+++ b/arch/ppc/platforms/spruce.c
@@ -22,7 +22,6 @@
 #include <linux/console.h>
 #include <linux/delay.h>
 #include <linux/seq_file.h>
-#include <linux/ide.h>
 #include <linux/root_dev.h>
 #include <linux/serial.h>
 #include <linux/tty.h>
diff --git a/arch/ppc/syslib/m8xx_setup.c b/arch/ppc/syslib/m8xx_setup.c
index 9caf850..19749e9 100644
--- a/arch/ppc/syslib/m8xx_setup.c
+++ b/arch/ppc/syslib/m8xx_setup.c
@@ -87,8 +87,6 @@
 
 unsigned char __res[sizeof(bd_t)];
 
-extern void m8xx_ide_init(void);
-
 extern unsigned long find_available_memory(void);
 extern void m8xx_cpm_reset(void);
 extern void m8xx_wdt_handler_install(bd_t *bp);
@@ -474,8 +472,4 @@
 
 	ppc_md.find_end_of_memory	= m8xx_find_end_of_memory;
 	ppc_md.setup_io_mappings	= m8xx_map_io;
-
-#if defined(CONFIG_BLK_DEV_MPC8xx_IDE)
-	m8xx_ide_init();
-#endif
 }
diff --git a/arch/ppc/syslib/ppc4xx_setup.c b/arch/ppc/syslib/ppc4xx_setup.c
index debe14c..353d746 100644
--- a/arch/ppc/syslib/ppc4xx_setup.c
+++ b/arch/ppc/syslib/ppc4xx_setup.c
@@ -24,7 +24,6 @@
 #include <linux/pci.h>
 #include <linux/rtc.h>
 #include <linux/console.h>
-#include <linux/ide.h>
 #include <linux/serial_reg.h>
 #include <linux/seq_file.h>
 
@@ -189,24 +188,6 @@
 	mtspr(SPRN_PIT, tb_ticks_per_jiffy);
 }
 
-/*
- * IDE stuff.
- * should be generic for every IDE PCI chipset
- */
-#if defined(CONFIG_PCI) && defined(CONFIG_IDE)
-static void
-ppc4xx_ide_init_hwif_ports(hw_regs_t * hw, unsigned long data_port,
-			   unsigned long ctrl_port, int *irq)
-{
-	int i;
-
-	for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; ++i)
-		hw->io_ports[i] = data_port + i - IDE_DATA_OFFSET;
-
-	hw->io_ports[IDE_CONTROL_OFFSET] = ctrl_port;
-}
-#endif /* defined(CONFIG_PCI) && defined(CONFIG_IDE) */
-
 TODC_ALLOC();
 
 /*
@@ -271,10 +252,6 @@
 #ifdef CONFIG_SERIAL_TEXT_DEBUG
 	ppc_md.progress = gen550_progress;
 #endif
-
-#if defined(CONFIG_PCI) && defined(CONFIG_IDE)
-	ppc_ide_md.ide_init_hwif = ppc4xx_ide_init_hwif_ports;
-#endif /* defined(CONFIG_PCI) && defined(CONFIG_IDE) */
 }
 
 /* Called from machine_check_exception */
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 1831833..f6a68e1 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -3,6 +3,10 @@
 # see Documentation/kbuild/kconfig-language.txt.
 #
 
+config SCHED_MC
+	def_bool y
+	depends on SMP
+
 config MMU
 	def_bool y
 
@@ -39,6 +43,9 @@
 config GENERIC_TIME
 	def_bool y
 
+config GENERIC_CLOCKEVENTS
+	def_bool y
+
 config GENERIC_BUG
 	bool
 	depends on BUG
@@ -69,6 +76,8 @@
 
 comment "Processor type and features"
 
+source "kernel/time/Kconfig"
+
 config 64BIT
 	bool "64 bit kernel"
 	help
@@ -301,10 +310,7 @@
 	tristate "QDIO support"
 	---help---
 	  This driver provides the Queued Direct I/O base support for
-	  IBM mainframes.
-
-	  For details please refer to the documentation provided by IBM at
-	  <http://www10.software.ibm.com/developerworks/opensource/linux390>
+	  IBM System z.
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called qdio.
@@ -486,25 +492,6 @@
 
 source kernel/Kconfig.hz
 
-config NO_IDLE_HZ
-	bool "No HZ timer ticks in idle"
-	help
-	  Switches the regular HZ timer off when the system is going idle.
-	  This helps z/VM to detect that the Linux system is idle. VM can
-	  then "swap-out" this guest which reduces memory usage. It also
-	  reduces the overhead of idle systems.
-
-	  The HZ timer can be switched on/off via /proc/sys/kernel/hz_timer.
-	  hz_timer=0 means HZ timer is disabled. hz_timer=1 means HZ
-	  timer is active.
-
-config NO_IDLE_HZ_INIT
-	bool "HZ timer in idle off by default"
-	depends on NO_IDLE_HZ
-	help
-	  The HZ timer is switched off in idle by default. That means the
-	  HZ timer is already disabled at boot time.
-
 config S390_HYPFS_FS
 	bool "s390 hypervisor file system support"
 	select SYS_HYPERVISOR
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index a3f67f8..e33f32b 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -499,7 +499,7 @@
 	}
 };
 
-static int __init aes_init(void)
+static int __init aes_s390_init(void)
 {
 	int ret;
 
@@ -542,15 +542,15 @@
 	goto out;
 }
 
-static void __exit aes_fini(void)
+static void __exit aes_s390_fini(void)
 {
 	crypto_unregister_alg(&cbc_aes_alg);
 	crypto_unregister_alg(&ecb_aes_alg);
 	crypto_unregister_alg(&aes_alg);
 }
 
-module_init(aes_init);
-module_exit(aes_fini);
+module_init(aes_s390_init);
+module_exit(aes_s390_fini);
 
 MODULE_ALIAS("aes");
 
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index ea22707..4aba83b 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -550,7 +550,7 @@
 	}
 };
 
-static int init(void)
+static int des_s390_init(void)
 {
 	int ret = 0;
 
@@ -612,7 +612,7 @@
 	goto out;
 }
 
-static void __exit fini(void)
+static void __exit des_s390_fini(void)
 {
 	crypto_unregister_alg(&cbc_des3_192_alg);
 	crypto_unregister_alg(&ecb_des3_192_alg);
@@ -625,8 +625,8 @@
 	crypto_unregister_alg(&des_alg);
 }
 
-module_init(init);
-module_exit(fini);
+module_init(des_s390_init);
+module_exit(des_s390_fini);
 
 MODULE_ALIAS("des");
 MODULE_ALIAS("des3_ede");
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index 5a834f6..9cf9eca 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -137,7 +137,7 @@
 	.dia_final	=	sha1_final } }
 };
 
-static int __init init(void)
+static int __init sha1_s390_init(void)
 {
 	if (!crypt_s390_func_available(KIMD_SHA_1))
 		return -EOPNOTSUPP;
@@ -145,13 +145,13 @@
 	return crypto_register_alg(&alg);
 }
 
-static void __exit fini(void)
+static void __exit sha1_s390_fini(void)
 {
 	crypto_unregister_alg(&alg);
 }
 
-module_init(init);
-module_exit(fini);
+module_init(sha1_s390_init);
+module_exit(sha1_s390_fini);
 
 MODULE_ALIAS("sha1");
 
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index ccf8633..2a3d756 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -133,7 +133,7 @@
 	.dia_final	=	sha256_final } }
 };
 
-static int init(void)
+static int sha256_s390_init(void)
 {
 	if (!crypt_s390_func_available(KIMD_SHA_256))
 		return -EOPNOTSUPP;
@@ -141,13 +141,13 @@
 	return crypto_register_alg(&alg);
 }
 
-static void __exit fini(void)
+static void __exit sha256_s390_fini(void)
 {
 	crypto_unregister_alg(&alg);
 }
 
-module_init(init);
-module_exit(fini);
+module_init(sha256_s390_init);
+module_exit(sha256_s390_fini);
 
 MODULE_ALIAS("sha256");
 
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index cb93bf2..a72f208 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -3,6 +3,7 @@
 # Linux kernel version: 2.6.25-rc4
 # Wed Mar  5 11:22:59 2008
 #
+CONFIG_SCHED_MC=y
 CONFIG_MMU=y
 CONFIG_ZONE_DMA=y
 CONFIG_LOCKDEP_SUPPORT=y
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 4d3e383..77051cd 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -11,7 +11,7 @@
 
 obj-y	:=  bitmap.o traps.o time.o process.o base.o early.o \
             setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
-	    semaphore.o s390_ext.o debug.o irq.o ipl.o dis.o diag.o
+	    s390_ext.o debug.o irq.o ipl.o dis.o diag.o
 
 obj-y	+= $(if $(CONFIG_64BIT),entry64.o,entry.o)
 obj-y	+= $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
@@ -19,7 +19,7 @@
 extra-y				+= head.o init_task.o vmlinux.lds
 
 obj-$(CONFIG_MODULES)		+= s390_ksyms.o module.o
-obj-$(CONFIG_SMP)		+= smp.o
+obj-$(CONFIG_SMP)		+= smp.o topology.o
 
 obj-$(CONFIG_AUDIT)		+= audit.o
 compat-obj-$(CONFIG_AUDIT)	+= compat_audit.o
diff --git a/arch/s390/kernel/compat_linux.h b/arch/s390/kernel/compat_linux.h
index e89f8c0..20723a0 100644
--- a/arch/s390/kernel/compat_linux.h
+++ b/arch/s390/kernel/compat_linux.h
@@ -162,4 +162,77 @@
 	compat_sigset_t		uc_sigmask;	/* mask last for extensibility */
 };
 
+struct __sysctl_args32;
+struct stat64_emu31;
+struct mmap_arg_struct_emu31;
+struct fadvise64_64_args;
+struct old_sigaction32;
+
+long sys32_chown16(const char __user * filename, u16 user, u16 group);
+long sys32_lchown16(const char __user * filename, u16 user, u16 group);
+long sys32_fchown16(unsigned int fd, u16 user, u16 group);
+long sys32_setregid16(u16 rgid, u16 egid);
+long sys32_setgid16(u16 gid);
+long sys32_setreuid16(u16 ruid, u16 euid);
+long sys32_setuid16(u16 uid);
+long sys32_setresuid16(u16 ruid, u16 euid, u16 suid);
+long sys32_getresuid16(u16 __user *ruid, u16 __user *euid, u16 __user *suid);
+long sys32_setresgid16(u16 rgid, u16 egid, u16 sgid);
+long sys32_getresgid16(u16 __user *rgid, u16 __user *egid, u16 __user *sgid);
+long sys32_setfsuid16(u16 uid);
+long sys32_setfsgid16(u16 gid);
+long sys32_getgroups16(int gidsetsize, u16 __user *grouplist);
+long sys32_setgroups16(int gidsetsize, u16 __user *grouplist);
+long sys32_getuid16(void);
+long sys32_geteuid16(void);
+long sys32_getgid16(void);
+long sys32_getegid16(void);
+long sys32_ipc(u32 call, int first, int second, int third, u32 ptr);
+long sys32_truncate64(const char __user * path, unsigned long high,
+		      unsigned long low);
+long sys32_ftruncate64(unsigned int fd, unsigned long high, unsigned long low);
+long sys32_sched_rr_get_interval(compat_pid_t pid,
+				 struct compat_timespec __user *interval);
+long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
+			  compat_sigset_t __user *oset, size_t sigsetsize);
+long sys32_rt_sigpending(compat_sigset_t __user *set, size_t sigsetsize);
+long sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo);
+long sys32_execve(void);
+long sys32_init_module(void __user *umod, unsigned long len,
+		       const char __user *uargs);
+long sys32_delete_module(const char __user *name_user, unsigned int flags);
+long sys32_gettimeofday(struct compat_timeval __user *tv,
+			struct timezone __user *tz);
+long sys32_settimeofday(struct compat_timeval __user *tv,
+			struct timezone __user *tz);
+long sys32_pause(void);
+long sys32_pread64(unsigned int fd, char __user *ubuf, size_t count,
+		   u32 poshi, u32 poslo);
+long sys32_pwrite64(unsigned int fd, const char __user *ubuf,
+		    size_t count, u32 poshi, u32 poslo);
+compat_ssize_t sys32_readahead(int fd, u32 offhi, u32 offlo, s32 count);
+long sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offset,
+		    size_t count);
+long sys32_sendfile64(int out_fd, int in_fd, compat_loff_t __user *offset,
+		      s32 count);
+long sys32_sysctl(struct __sysctl_args32 __user *args);
+long sys32_stat64(char __user * filename, struct stat64_emu31 __user * statbuf);
+long sys32_lstat64(char __user * filename,
+		   struct stat64_emu31 __user * statbuf);
+long sys32_fstat64(unsigned long fd, struct stat64_emu31 __user * statbuf);
+long sys32_fstatat64(unsigned int dfd, char __user *filename,
+		     struct stat64_emu31 __user* statbuf, int flag);
+unsigned long old32_mmap(struct mmap_arg_struct_emu31 __user *arg);
+long sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg);
+long sys32_read(unsigned int fd, char __user * buf, size_t count);
+long sys32_write(unsigned int fd, char __user * buf, size_t count);
+long sys32_clone(void);
+long sys32_fadvise64(int fd, loff_t offset, size_t len, int advise);
+long sys32_fadvise64_64(struct fadvise64_64_args __user *args);
+long sys32_sigaction(int sig, const struct old_sigaction32 __user *act,
+		     struct old_sigaction32 __user *oact);
+long sys32_rt_sigaction(int sig, const struct sigaction32 __user *act,
+			struct sigaction32 __user *oact, size_t sigsetsize);
+long sys32_sigaltstack(const stack_t32 __user *uss, stack_t32 __user *uoss);
 #endif /* _ASM_S390X_S390_H */
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index a5692c4..c7f02e7 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -29,6 +29,7 @@
 #include <asm/lowcore.h>
 #include "compat_linux.h"
 #include "compat_ptrace.h"
+#include "entry.h"
 
 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
 
@@ -428,6 +429,10 @@
 	/* Default to using normal stack */
 	sp = (unsigned long) A(regs->gprs[15]);
 
+	/* Overflow on alternate signal stack gives SIGSEGV. */
+	if (on_sig_stack(sp) && !on_sig_stack((sp - frame_size) & -8UL))
+		return (void __user *) -1UL;
+
 	/* This is the X/Open sanctioned signal stack switching.  */
 	if (ka->sa.sa_flags & SA_ONSTACK) {
 		if (! sas_ss_flags(sp))
@@ -461,6 +466,9 @@
 	if (!access_ok(VERIFY_WRITE, frame, sizeof(sigframe32)))
 		goto give_sigsegv;
 
+	if (frame == (void __user *) -1UL)
+		goto give_sigsegv;
+
 	if (__copy_to_user(&frame->sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE32))
 		goto give_sigsegv;
 
@@ -514,6 +522,9 @@
 	if (!access_ok(VERIFY_WRITE, frame, sizeof(rt_sigframe32)))
 		goto give_sigsegv;
 
+	if (frame == (void __user *) -1UL)
+		goto give_sigsegv;
+
 	if (copy_siginfo_to_user32(&frame->info, info))
 		goto give_sigsegv;
 
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 1b2f5ce..1e7d4ac 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -73,7 +73,7 @@
 static int debug_open(struct inode *inode, struct file *file);
 static int debug_close(struct inode *inode, struct file *file);
 static debug_info_t*  debug_info_create(char *name, int pages_per_area,
-			int nr_areas, int buf_size);
+			int nr_areas, int buf_size, mode_t mode);
 static void debug_info_get(debug_info_t *);
 static void debug_info_put(debug_info_t *);
 static int debug_prolog_level_fn(debug_info_t * id,
@@ -157,7 +157,7 @@
 };
 
 /* used by dump analysis tools to determine version of debug feature */
-unsigned int debug_feature_version = __DEBUG_FEATURE_VERSION;
+static unsigned int __used debug_feature_version = __DEBUG_FEATURE_VERSION;
 
 /* static globals */
 
@@ -327,7 +327,8 @@
  */
 
 static debug_info_t*
-debug_info_create(char *name, int pages_per_area, int nr_areas, int buf_size)
+debug_info_create(char *name, int pages_per_area, int nr_areas, int buf_size,
+		  mode_t mode)
 {
 	debug_info_t* rc;
 
@@ -336,6 +337,8 @@
         if(!rc) 
 		goto out;
 
+	rc->mode = mode & ~S_IFMT;
+
 	/* create root directory */
         rc->debugfs_root_entry = debugfs_create_dir(rc->name,
 					debug_debugfs_root_entry);
@@ -676,23 +679,30 @@
 }
 
 /*
- * debug_register:
- * - creates and initializes debug area for the caller
- * - returns handle for debug area
+ * debug_register_mode:
+ * - Creates and initializes debug area for the caller
+ *   The mode parameter allows specifying access rights for the s390dbf files
+ * - Returns handle for debug area
  */
 
-debug_info_t*
-debug_register (char *name, int pages_per_area, int nr_areas, int buf_size)
+debug_info_t *debug_register_mode(char *name, int pages_per_area, int nr_areas,
+				  int buf_size, mode_t mode, uid_t uid,
+				  gid_t gid)
 {
 	debug_info_t *rc = NULL;
 
+	/*
+	 * Since debugfs currently does not support uid/gid other than root,
+	 * we do not allow gid/uid != 0 until we get support for that.
+	 */
+	if ((uid != 0) || (gid != 0))
+		printk(KERN_WARNING "debug: Warning - Currently only uid/gid "
+		       "= 0 are supported. Using root as owner now!");
 	if (!initialized)
 		BUG();
 	mutex_lock(&debug_mutex);
 
         /* create new debug_info */
 
-	rc = debug_info_create(name, pages_per_area, nr_areas, buf_size);
+	rc = debug_info_create(name, pages_per_area, nr_areas, buf_size, mode);
 	if(!rc) 
 		goto out;
 	debug_register_view(rc, &debug_level_view);
@@ -705,6 +715,20 @@
 	mutex_unlock(&debug_mutex);
 	return rc;
 }
+EXPORT_SYMBOL(debug_register_mode);
+
+/*
+ * debug_register:
+ * - creates and initializes debug area for the caller
+ * - returns handle for debug area
+ */
+
+debug_info_t *debug_register(char *name, int pages_per_area, int nr_areas,
+			     int buf_size)
+{
+	return debug_register_mode(name, pages_per_area, nr_areas, buf_size,
+				   S_IRUSR | S_IWUSR, 0, 0);
+}
 
 /*
  * debug_unregister:
@@ -1073,15 +1097,16 @@
 	int rc = 0;
 	int i;
 	unsigned long flags;
-	mode_t mode = S_IFREG;
+	mode_t mode;
 	struct dentry *pde;
 
 	if (!id)
 		goto out;
-	if (view->prolog_proc || view->format_proc || view->header_proc)
-		mode |= S_IRUSR;
-	if (view->input_proc)
-		mode |= S_IWUSR;
+	mode = (id->mode | S_IFREG) & ~S_IXUGO;
+	if (!(view->prolog_proc || view->format_proc || view->header_proc))
+		mode &= ~(S_IRUSR | S_IRGRP | S_IROTH);
+	if (!view->input_proc)
+		mode &= ~(S_IWUSR | S_IWGRP | S_IWOTH);
 	pde = debugfs_create_file(view->name, mode, id->debugfs_root_entry,
 				id , &debug_file_ops);
 	if (!pde){
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 01832c4..540a67f 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -21,6 +21,7 @@
 #include <asm/setup.h>
 #include <asm/cpcmd.h>
 #include <asm/sclp.h>
+#include "entry.h"
 
 /*
  * Create a Kernel NSS if the SAVESYS= parameter is defined
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
new file mode 100644
index 0000000..6b18963
--- /dev/null
+++ b/arch/s390/kernel/entry.h
@@ -0,0 +1,60 @@
+#ifndef _ENTRY_H
+#define _ENTRY_H
+
+#include <linux/types.h>
+#include <linux/signal.h>
+#include <asm/ptrace.h>
+
+typedef void pgm_check_handler_t(struct pt_regs *, long);
+extern pgm_check_handler_t *pgm_check_table[128];
+pgm_check_handler_t do_protection_exception;
+pgm_check_handler_t do_dat_exception;
+
+extern int sysctl_userprocess_debug;
+
+void do_single_step(struct pt_regs *regs);
+void syscall_trace(struct pt_regs *regs, int entryexit);
+void kernel_stack_overflow(struct pt_regs * regs);
+void do_signal(struct pt_regs *regs);
+int handle_signal32(unsigned long sig, struct k_sigaction *ka,
+		    siginfo_t *info, sigset_t *oldset, struct pt_regs *regs);
+
+void do_extint(struct pt_regs *regs, unsigned short code);
+int __cpuinit start_secondary(void *cpuvoid);
+void __init startup_init(void);
+void die(const char * str, struct pt_regs * regs, long err);
+
+struct new_utsname;
+struct mmap_arg_struct;
+struct fadvise64_64_args;
+struct old_sigaction;
+struct sel_arg_struct;
+
+long sys_pipe(unsigned long __user *fildes);
+long sys_mmap2(struct mmap_arg_struct __user  *arg);
+long old_mmap(struct mmap_arg_struct __user *arg);
+long sys_ipc(uint call, int first, unsigned long second,
+	     unsigned long third, void __user *ptr);
+long s390x_newuname(struct new_utsname __user *name);
+long s390x_personality(unsigned long personality);
+long s390_fadvise64(int fd, u32 offset_high, u32 offset_low,
+		    size_t len, int advice);
+long s390_fadvise64_64(struct fadvise64_64_args __user *args);
+long s390_fallocate(int fd, int mode, loff_t offset, u32 len_high, u32 len_low);
+long sys_fork(void);
+long sys_clone(void);
+long sys_vfork(void);
+void execve_tail(void);
+long sys_execve(void);
+int sys_sigsuspend(int history0, int history1, old_sigset_t mask);
+long sys_sigaction(int sig, const struct old_sigaction __user *act,
+		   struct old_sigaction __user *oact);
+long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss);
+long sys_sigreturn(void);
+long sys_rt_sigreturn(void);
+long sys32_sigreturn(void);
+long sys32_rt_sigreturn(void);
+long old_select(struct sel_arg_struct __user *arg);
+long sys_ptrace(long request, long pid, long addr, long data);
+
+#endif /* _ENTRY_H */
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index efde6e1..cd959c0 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -475,6 +475,7 @@
 pgm_no_vtime:
 #endif
 	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
+	mvc	SP_ARGS(8,%r15),__LC_LAST_BREAK
 	TRACE_IRQS_OFF
 	lgf	%r3,__LC_PGM_ILC	# load program interruption code
 	lghi	%r8,0x7f
@@ -847,6 +848,7 @@
 	je	0f
 	la	%r1,__LC_SAVE_AREA+32
 0:	mvc	SP_R12(32,%r15),0(%r1)	# move %r12-%r15 to stack
+	mvc	SP_ARGS(8,%r15),__LC_LAST_BREAK
 	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain
 	la	%r2,SP_PTREGS(%r15)	# load pt_regs
 	jg	kernel_stack_overflow
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 375232c..5325424 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -655,7 +655,7 @@
 
 static struct kset *reipl_kset;
 
-void reipl_run(struct shutdown_trigger *trigger)
+static void reipl_run(struct shutdown_trigger *trigger)
 {
 	struct ccw_dev_id devid;
 	static char buf[100];
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index c5549a2..ed04d13 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -360,7 +360,7 @@
  *	- When the probed function returns, this probe
  *		causes the handlers to fire
  */
-void kretprobe_trampoline_holder(void)
+static void __used kretprobe_trampoline_holder(void)
 {
 	asm volatile(".global kretprobe_trampoline\n"
 		     "kretprobe_trampoline: bcr 0,0\n");
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index ce20315..c1aff19 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -36,6 +36,8 @@
 #include <linux/module.h>
 #include <linux/notifier.h>
 #include <linux/utsname.h>
+#include <linux/tick.h>
+#include <linux/elfcore.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/system.h>
@@ -44,6 +46,7 @@
 #include <asm/irq.h>
 #include <asm/timer.h>
 #include <asm/cpu.h>
+#include "entry.h"
 
 asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
 
@@ -76,6 +79,7 @@
  * Need to know about CPUs going idle?
  */
 static ATOMIC_NOTIFIER_HEAD(idle_chain);
+DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
 
 int register_idle_notifier(struct notifier_block *nb)
 {
@@ -89,9 +93,33 @@
 }
 EXPORT_SYMBOL(unregister_idle_notifier);
 
-void do_monitor_call(struct pt_regs *regs, long interruption_code)
+static int s390_idle_enter(void)
 {
-#ifdef CONFIG_SMP
+	struct s390_idle_data *idle;
+	int nr_calls = 0;
+	void *hcpu;
+	int rc;
+
+	hcpu = (void *)(long)smp_processor_id();
+	rc = __atomic_notifier_call_chain(&idle_chain, S390_CPU_IDLE, hcpu, -1,
+					  &nr_calls);
+	if (rc == NOTIFY_BAD) {
+		nr_calls--;
+		__atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
+					     hcpu, nr_calls, NULL);
+		return rc;
+	}
+	idle = &__get_cpu_var(s390_idle);
+	spin_lock(&idle->lock);
+	idle->idle_count++;
+	idle->in_idle = 1;
+	idle->idle_enter = get_clock();
+	spin_unlock(&idle->lock);
+	return NOTIFY_OK;
+}
+
+void s390_idle_leave(void)
+{
 	struct s390_idle_data *idle;
 
 	idle = &__get_cpu_var(s390_idle);
@@ -99,10 +127,6 @@
 	idle->idle_time += get_clock() - idle->idle_enter;
 	idle->in_idle = 0;
 	spin_unlock(&idle->lock);
-#endif
-	/* disable monitor call class 0 */
-	__ctl_clear_bit(8, 15);
-
 	atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
 				   (void *)(long) smp_processor_id());
 }
@@ -113,61 +137,30 @@
  */
 static void default_idle(void)
 {
-	int cpu, rc;
-	int nr_calls = 0;
-	void *hcpu;
-#ifdef CONFIG_SMP
-	struct s390_idle_data *idle;
-#endif
-
 	/* CPU is going idle. */
-	cpu = smp_processor_id();
-	hcpu = (void *)(long)cpu;
 	local_irq_disable();
 	if (need_resched()) {
 		local_irq_enable();
 		return;
 	}
-
-	rc = __atomic_notifier_call_chain(&idle_chain, S390_CPU_IDLE, hcpu, -1,
-					  &nr_calls);
-	if (rc == NOTIFY_BAD) {
-		nr_calls--;
-		__atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
-					     hcpu, nr_calls, NULL);
+	if (s390_idle_enter() == NOTIFY_BAD) {
 		local_irq_enable();
 		return;
 	}
-
-	/* enable monitor call class 0 */
-	__ctl_set_bit(8, 15);
-
 #ifdef CONFIG_HOTPLUG_CPU
-	if (cpu_is_offline(cpu)) {
+	if (cpu_is_offline(smp_processor_id())) {
 		preempt_enable_no_resched();
 		cpu_die();
 	}
 #endif
-
 	local_mcck_disable();
 	if (test_thread_flag(TIF_MCCK_PENDING)) {
 		local_mcck_enable();
-		/* disable monitor call class 0 */
-		__ctl_clear_bit(8, 15);
-		atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
-					   hcpu);
+		s390_idle_leave();
 		local_irq_enable();
 		s390_handle_mcck();
 		return;
 	}
-#ifdef CONFIG_SMP
-	idle = &__get_cpu_var(s390_idle);
-	spin_lock(&idle->lock);
-	idle->idle_count++;
-	idle->in_idle = 1;
-	idle->idle_enter = get_clock();
-	spin_unlock(&idle->lock);
-#endif
 	trace_hardirqs_on();
 	/* Wait for external, I/O or machine check interrupt. */
 	__load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
@@ -177,9 +170,10 @@
 void cpu_idle(void)
 {
 	for (;;) {
+		tick_nohz_stop_sched_tick();
 		while (!need_resched())
 			default_idle();
-
+		tick_nohz_restart_sched_tick();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
@@ -201,6 +195,7 @@
 	/* Show stack backtrace if pt_regs is from kernel mode */
 	if (!(regs->psw.mask & PSW_MASK_PSTATE))
 		show_trace(NULL, (unsigned long *) regs->gprs[15]);
+	show_last_breaking_event(regs);
 }
 
 extern void kernel_thread_starter(void);
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 6e036ba..58a0642 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -41,6 +41,7 @@
 #include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
+#include "entry.h"
 
 #ifdef CONFIG_COMPAT
 #include "compat_ptrace.h"
diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c
index acf93db..e019b41 100644
--- a/arch/s390/kernel/s390_ext.c
+++ b/arch/s390/kernel/s390_ext.c
@@ -13,11 +13,12 @@
 #include <linux/errno.h>
 #include <linux/kernel_stat.h>
 #include <linux/interrupt.h>
-
+#include <asm/cpu.h>
 #include <asm/lowcore.h>
 #include <asm/s390_ext.h>
 #include <asm/irq_regs.h>
 #include <asm/irq.h>
+#include "entry.h"
 
 /*
  * ext_int_hash[index] is the start of the list for all external interrupts
@@ -119,13 +120,10 @@
 
 	old_regs = set_irq_regs(regs);
 	irq_enter();
-	asm volatile ("mc 0,0");
-	if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer)
-		/**
-		 * Make sure that the i/o interrupt did not "overtake"
-		 * the last HZ timer interrupt.
-		 */
-		account_ticks(S390_lowcore.int_clock);
+	s390_idle_check();
+	if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
+		/* Serve timer interrupts first. */
+		clock_comparator_work();
 	kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
         index = ext_hash(code);
 	for (p = ext_int_hash[index]; p; p = p->next) {
diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c
index 7234c73..48238a1 100644
--- a/arch/s390/kernel/s390_ksyms.c
+++ b/arch/s390/kernel/s390_ksyms.c
@@ -27,13 +27,6 @@
 EXPORT_SYMBOL(_sb_findmap);
 
 /*
- * semaphore ops
- */
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_interruptible);
-
-/*
  * binfmt_elf loader 
  */
 extern int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs);
diff --git a/arch/s390/kernel/semaphore.c b/arch/s390/kernel/semaphore.c
deleted file mode 100644
index 191303f..0000000
--- a/arch/s390/kernel/semaphore.c
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- *  linux/arch/s390/kernel/semaphore.c
- *
- *  S390 version
- *    Copyright (C) 1998-2000 IBM Corporation
- *    Author(s): Martin Schwidefsky
- *
- *  Derived from "linux/arch/i386/kernel/semaphore.c
- *    Copyright (C) 1999, Linus Torvalds
- *
- */
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-
-#include <asm/semaphore.h>
-
-/*
- * Atomically update sem->count. Equivalent to:
- *   old_val = sem->count.counter;
- *   new_val = ((old_val >= 0) ? old_val : 0) + incr;
- *   sem->count.counter = new_val;
- *   return old_val;
- */
-static inline int __sem_update_count(struct semaphore *sem, int incr)
-{
-	int old_val, new_val;
-
-	asm volatile(
-		"	l	%0,0(%3)\n"
-		"0:	ltr	%1,%0\n"
-		"	jhe	1f\n"
-		"	lhi	%1,0\n"
-		"1:	ar	%1,%4\n"
-		"	cs	%0,%1,0(%3)\n"
-		"	jl	0b\n"
-		: "=&d" (old_val), "=&d" (new_val), "=m" (sem->count)
-		: "a" (&sem->count), "d" (incr), "m" (sem->count)
-		: "cc");
-	return old_val;
-}
-
-/*
- * The inline function up() incremented count but the result
- * was <= 0. This indicates that some process is waiting on
- * the semaphore. The semaphore is free and we'll wake the
- * first sleeping process, so we set count to 1 unless some
- * other cpu has called up in the meantime in which case
- * we just increment count by 1.
- */
-void __up(struct semaphore *sem)
-{
-	__sem_update_count(sem, 1);
-	wake_up(&sem->wait);
-}
-
-/*
- * The inline function down() decremented count and the result
- * was < 0. The wait loop will atomically test and update the
- * semaphore counter following the rules:
- *   count > 0: decrement count, wake up queue and exit.
- *   count <= 0: set count to -1, go to sleep.
- */
-void __sched __down(struct semaphore * sem)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-
-	__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-	add_wait_queue_exclusive(&sem->wait, &wait);
-	while (__sem_update_count(sem, -1) <= 0) {
-		schedule();
-		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-	}
-	remove_wait_queue(&sem->wait, &wait);
-	__set_task_state(tsk, TASK_RUNNING);
-	wake_up(&sem->wait);
-}
-
-/*
- * Same as __down() with an additional test for signals.
- * If a signal is pending the count is updated as follows:
- *   count > 0: wake up queue and exit.
- *   count <= 0: set count to 0, wake up queue and exit.
- */
-int __sched __down_interruptible(struct semaphore * sem)
-{
-	int retval = 0;
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-
-	__set_task_state(tsk, TASK_INTERRUPTIBLE);
-	add_wait_queue_exclusive(&sem->wait, &wait);
-	while (__sem_update_count(sem, -1) <= 0) {
-		if (signal_pending(current)) {
-			__sem_update_count(sem, 0);
-			retval = -EINTR;
-			break;
-		}
-		schedule();
-		set_task_state(tsk, TASK_INTERRUPTIBLE);
-	}
-	remove_wait_queue(&sem->wait, &wait);
-	__set_task_state(tsk, TASK_RUNNING);
-	wake_up(&sem->wait);
-	return retval;
-}
-
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 290e504..7141147 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -39,6 +39,7 @@
 #include <linux/pfn.h>
 #include <linux/ctype.h>
 #include <linux/reboot.h>
+#include <linux/topology.h>
 
 #include <asm/ipl.h>
 #include <asm/uaccess.h>
@@ -427,7 +428,7 @@
 	lc->io_new_psw.mask = psw_kernel_bits;
 	lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
 	lc->ipl_device = S390_lowcore.ipl_device;
-	lc->jiffy_timer = -1LL;
+	lc->clock_comparator = -1ULL;
 	lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE;
 	lc->async_stack = (unsigned long)
 		__alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE;
@@ -687,7 +688,7 @@
 	return S390_lowcore.stfl_fac_list;
 }
 
-static __init int stfle(unsigned long long *list, int doublewords)
+static int __init __stfle(unsigned long long *list, int doublewords)
 {
 	typedef struct { unsigned long long _[doublewords]; } addrtype;
 	register unsigned long __nr asm("0") = doublewords - 1;
@@ -697,6 +698,13 @@
 	return __nr + 1;
 }
 
+int __init stfle(unsigned long long *list, int doublewords)
+{
+	if (!(stfl() & (1UL << 24)))
+		return -EOPNOTSUPP;
+	return __stfle(list, doublewords);
+}
+
 /*
  * Setup hardware capabilities.
  */
@@ -741,7 +749,7 @@
 	 *   HWCAP_S390_DFP bit 6.
 	 */
 	if ((elf_hwcap & (1UL << 2)) &&
-	    stfle(&facility_list_extended, 1) > 0) {
+	    __stfle(&facility_list_extended, 1) > 0) {
 		if (facility_list_extended & (1ULL << (64 - 43)))
 			elf_hwcap |= 1UL << 6;
 	}
@@ -823,6 +831,7 @@
 
         cpu_init();
         __cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr;
+	s390_init_cpu_topology();
 
 	/*
 	 * Setup capabilities (ELF_HWCAP & ELF_PLATFORM).
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 4449bf3..b976820 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -27,6 +27,7 @@
 #include <asm/ucontext.h>
 #include <asm/uaccess.h>
 #include <asm/lowcore.h>
+#include "entry.h"
 
 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
 
@@ -235,6 +236,10 @@
 	/* Default to using normal stack */
 	sp = regs->gprs[15];
 
+	/* Overflow on alternate signal stack gives SIGSEGV. */
+	if (on_sig_stack(sp) && !on_sig_stack((sp - frame_size) & -8UL))
+		return (void __user *) -1UL;
+
 	/* This is the X/Open sanctioned signal stack switching.  */
 	if (ka->sa.sa_flags & SA_ONSTACK) {
 		if (! sas_ss_flags(sp))
@@ -270,6 +275,9 @@
 	if (!access_ok(VERIFY_WRITE, frame, sizeof(sigframe)))
 		goto give_sigsegv;
 
+	if (frame == (void __user *) -1UL)
+		goto give_sigsegv;
+
 	if (__copy_to_user(&frame->sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE))
 		goto give_sigsegv;
 
@@ -327,6 +335,9 @@
 	if (!access_ok(VERIFY_WRITE, frame, sizeof(rt_sigframe)))
 		goto give_sigsegv;
 
+	if (frame == (void __user *) -1UL)
+		goto give_sigsegv;
+
 	if (copy_siginfo_to_user(&frame->info, info))
 		goto give_sigsegv;
 
@@ -474,11 +485,6 @@
 		int ret;
 #ifdef CONFIG_COMPAT
 		if (test_thread_flag(TIF_31BIT)) {
-			extern int handle_signal32(unsigned long sig,
-						   struct k_sigaction *ka,
-						   siginfo_t *info,
-						   sigset_t *oldset,
-						   struct pt_regs *regs);
 			ret = handle_signal32(signr, &ka, &info, oldset, regs);
 	        }
 		else
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 8f894d3..0dfa988 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -44,6 +44,7 @@
 #include <asm/lowcore.h>
 #include <asm/sclp.h>
 #include <asm/cpu.h>
+#include "entry.h"
 
 /*
  * An array with a pointer the lowcore of every CPU.
@@ -67,13 +68,12 @@
 	CPU_STATE_CONFIGURED,
 };
 
-#ifdef CONFIG_HOTPLUG_CPU
-static DEFINE_MUTEX(smp_cpu_state_mutex);
-#endif
+DEFINE_MUTEX(smp_cpu_state_mutex);
+int smp_cpu_polarization[NR_CPUS];
 static int smp_cpu_state[NR_CPUS];
+static int cpu_management;
 
 static DEFINE_PER_CPU(struct cpu, cpu_devices);
-DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
 
 static void smp_ext_bitcall(int, ec_bit_sig);
 
@@ -298,7 +298,7 @@
 /*
  * this function sends a 'purge tlb' signal to another CPU.
  */
-void smp_ptlb_callback(void *info)
+static void smp_ptlb_callback(void *info)
 {
 	__tlb_flush_local();
 }
@@ -456,6 +456,7 @@
 		if (cpu_known(cpu_id))
 			continue;
 		__cpu_logical_map[logical_cpu] = cpu_id;
+		smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
 		if (!cpu_stopped(logical_cpu))
 			continue;
 		cpu_set(logical_cpu, cpu_present_map);
@@ -489,6 +490,7 @@
 		if (cpu_known(cpu_id))
 			continue;
 		__cpu_logical_map[logical_cpu] = cpu_id;
+		smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
 		cpu_set(logical_cpu, cpu_present_map);
 		if (cpu >= info->configured)
 			smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
@@ -846,6 +848,7 @@
 	S390_lowcore.percpu_offset = __per_cpu_offset[0];
 	current_set[0] = current;
 	smp_cpu_state[0] = CPU_STATE_CONFIGURED;
+	smp_cpu_polarization[0] = POLARIZATION_UNKNWN;
 	spin_lock_init(&(&__get_cpu_var(s390_idle))->lock);
 }
 
@@ -897,15 +900,19 @@
 	case 0:
 		if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) {
 			rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]);
-			if (!rc)
+			if (!rc) {
 				smp_cpu_state[cpu] = CPU_STATE_STANDBY;
+				smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
+			}
 		}
 		break;
 	case 1:
 		if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) {
 			rc = sclp_cpu_configure(__cpu_logical_map[cpu]);
-			if (!rc)
+			if (!rc) {
 				smp_cpu_state[cpu] = CPU_STATE_CONFIGURED;
+				smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
+			}
 		}
 		break;
 	default:
@@ -919,6 +926,34 @@
 static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
 #endif /* CONFIG_HOTPLUG_CPU */
 
+static ssize_t cpu_polarization_show(struct sys_device *dev, char *buf)
+{
+	int cpu = dev->id;
+	ssize_t count;
+
+	mutex_lock(&smp_cpu_state_mutex);
+	switch (smp_cpu_polarization[cpu]) {
+	case POLARIZATION_HRZ:
+		count = sprintf(buf, "horizontal\n");
+		break;
+	case POLARIZATION_VL:
+		count = sprintf(buf, "vertical:low\n");
+		break;
+	case POLARIZATION_VM:
+		count = sprintf(buf, "vertical:medium\n");
+		break;
+	case POLARIZATION_VH:
+		count = sprintf(buf, "vertical:high\n");
+		break;
+	default:
+		count = sprintf(buf, "unknown\n");
+		break;
+	}
+	mutex_unlock(&smp_cpu_state_mutex);
+	return count;
+}
+static SYSDEV_ATTR(polarization, 0444, cpu_polarization_show, NULL);
+
 static ssize_t show_cpu_address(struct sys_device *dev, char *buf)
 {
 	return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]);
@@ -931,6 +966,7 @@
 	&attr_configure.attr,
 #endif
 	&attr_address.attr,
+	&attr_polarization.attr,
 	NULL,
 };
 
@@ -1075,11 +1111,48 @@
 out:
 	put_online_cpus();
 	mutex_unlock(&smp_cpu_state_mutex);
+	if (!cpus_empty(newcpus))
+		topology_schedule_update();
 	return rc ? rc : count;
 }
 static SYSDEV_ATTR(rescan, 0200, NULL, rescan_store);
 #endif /* CONFIG_HOTPLUG_CPU */
 
+static ssize_t dispatching_show(struct sys_device *dev, char *buf)
+{
+	ssize_t count;
+
+	mutex_lock(&smp_cpu_state_mutex);
+	count = sprintf(buf, "%d\n", cpu_management);
+	mutex_unlock(&smp_cpu_state_mutex);
+	return count;
+}
+
+static ssize_t dispatching_store(struct sys_device *dev, const char *buf,
+				 size_t count)
+{
+	int val, rc;
+	char delim;
+
+	if (sscanf(buf, "%d %c", &val, &delim) != 1)
+		return -EINVAL;
+	if (val != 0 && val != 1)
+		return -EINVAL;
+	rc = 0;
+	mutex_lock(&smp_cpu_state_mutex);
+	get_online_cpus();
+	if (cpu_management == val)
+		goto out;
+	rc = topology_set_cpu_management(val);
+	if (!rc)
+		cpu_management = val;
+out:
+	put_online_cpus();
+	mutex_unlock(&smp_cpu_state_mutex);
+	return rc ? rc : count;
+}
+static SYSDEV_ATTR(dispatching, 0644, dispatching_show, dispatching_store);
+
 static int __init topology_init(void)
 {
 	int cpu;
@@ -1093,6 +1166,10 @@
 	if (rc)
 		return rc;
 #endif
+	rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
+			       &attr_dispatching.attr);
+	if (rc)
+		return rc;
 	for_each_present_cpu(cpu) {
 		rc = smp_add_present_cpu(cpu);
 		if (rc)
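The new class-level "dispatching" attribute exposes cpu_management:
writing 1 requests vertical CPU polarization via
topology_set_cpu_management(), writing 0 switches back to horizontal.
A user-space sketch (the sysfs path is assumed from the cpu sysdev
class layout, i.e. /sys/devices/system/cpu/dispatching):

    #include <stdio.h>

    /* Hypothetical helper: request vertical CPU polarization. */
    int main(void)
    {
        FILE *f = fopen("/sys/devices/system/cpu/dispatching", "w");

        if (!f) {
            perror("dispatching");
            return 1;
        }
        fputs("1\n", f);    /* "1" = vertical, "0" = horizontal */
        return fclose(f) != 0;
    }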
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c
index fefee99..988d0d6 100644
--- a/arch/s390/kernel/sys_s390.c
+++ b/arch/s390/kernel/sys_s390.c
@@ -29,8 +29,8 @@
 #include <linux/personality.h>
 #include <linux/unistd.h>
 #include <linux/ipc.h>
-
 #include <asm/uaccess.h>
+#include "entry.h"
 
 /*
  * sys_pipe() is the normal C calling standard for creating
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index cb232c1..7aec676 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -30,7 +30,7 @@
 #include <linux/timex.h>
 #include <linux/notifier.h>
 #include <linux/clocksource.h>
-
+#include <linux/clockchips.h>
 #include <asm/uaccess.h>
 #include <asm/delay.h>
 #include <asm/s390_ext.h>
@@ -39,6 +39,7 @@
 #include <asm/irq_regs.h>
 #include <asm/timer.h>
 #include <asm/etr.h>
+#include <asm/cio.h>
 
 /* change this if you have some constant time drift */
 #define USECS_PER_JIFFY     ((unsigned long) 1000000/HZ)
@@ -57,16 +58,16 @@
 
 static ext_int_info_t ext_int_info_cc;
 static ext_int_info_t ext_int_etr_cc;
-static u64 init_timer_cc;
 static u64 jiffies_timer_cc;
-static u64 xtime_cc;
+
+static DEFINE_PER_CPU(struct clock_event_device, comparators);
 
 /*
  * Scheduler clock - returns current time in nanosec units.
  */
 unsigned long long sched_clock(void)
 {
-	return ((get_clock() - jiffies_timer_cc) * 125) >> 9;
+	return ((get_clock_xt() - jiffies_timer_cc) * 125) >> 9;
 }
 
 /*
@@ -95,162 +96,40 @@
 #define s390_do_profile()	do { ; } while(0)
 #endif /* CONFIG_PROFILING */
 
-/*
- * Advance the per cpu tick counter up to the time given with the
- * "time" argument. The per cpu update consists of accounting
- * the virtual cpu time, calling update_process_times and calling
- * the profiling hook. If xtime is before time it is advanced as well.
- */
-void account_ticks(u64 time)
+void clock_comparator_work(void)
 {
-	__u32 ticks;
-	__u64 tmp;
+	struct clock_event_device *cd;
 
-	/* Calculate how many ticks have passed. */
-	if (time < S390_lowcore.jiffy_timer)
-		return;
-	tmp = time - S390_lowcore.jiffy_timer;
-	if (tmp >= 2*CLK_TICKS_PER_JIFFY) {  /* more than two ticks ? */
-		ticks = __div(tmp, CLK_TICKS_PER_JIFFY) + 1;
-		S390_lowcore.jiffy_timer +=
-			CLK_TICKS_PER_JIFFY * (__u64) ticks;
-	} else if (tmp >= CLK_TICKS_PER_JIFFY) {
-		ticks = 2;
-		S390_lowcore.jiffy_timer += 2*CLK_TICKS_PER_JIFFY;
-	} else {
-		ticks = 1;
-		S390_lowcore.jiffy_timer += CLK_TICKS_PER_JIFFY;
-	}
-
-#ifdef CONFIG_SMP
-	/*
-	 * Do not rely on the boot cpu to do the calls to do_timer.
-	 * Spread it over all cpus instead.
-	 */
-	write_seqlock(&xtime_lock);
-	if (S390_lowcore.jiffy_timer > xtime_cc) {
-		__u32 xticks;
-		tmp = S390_lowcore.jiffy_timer - xtime_cc;
-		if (tmp >= 2*CLK_TICKS_PER_JIFFY) {
-			xticks = __div(tmp, CLK_TICKS_PER_JIFFY);
-			xtime_cc += (__u64) xticks * CLK_TICKS_PER_JIFFY;
-		} else {
-			xticks = 1;
-			xtime_cc += CLK_TICKS_PER_JIFFY;
-		}
-		do_timer(xticks);
-	}
-	write_sequnlock(&xtime_lock);
-#else
-	do_timer(ticks);
-#endif
-
-	while (ticks--)
-		update_process_times(user_mode(get_irq_regs()));
-
+	S390_lowcore.clock_comparator = -1ULL;
+	set_clock_comparator(S390_lowcore.clock_comparator);
+	cd = &__get_cpu_var(comparators);
+	cd->event_handler(cd);
 	s390_do_profile();
 }
 
-#ifdef CONFIG_NO_IDLE_HZ
-
-#ifdef CONFIG_NO_IDLE_HZ_INIT
-int sysctl_hz_timer = 0;
-#else
-int sysctl_hz_timer = 1;
-#endif
-
 /*
- * Stop the HZ tick on the current CPU.
- * Only cpu_idle may call this function.
+ * Fixup the clock comparator.
  */
-static void stop_hz_timer(void)
+static void fixup_clock_comparator(unsigned long long delta)
 {
-	unsigned long flags;
-	unsigned long seq, next;
-	__u64 timer, todval;
-	int cpu = smp_processor_id();
-
-	if (sysctl_hz_timer != 0)
+	/* If nobody is waiting there's nothing to fix. */
+	if (S390_lowcore.clock_comparator == -1ULL)
 		return;
-
-	cpu_set(cpu, nohz_cpu_mask);
-
-	/*
-	 * Leave the clock comparator set up for the next timer
-	 * tick if either rcu or a softirq is pending.
-	 */
-	if (rcu_needs_cpu(cpu) || local_softirq_pending()) {
-		cpu_clear(cpu, nohz_cpu_mask);
-		return;
-	}
-
-	/*
-	 * This cpu is going really idle. Set up the clock comparator
-	 * for the next event.
-	 */
-	next = next_timer_interrupt();
-	do {
-		seq = read_seqbegin_irqsave(&xtime_lock, flags);
-		timer = ((__u64) next) - ((__u64) jiffies) + jiffies_64;
-	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
-	todval = -1ULL;
-	/* Be careful about overflows. */
-	if (timer < (-1ULL / CLK_TICKS_PER_JIFFY)) {
-		timer = jiffies_timer_cc + timer * CLK_TICKS_PER_JIFFY;
-		if (timer >= jiffies_timer_cc)
-			todval = timer;
-	}
-	set_clock_comparator(todval);
+	S390_lowcore.clock_comparator += delta;
+	set_clock_comparator(S390_lowcore.clock_comparator);
 }
 
-/*
- * Start the HZ tick on the current CPU.
- * Only cpu_idle may call this function.
- */
-static void start_hz_timer(void)
+static int s390_next_event(unsigned long delta,
+			   struct clock_event_device *evt)
 {
-	if (!cpu_isset(smp_processor_id(), nohz_cpu_mask))
-		return;
-	account_ticks(get_clock());
-	set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION);
-	cpu_clear(smp_processor_id(), nohz_cpu_mask);
+	S390_lowcore.clock_comparator = get_clock() + delta;
+	set_clock_comparator(S390_lowcore.clock_comparator);
+	return 0;
 }
 
-static int nohz_idle_notify(struct notifier_block *self,
-			    unsigned long action, void *hcpu)
+static void s390_set_mode(enum clock_event_mode mode,
+			  struct clock_event_device *evt)
 {
-	switch (action) {
-	case S390_CPU_IDLE:
-		stop_hz_timer();
-		break;
-	case S390_CPU_NOT_IDLE:
-		start_hz_timer();
-		break;
-	}
-	return NOTIFY_OK;
-}
-
-static struct notifier_block nohz_idle_nb = {
-	.notifier_call = nohz_idle_notify,
-};
-
-static void __init nohz_init(void)
-{
-	if (register_idle_notifier(&nohz_idle_nb))
-		panic("Couldn't register idle notifier");
-}
-
-#endif
-
-/*
- * Set up per cpu jiffy timer and set the clock comparator.
- */
-static void setup_jiffy_timer(void)
-{
-	/* Set up clock comparator to next jiffy. */
-	S390_lowcore.jiffy_timer =
-		jiffies_timer_cc + (jiffies_64 + 1) * CLK_TICKS_PER_JIFFY;
-	set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION);
 }
 
 /*
@@ -259,7 +138,26 @@
  */
 void init_cpu_timer(void)
 {
-	setup_jiffy_timer();
+	struct clock_event_device *cd;
+	int cpu;
+
+	S390_lowcore.clock_comparator = -1ULL;
+	set_clock_comparator(S390_lowcore.clock_comparator);
+
+	cpu = smp_processor_id();
+	cd = &per_cpu(comparators, cpu);
+	cd->name		= "comparator";
+	cd->features		= CLOCK_EVT_FEAT_ONESHOT;
+	cd->mult		= 16777;
+	cd->shift		= 12;
+	cd->min_delta_ns	= 1;
+	cd->max_delta_ns	= LONG_MAX;
+	cd->rating		= 400;
+	cd->cpumask		= cpumask_of_cpu(cpu);
+	cd->set_next_event	= s390_next_event;
+	cd->set_mode		= s390_set_mode;
+
+	clockevents_register_device(cd);
 
 	/* Enable clock comparator timer interrupt. */
 	__ctl_set_bit(0,11);
@@ -270,8 +168,6 @@
 
 static void clock_comparator_interrupt(__u16 code)
 {
-	/* set clock comparator for next tick */
-	set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION);
 }
 
 static void etr_reset(void);
@@ -316,8 +212,9 @@
  */
 void __init time_init(void)
 {
+	u64 init_timer_cc;
+
 	init_timer_cc = reset_tod_clock();
-	xtime_cc = init_timer_cc + CLK_TICKS_PER_JIFFY;
 	jiffies_timer_cc = init_timer_cc - jiffies_64 * CLK_TICKS_PER_JIFFY;
 
 	/* set xtime */
@@ -342,10 +239,6 @@
 	/* Enable TOD clock interrupts on the boot cpu. */
 	init_cpu_timer();
 
-#ifdef CONFIG_NO_IDLE_HZ
-	nohz_init();
-#endif
-
 #ifdef CONFIG_VIRT_TIMER
 	vtime_init();
 #endif
@@ -699,53 +592,49 @@
 }
 
 /*
- * The time is "clock". xtime is what we think the time is.
+ * The time is "clock". old is what we think the time is.
  * Adjust the value by a multiple of jiffies and add the delta to ntp.
  * "delay" is an approximation how long the synchronization took. If
  * the time correction is positive, then "delay" is subtracted from
  * the time difference and only the remaining part is passed to ntp.
  */
-static void etr_adjust_time(unsigned long long clock, unsigned long long delay)
+static unsigned long long etr_adjust_time(unsigned long long old,
+					  unsigned long long clock,
+					  unsigned long long delay)
 {
 	unsigned long long delta, ticks;
 	struct timex adjust;
 
-	/*
-	 * We don't have to take the xtime lock because the cpu
-	 * executing etr_adjust_time is running disabled in
-	 * tasklet context and all other cpus are looping in
-	 * etr_sync_cpu_start.
-	 */
-	if (clock > xtime_cc) {
+	if (clock > old) {
 		/* It is later than we thought. */
-		delta = ticks = clock - xtime_cc;
+		delta = ticks = clock - old;
 		delta = ticks = (delta < delay) ? 0 : delta - delay;
 		delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
-		init_timer_cc = init_timer_cc + delta;
-		jiffies_timer_cc = jiffies_timer_cc + delta;
-		xtime_cc = xtime_cc + delta;
 		adjust.offset = ticks * (1000000 / HZ);
 	} else {
 		/* It is earlier than we thought. */
-		delta = ticks = xtime_cc - clock;
+		delta = ticks = old - clock;
 		delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
-		init_timer_cc = init_timer_cc - delta;
-		jiffies_timer_cc = jiffies_timer_cc - delta;
-		xtime_cc = xtime_cc - delta;
+		delta = -delta;
 		adjust.offset = -ticks * (1000000 / HZ);
 	}
+	jiffies_timer_cc += delta;
 	if (adjust.offset != 0) {
 		printk(KERN_NOTICE "etr: time adjusted by %li micro-seconds\n",
 		       adjust.offset);
 		adjust.modes = ADJ_OFFSET_SINGLESHOT;
 		do_adjtimex(&adjust);
 	}
+	return delta;
 }
 
+static struct {
+	int in_sync;
+	unsigned long long fixup_cc;
+} etr_sync;
+
 static void etr_sync_cpu_start(void *dummy)
 {
-	int *in_sync = dummy;
-
 	etr_enable_sync_clock();
 	/*
 	 * This looks like a busy wait loop but it isn't. etr_sync_cpus
@@ -753,7 +642,7 @@
 	 * __udelay will stop the cpu on an enabled wait psw until the
 	 * TOD is running again.
 	 */
-	while (*in_sync == 0) {
+	while (etr_sync.in_sync == 0) {
 		__udelay(1);
 		/*
 		 * A different cpu changes etr_sync.in_sync. Therefore use
@@ -761,14 +650,14 @@
 		 */
 		barrier();
 	}
-	if (*in_sync != 1)
+	if (etr_sync.in_sync != 1)
 		/* Didn't work. Clear per-cpu in sync bit again. */
 		etr_disable_sync_clock(NULL);
 	/*
 	 * This round of TOD syncing is done. Set the clock comparator
 	 * to the next tick and let the processor continue.
 	 */
-	setup_jiffy_timer();
+	fixup_clock_comparator(etr_sync.fixup_cc);
 }
 
 static void etr_sync_cpu_end(void *dummy)
@@ -783,8 +672,8 @@
 static int etr_sync_clock(struct etr_aib *aib, int port)
 {
 	struct etr_aib *sync_port;
-	unsigned long long clock, delay;
-	int in_sync, follows;
+	unsigned long long clock, old_clock, delay, delta;
+	int follows;
 	int rc;
 
 	/* Check if the current aib is adjacent to the sync port aib. */
@@ -799,9 +688,9 @@
 	 * successfully synced the clock. smp_call_function will
 	 * return after all other cpus are in etr_sync_cpu_start.
 	 */
-	in_sync = 0;
+	memset(&etr_sync, 0, sizeof(etr_sync));
 	preempt_disable();
-	smp_call_function(etr_sync_cpu_start,&in_sync,0,0);
+	smp_call_function(etr_sync_cpu_start, NULL, 0, 0);
 	local_irq_disable();
 	etr_enable_sync_clock();
 
@@ -809,6 +698,7 @@
 	__ctl_set_bit(14, 21);
 	__ctl_set_bit(0, 29);
 	clock = ((unsigned long long) (aib->edf2.etv + 1)) << 32;
+	old_clock = get_clock();
 	if (set_clock(clock) == 0) {
 		__udelay(1);	/* Wait for the clock to start. */
 		__ctl_clear_bit(0, 29);
@@ -817,16 +707,17 @@
 		/* Adjust Linux timing variables. */
 		delay = (unsigned long long)
 			(aib->edf2.etv - sync_port->edf2.etv) << 32;
-		etr_adjust_time(clock, delay);
-		setup_jiffy_timer();
+		delta = etr_adjust_time(old_clock, clock, delay);
+		etr_sync.fixup_cc = delta;
+		fixup_clock_comparator(delta);
 		/* Verify that the clock is properly set. */
 		if (!etr_aib_follows(sync_port, aib, port)) {
 			/* Didn't work. */
 			etr_disable_sync_clock(NULL);
-			in_sync = -EAGAIN;
+			etr_sync.in_sync = -EAGAIN;
 			rc = -EAGAIN;
 		} else {
-			in_sync = 1;
+			etr_sync.in_sync = 1;
 			rc = 0;
 		}
 	} else {
@@ -834,7 +725,7 @@
 		__ctl_clear_bit(0, 29);
 		__ctl_clear_bit(14, 21);
 		etr_disable_sync_clock(NULL);
-		in_sync = -EAGAIN;
+		etr_sync.in_sync = -EAGAIN;
 		rc = -EAGAIN;
 	}
 	local_irq_enable();
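A note on the clock_event_device constants: the TOD clock advances 4096
units per microsecond, i.e. 4.096 units per nanosecond, and the
mult/shift pair approximates exactly that ratio, since 16777 / 2^12 =
4.0959. The same ratio appears inverted in sched_clock(), where
(tod * 125) >> 9 converts TOD units back to nanoseconds (125/512 =
1/4.096). A quick standalone check of the rounding error, using the
values from the patch:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long ns = 1000000ULL;              /* 1 ms */
        unsigned long long ticks = (ns * 16777) >> 12;   /* mult, shift */

        /* The exact conversion would be ns * 4096 / 1000 ticks. */
        printf("%llu ns -> %llu ticks (exact %llu)\n",
               ns, ticks, ns * 4096 / 1000);
        return 0;
    }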
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
new file mode 100644
index 0000000..12b39b3
--- /dev/null
+++ b/arch/s390/kernel/topology.c
@@ -0,0 +1,314 @@
+/*
+ *    Copyright IBM Corp. 2007
+ *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/bootmem.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#include <asm/delay.h>
+#include <asm/s390_ext.h>
+#include <asm/sysinfo.h>
+
+#define CPU_BITS 64
+#define NR_MAG 6
+
+#define PTF_HORIZONTAL	(0UL)
+#define PTF_VERTICAL	(1UL)
+#define PTF_CHECK	(2UL)
+
+struct tl_cpu {
+	unsigned char reserved0[4];
+	unsigned char :6;
+	unsigned char pp:2;
+	unsigned char reserved1;
+	unsigned short origin;
+	unsigned long mask[CPU_BITS / BITS_PER_LONG];
+};
+
+struct tl_container {
+	unsigned char reserved[8];
+};
+
+union tl_entry {
+	unsigned char nl;
+	struct tl_cpu cpu;
+	struct tl_container container;
+};
+
+struct tl_info {
+	unsigned char reserved0[2];
+	unsigned short length;
+	unsigned char mag[NR_MAG];
+	unsigned char reserved1;
+	unsigned char mnest;
+	unsigned char reserved2[4];
+	union tl_entry tle[0];
+};
+
+struct core_info {
+	struct core_info *next;
+	cpumask_t mask;
+};
+
+static void topology_work_fn(struct work_struct *work);
+static struct tl_info *tl_info;
+static struct core_info core_info;
+static int machine_has_topology;
+static int machine_has_topology_irq;
+static struct timer_list topology_timer;
+static void set_topology_timer(void);
+static DECLARE_WORK(topology_work, topology_work_fn);
+
+cpumask_t cpu_coregroup_map(unsigned int cpu)
+{
+	struct core_info *core = &core_info;
+	cpumask_t mask;
+
+	cpus_clear(mask);
+	if (!machine_has_topology)
+		return cpu_present_map;
+	mutex_lock(&smp_cpu_state_mutex);
+	while (core) {
+		if (cpu_isset(cpu, core->mask)) {
+			mask = core->mask;
+			break;
+		}
+		core = core->next;
+	}
+	mutex_unlock(&smp_cpu_state_mutex);
+	if (cpus_empty(mask))
+		mask = cpumask_of_cpu(cpu);
+	return mask;
+}
+
+static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core)
+{
+	unsigned int cpu;
+
+	for (cpu = find_first_bit(&tl_cpu->mask[0], CPU_BITS);
+	     cpu < CPU_BITS;
+	     cpu = find_next_bit(&tl_cpu->mask[0], CPU_BITS, cpu + 1))
+	{
+		unsigned int rcpu, lcpu;
+
+		rcpu = CPU_BITS - 1 - cpu + tl_cpu->origin;
+		for_each_present_cpu(lcpu) {
+			if (__cpu_logical_map[lcpu] == rcpu) {
+				cpu_set(lcpu, core->mask);
+				smp_cpu_polarization[lcpu] = tl_cpu->pp;
+			}
+		}
+	}
+}
+
+static void clear_cores(void)
+{
+	struct core_info *core = &core_info;
+
+	while (core) {
+		cpus_clear(core->mask);
+		core = core->next;
+	}
+}
+
+static union tl_entry *next_tle(union tl_entry *tle)
+{
+	if (tle->nl)
+		return (union tl_entry *)((struct tl_container *)tle + 1);
+	else
+		return (union tl_entry *)((struct tl_cpu *)tle + 1);
+}
+
+static void tl_to_cores(struct tl_info *info)
+{
+	union tl_entry *tle, *end;
+	struct core_info *core = &core_info;
+
+	mutex_lock(&smp_cpu_state_mutex);
+	clear_cores();
+	tle = info->tle;
+	end = (union tl_entry *)((unsigned long)info + info->length);
+	while (tle < end) {
+		switch (tle->nl) {
+		case 5:
+		case 4:
+		case 3:
+		case 2:
+			break;
+		case 1:
+			core = core->next;
+			break;
+		case 0:
+			add_cpus_to_core(&tle->cpu, core);
+			break;
+		default:
+			clear_cores();
+			machine_has_topology = 0;
+			return;
+		}
+		tle = next_tle(tle);
+	}
+	mutex_unlock(&smp_cpu_state_mutex);
+}
+
+static void topology_update_polarization_simple(void)
+{
+	int cpu;
+
+	mutex_lock(&smp_cpu_state_mutex);
+	for_each_present_cpu(cpu)
+		smp_cpu_polarization[cpu] = POLARIZATION_HRZ;
+	mutex_unlock(&smp_cpu_state_mutex);
+}
+
+static int ptf(unsigned long fc)
+{
+	int rc;
+
+	asm volatile(
+		"	.insn	rre,0xb9a20000,%1,%1\n"
+		"	ipm	%0\n"
+		"	srl	%0,28\n"
+		: "=d" (rc)
+		: "d" (fc)  : "cc");
+	return rc;
+}
+
+int topology_set_cpu_management(int fc)
+{
+	int cpu;
+	int rc;
+
+	if (!machine_has_topology)
+		return -EOPNOTSUPP;
+	if (fc)
+		rc = ptf(PTF_VERTICAL);
+	else
+		rc = ptf(PTF_HORIZONTAL);
+	if (rc)
+		return -EBUSY;
+	for_each_present_cpu(cpu)
+		smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
+	return rc;
+}
+
+void arch_update_cpu_topology(void)
+{
+	struct tl_info *info = tl_info;
+	struct sys_device *sysdev;
+	int cpu;
+
+	if (!machine_has_topology) {
+		topology_update_polarization_simple();
+		return;
+	}
+	stsi(info, 15, 1, 2);
+	tl_to_cores(info);
+	for_each_online_cpu(cpu) {
+		sysdev = get_cpu_sysdev(cpu);
+		kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
+	}
+}
+
+static void topology_work_fn(struct work_struct *work)
+{
+	arch_reinit_sched_domains();
+}
+
+void topology_schedule_update(void)
+{
+	schedule_work(&topology_work);
+}
+
+static void topology_timer_fn(unsigned long ignored)
+{
+	if (ptf(PTF_CHECK))
+		topology_schedule_update();
+	set_topology_timer();
+}
+
+static void set_topology_timer(void)
+{
+	topology_timer.function = topology_timer_fn;
+	topology_timer.data = 0;
+	topology_timer.expires = jiffies + 60 * HZ;
+	add_timer(&topology_timer);
+}
+
+static void topology_interrupt(__u16 code)
+{
+	schedule_work(&topology_work);
+}
+
+static int __init init_topology_update(void)
+{
+	int rc;
+
+	if (!machine_has_topology) {
+		topology_update_polarization_simple();
+		return 0;
+	}
+	init_timer_deferrable(&topology_timer);
+	if (machine_has_topology_irq) {
+		rc = register_external_interrupt(0x2005, topology_interrupt);
+		if (rc)
+			return rc;
+		ctl_set_bit(0, 8);
+	}
+	else
+		set_topology_timer();
+	return 0;
+}
+__initcall(init_topology_update);
+
+void __init s390_init_cpu_topology(void)
+{
+	unsigned long long facility_bits;
+	struct tl_info *info;
+	struct core_info *core;
+	int nr_cores;
+	int i;
+
+	if (stfle(&facility_bits, 1) <= 0)
+		return;
+	if (!(facility_bits & (1ULL << 52)) || !(facility_bits & (1ULL << 61)))
+		return;
+	machine_has_topology = 1;
+
+	if (facility_bits & (1ULL << 51))
+		machine_has_topology_irq = 1;
+
+	tl_info = alloc_bootmem_pages(PAGE_SIZE);
+	if (!tl_info)
+		goto error;
+	info = tl_info;
+	stsi(info, 15, 1, 2);
+
+	nr_cores = info->mag[NR_MAG - 2];
+	for (i = 0; i < info->mnest - 2; i++)
+		nr_cores *= info->mag[NR_MAG - 3 - i];
+
+	printk(KERN_INFO "CPU topology:");
+	for (i = 0; i < NR_MAG; i++)
+		printk(" %d", info->mag[i]);
+	printk(" / %d\n", info->mnest);
+
+	core = &core_info;
+	for (i = 0; i < nr_cores; i++) {
+		core->next = alloc_bootmem(sizeof(struct core_info));
+		core = core->next;
+		if (!core)
+			goto error;
+	}
+	return;
+error:
+	machine_has_topology = 0;
+	machine_has_topology_irq = 0;
+}
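The bootmem sizing in s390_init_cpu_topology() multiplies up the STSI
15.1.2 magnitude values: mag[NR_MAG - 2] gives the CPUs per lowest-level
container, and each additional nesting level scales the count. A worked
example with invented values:

    #include <stdio.h>

    #define NR_MAG 6

    /* Invented STSI 15.1.2 response: mnest = 3, 4 CPUs per container,
     * 6 containers at the next nesting level. */
    int main(void)
    {
        unsigned char mag[NR_MAG] = { 0, 0, 0, 6, 4, 8 };
        int mnest = 3, nr_cores, i;

        nr_cores = mag[NR_MAG - 2];
        for (i = 0; i < mnest - 2; i++)
            nr_cores *= mag[NR_MAG - 3 - i];
        printf("core_info nodes to allocate: %d\n", nr_cores); /* 4 * 6 = 24 */
        return 0;
    }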
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 60f728a..57b607b 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -42,11 +42,8 @@
 #include <asm/s390_ext.h>
 #include <asm/lowcore.h>
 #include <asm/debug.h>
+#include "entry.h"
 
-/* Called from entry.S only */
-extern void handle_per_exception(struct pt_regs *regs);
-
-typedef void pgm_check_handler_t(struct pt_regs *, long);
 pgm_check_handler_t *pgm_check_table[128];
 
 #ifdef CONFIG_SYSCTL
@@ -59,7 +56,6 @@
 
 extern pgm_check_handler_t do_protection_exception;
 extern pgm_check_handler_t do_dat_exception;
-extern pgm_check_handler_t do_monitor_call;
 extern pgm_check_handler_t do_asce_exception;
 
 #define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
@@ -138,7 +134,6 @@
 	else
 		__show_trace(sp, S390_lowcore.thread_info,
 			     S390_lowcore.thread_info + THREAD_SIZE);
-	printk("\n");
 	if (!task)
 		task = current;
 	debug_show_held_locks(task);
@@ -166,6 +161,15 @@
 	show_trace(task, sp);
 }
 
+#ifdef CONFIG_64BIT
+void show_last_breaking_event(struct pt_regs *regs)
+{
+	printk("Last Breaking-Event-Address:\n");
+	printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
+	print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
+}
+#endif
+
 /*
  * The architecture-independent dump_stack generator
  */
@@ -739,6 +743,5 @@
         pgm_check_table[0x15] = &operand_exception;
         pgm_check_table[0x1C] = &space_switch_exception;
         pgm_check_table[0x1D] = &hfp_sqrt_exception;
-	pgm_check_table[0x40] = &do_monitor_call;
 	pfault_irq_init();
 }
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index 70f2a86..eae21a8 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -34,7 +34,7 @@
  */
 void __udelay(unsigned long usecs)
 {
-	u64 end, time, jiffy_timer = 0;
+	u64 end, time, old_cc = 0;
 	unsigned long flags, cr0, mask, dummy;
 	int irq_context;
 
@@ -43,8 +43,8 @@
 		local_bh_disable();
 	local_irq_save(flags);
 	if (raw_irqs_disabled_flags(flags)) {
-		jiffy_timer = S390_lowcore.jiffy_timer;
-		S390_lowcore.jiffy_timer = -1ULL - (4096 << 12);
+		old_cc = S390_lowcore.clock_comparator;
+		S390_lowcore.clock_comparator = -1ULL;
 		__ctl_store(cr0, 0, 0);
 		dummy = (cr0 & 0xffff00e0) | 0x00000800;
 		__ctl_load(dummy , 0, 0);
@@ -55,8 +55,8 @@
 
 	end = get_clock() + ((u64) usecs << 12);
 	do {
-		time = end < S390_lowcore.jiffy_timer ?
-			end : S390_lowcore.jiffy_timer;
+		time = end < S390_lowcore.clock_comparator ?
+			end : S390_lowcore.clock_comparator;
 		set_clock_comparator(time);
 		trace_hardirqs_on();
 		__load_psw_mask(mask);
@@ -65,10 +65,10 @@
 
 	if (raw_irqs_disabled_flags(flags)) {
 		__ctl_load(cr0, 0, 0);
-		S390_lowcore.jiffy_timer = jiffy_timer;
+		S390_lowcore.clock_comparator = old_cc;
 	}
 	if (!irq_context)
 		_local_bh_enable();
-	set_clock_comparator(S390_lowcore.jiffy_timer);
+	set_clock_comparator(S390_lowcore.clock_comparator);
 	local_irq_restore(flags);
 }
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 5efdfe9..d66215b 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -302,6 +302,10 @@
 	pte_t *pte_from, *pte_to;
 	int write_user;
 
+	if (segment_eq(get_fs(), KERNEL_DS)) {
+		memcpy((void __force *) to, (void __force *) from, n);
+		return 0;
+	}
 	done = 0;
 retry:
 	spin_lock(&mm->page_table_lock);
@@ -361,18 +365,10 @@
 		     : "0" (-EFAULT), "d" (oparg), "a" (uaddr),		\
 		       "m" (*uaddr) : "cc" );
 
-int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
+static int __futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
 {
 	int oldval = 0, newval, ret;
 
-	spin_lock(&current->mm->page_table_lock);
-	uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
-	if (!uaddr) {
-		spin_unlock(&current->mm->page_table_lock);
-		return -EFAULT;
-	}
-	get_page(virt_to_page(uaddr));
-	spin_unlock(&current->mm->page_table_lock);
 	switch (op) {
 	case FUTEX_OP_SET:
 		__futex_atomic_op("lr %2,%5\n",
@@ -397,17 +393,17 @@
 	default:
 		ret = -ENOSYS;
 	}
-	put_page(virt_to_page(uaddr));
-	*old = oldval;
+	if (ret == 0)
+		*old = oldval;
 	return ret;
 }
 
-int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
+int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
 {
 	int ret;
 
-	if (!current->mm)
-		return -EFAULT;
+	if (segment_eq(get_fs(), KERNEL_DS))
+		return __futex_atomic_op_pt(op, uaddr, oparg, old);
 	spin_lock(&current->mm->page_table_lock);
 	uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
 	if (!uaddr) {
@@ -416,13 +412,40 @@
 	}
 	get_page(virt_to_page(uaddr));
 	spin_unlock(&current->mm->page_table_lock);
-	asm volatile("   cs   %1,%4,0(%5)\n"
-		     "0: lr   %0,%1\n"
-		     "1:\n"
-		     EX_TABLE(0b,1b)
+	ret = __futex_atomic_op_pt(op, uaddr, oparg, old);
+	put_page(virt_to_page(uaddr));
+	return ret;
+}
+
+static int __futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
+{
+	int ret;
+
+	asm volatile("0: cs   %1,%4,0(%5)\n"
+		     "1: lr   %0,%1\n"
+		     "2:\n"
+		     EX_TABLE(0b,2b) EX_TABLE(1b,2b)
 		     : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
 		     : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
 		     : "cc", "memory" );
+	return ret;
+}
+
+int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
+{
+	int ret;
+
+	if (segment_eq(get_fs(), KERNEL_DS))
+		return __futex_atomic_cmpxchg_pt(uaddr, oldval, newval);
+	spin_lock(&current->mm->page_table_lock);
+	uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
+	if (!uaddr) {
+		spin_unlock(&current->mm->page_table_lock);
+		return -EFAULT;
+	}
+	get_page(virt_to_page(uaddr));
+	spin_unlock(&current->mm->page_table_lock);
+	ret = __futex_atomic_cmpxchg_pt(uaddr, oldval, newval);
 	put_page(virt_to_page(uaddr));
 	return ret;
 }
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 880b0eb..ed2af0a 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -289,22 +289,8 @@
 
 	rc = add_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
 
-	switch (rc) {
-	case 0:
-		break;
-	case -ENOSPC:
-		PRINT_WARN("segment_load: not loading segment %s - overlaps "
-			   "storage/segment\n", name);
+	if (rc)
 		goto out_free;
-	case -ERANGE:
-		PRINT_WARN("segment_load: not loading segment %s - exceeds "
-			   "kernel mapping range\n", name);
-		goto out_free;
-	default:
-		PRINT_WARN("segment_load: not loading segment %s (rc: %d)\n",
-			   name, rc);
-		goto out_free;
-	}
 
 	seg->res = kzalloc(sizeof(struct resource), GFP_KERNEL);
 	if (seg->res == NULL) {
@@ -582,8 +568,59 @@
 	mutex_unlock(&dcss_lock);
 }
 
+/*
+ * print appropriate error message for segment_load()/segment_type()
+ * return code
+ */
+void segment_warning(int rc, char *seg_name)
+{
+	switch (rc) {
+	case -ENOENT:
+		PRINT_WARN("cannot load/query segment %s, "
+			   "does not exist\n", seg_name);
+		break;
+	case -ENOSYS:
+		PRINT_WARN("cannot load/query segment %s, "
+			   "not running on VM\n", seg_name);
+		break;
+	case -EIO:
+		PRINT_WARN("cannot load/query segment %s, "
+			   "hardware error\n", seg_name);
+		break;
+	case -ENOTSUPP:
+		PRINT_WARN("cannot load/query segment %s, "
+			   "is a multi-part segment\n", seg_name);
+		break;
+	case -ENOSPC:
+		PRINT_WARN("cannot load/query segment %s, "
+			   "overlaps with storage\n", seg_name);
+		break;
+	case -EBUSY:
+		PRINT_WARN("cannot load/query segment %s, "
+			   "overlaps with already loaded dcss\n", seg_name);
+		break;
+	case -EPERM:
+		PRINT_WARN("cannot load/query segment %s, "
+			   "already loaded in incompatible mode\n", seg_name);
+		break;
+	case -ENOMEM:
+		PRINT_WARN("cannot load/query segment %s, "
+			   "out of memory\n", seg_name);
+		break;
+	case -ERANGE:
+		PRINT_WARN("cannot load/query segment %s, "
+			   "exceeds kernel mapping range\n", seg_name);
+		break;
+	default:
+		PRINT_WARN("cannot load/query segment %s, "
+			   "return value %i\n", seg_name, rc);
+		break;
+	}
+}
+
 EXPORT_SYMBOL(segment_load);
 EXPORT_SYMBOL(segment_unload);
 EXPORT_SYMBOL(segment_save);
 EXPORT_SYMBOL(segment_type);
 EXPORT_SYMBOL(segment_modify_shared);
+EXPORT_SYMBOL(segment_warning);
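With segment_warning() exported, callers of segment_load() and
segment_type() can report failures uniformly instead of open-coding the
switch that segment_load() itself just lost. A hypothetical caller (the
segment name is invented; SEGMENT_SHARED is the usual asm/extmem.h
constant):

    /* Sketch: load a DCSS and report any error through the helper. */
    unsigned long start, end;
    int rc;

    rc = segment_load("MYDCSS", SEGMENT_SHARED, &start, &end);
    if (rc < 0) {
        segment_warning(rc, "MYDCSS");
        return rc;
    }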
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index ed13d42..2650f46 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -28,11 +28,11 @@
 #include <linux/hardirq.h>
 #include <linux/kprobes.h>
 #include <linux/uaccess.h>
-
 #include <asm/system.h>
 #include <asm/pgtable.h>
 #include <asm/s390_ext.h>
 #include <asm/mmu_context.h>
+#include "../kernel/entry.h"
 
 #ifndef CONFIG_64BIT
 #define __FAIL_ADDR_MASK 0x7ffff000
@@ -50,8 +50,6 @@
 extern int sysctl_userprocess_debug;
 #endif
 
-extern void die(const char *,struct pt_regs *,long);
-
 #ifdef CONFIG_KPROBES
 static inline int notify_page_fault(struct pt_regs *regs, long err)
 {
@@ -245,11 +243,6 @@
 }
 
 #ifdef CONFIG_S390_EXEC_PROTECT
-extern long sys_sigreturn(struct pt_regs *regs);
-extern long sys_rt_sigreturn(struct pt_regs *regs);
-extern long sys32_sigreturn(struct pt_regs *regs);
-extern long sys32_rt_sigreturn(struct pt_regs *regs);
-
 static int signal_return(struct mm_struct *mm, struct pt_regs *regs,
 			 unsigned long address, unsigned long error_code)
 {
@@ -270,15 +263,15 @@
 #ifdef CONFIG_COMPAT
 	compat = test_tsk_thread_flag(current, TIF_31BIT);
 	if (compat && instruction == 0x0a77)
-		sys32_sigreturn(regs);
+		sys32_sigreturn();
 	else if (compat && instruction == 0x0aad)
-		sys32_rt_sigreturn(regs);
+		sys32_rt_sigreturn();
 	else
 #endif
 	if (instruction == 0x0a77)
-		sys_sigreturn(regs);
+		sys_sigreturn();
 	else if (instruction == 0x0aad)
-		sys_rt_sigreturn(regs);
+		sys_rt_sigreturn();
 	else {
 		current->thread.prot_addr = address;
 		current->thread.trap_no = error_code;
@@ -424,7 +417,7 @@
 }
 
 void __kprobes do_protection_exception(struct pt_regs *regs,
-				       unsigned long error_code)
+				       long error_code)
 {
 	/* Protection exception is suppressing, decrement psw address. */
 	regs->psw.addr -= (error_code >> 16);
@@ -440,7 +433,7 @@
 	do_exception(regs, 4, 1);
 }
 
-void __kprobes do_dat_exception(struct pt_regs *regs, unsigned long error_code)
+void __kprobes do_dat_exception(struct pt_regs *regs, long error_code)
 {
 	do_exception(regs, error_code & 0xff, 0);
 }
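
The magic values compared in signal_return() are s390 "svc" instructions with
the system call number in the low byte. A small host-side sketch of the
decoding; the numbers 119 (sigreturn) and 173 (rt_sigreturn) are assumed from
s390's unistd.h:

	#include <stdio.h>

	int main(void)
	{
		unsigned short insns[] = { 0x0a77, 0x0aad };
		int i;

		/* 0x0a is the svc opcode; the low byte selects the call */
		for (i = 0; i < 2; i++)
			printf("0x%04x -> svc %d\n", insns[i], insns[i] & 0xff);
		return 0;	/* prints svc 119 and svc 173 */
	}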
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 8053245..202c952 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -50,7 +50,6 @@
 
 	printk("Mem-info:\n");
 	show_free_areas();
-	printk("Free swap:       %6ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
 	i = max_mapnr;
 	while (i-- > 0) {
 		if (!pfn_valid(i))
diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32
index 62bf373..4bbdce3 100644
--- a/arch/sh/kernel/Makefile_32
+++ b/arch/sh/kernel/Makefile_32
@@ -5,7 +5,7 @@
 extra-y	:= head_32.o init_task.o vmlinux.lds
 
 obj-y	:= debugtraps.o io.o io_generic.o irq.o machvec.o process_32.o \
-	   ptrace_32.o semaphore.o setup.o signal_32.o sys_sh.o sys_sh32.o \
+	   ptrace_32.o setup.o signal_32.o sys_sh.o sys_sh32.o \
 	   syscalls_32.o time_32.o topology.o traps.o traps_32.o
 
 obj-y				+= cpu/ timers/
diff --git a/arch/sh/kernel/Makefile_64 b/arch/sh/kernel/Makefile_64
index e01283d..6edf53b 100644
--- a/arch/sh/kernel/Makefile_64
+++ b/arch/sh/kernel/Makefile_64
@@ -1,7 +1,7 @@
 extra-y	:= head_64.o init_task.o vmlinux.lds
 
 obj-y	:= debugtraps.o io.o io_generic.o irq.o machvec.o process_64.o \
-	   ptrace_64.o semaphore.o setup.o signal_64.o sys_sh.o sys_sh64.o \
+	   ptrace_64.o setup.o signal_64.o sys_sh.o sys_sh64.o \
 	   syscalls_64.o time_64.o topology.o traps.o traps_64.o
 
 obj-y				+= cpu/ timers/
diff --git a/arch/sh/kernel/semaphore.c b/arch/sh/kernel/semaphore.c
deleted file mode 100644
index 184119e..0000000
--- a/arch/sh/kernel/semaphore.c
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Just taken from alpha implementation.
- * This can't work well, perhaps.
- */
-/*
- *  Generic semaphore code. Buyer beware. Do your own
- * specific changes in <asm/semaphore-helper.h>
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/wait.h>
-#include <linux/init.h>
-#include <asm/semaphore.h>
-#include <asm/semaphore-helper.h>
-
-DEFINE_SPINLOCK(semaphore_wake_lock);
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to sleep, while the "waking" variable is
- * incremented when the "up()" code goes to wake up waiting
- * processes.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * waking_non_zero() (from asm/semaphore.h) must execute
- * atomically.
- *
- * When __up() is called, the count was negative before
- * incrementing it, and we need to wake up somebody.
- *
- * This routine adds one to the count of processes that need to
- * wake up and exit.  ALL waiting processes actually wake up but
- * only the one that gets to the "waking" field first will gate
- * through and acquire the semaphore.  The others will go back
- * to sleep.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-void __up(struct semaphore *sem)
-{
-	wake_one_more(sem);
-	wake_up(&sem->wait);
-}
-
-/*
- * Perform the "down" function.  Return zero for semaphore acquired,
- * return negative for signalled out of the function.
- *
- * If called from __down, the return is ignored and the wait loop is
- * not interruptible.  This means that a task waiting on a semaphore
- * using "down()" cannot be killed until someone does an "up()" on
- * the semaphore.
- *
- * If called from __down_interruptible, the return value gets checked
- * upon return.  If the return value is negative then the task continues
- * with the negative value in the return register (it can be tested by
- * the caller).
- *
- * Either form may be used in conjunction with "up()".
- *
- */
-
-#define DOWN_VAR				\
-	struct task_struct *tsk = current;	\
-	wait_queue_t wait;			\
-	init_waitqueue_entry(&wait, tsk);
-
-#define DOWN_HEAD(task_state)						\
-									\
-									\
-	tsk->state = (task_state);					\
-	add_wait_queue(&sem->wait, &wait);				\
-									\
-	/*								\
-	 * Ok, we're set up.  sem->count is known to be less than zero	\
-	 * so we must wait.						\
-	 *								\
-	 * We can let go the lock for purposes of waiting.		\
-	 * We re-acquire it after awaking so as to protect		\
-	 * all semaphore operations.					\
-	 *								\
-	 * If "up()" is called before we call waking_non_zero() then	\
-	 * we will catch it right away.  If it is called later then	\
-	 * we will have to go through a wakeup cycle to catch it.	\
-	 *								\
-	 * Multiple waiters contend for the semaphore lock to see	\
-	 * who gets to gate through and who has to wait some more.	\
-	 */								\
-	for (;;) {
-
-#define DOWN_TAIL(task_state)			\
-		tsk->state = (task_state);	\
-	}					\
-	tsk->state = TASK_RUNNING;		\
-	remove_wait_queue(&sem->wait, &wait);
-
-void __sched __down(struct semaphore * sem)
-{
-	DOWN_VAR
-	DOWN_HEAD(TASK_UNINTERRUPTIBLE)
-	if (waking_non_zero(sem))
-		break;
-	schedule();
-	DOWN_TAIL(TASK_UNINTERRUPTIBLE)
-}
-
-int __sched __down_interruptible(struct semaphore * sem)
-{
-	int ret = 0;
-	DOWN_VAR
-	DOWN_HEAD(TASK_INTERRUPTIBLE)
-
-	ret = waking_non_zero_interruptible(sem, tsk);
-	if (ret)
-	{
-		if (ret == 1)
-			/* ret != 0 only if we get interrupted -arca */
-			ret = 0;
-		break;
-	}
-	schedule();
-	DOWN_TAIL(TASK_INTERRUPTIBLE)
-	return ret;
-}
-
-int __down_trylock(struct semaphore * sem)
-{
-	return waking_non_zero_trylock(sem);
-}
diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c
index 45bb333..6d40546 100644
--- a/arch/sh/kernel/sh_ksyms_32.c
+++ b/arch/sh/kernel/sh_ksyms_32.c
@@ -9,7 +9,6 @@
 #include <linux/pci.h>
 #include <linux/irq.h>
 #include <asm/sections.h>
-#include <asm/semaphore.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/checksum.h>
@@ -48,12 +47,6 @@
 EXPORT_SYMBOL(get_vm_area);
 #endif
 
-/* semaphore exports */
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__down_trylock);
-
 EXPORT_SYMBOL(__udelay);
 EXPORT_SYMBOL(__ndelay);
 EXPORT_SYMBOL(__const_udelay);
diff --git a/arch/sh/kernel/sh_ksyms_64.c b/arch/sh/kernel/sh_ksyms_64.c
index b6410ce..a310c97 100644
--- a/arch/sh/kernel/sh_ksyms_64.c
+++ b/arch/sh/kernel/sh_ksyms_64.c
@@ -16,7 +16,6 @@
 #include <linux/in6.h>
 #include <linux/interrupt.h>
 #include <linux/screen_info.h>
-#include <asm/semaphore.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/checksum.h>
@@ -37,9 +36,6 @@
 EXPORT_SYMBOL(screen_info);
 #endif
 
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_trylock);
-EXPORT_SYMBOL(__up);
 EXPORT_SYMBOL(__put_user_asm_l);
 EXPORT_SYMBOL(__get_user_asm_l);
 EXPORT_SYMBOL(copy_page);
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index bf1b15d..2712bb1 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -12,7 +12,7 @@
 	    sys_sparc.o sunos_asm.o systbls.o \
 	    time.o windows.o cpu.o devices.o sclow.o \
 	    tadpole.o tick14.o ptrace.o sys_solaris.o \
-	    unaligned.o una_asm.o muldiv.o semaphore.o \
+	    unaligned.o una_asm.o muldiv.o \
 	    prom.o of_device.o devres.o
 
 devres-y = ../../../kernel/irq/devres.o
diff --git a/arch/sparc/kernel/semaphore.c b/arch/sparc/kernel/semaphore.c
deleted file mode 100644
index 0c37c1a..0000000
--- a/arch/sparc/kernel/semaphore.c
+++ /dev/null
@@ -1,155 +0,0 @@
-/* $Id: semaphore.c,v 1.7 2001/04/18 21:06:05 davem Exp $ */
-
-/* sparc32 semaphore implementation, based on i386 version */
-
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-
-#include <asm/semaphore.h>
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to acquire the semaphore, while the "sleeping"
- * variable is a count of such acquires.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * "sleeping" and the contention routine ordering is
- * protected by the semaphore spinlock.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-
-/*
- * Logic:
- *  - only on a boundary condition do we need to care. When we go
- *    from a negative count to a non-negative, we wake people up.
- *  - when we go from a non-negative count to a negative do we
- *    (a) synchronize with the "sleeper" count and (b) make sure
- *    that we're on the wakeup list before we synchronize so that
- *    we cannot lose wakeup events.
- */
-
-void __up(struct semaphore *sem)
-{
-	wake_up(&sem->wait);
-}
-
-static DEFINE_SPINLOCK(semaphore_lock);
-
-void __sched __down(struct semaphore * sem)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	tsk->state = TASK_UNINTERRUPTIBLE;
-	add_wait_queue_exclusive(&sem->wait, &wait);
-
-	spin_lock_irq(&semaphore_lock);
-	sem->sleepers++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock.
-		 */
-		if (!atomic24_add_negative(sleepers - 1, &sem->count)) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irq(&semaphore_lock);
-
-		schedule();
-		tsk->state = TASK_UNINTERRUPTIBLE;
-		spin_lock_irq(&semaphore_lock);
-	}
-	spin_unlock_irq(&semaphore_lock);
-	remove_wait_queue(&sem->wait, &wait);
-	tsk->state = TASK_RUNNING;
-	wake_up(&sem->wait);
-}
-
-int __sched __down_interruptible(struct semaphore * sem)
-{
-	int retval = 0;
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	tsk->state = TASK_INTERRUPTIBLE;
-	add_wait_queue_exclusive(&sem->wait, &wait);
-
-	spin_lock_irq(&semaphore_lock);
-	sem->sleepers ++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * With signals pending, this turns into
-		 * the trylock failure case - we won't be
-		 * sleeping, and we* can't get the lock as
-		 * it has contention. Just correct the count
-		 * and exit.
-		 */
-		if (signal_pending(current)) {
-			retval = -EINTR;
-			sem->sleepers = 0;
-			atomic24_add(sleepers, &sem->count);
-			break;
-		}
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock. The
-		 * "-1" is because we're still hoping to get
-		 * the lock.
-		 */
-		if (!atomic24_add_negative(sleepers - 1, &sem->count)) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irq(&semaphore_lock);
-
-		schedule();
-		tsk->state = TASK_INTERRUPTIBLE;
-		spin_lock_irq(&semaphore_lock);
-	}
-	spin_unlock_irq(&semaphore_lock);
-	tsk->state = TASK_RUNNING;
-	remove_wait_queue(&sem->wait, &wait);
-	wake_up(&sem->wait);
-	return retval;
-}
-
-/*
- * Trylock failed - make sure we correct for
- * having decremented the count.
- */
-int __down_trylock(struct semaphore * sem)
-{
-	int sleepers;
-	unsigned long flags;
-
-	spin_lock_irqsave(&semaphore_lock, flags);
-	sleepers = sem->sleepers + 1;
-	sem->sleepers = 0;
-
-	/*
-	 * Add "everybody else" and us into it. They aren't
-	 * playing, because we own the spinlock.
-	 */
-	if (!atomic24_add_negative(sleepers, &sem->count))
-		wake_up(&sem->wait);
-
-	spin_unlock_irqrestore(&semaphore_lock, flags);
-	return 1;
-}
diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c
index c1025e5..97b1de0 100644
--- a/arch/sparc/kernel/sparc_ksyms.c
+++ b/arch/sparc/kernel/sparc_ksyms.c
@@ -107,11 +107,6 @@
 EXPORT_SYMBOL(___rw_read_exit);
 EXPORT_SYMBOL(___rw_write_enter);
 #endif
-/* semaphores */
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_trylock);
-EXPORT_SYMBOL(__down_interruptible);
 
 EXPORT_SYMBOL(sparc_valid_addr_bitmap);
 EXPORT_SYMBOL(phys_base);
diff --git a/arch/sparc64/kernel/Makefile b/arch/sparc64/kernel/Makefile
index 1bf5b18..459462e 100644
--- a/arch/sparc64/kernel/Makefile
+++ b/arch/sparc64/kernel/Makefile
@@ -10,7 +10,7 @@
 obj-y		:= process.o setup.o cpu.o idprom.o \
 		   traps.o auxio.o una_asm.o sysfs.o iommu.o \
 		   irq.o ptrace.o time.o sys_sparc.o signal.o \
-		   unaligned.o central.o pci.o starfire.o semaphore.o \
+		   unaligned.o central.o pci.o starfire.o \
 		   power.o sbus.o sparc64_ksyms.o chmc.o \
 		   visemul.o prom.o of_device.o hvapi.o sstate.o mdesc.o
 
diff --git a/arch/sparc64/kernel/semaphore.c b/arch/sparc64/kernel/semaphore.c
deleted file mode 100644
index 9974a68..0000000
--- a/arch/sparc64/kernel/semaphore.c
+++ /dev/null
@@ -1,254 +0,0 @@
-/* semaphore.c: Sparc64 semaphore implementation.
- *
- * This is basically the PPC semaphore scheme ported to use
- * the sparc64 atomic instructions, so see the PPC code for
- * credits.
- */
-
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-
-/*
- * Atomically update sem->count.
- * This does the equivalent of the following:
- *
- *	old_count = sem->count;
- *	tmp = MAX(old_count, 0) + incr;
- *	sem->count = tmp;
- *	return old_count;
- */
-static inline int __sem_update_count(struct semaphore *sem, int incr)
-{
-	int old_count, tmp;
-
-	__asm__ __volatile__("\n"
-"	! __sem_update_count old_count(%0) tmp(%1) incr(%4) &sem->count(%3)\n"
-"1:	ldsw	[%3], %0\n"
-"	mov	%0, %1\n"
-"	cmp	%0, 0\n"
-"	movl	%%icc, 0, %1\n"
-"	add	%1, %4, %1\n"
-"	cas	[%3], %0, %1\n"
-"	cmp	%0, %1\n"
-"	membar	#StoreLoad | #StoreStore\n"
-"	bne,pn	%%icc, 1b\n"
-"	 nop\n"
-	: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
-	: "r" (&sem->count), "r" (incr), "m" (sem->count)
-	: "cc");
-
-	return old_count;
-}
-
-static void __up(struct semaphore *sem)
-{
-	__sem_update_count(sem, 1);
-	wake_up(&sem->wait);
-}
-
-void up(struct semaphore *sem)
-{
-	/* This atomically does:
-	 * 	old_val = sem->count;
-	 *	new_val = sem->count + 1;
-	 *	sem->count = new_val;
-	 *	if (old_val < 0)
-	 *		__up(sem);
-	 *
-	 * The (old_val < 0) test is equivalent to
-	 * the more straightforward (new_val <= 0),
-	 * but it is easier to test the former because
-	 * of how the CAS instruction works.
-	 */
-
-	__asm__ __volatile__("\n"
-"	! up sem(%0)\n"
-"	membar	#StoreLoad | #LoadLoad\n"
-"1:	lduw	[%0], %%g1\n"
-"	add	%%g1, 1, %%g7\n"
-"	cas	[%0], %%g1, %%g7\n"
-"	cmp	%%g1, %%g7\n"
-"	bne,pn	%%icc, 1b\n"
-"	 addcc	%%g7, 1, %%g0\n"
-"	membar	#StoreLoad | #StoreStore\n"
-"	ble,pn	%%icc, 3f\n"
-"	 nop\n"
-"2:\n"
-"	.subsection 2\n"
-"3:	mov	%0, %%g1\n"
-"	save	%%sp, -160, %%sp\n"
-"	call	%1\n"
-"	 mov	%%g1, %%o0\n"
-"	ba,pt	%%xcc, 2b\n"
-"	 restore\n"
-"	.previous\n"
-	: : "r" (sem), "i" (__up)
-	: "g1", "g2", "g3", "g7", "memory", "cc");
-}
-
-static void __sched __down(struct semaphore * sem)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-
-	tsk->state = TASK_UNINTERRUPTIBLE;
-	add_wait_queue_exclusive(&sem->wait, &wait);
-
-	while (__sem_update_count(sem, -1) <= 0) {
-		schedule();
-		tsk->state = TASK_UNINTERRUPTIBLE;
-	}
-	remove_wait_queue(&sem->wait, &wait);
-	tsk->state = TASK_RUNNING;
-
-	wake_up(&sem->wait);
-}
-
-void __sched down(struct semaphore *sem)
-{
-	might_sleep();
-	/* This atomically does:
-	 * 	old_val = sem->count;
-	 *	new_val = sem->count - 1;
-	 *	sem->count = new_val;
-	 *	if (old_val < 1)
-	 *		__down(sem);
-	 *
-	 * The (old_val < 1) test is equivalent to
-	 * the more straightforward (new_val < 0),
-	 * but it is easier to test the former because
-	 * of how the CAS instruction works.
-	 */
-
-	__asm__ __volatile__("\n"
-"	! down sem(%0)\n"
-"1:	lduw	[%0], %%g1\n"
-"	sub	%%g1, 1, %%g7\n"
-"	cas	[%0], %%g1, %%g7\n"
-"	cmp	%%g1, %%g7\n"
-"	bne,pn	%%icc, 1b\n"
-"	 cmp	%%g7, 1\n"
-"	membar	#StoreLoad | #StoreStore\n"
-"	bl,pn	%%icc, 3f\n"
-"	 nop\n"
-"2:\n"
-"	.subsection 2\n"
-"3:	mov	%0, %%g1\n"
-"	save	%%sp, -160, %%sp\n"
-"	call	%1\n"
-"	 mov	%%g1, %%o0\n"
-"	ba,pt	%%xcc, 2b\n"
-"	 restore\n"
-"	.previous\n"
-	: : "r" (sem), "i" (__down)
-	: "g1", "g2", "g3", "g7", "memory", "cc");
-}
-
-int down_trylock(struct semaphore *sem)
-{
-	int ret;
-
-	/* This atomically does:
-	 * 	old_val = sem->count;
-	 *	new_val = sem->count - 1;
-	 *	if (old_val < 1) {
-	 *		ret = 1;
-	 *	} else {
-	 *		sem->count = new_val;
-	 *		ret = 0;
-	 *	}
-	 *
-	 * The (old_val < 1) test is equivalent to
-	 * the more straightforward (new_val < 0),
-	 * but it is easier to test the former because
-	 * of how the CAS instruction works.
-	 */
-
-	__asm__ __volatile__("\n"
-"	! down_trylock sem(%1) ret(%0)\n"
-"1:	lduw	[%1], %%g1\n"
-"	sub	%%g1, 1, %%g7\n"
-"	cmp	%%g1, 1\n"
-"	bl,pn	%%icc, 2f\n"
-"	 mov	1, %0\n"
-"	cas	[%1], %%g1, %%g7\n"
-"	cmp	%%g1, %%g7\n"
-"	bne,pn	%%icc, 1b\n"
-"	 mov	0, %0\n"
-"	membar	#StoreLoad | #StoreStore\n"
-"2:\n"
-	: "=&r" (ret)
-	: "r" (sem)
-	: "g1", "g7", "memory", "cc");
-
-	return ret;
-}
-
-static int __sched __down_interruptible(struct semaphore * sem)
-{
-	int retval = 0;
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-
-	tsk->state = TASK_INTERRUPTIBLE;
-	add_wait_queue_exclusive(&sem->wait, &wait);
-
-	while (__sem_update_count(sem, -1) <= 0) {
-		if (signal_pending(current)) {
-			__sem_update_count(sem, 0);
-			retval = -EINTR;
-			break;
-		}
-		schedule();
-		tsk->state = TASK_INTERRUPTIBLE;
-	}
-	tsk->state = TASK_RUNNING;
-	remove_wait_queue(&sem->wait, &wait);
-	wake_up(&sem->wait);
-	return retval;
-}
-
-int __sched down_interruptible(struct semaphore *sem)
-{
-	int ret = 0;
-	
-	might_sleep();
-	/* This atomically does:
-	 * 	old_val = sem->count;
-	 *	new_val = sem->count - 1;
-	 *	sem->count = new_val;
-	 *	if (old_val < 1)
-	 *		ret = __down_interruptible(sem);
-	 *
-	 * The (old_val < 1) test is equivalent to
-	 * the more straightforward (new_val < 0),
-	 * but it is easier to test the former because
-	 * of how the CAS instruction works.
-	 */
-
-	__asm__ __volatile__("\n"
-"	! down_interruptible sem(%2) ret(%0)\n"
-"1:	lduw	[%2], %%g1\n"
-"	sub	%%g1, 1, %%g7\n"
-"	cas	[%2], %%g1, %%g7\n"
-"	cmp	%%g1, %%g7\n"
-"	bne,pn	%%icc, 1b\n"
-"	 cmp	%%g7, 1\n"
-"	membar	#StoreLoad | #StoreStore\n"
-"	bl,pn	%%icc, 3f\n"
-"	 nop\n"
-"2:\n"
-"	.subsection 2\n"
-"3:	mov	%2, %%g1\n"
-"	save	%%sp, -160, %%sp\n"
-"	call	%3\n"
-"	 mov	%%g1, %%o0\n"
-"	ba,pt	%%xcc, 2b\n"
-"	 restore\n"
-"	.previous\n"
-	: "=r" (ret)
-	: "0" (ret), "r" (sem), "i" (__down_interruptible)
-	: "g1", "g2", "g3", "g7", "memory", "cc");
-	return ret;
-}
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index 51fa773..051b8d9 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -130,12 +130,6 @@
 
 EXPORT_SYMBOL(sparc64_get_clock_tick);
 
-/* semaphores */
-EXPORT_SYMBOL(down);
-EXPORT_SYMBOL(down_trylock);
-EXPORT_SYMBOL(down_interruptible);
-EXPORT_SYMBOL(up);
-
 /* RW semaphores */
 EXPORT_SYMBOL(__down_read);
 EXPORT_SYMBOL(__down_read_trylock);
diff --git a/arch/um/Kconfig.i386 b/arch/um/Kconfig.i386
index 3cd8a04..e09edfa 100644
--- a/arch/um/Kconfig.i386
+++ b/arch/um/Kconfig.i386
@@ -19,10 +19,6 @@
 	bool
 	default n
 
-config SEMAPHORE_SLEEPERS
-	bool
-	default y
-
 config 3_LEVEL_PGTABLES
 	bool "Three-level pagetables (EXPERIMENTAL)"
 	default n
diff --git a/arch/um/Kconfig.x86_64 b/arch/um/Kconfig.x86_64
index 6533b34..3fbe69e 100644
--- a/arch/um/Kconfig.x86_64
+++ b/arch/um/Kconfig.x86_64
@@ -11,10 +11,6 @@
 	bool
 	default y
 
-config SEMAPHORE_SLEEPERS
-	bool
-	default y
-
 config 3_LEVEL_PGTABLES
        bool
        default y
diff --git a/arch/um/sys-i386/ksyms.c b/arch/um/sys-i386/ksyms.c
index 2a1eac1..bfbefd3 100644
--- a/arch/um/sys-i386/ksyms.c
+++ b/arch/um/sys-i386/ksyms.c
@@ -1,17 +1,5 @@
 #include "linux/module.h"
-#include "linux/in6.h"
-#include "linux/rwsem.h"
-#include "asm/byteorder.h"
-#include "asm/delay.h"
-#include "asm/semaphore.h"
-#include "asm/uaccess.h"
 #include "asm/checksum.h"
-#include "asm/errno.h"
-
-EXPORT_SYMBOL(__down_failed);
-EXPORT_SYMBOL(__down_failed_interruptible);
-EXPORT_SYMBOL(__down_failed_trylock);
-EXPORT_SYMBOL(__up_wakeup);
 
 /* Networking helper routines. */
 EXPORT_SYMBOL(csum_partial);
diff --git a/arch/um/sys-ppc/Makefile b/arch/um/sys-ppc/Makefile
index 0890152..b8bc844 100644
--- a/arch/um/sys-ppc/Makefile
+++ b/arch/um/sys-ppc/Makefile
@@ -3,7 +3,7 @@
 .S.o:
 	$(CC) $(KBUILD_AFLAGS) -D__ASSEMBLY__ -D__UM_PPC__ -c $< -o $*.o
 
-OBJS = ptrace.o sigcontext.o semaphore.o checksum.o miscthings.o misc.o \
+OBJS = ptrace.o sigcontext.o checksum.o miscthings.o misc.o \
 	ptrace_user.o sysrq.o
 
 EXTRA_AFLAGS := -DCONFIG_PPC32 -I. -I$(srctree)/arch/ppc/kernel
@@ -20,10 +20,6 @@
 sigcontext.o: sigcontext.c
 	$(CC) $(USER_CFLAGS) $(EXTRA_CFLAGS) -c -o $@ $<
 
-semaphore.c:
-	rm -f $@
-	ln -s $(srctree)/arch/ppc/kernel/$@ $@
-
 checksum.S:
 	rm -f $@
 	ln -s $(srctree)/arch/ppc/lib/$@ $@
@@ -66,4 +62,4 @@
 	$(CC) $(EXTRA_AFLAGS) $(KBUILD_AFLAGS) -D__ASSEMBLY__ -D__UM_PPC__ -c $< -o $*.o
 	rm -f asm
 
-clean-files := $(OBJS) ppc_defs.h checksum.S semaphore.c mk_defs.c
+clean-files := $(OBJS) ppc_defs.h checksum.S mk_defs.c
diff --git a/arch/um/sys-x86_64/ksyms.c b/arch/um/sys-x86_64/ksyms.c
index 12c5936..4d7d1a8 100644
--- a/arch/um/sys-x86_64/ksyms.c
+++ b/arch/um/sys-x86_64/ksyms.c
@@ -1,16 +1,5 @@
 #include "linux/module.h"
-#include "linux/in6.h"
-#include "linux/rwsem.h"
-#include "asm/byteorder.h"
-#include "asm/semaphore.h"
-#include "asm/uaccess.h"
-#include "asm/checksum.h"
-#include "asm/errno.h"
-
-EXPORT_SYMBOL(__down_failed);
-EXPORT_SYMBOL(__down_failed_interruptible);
-EXPORT_SYMBOL(__down_failed_trylock);
-EXPORT_SYMBOL(__up_wakeup);
+#include "asm/string.h"
 
 /*XXX: we need them because they would be exported by x86_64 */
 EXPORT_SYMBOL(__memcpy);
diff --git a/arch/v850/kernel/Makefile b/arch/v850/kernel/Makefile
index 3930482..da5889c 100644
--- a/arch/v850/kernel/Makefile
+++ b/arch/v850/kernel/Makefile
@@ -11,7 +11,7 @@
 
 extra-y := head.o init_task.o vmlinux.lds
 
-obj-y += intv.o entry.o process.o syscalls.o time.o semaphore.o setup.o \
+obj-y += intv.o entry.o process.o syscalls.o time.o setup.o \
 	 signal.o irq.o mach.o ptrace.o bug.o
 obj-$(CONFIG_MODULES)		+= module.o v850_ksyms.o
 # chip-specific code
diff --git a/arch/v850/kernel/semaphore.c b/arch/v850/kernel/semaphore.c
deleted file mode 100644
index fc89fd6..0000000
--- a/arch/v850/kernel/semaphore.c
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- * arch/v850/kernel/semaphore.c -- Semaphore support
- *
- *  Copyright (C) 1998-2000  IBM Corporation
- *  Copyright (C) 1999  Linus Torvalds
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License.  See the file COPYING in the main directory of this
- * archive for more details.
- *
- * This file is a copy of the s390 version, arch/s390/kernel/semaphore.c
- *    Author(s): Martin Schwidefsky
- * which was derived from the i386 version, linux/arch/i386/kernel/semaphore.c
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-
-#include <asm/semaphore.h>
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to acquire the semaphore, while the "sleeping"
- * variable is a count of such acquires.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * "sleeping" and the contention routine ordering is
- * protected by the semaphore spinlock.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-
-/*
- * Logic:
- *  - only on a boundary condition do we need to care. When we go
- *    from a negative count to a non-negative, we wake people up.
- *  - when we go from a non-negative count to a negative do we
- *    (a) synchronize with the "sleeper" count and (b) make sure
- *    that we're on the wakeup list before we synchronize so that
- *    we cannot lose wakeup events.
- */
-
-void __up(struct semaphore *sem)
-{
-	wake_up(&sem->wait);
-}
-
-static DEFINE_SPINLOCK(semaphore_lock);
-
-void __sched __down(struct semaphore * sem)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	tsk->state = TASK_UNINTERRUPTIBLE;
-	add_wait_queue_exclusive(&sem->wait, &wait);
-
-	spin_lock_irq(&semaphore_lock);
-	sem->sleepers++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock.
-		 */
-		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irq(&semaphore_lock);
-
-		schedule();
-		tsk->state = TASK_UNINTERRUPTIBLE;
-		spin_lock_irq(&semaphore_lock);
-	}
-	spin_unlock_irq(&semaphore_lock);
-	remove_wait_queue(&sem->wait, &wait);
-	tsk->state = TASK_RUNNING;
-	wake_up(&sem->wait);
-}
-
-int __sched __down_interruptible(struct semaphore * sem)
-{
-	int retval = 0;
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	tsk->state = TASK_INTERRUPTIBLE;
-	add_wait_queue_exclusive(&sem->wait, &wait);
-
-	spin_lock_irq(&semaphore_lock);
-	sem->sleepers ++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * With signals pending, this turns into
-		 * the trylock failure case - we won't be
-		 * sleeping, and we* can't get the lock as
-		 * it has contention. Just correct the count
-		 * and exit.
-		 */
-		if (signal_pending(current)) {
-			retval = -EINTR;
-			sem->sleepers = 0;
-			atomic_add(sleepers, &sem->count);
-			break;
-		}
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock. The
-		 * "-1" is because we're still hoping to get
-		 * the lock.
-		 */
-		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irq(&semaphore_lock);
-
-		schedule();
-		tsk->state = TASK_INTERRUPTIBLE;
-		spin_lock_irq(&semaphore_lock);
-	}
-	spin_unlock_irq(&semaphore_lock);
-	tsk->state = TASK_RUNNING;
-	remove_wait_queue(&sem->wait, &wait);
-	wake_up(&sem->wait);
-	return retval;
-}
-
-/*
- * Trylock failed - make sure we correct for
- * having decremented the count.
- */
-int __down_trylock(struct semaphore * sem)
-{
-        unsigned long flags;
-	int sleepers;
-
-	spin_lock_irqsave(&semaphore_lock, flags);
-	sleepers = sem->sleepers + 1;
-	sem->sleepers = 0;
-
-	/*
-	 * Add "everybody else" and us into it. They aren't
-	 * playing, because we own the spinlock.
-	 */
-	if (!atomic_add_negative(sleepers, &sem->count))
-		wake_up(&sem->wait);
-
-	spin_unlock_irqrestore(&semaphore_lock, flags);
-	return 1;
-}
diff --git a/arch/v850/kernel/v850_ksyms.c b/arch/v850/kernel/v850_ksyms.c
index 93575fd..8d386a5 100644
--- a/arch/v850/kernel/v850_ksyms.c
+++ b/arch/v850/kernel/v850_ksyms.c
@@ -11,7 +11,6 @@
 #include <asm/pgalloc.h>
 #include <asm/irq.h>
 #include <asm/io.h>
-#include <asm/semaphore.h>
 #include <asm/checksum.h>
 #include <asm/current.h>
 
@@ -34,12 +33,6 @@
 EXPORT_SYMBOL (memcpy);
 EXPORT_SYMBOL (memmove);
 
-/* semaphores */
-EXPORT_SYMBOL (__down);
-EXPORT_SYMBOL (__down_interruptible);
-EXPORT_SYMBOL (__down_trylock);
-EXPORT_SYMBOL (__up);
-
 /*
  * libgcc functions - functions that are used internally by the
  * compiler...  (prototypes are not correct though, but that
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 6c70fed..2a59dbb 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -23,6 +23,7 @@
 	select HAVE_KPROBES
 	select HAVE_KRETPROBES
 	select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
+	select HAVE_ARCH_KGDB
 
 
 config GENERIC_LOCKBREAK
@@ -53,9 +54,6 @@
 config HAVE_LATENCYTOP_SUPPORT
 	def_bool y
 
-config SEMAPHORE_SLEEPERS
-	def_bool y
-
 config FAST_CMPXCHG_LOCAL
 	bool
 	default y
@@ -117,7 +115,7 @@
 	def_bool y
 
 config HAVE_SETUP_PER_CPU_AREA
-	def_bool X86_64
+	def_bool X86_64 || (X86_SMP && !X86_VOYAGER)
 
 config ARCH_HIBERNATION_POSSIBLE
 	def_bool y
@@ -171,7 +169,7 @@
 config X86_HT
 	bool
 	depends on SMP
-	depends on (X86_32 && !(X86_VISWS || X86_VOYAGER)) || (X86_64 && !MK8)
+	depends on (X86_32 && !(X86_VISWS || X86_VOYAGER)) || X86_64
 	default y
 
 config X86_BIOS_REBOOT
@@ -181,7 +179,7 @@
 
 config X86_TRAMPOLINE
 	bool
-	depends on X86_SMP || (X86_VOYAGER && SMP)
+	depends on X86_SMP || (X86_VOYAGER && SMP) || (64BIT && ACPI_SLEEP)
 	default y
 
 config KTIME_SCALAR
@@ -241,8 +239,7 @@
 
 config X86_VOYAGER
 	bool "Voyager (NCR)"
-	depends on X86_32
-	select SMP if !BROKEN
+	depends on X86_32 && (SMP || BROKEN)
 	help
 	  Voyager is an MCA-based 32-way capable SMP architecture proprietary
 	  to NCR Corp.  Machine classes 345x/35xx/4100/51xx are Voyager-based.
@@ -254,9 +251,8 @@
 
 config X86_NUMAQ
 	bool "NUMAQ (IBM/Sequent)"
-	select SMP
+	depends on SMP && X86_32
 	select NUMA
-	depends on X86_32
 	help
 	  This option is used for getting Linux to run on a (IBM/Sequent) NUMA
 	  multiquad box. This changes the way that processors are bootstrapped,
@@ -327,8 +323,9 @@
 
 config X86_VSMP
 	bool "Support for ScaleMP vSMP"
-	depends on X86_64 && PCI
-	 help
+	select PARAVIRT
+	depends on X86_64
+	help
 	  Support for ScaleMP vSMP systems.  Say 'Y' here if this kernel is
 	  supposed to run on these EM64T-based machines.  Only choose this option
 	  if you have one of these machines.
@@ -383,6 +380,35 @@
 
 endif
 
+config MEMTEST_BOOTPARAM
+	bool "Memtest boot parameter"
+	depends on X86_64
+	default y
+	help
+	  This option adds a kernel parameter 'memtest', which allows memtest
+	  to be disabled at boot.  If this option is selected, memtest
+	  functionality can be disabled with memtest=0 on the kernel
+	  command line.  The purpose of this option is to allow a single
+	  kernel image to be distributed with memtest built in, but not
+	  necessarily enabled.
+
+	  If you are unsure how to answer this question, answer Y.
+
+config MEMTEST_BOOTPARAM_VALUE
+	int "Memtest boot parameter default value (0-4)"
+	depends on MEMTEST_BOOTPARAM
+	range 0 4
+	default 0
+	help
+	  This option sets the default value for the kernel parameter
+	  'memtest', which allows memtest to be disabled at boot.  If this
+	  option is set to 0 (zero), the memtest kernel parameter will
+	  default to 0, disabling memtest at bootup.  If this option is
+	  set to 4, the memtest kernel parameter will default to 4,
+	  enabling memtest at bootup and using that value as the pattern number.
+
+	  If you are unsure how to answer this question, answer 0.
+
 config ACPI_SRAT
 	def_bool y
 	depends on X86_32 && ACPI && NUMA && (X86_SUMMIT || X86_GENERICARCH)
@@ -507,7 +533,7 @@
 
 config SCHED_SMT
 	bool "SMT (Hyperthreading) scheduler support"
-	depends on (X86_64 && SMP) || (X86_32 && X86_HT)
+	depends on X86_HT
 	help
 	  SMT scheduler support improves the CPU scheduler's decision making
 	  when dealing with Intel Pentium 4 chips with HyperThreading at a
@@ -517,7 +543,7 @@
 config SCHED_MC
 	def_bool y
 	prompt "Multi-core scheduler support"
-	depends on (X86_64 && SMP) || (X86_32 && X86_HT)
+	depends on X86_HT
 	help
 	  Multi-core scheduler support improves the CPU scheduler's decision
 	  making when dealing with multi-core CPU chips at a cost of slightly
@@ -886,7 +912,7 @@
 	  number of nodes. This is only useful for debugging.
 
 config NODES_SHIFT
-	int
+	int "Max num nodes shift(1-15)"
 	range 1 15  if X86_64
 	default "6" if X86_64
 	default "4" if X86_NUMAQ
@@ -1010,6 +1036,21 @@
 
 	  See <file:Documentation/mtrr.txt> for more information.
 
+config X86_PAT
+	def_bool y
+	prompt "x86 PAT support"
+	depends on MTRR && NONPROMISC_DEVMEM
+	help
+	  Use PAT attributes to setup page level cache control.
+
+	  PATs are the modern equivalents of MTRRs and are much more
+	  flexible than MTRRs.
+
+	  Say N here if you see bootup problems (boot crash, boot hang,
+	  spontaneous reboots) or a non-working video driver.
+
+	  If unsure, say Y.
+
 config EFI
 	def_bool n
 	prompt "EFI runtime service support"
@@ -1078,6 +1119,7 @@
 
 config KEXEC
 	bool "kexec system call"
+	depends on X86_64 || X86_BIOS_REBOOT
 	help
 	  kexec is a system call that implements the ability to shutdown your
 	  current kernel, and to start another kernel.  It is like a reboot
@@ -1379,7 +1421,7 @@
 menu "Bus options (PCI etc.)"
 
 config PCI
-	bool "PCI support" if !X86_VISWS
+	bool "PCI support" if !X86_VISWS && !X86_VSMP
 	depends on !X86_VOYAGER
 	default y
 	select ARCH_SUPPORTS_MSI if (X86_LOCAL_APIC && X86_IO_APIC)
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 9304bfb..57072f2 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -388,7 +388,7 @@
 #
 config X86_P6_NOP
 	def_bool y
-	depends on (X86_64 || !X86_GENERIC) && (M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MPENTIUM4)
+	depends on (X86_64 || !X86_GENERIC) && (M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MPENTIUM4 || MPSC)
 
 config X86_TSC
 	def_bool y
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 702eb39..610aaec 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -54,6 +54,18 @@
 
 	  Say N if unsure.
 
+config X86_PTDUMP
+	bool "Export kernel pagetable layout to userspace via debugfs"
+	depends on DEBUG_KERNEL
+	select DEBUG_FS
+	help
+	  Say Y here if you want to show the kernel pagetable layout in a
+	  debugfs file. This information is only useful for kernel developers
+	  who are working in architecture specific areas of the kernel.
+	  It is probably not a good idea to enable this feature in a production
+	  kernel.
+	  If in doubt, say "N".
+
 config DEBUG_RODATA
 	bool "Write protect kernel read-only data structures"
 	default y
@@ -64,6 +76,18 @@
 	  data. This is recommended so that we can catch kernel bugs sooner.
 	  If in doubt, say "Y".
 
+config DIRECT_GBPAGES
+	bool "Enable gbpages-mapped kernel pagetables"
+	depends on DEBUG_KERNEL && EXPERIMENTAL && X86_64
+	help
+	  Enable gigabyte pages support (if the CPU supports it). This can
+	  improve the kernel's performance a tiny bit by reducing TLB
+	  pressure.
+
+	  This is experimental code.
+
+	  If in doubt, say "N".
+
 config DEBUG_RODATA_TEST
 	bool "Testcase for the DEBUG_RODATA feature"
 	depends on DEBUG_RODATA
@@ -82,8 +106,8 @@
 
 config 4KSTACKS
 	bool "Use 4Kb for kernel stacks instead of 8Kb"
-	depends on DEBUG_KERNEL
 	depends on X86_32
+	default y
 	help
 	  If you say Y here the kernel will use a 4Kb stacksize for the
 	  kernel stack attached to each process/thread. This facilitates
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index f1e739a..3cff3c8 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -151,7 +151,6 @@
 # 64 bit does not support subarch support - clear sub arch variables
 fcore-$(CONFIG_X86_64)  :=
 mcore-$(CONFIG_X86_64)  :=
-mflags-$(CONFIG_X86_64) :=
 
 KBUILD_CFLAGS += $(mflags-y)
 KBUILD_AFLAGS += $(mflags-y)
@@ -159,9 +158,9 @@
 ###
 # Kernel objects
 
-head-y                := arch/x86/kernel/head_$(BITS).o
-head-$(CONFIG_X86_64) += arch/x86/kernel/head64.o
-head-y                += arch/x86/kernel/init_task.o
+head-y := arch/x86/kernel/head_$(BITS).o
+head-y += arch/x86/kernel/head$(BITS).o
+head-y += arch/x86/kernel/init_task.o
 
 libs-y  += arch/x86/lib/
 
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index f88458e..7ee102f 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -30,7 +30,7 @@
 
 setup-y		+= a20.o cmdline.o copy.o cpu.o cpucheck.o edd.o
 setup-y		+= header.o main.o mca.o memory.o pm.o pmjump.o
-setup-y		+= printf.o string.o tty.o video.o version.o
+setup-y		+= printf.o string.o tty.o video.o video-mode.o version.o
 setup-$(CONFIG_X86_APM_BOOT) += apm.o
 setup-$(CONFIG_X86_VOYAGER) += voyager.o
 
@@ -94,6 +94,20 @@
 
 SETUP_OBJS = $(addprefix $(obj)/,$(setup-y))
 
+sed-offsets := -e 's/^00*/0/' \
+        -e 's/^\([0-9a-fA-F]*\) . \(input_data\|input_data_end\)$$/\#define \2 0x\1/p'
+
+quiet_cmd_offsets = OFFSETS $@
+      cmd_offsets = $(NM) $< | sed -n $(sed-offsets) > $@
+
+$(obj)/offsets.h: $(obj)/compressed/vmlinux FORCE
+	$(call if_changed,offsets)
+
+targets += offsets.h
+
+AFLAGS_header.o += -I$(obj)
+$(obj)/header.o: $(obj)/offsets.h
+
 LDFLAGS_setup.elf	:= -T
 $(obj)/setup.elf: $(src)/setup.ld $(SETUP_OBJS) FORCE
 	$(call if_changed,ld)
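
To make the sed pipeline above concrete: fed a symbol line from nm (addresses
invented for illustration), the first expression collapses the run of leading
zeros to a single 0 and the second rewrites the line into a define, so the
generated offsets.h looks like:

	/* from "00000000004249f0 D input_data" in the nm output: */
	#define input_data	0x04249f0
	/* from "0000000000534218 D input_data_end": */
	#define input_data_end	0x0534218

header.S includes this header so that the new payload_offset/payload_length
fields (see the header.S hunk below) can locate the compressed payload inside
the image.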
diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
index 7822a49..0957807 100644
--- a/arch/x86/boot/boot.h
+++ b/arch/x86/boot/boot.h
@@ -286,6 +286,11 @@
 /* video.c */
 void set_video(void);
 
+/* video-mode.c */
+int set_mode(u16 mode);
+int mode_defined(u16 mode);
+void probe_cards(int unsafe);
+
 /* video-vesa.c */
 void vesa_store_edid(void);
 
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index d2b9f3b..92fdd35 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -22,7 +22,7 @@
 	$(call if_changed,ld)
 	@:
 
-OBJCOPYFLAGS_vmlinux.bin := -O binary -R .note -R .comment -S
+OBJCOPYFLAGS_vmlinux.bin :=  -R .comment -S
 $(obj)/vmlinux.bin: vmlinux FORCE
 	$(call if_changed,objcopy)
 
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 8182e32..dad4e69 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -15,6 +15,10 @@
  * we just keep it from happening
  */
 #undef CONFIG_PARAVIRT
+#ifdef CONFIG_X86_32
+#define _ASM_DESC_H_ 1
+#endif
+
 #ifdef CONFIG_X86_64
 #define _LINUX_STRING_H_ 1
 #define __LINUX_BITMAP_H 1
@@ -22,6 +26,7 @@
 
 #include <linux/linkage.h>
 #include <linux/screen_info.h>
+#include <linux/elf.h>
 #include <asm/io.h>
 #include <asm/page.h>
 #include <asm/boot.h>
@@ -53,8 +58,8 @@
  * 1 bit (last block flag)
  * 2 bits (block type)
  *
- * 1 block occurs every 32K -1 bytes or when there 50% compression has been achieved.
- * The smallest block type encoding is always used.
+ * 1 block occurs every 32K -1 bytes or when 50% compression has
+ * been achieved. The smallest block type encoding is always used.
  *
  * stored:
  *    32 bits length in bytes.
@@ -90,9 +95,9 @@
  *
  * All of which is enough to compute an amount of extra data that is required
  * to be safe.  To avoid problems at the block level allocating 5 extra bytes
- * per 32767 bytes of data is sufficient.  To avoind problems internal to a block
- * adding an extra 32767 bytes (the worst case uncompressed block size) is
- * sufficient, to ensure that in the worst case the decompressed data for
+ * per 32767 bytes of data is sufficient.  To avoid problems internal to a
+ * block adding an extra 32767 bytes (the worst case uncompressed block size)
+ * is sufficient, to ensure that in the worst case the decompressed data for
  * block will stop the byte before the compressed data for a block begins.
  * To avoid problems with the compressed data's meta information an extra 18
  * bytes are needed.  Leading to the formula:
@@ -111,58 +116,66 @@
  * gzip declarations
  */
 
-#define OF(args)  args
-#define STATIC static
+#define OF(args)	args
+#define STATIC		static
 
 #undef memset
 #undef memcpy
-#define memzero(s, n)     memset ((s), 0, (n))
+#define memzero(s, n)	memset((s), 0, (n))
 
-typedef unsigned char  uch;
-typedef unsigned short ush;
-typedef unsigned long  ulg;
+typedef unsigned char	uch;
+typedef unsigned short	ush;
+typedef unsigned long	ulg;
 
-#define WSIZE 0x80000000	/* Window size must be at least 32k,
-				 * and a power of two
-				 * We don't actually have a window just
-				 * a huge output buffer so I report
-				 * a 2G windows size, as that should
-				 * always be larger than our output buffer.
-				 */
+/*
+ * Window size must be at least 32k, and a power of two.
+ * We don't actually have a window, just a huge output buffer,
+ * so we report a 2G window size, as that should always be
+ * larger than our output buffer:
+ */
+#define WSIZE		0x80000000
 
-static uch *inbuf;	/* input buffer */
-static uch *window;	/* Sliding window buffer, (and final output buffer) */
+/* Input buffer: */
+static unsigned char	*inbuf;
 
-static unsigned insize;  /* valid bytes in inbuf */
-static unsigned inptr;   /* index of next byte to be processed in inbuf */
-static unsigned outcnt;  /* bytes in output buffer */
+/* Sliding window buffer (and final output buffer): */
+static unsigned char	*window;
+
+/* Valid bytes in inbuf: */
+static unsigned		insize;
+
+/* Index of next byte to be processed in inbuf: */
+static unsigned		inptr;
+
+/* Bytes in output buffer: */
+static unsigned		outcnt;
 
 /* gzip flag byte */
-#define ASCII_FLAG   0x01 /* bit 0 set: file probably ASCII text */
-#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */
-#define EXTRA_FIELD  0x04 /* bit 2 set: extra field present */
-#define ORIG_NAME    0x08 /* bit 3 set: original file name present */
-#define COMMENT      0x10 /* bit 4 set: file comment present */
-#define ENCRYPTED    0x20 /* bit 5 set: file is encrypted */
-#define RESERVED     0xC0 /* bit 6,7:   reserved */
+#define ASCII_FLAG	0x01 /* bit 0 set: file probably ASCII text */
+#define CONTINUATION	0x02 /* bit 1 set: continuation of multi-part gz file */
+#define EXTRA_FIELD	0x04 /* bit 2 set: extra field present */
+#define ORIG_NAME	0x08 /* bit 3 set: original file name present */
+#define COMMENT		0x10 /* bit 4 set: file comment present */
+#define ENCRYPTED	0x20 /* bit 5 set: file is encrypted */
+#define RESERVED	0xC0 /* bit 6, 7:  reserved */
 
-#define get_byte()  (inptr < insize ? inbuf[inptr++] : fill_inbuf())
-		
+#define get_byte()	(inptr < insize ? inbuf[inptr++] : fill_inbuf())
+
 /* Diagnostic functions */
 #ifdef DEBUG
-#  define Assert(cond,msg) {if(!(cond)) error(msg);}
-#  define Trace(x) fprintf x
-#  define Tracev(x) {if (verbose) fprintf x ;}
-#  define Tracevv(x) {if (verbose>1) fprintf x ;}
-#  define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
-#  define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
+#  define Assert(cond, msg) do { if (!(cond)) error(msg); } while (0)
+#  define Trace(x)	do { fprintf x; } while (0)
+#  define Tracev(x)	do { if (verbose) fprintf x ; } while (0)
+#  define Tracevv(x)	do { if (verbose > 1) fprintf x ; } while (0)
+#  define Tracec(c, x)	do { if (verbose && (c)) fprintf x ; } while (0)
+#  define Tracecv(c, x)	do { if (verbose > 1 && (c)) fprintf x ; } while (0)
 #else
-#  define Assert(cond,msg)
+#  define Assert(cond, msg)
 #  define Trace(x)
 #  define Tracev(x)
 #  define Tracevv(x)
-#  define Tracec(c,x)
-#  define Tracecv(c,x)
+#  define Tracec(c, x)
+#  define Tracecv(c, x)
 #endif
 
 static int  fill_inbuf(void);
@@ -170,7 +183,7 @@
 static void error(char *m);
 static void gzip_mark(void **);
 static void gzip_release(void **);
-  
+
 /*
  * This is set up by the setup-routine at boot-time
  */
@@ -185,7 +198,7 @@
 extern unsigned char input_data[];
 extern int input_len;
 
-static long bytes_out = 0;
+static long bytes_out;
 
 static void *malloc(int size);
 static void free(void *where);
@@ -210,7 +223,7 @@
 #define HEAP_SIZE             0x4000
 #endif
 
-static char *vidmem = (char *)0xb8000;
+static char *vidmem;
 static int vidport;
 static int lines, cols;
 
@@ -224,8 +237,10 @@
 {
 	void *p;
 
-	if (size <0) error("Malloc error");
-	if (free_mem_ptr <= 0) error("Memory error");
+	if (size < 0)
+		error("Malloc error");
+	if (free_mem_ptr <= 0)
+		error("Memory error");
 
 	free_mem_ptr = (free_mem_ptr + 3) & ~3;	/* Align */
 
@@ -251,19 +266,19 @@
 {
 	free_mem_ptr = (memptr) *ptr;
 }
- 
+
 static void scroll(void)
 {
 	int i;
 
-	memcpy ( vidmem, vidmem + cols * 2, ( lines - 1 ) * cols * 2 );
-	for ( i = ( lines - 1 ) * cols * 2; i < lines * cols * 2; i += 2 )
+	memcpy(vidmem, vidmem + cols * 2, (lines - 1) * cols * 2);
+	for (i = (lines - 1) * cols * 2; i < lines * cols * 2; i += 2)
 		vidmem[i] = ' ';
 }
 
 static void putstr(const char *s)
 {
-	int x,y,pos;
+	int x, y, pos;
 	char c;
 
 #ifdef CONFIG_X86_32
@@ -274,18 +289,18 @@
 	x = RM_SCREEN_INFO.orig_x;
 	y = RM_SCREEN_INFO.orig_y;
 
-	while ( ( c = *s++ ) != '\0' ) {
-		if ( c == '\n' ) {
+	while ((c = *s++) != '\0') {
+		if (c == '\n') {
 			x = 0;
-			if ( ++y >= lines ) {
+			if (++y >= lines) {
 				scroll();
 				y--;
 			}
 		} else {
 			vidmem [(x + cols * y) * 2] = c;
-			if ( ++x >= cols ) {
+			if (++x >= cols) {
 				x = 0;
-				if ( ++y >= lines ) {
+				if (++y >= lines) {
 					scroll();
 					y--;
 				}
@@ -303,22 +318,22 @@
 	outb(0xff & (pos >> 1), vidport+1);
 }
 
-static void* memset(void* s, int c, unsigned n)
+static void *memset(void *s, int c, unsigned n)
 {
 	int i;
 	char *ss = s;
 
-	for (i=0;i<n;i++) ss[i] = c;
+	for (i = 0; i < n; i++) ss[i] = c;
 	return s;
 }
 
-static void* memcpy(void* dest, const void* src, unsigned n)
+static void *memcpy(void *dest, const void *src, unsigned n)
 {
 	int i;
 	const char *s = src;
 	char *d = dest;
 
-	for (i=0;i<n;i++) d[i] = s[i];
+	for (i = 0; i < n; i++) d[i] = s[i];
 	return dest;
 }
 
@@ -341,9 +356,9 @@
 	/* With my window equal to my output buffer
 	 * I only need to compute the crc here.
 	 */
-	ulg c = crc;         /* temporary variable */
+	unsigned long c = crc;         /* temporary variable */
 	unsigned n;
-	uch *in, ch;
+	unsigned char *in, ch;
 
 	in = window;
 	for (n = 0; n < outcnt; n++) {
@@ -351,7 +366,7 @@
 		c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
 	}
 	crc = c;
-	bytes_out += (ulg)outcnt;
+	bytes_out += (unsigned long)outcnt;
 	outcnt = 0;
 }
 
@@ -365,9 +380,59 @@
 		asm("hlt");
 }
 
+static void parse_elf(void *output)
+{
+#ifdef CONFIG_X86_64
+	Elf64_Ehdr ehdr;
+	Elf64_Phdr *phdrs, *phdr;
+#else
+	Elf32_Ehdr ehdr;
+	Elf32_Phdr *phdrs, *phdr;
+#endif
+	void *dest;
+	int i;
+
+	memcpy(&ehdr, output, sizeof(ehdr));
+	if (ehdr.e_ident[EI_MAG0] != ELFMAG0 ||
+	   ehdr.e_ident[EI_MAG1] != ELFMAG1 ||
+	   ehdr.e_ident[EI_MAG2] != ELFMAG2 ||
+	   ehdr.e_ident[EI_MAG3] != ELFMAG3) {
+		error("Kernel is not a valid ELF file");
+		return;
+	}
+
+	putstr("Parsing ELF... ");
+
+	phdrs = malloc(sizeof(*phdrs) * ehdr.e_phnum);
+	if (!phdrs)
+		error("Failed to allocate space for phdrs");
+
+	memcpy(phdrs, output + ehdr.e_phoff, sizeof(*phdrs) * ehdr.e_phnum);
+
+	for (i = 0; i < ehdr.e_phnum; i++) {
+		phdr = &phdrs[i];
+
+		switch (phdr->p_type) {
+		case PT_LOAD:
+#ifdef CONFIG_RELOCATABLE
+			dest = output;
+			dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
+#else
+			dest = (void *)(phdr->p_paddr);
+#endif
+			memcpy(dest,
+			       output + phdr->p_offset,
+			       phdr->p_filesz);
+			break;
+		default: /* Ignore other PT_* */ break;
+		}
+	}
+}
+
 asmlinkage void decompress_kernel(void *rmode, memptr heap,
-				  uch *input_data, unsigned long input_len,
-				  uch *output)
+				  unsigned char *input_data,
+				  unsigned long input_len,
+				  unsigned char *output)
 {
 	real_mode = rmode;
 
@@ -390,12 +455,12 @@
 	inptr  = 0;
 
 #ifdef CONFIG_X86_64
-	if ((ulg)output & (__KERNEL_ALIGN - 1))
+	if ((unsigned long)output & (__KERNEL_ALIGN - 1))
 		error("Destination address not 2M aligned");
-	if ((ulg)output >= 0xffffffffffUL)
+	if ((unsigned long)output >= 0xffffffffffUL)
 		error("Destination address too large");
 #else
-	if ((u32)output & (CONFIG_PHYSICAL_ALIGN -1))
+	if ((u32)output & (CONFIG_PHYSICAL_ALIGN - 1))
 		error("Destination address not CONFIG_PHYSICAL_ALIGN aligned");
 	if (heap > ((-__PAGE_OFFSET-(512<<20)-1) & 0x7fffffff))
 		error("Destination address too large");
@@ -408,6 +473,7 @@
 	makecrc();
 	putstr("\nDecompressing Linux... ");
 	gunzip();
+	parse_elf(output);
 	putstr("done.\nBooting the kernel.\n");
 	return;
 }
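
To put a number on the worst-case expansion comment earlier in this file,
here is a worked instance as a standalone sketch; the exact formula the
kernel ends up using falls outside the quoted hunk, so only the three visible
contributions are summed:

	#include <stdio.h>

	int main(void)
	{
		/* 5 bytes of block overhead per 32767-byte block, plus one
		 * worst-case uncompressed block, plus 18 bytes of gzip
		 * metadata -- for a 4 MiB decompressed kernel: */
		unsigned long len = 4UL << 20;
		unsigned long blocks = (len + 32766) / 32767;	/* round up */
		unsigned long margin = blocks * 5 + 32767 + 18;

		printf("margin: %lu bytes\n", margin);	/* 33430, ~32 KiB */
		return 0;
	}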
diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
index 769065b..2462c88 100644
--- a/arch/x86/boot/cpucheck.c
+++ b/arch/x86/boot/cpucheck.c
@@ -56,27 +56,27 @@
 	REQUIRED_MASK7,
 };
 
-#define A32(a,b,c,d) (((d) << 24)+((c) << 16)+((b) << 8)+(a))
+#define A32(a, b, c, d) (((d) << 24)+((c) << 16)+((b) << 8)+(a))
 
 static int is_amd(void)
 {
-	return cpu_vendor[0] == A32('A','u','t','h') &&
-	       cpu_vendor[1] == A32('e','n','t','i') &&
-	       cpu_vendor[2] == A32('c','A','M','D');
+	return cpu_vendor[0] == A32('A', 'u', 't', 'h') &&
+	       cpu_vendor[1] == A32('e', 'n', 't', 'i') &&
+	       cpu_vendor[2] == A32('c', 'A', 'M', 'D');
 }
 
 static int is_centaur(void)
 {
-	return cpu_vendor[0] == A32('C','e','n','t') &&
-	       cpu_vendor[1] == A32('a','u','r','H') &&
-	       cpu_vendor[2] == A32('a','u','l','s');
+	return cpu_vendor[0] == A32('C', 'e', 'n', 't') &&
+	       cpu_vendor[1] == A32('a', 'u', 'r', 'H') &&
+	       cpu_vendor[2] == A32('a', 'u', 'l', 's');
 }
 
 static int is_transmeta(void)
 {
-	return cpu_vendor[0] == A32('G','e','n','u') &&
-	       cpu_vendor[1] == A32('i','n','e','T') &&
-	       cpu_vendor[2] == A32('M','x','8','6');
+	return cpu_vendor[0] == A32('G', 'e', 'n', 'u') &&
+	       cpu_vendor[1] == A32('i', 'n', 'e', 'T') &&
+	       cpu_vendor[2] == A32('M', 'x', '8', '6');
 }
 
 static int has_fpu(void)
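
The A32() macro packs four characters little-endian, so the comparisons match
the raw CPUID vendor registers byte for byte. A quick standalone check
(little-endian host assumed):

	#include <stdio.h>
	#include <string.h>

	#define A32(a, b, c, d) (((d) << 24)+((c) << 16)+((b) << 8)+(a))

	int main(void)
	{
		unsigned int packed = A32('A', 'u', 't', 'h');
		unsigned int raw;

		memcpy(&raw, "Auth", 4);	/* first word of "AuthenticAMD" */
		printf("0x%08x 0x%08x\n", packed, raw);	/* both 0x68747541 */
		return 0;
	}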
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index 64ad901..6d2df8d 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -22,6 +22,7 @@
 #include <asm/page.h>
 #include <asm/setup.h>
 #include "boot.h"
+#include "offsets.h"
 
 SETUPSECTS	= 4			/* default nr of setup-sectors */
 BOOTSEG		= 0x07C0		/* original address of boot-sector */
@@ -119,7 +120,7 @@
 	# Part 2 of the header, from the old setup.S
 
 		.ascii	"HdrS"		# header signature
-		.word	0x0207		# header version number (>= 0x0105)
+		.word	0x0208		# header version number (>= 0x0105)
 					# or else old loadlin-1.5 will fail)
 		.globl realmode_swtch
 realmode_swtch:	.word	0, 0		# default_switch, SETUPSEG
@@ -223,6 +224,9 @@
 
 hardware_subarch_data:	.quad 0
 
+payload_offset:		.long input_data
+payload_length:		.long input_data_end-input_data
+
 # End of setup header #####################################################
 
 	.section ".inittext", "ax"
diff --git a/arch/x86/boot/pm.c b/arch/x86/boot/pm.c
index 1a0f936..a93cb8b 100644
--- a/arch/x86/boot/pm.c
+++ b/arch/x86/boot/pm.c
@@ -100,7 +100,7 @@
 /*
  * Set up the GDT
  */
-#define GDT_ENTRY(flags,base,limit)		\
+#define GDT_ENTRY(flags, base, limit)		\
 	(((u64)(base & 0xff000000) << 32) |	\
 	 ((u64)flags << 40) |			\
 	 ((u64)(limit & 0x00ff0000) << 32) |	\
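
For reference, the macro applied to the classic flat boot code segment; note
the hunk shows only the first three terms, so the remaining two lines below
are reconstructed from the standard descriptor layout:

	#include <stdio.h>
	#include <stdint.h>

	typedef uint64_t u64;

	#define GDT_ENTRY(flags, base, limit)		\
		(((u64)((base) & 0xff000000) << 32) |	\
		 ((u64)(flags) << 40) |			\
		 ((u64)((limit) & 0x00ff0000) << 32) |	\
		 ((u64)((base) & 0x00ffffff) << 16) |	\
		 ((u64)((limit) & 0x0000ffff)))

	int main(void)
	{
		/* flags 0xc09b: present, DPL 0, 32-bit code, 4K granularity;
		 * base 0, limit 0xfffff pages -> flat 4 GiB segment */
		printf("0x%016llx\n",
		       (unsigned long long)GDT_ENTRY(0xc09b, 0, 0xfffff));
		return 0;	/* prints 0x00cf9b000000ffff */
	}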
diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c
index b424874..44dc192 100644
--- a/arch/x86/boot/tools/build.c
+++ b/arch/x86/boot/tools/build.c
@@ -50,6 +50,75 @@
 u8 buf[SETUP_SECT_MAX*512];
 int is_big_kernel;
 
+/*----------------------------------------------------------------------*/
+
+static const u32 crctab32[] = {
+	0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419,
+	0x706af48f, 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4,
+	0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07,
+	0x90bf1d91, 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
+	0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, 0x136c9856,
+	0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
+	0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4,
+	0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
+	0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3,
+	0x45df5c75, 0xdcd60dcf, 0xabd13d59, 0x26d930ac, 0x51de003a,
+	0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599,
+	0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
+	0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190,
+	0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f,
+	0x9fbfe4a5, 0xe8b8d433, 0x7807c9a2, 0x0f00f934, 0x9609a88e,
+	0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
+	0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed,
+	0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
+	0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3,
+	0xfbd44c65, 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
+	0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a,
+	0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5,
+	0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa, 0xbe0b1010,
+	0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
+	0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17,
+	0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6,
+	0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615,
+	0x73dc1683, 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
+	0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, 0xf00f9344,
+	0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
+	0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a,
+	0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
+	0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1,
+	0xa6bc5767, 0x3fb506dd, 0x48b2364b, 0xd80d2bda, 0xaf0a1b4c,
+	0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef,
+	0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
+	0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe,
+	0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31,
+	0x2cd99e8b, 0x5bdeae1d, 0x9b64c2b0, 0xec63f226, 0x756aa39c,
+	0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
+	0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b,
+	0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
+	0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1,
+	0x18b74777, 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
+	0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, 0xa00ae278,
+	0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7,
+	0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc, 0x40df0b66,
+	0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
+	0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605,
+	0xcdd70693, 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8,
+	0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b,
+	0x2d02ef8d
+};
+
+static u32 partial_crc32_one(u8 c, u32 crc)
+{
+	return crctab32[(crc ^ c) & 0xff] ^ (crc >> 8);
+}
+
+static u32 partial_crc32(const u8 *s, int len, u32 crc)
+{
+	while (len--)
+		crc = partial_crc32_one(*s++, crc);
+	return crc;
+}
+
 static void die(const char * str, ...)
 {
 	va_list args;
@@ -74,6 +143,7 @@
 	FILE *file;
 	int fd;
 	void *kernel;
+	u32 crc = 0xffffffffUL;
 
 	if (argc > 2 && !strcmp(argv[1], "-b"))
 	  {
@@ -144,7 +214,8 @@
 	kernel = mmap(NULL, sz, PROT_READ, MAP_SHARED, fd, 0);
 	if (kernel == MAP_FAILED)
 		die("Unable to mmap '%s': %m", argv[2]);
-	sys_size = (sz + 15) / 16;
+	/* Number of 16-byte paragraphs, including space for a 4-byte CRC */
+	sys_size = (sz + 15 + 4) / 16;
 	if (!is_big_kernel && sys_size > DEF_SYSSIZE)
 		die("System is too big. Try using bzImage or modules.");
 
@@ -155,12 +226,27 @@
 	buf[0x1f6] = sys_size >> 16;
 	buf[0x1f7] = sys_size >> 24;
 
+	crc = partial_crc32(buf, i, crc);
 	if (fwrite(buf, 1, i, stdout) != i)
 		die("Writing setup failed");
 
 	/* Copy the kernel code */
+	crc = partial_crc32(kernel, sz, crc);
 	if (fwrite(kernel, 1, sz, stdout) != sz)
 		die("Writing kernel failed");
+
+	/* Add padding leaving 4 bytes for the checksum */
+	while (sz++ < (sys_size*16) - 4) {
+		crc = partial_crc32_one('\0', crc);
+		if (fwrite("\0", 1, 1, stdout) != 1)
+			die("Writing padding failed");
+	}
+
+	/* Write the CRC */
+	fprintf(stderr, "CRC %lx\n", crc);
+	if (fwrite(&crc, 1, 4, stdout) != 4)
+		die("Writing CRC failed");
+
 	close(fd);
 
 	/* Everything is OK */
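
Why append the raw, non-inverted CRC after the padding? Feeding the
register's own value back through the CRC drives it to zero, so a loader can
verify the whole sys_size*16-byte image against a constant. A self-contained
sketch of that property, using a bitwise step equivalent to the table-driven
partial_crc32() (little-endian host assumed, matching the fwrite of the u32
above):

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>

	/* same reflected 0xedb88320 polynomial as crctab32[] */
	static uint32_t crc32_step(uint8_t c, uint32_t crc)
	{
		int i;

		crc ^= c;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320UL : 0);
		return crc;
	}

	int main(void)
	{
		uint8_t image[20] = "toy kernel image";	/* 16 payload bytes */
		uint32_t crc = 0xffffffffUL;	/* same seed as build.c */
		size_t i;

		for (i = 0; i < 16; i++)
			crc = crc32_step(image[i], crc);
		memcpy(image + 16, &crc, 4);	/* append raw CRC, LE */

		/* re-scanning the image *including* the stored CRC: */
		crc = 0xffffffffUL;
		for (i = 0; i < sizeof(image); i++)
			crc = crc32_step(image[i], crc);
		printf("residue 0x%08x\n", (unsigned)crc);	/* 0x00000000 */
		return 0;
	}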
diff --git a/arch/x86/boot/video-bios.c b/arch/x86/boot/video-bios.c
index ff664a1..39e247e 100644
--- a/arch/x86/boot/video-bios.c
+++ b/arch/x86/boot/video-bios.c
@@ -50,6 +50,7 @@
 	if (new_mode == mode)
 		return 0;	/* Mode change OK */
 
+#ifndef _WAKEUP
 	if (new_mode != boot_params.screen_info.orig_video_mode) {
 		/* Mode setting failed, but we didn't end up where we
 		   started.  That's bad.  Try to revert to the original
@@ -59,13 +60,18 @@
 			     : "+a" (ax)
 			     : : "ebx", "ecx", "edx", "esi", "edi");
 	}
+#endif
 	return -1;
 }
 
 static int bios_probe(void)
 {
 	u8 mode;
+#ifdef _WAKEUP
+	u8 saved_mode = 0x03;
+#else
 	u8 saved_mode = boot_params.screen_info.orig_video_mode;
+#endif
 	u16 crtc;
 	struct mode_info *mi;
 	int nmodes = 0;
diff --git a/arch/x86/boot/video-mode.c b/arch/x86/boot/video-mode.c
new file mode 100644
index 0000000..748e8d0
--- /dev/null
+++ b/arch/x86/boot/video-mode.c
@@ -0,0 +1,173 @@
+/* -*- linux-c -*- ------------------------------------------------------- *
+ *
+ *   Copyright (C) 1991, 1992 Linus Torvalds
+ *   Copyright 2007-2008 rPath, Inc. - All Rights Reserved
+ *
+ *   This file is part of the Linux kernel, and is made available under
+ *   the terms of the GNU General Public License version 2.
+ *
+ * ----------------------------------------------------------------------- */
+
+/*
+ * arch/i386/boot/video-mode.c
+ *
+ * Set the video mode.  This is separated out into a different
+ * file in order to be shared with the ACPI wakeup code.
+ */
+
+#include "boot.h"
+#include "video.h"
+#include "vesa.h"
+
+/*
+ * Common variables
+ */
+int adapter;			/* 0=CGA/MDA/HGC, 1=EGA, 2=VGA+ */
+u16 video_segment;
+int force_x, force_y;	/* Don't query the BIOS for cols/rows */
+
+int do_restore;		/* Screen contents changed during mode flip */
+int graphic_mode;	/* Graphic mode with linear frame buffer */
+
+/* Probe the video drivers and have them generate their mode lists. */
+void probe_cards(int unsafe)
+{
+	struct card_info *card;
+	static u8 probed[2];
+
+	if (probed[unsafe])
+		return;
+
+	probed[unsafe] = 1;
+
+	for (card = video_cards; card < video_cards_end; card++) {
+		if (card->unsafe == unsafe) {
+			if (card->probe)
+				card->nmodes = card->probe();
+			else
+				card->nmodes = 0;
+		}
+	}
+}
+
+/* Test if a mode is defined */
+int mode_defined(u16 mode)
+{
+	struct card_info *card;
+	struct mode_info *mi;
+	int i;
+
+	for (card = video_cards; card < video_cards_end; card++) {
+		mi = card->modes;
+		for (i = 0; i < card->nmodes; i++, mi++) {
+			if (mi->mode == mode)
+				return 1;
+		}
+	}
+
+	return 0;
+}
+
+/* Set mode (without recalc) */
+static int raw_set_mode(u16 mode, u16 *real_mode)
+{
+	int nmode, i;
+	struct card_info *card;
+	struct mode_info *mi;
+
+	/* Drop the recalc bit if set */
+	mode &= ~VIDEO_RECALC;
+
+	/* Scan for mode based on fixed ID, position, or resolution */
+	nmode = 0;
+	for (card = video_cards; card < video_cards_end; card++) {
+		mi = card->modes;
+		for (i = 0; i < card->nmodes; i++, mi++) {
+			int visible = mi->x || mi->y;
+
+			if ((mode == nmode && visible) ||
+			    mode == mi->mode ||
+			    mode == (mi->y << 8)+mi->x) {
+				*real_mode = mi->mode;
+				return card->set_mode(mi);
+			}
+
+			if (visible)
+				nmode++;
+		}
+	}
+
+	/* Nothing found?  Is it an "exceptional" (unprobed) mode? */
+	for (card = video_cards; card < video_cards_end; card++) {
+		if (mode >= card->xmode_first &&
+		    mode < card->xmode_first+card->xmode_n) {
+			struct mode_info mix;
+			*real_mode = mix.mode = mode;
+			mix.x = mix.y = 0;
+			return card->set_mode(&mix);
+		}
+	}
+
+	/* Otherwise, failure... */
+	return -1;
+}
+
+/*
+ * Recalculate the vertical video cutoff (hack!)
+ */
+static void vga_recalc_vertical(void)
+{
+	unsigned int font_size, rows;
+	u16 crtc;
+	u8 pt, ov;
+
+	set_fs(0);
+	font_size = rdfs8(0x485); /* BIOS: font size (pixels) */
+	rows = force_y ? force_y : rdfs8(0x484)+1; /* Text rows */
+
+	rows *= font_size;	/* Visible scan lines */
+	rows--;			/* ... minus one */
+
+	crtc = vga_crtc();
+
+	pt = in_idx(crtc, 0x11);
+	pt &= ~0x80;		/* Unlock CR0-7 */
+	out_idx(pt, crtc, 0x11);
+
+	out_idx((u8)rows, crtc, 0x12); /* Lower height register */
+
+	ov = in_idx(crtc, 0x07); /* Overflow register */
+	ov &= 0xbd;
+	ov |= (rows >> (8-1)) & 0x02;
+	ov |= (rows >> (9-6)) & 0x40;
+	out_idx(ov, crtc, 0x07);
+}
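
The CRTC's vertical display end is a 10-bit scan-line count split across two registers: bits 0-7 go in CR12, while the overflow register CR07 holds count bit 8 in its bit 1 and count bit 9 in its bit 6 (the 0xbd mask clears exactly those two bits before they are recomputed). A worked example for 30 text rows of a 16-pixel font:

	/* rows = 30 * 16 - 1 = 479 = 0x1df
	 *   CR12 <- 0xdf        (low eight bits)
	 *   CR07 bit 1 <- 1     (bit 8 of 479)
	 *   CR07 bit 6 <- 0     (bit 9 of 479)
	 * The shift expressions above compute the same thing: */
	static unsigned char vde_overflow_bits(unsigned int rows)
	{
		return ((rows >> (8 - 1)) & 0x02) |	/* bit 8 -> CR07 bit 1 */
		       ((rows >> (9 - 6)) & 0x40);	/* bit 9 -> CR07 bit 6 */
	}
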
+
+/* Set mode (with recalc if specified) */
+int set_mode(u16 mode)
+{
+	int rv;
+	u16 real_mode;
+
+	/* Very special mode numbers... */
+	if (mode == VIDEO_CURRENT_MODE)
+		return 0;	/* Nothing to do... */
+	else if (mode == NORMAL_VGA)
+		mode = VIDEO_80x25;
+	else if (mode == EXTENDED_VGA)
+		mode = VIDEO_8POINT;
+
+	rv = raw_set_mode(mode, &real_mode);
+	if (rv)
+		return rv;
+
+	if (mode & VIDEO_RECALC)
+		vga_recalc_vertical();
+
+	/* Save the canonical mode number for the kernel, not
+	   an alias, size specification or menu position */
+#ifndef _WAKEUP
+	boot_params.hdr.vid_mode = real_mode;
+#endif
+	return 0;
+}
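
raw_set_mode() accepts the same mode number in three encodings: a menu index counting only the visible (x/y != 0) modes, a driver mode ID, or a packed resolution (rows << 8) + columns; set_mode() additionally honours the VIDEO_RECALC flag. Illustrative calls (which table entry a given menu index hits depends on probe order):

	set_mode(VIDEO_80x25);		/* by fixed mode ID */
	set_mode((50 << 8) + 80);	/* by resolution: 80 columns x 50 rows */
	set_mode(3 | VIDEO_RECALC);	/* menu position 3, then redo CRTC math */
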
diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
index 419b5c2..5d5a3f6 100644
--- a/arch/x86/boot/video-vesa.c
+++ b/arch/x86/boot/video-vesa.c
@@ -24,7 +24,11 @@
 
 __videocard video_vesa;
 
+#ifndef _WAKEUP
 static void vesa_store_mode_params_graphics(void);
+#else /* _WAKEUP */
+static inline void vesa_store_mode_params_graphics(void) {}
+#endif /* _WAKEUP */
 
 static int vesa_probe(void)
 {
@@ -165,6 +169,8 @@
 }
 
 
+#ifndef _WAKEUP
+
 /* Switch DAC to 8-bit mode */
 static void vesa_dac_set_8bits(void)
 {
@@ -288,6 +294,8 @@
 #endif /* CONFIG_FIRMWARE_EDID */
 }
 
+#endif /* not _WAKEUP */
+
 __videocard video_vesa =
 {
 	.card_name	= "VESA",
diff --git a/arch/x86/boot/video-vga.c b/arch/x86/boot/video-vga.c
index 7259387..330d658 100644
--- a/arch/x86/boot/video-vga.c
+++ b/arch/x86/boot/video-vga.c
@@ -210,6 +210,8 @@
  */
 static int vga_probe(void)
 {
+	u16 ega_bx;
+
 	static const char *card_name[] = {
 		"CGA/MDA/HGC", "EGA", "VGA"
 	};
@@ -226,12 +228,16 @@
 	u8 vga_flag;
 
 	asm(INT10
-	    : "=b" (boot_params.screen_info.orig_video_ega_bx)
+	    : "=b" (ega_bx)
 	    : "a" (0x1200), "b" (0x10) /* Check EGA/VGA */
 	    : "ecx", "edx", "esi", "edi");
 
+#ifndef _WAKEUP
+	boot_params.screen_info.orig_video_ega_bx = ega_bx;
+#endif
+
 	/* If we have MDA/CGA/HGC then BL will be unchanged at 0x10 */
-	if ((u8)boot_params.screen_info.orig_video_ega_bx != 0x10) {
+	if ((u8)ega_bx != 0x10) {
 		/* EGA/VGA */
 		asm(INT10
 		    : "=a" (vga_flag)
@@ -240,7 +246,9 @@
 
 		if (vga_flag == 0x1a) {
 			adapter = ADAPTER_VGA;
+#ifndef _WAKEUP
 			boot_params.screen_info.orig_video_isVGA = 1;
+#endif
 		} else {
 			adapter = ADAPTER_EGA;
 		}
diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
index 696d08f..c1c47ba 100644
--- a/arch/x86/boot/video.c
+++ b/arch/x86/boot/video.c
@@ -18,21 +18,6 @@
 #include "video.h"
 #include "vesa.h"
 
-/*
- * Mode list variables
- */
-static struct card_info cards[];    /* List of cards to probe for */
-
-/*
- * Common variables
- */
-int adapter;			/* 0=CGA/MDA/HGC, 1=EGA, 2=VGA+ */
-u16 video_segment;
-int force_x, force_y;	/* Don't query the BIOS for cols/rows */
-
-int do_restore = 0;	/* Screen contents changed during mode flip */
-int graphic_mode;	/* Graphic mode with linear frame buffer */
-
 static void store_cursor_position(void)
 {
 	u16 curpos;
@@ -107,147 +92,6 @@
 	boot_params.screen_info.orig_video_lines = y;
 }
 
-/* Probe the video drivers and have them generate their mode lists. */
-static void probe_cards(int unsafe)
-{
-	struct card_info *card;
-	static u8 probed[2];
-
-	if (probed[unsafe])
-		return;
-
-	probed[unsafe] = 1;
-
-	for (card = video_cards; card < video_cards_end; card++) {
-		if (card->unsafe == unsafe) {
-			if (card->probe)
-				card->nmodes = card->probe();
-			else
-				card->nmodes = 0;
-		}
-	}
-}
-
-/* Test if a mode is defined */
-int mode_defined(u16 mode)
-{
-	struct card_info *card;
-	struct mode_info *mi;
-	int i;
-
-	for (card = video_cards; card < video_cards_end; card++) {
-		mi = card->modes;
-		for (i = 0; i < card->nmodes; i++, mi++) {
-			if (mi->mode == mode)
-				return 1;
-		}
-	}
-
-	return 0;
-}
-
-/* Set mode (without recalc) */
-static int raw_set_mode(u16 mode, u16 *real_mode)
-{
-	int nmode, i;
-	struct card_info *card;
-	struct mode_info *mi;
-
-	/* Drop the recalc bit if set */
-	mode &= ~VIDEO_RECALC;
-
-	/* Scan for mode based on fixed ID, position, or resolution */
-	nmode = 0;
-	for (card = video_cards; card < video_cards_end; card++) {
-		mi = card->modes;
-		for (i = 0; i < card->nmodes; i++, mi++) {
-			int visible = mi->x || mi->y;
-
-			if ((mode == nmode && visible) ||
-			    mode == mi->mode ||
-			    mode == (mi->y << 8)+mi->x) {
-				*real_mode = mi->mode;
-				return card->set_mode(mi);
-			}
-
-			if (visible)
-				nmode++;
-		}
-	}
-
-	/* Nothing found?  Is it an "exceptional" (unprobed) mode? */
-	for (card = video_cards; card < video_cards_end; card++) {
-		if (mode >= card->xmode_first &&
-		    mode < card->xmode_first+card->xmode_n) {
-			struct mode_info mix;
-			*real_mode = mix.mode = mode;
-			mix.x = mix.y = 0;
-			return card->set_mode(&mix);
-		}
-	}
-
-	/* Otherwise, failure... */
-	return -1;
-}
-
-/*
- * Recalculate the vertical video cutoff (hack!)
- */
-static void vga_recalc_vertical(void)
-{
-	unsigned int font_size, rows;
-	u16 crtc;
-	u8 pt, ov;
-
-	set_fs(0);
-	font_size = rdfs8(0x485); /* BIOS: font size (pixels) */
-	rows = force_y ? force_y : rdfs8(0x484)+1; /* Text rows */
-
-	rows *= font_size;	/* Visible scan lines */
-	rows--;			/* ... minus one */
-
-	crtc = vga_crtc();
-
-	pt = in_idx(crtc, 0x11);
-	pt &= ~0x80;		/* Unlock CR0-7 */
-	out_idx(pt, crtc, 0x11);
-
-	out_idx((u8)rows, crtc, 0x12); /* Lower height register */
-
-	ov = in_idx(crtc, 0x07); /* Overflow register */
-	ov &= 0xbd;
-	ov |= (rows >> (8-1)) & 0x02;
-	ov |= (rows >> (9-6)) & 0x40;
-	out_idx(ov, crtc, 0x07);
-}
-
-/* Set mode (with recalc if specified) */
-static int set_mode(u16 mode)
-{
-	int rv;
-	u16 real_mode;
-
-	/* Very special mode numbers... */
-	if (mode == VIDEO_CURRENT_MODE)
-		return 0;	/* Nothing to do... */
-	else if (mode == NORMAL_VGA)
-		mode = VIDEO_80x25;
-	else if (mode == EXTENDED_VGA)
-		mode = VIDEO_8POINT;
-
-	rv = raw_set_mode(mode, &real_mode);
-	if (rv)
-		return rv;
-
-	if (mode & VIDEO_RECALC)
-		vga_recalc_vertical();
-
-	/* Save the canonical mode number for the kernel, not
-	   an alias, size specification or menu position */
-	boot_params.hdr.vid_mode = real_mode;
-	return 0;
-}
-
 static unsigned int get_entry(void)
 {
 	char entry_buf[4];
@@ -486,6 +330,7 @@
 		printf("Undefined video mode number: %x\n", mode);
 		mode = ASK_VGA;
 	}
+	boot_params.hdr.vid_mode = mode;
 	vesa_store_edid();
 	store_mode_params();
 
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index 5e7771a..05e155d 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -468,7 +468,7 @@
 		restorer = ka->sa.sa_restorer;
 	} else {
 		/* Return stub is in 32bit vsyscall page */
-		if (current->binfmt->hasvdso)
+		if (current->mm->context.vdso)
 			restorer = VDSO32_SYMBOL(current->mm->context.vdso,
 						 sigreturn);
 		else
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 8022d3c..ae7158b 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -162,12 +162,14 @@
 	SAVE_REST
 	CLEAR_RREGS
 	movq	%r9,R9(%rsp)
-	movq	$-ENOSYS,RAX(%rsp)	/* really needed? */
+	movq	$-ENOSYS,RAX(%rsp)	/* ptrace can change this for a bad syscall */
 	movq	%rsp,%rdi        /* &pt_regs -> arg1 */
 	call	syscall_trace_enter
 	LOAD_ARGS32 ARGOFFSET  /* reload args from stack in case ptrace changed it */
 	RESTORE_REST
 	xchgl	%ebp,%r9d
+	cmpl	$(IA32_NR_syscalls-1),%eax
+	ja	int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
 	jmp	sysenter_do_call
 	CFI_ENDPROC
 ENDPROC(ia32_sysenter_target)
@@ -261,13 +263,15 @@
 	SAVE_REST
 	CLEAR_RREGS
 	movq %r9,R9(%rsp)
-	movq $-ENOSYS,RAX(%rsp)	/* really needed? */
+	movq $-ENOSYS,RAX(%rsp)	/* ptrace can change this for a bad syscall */
 	movq %rsp,%rdi        /* &pt_regs -> arg1 */
 	call syscall_trace_enter
 	LOAD_ARGS32 ARGOFFSET  /* reload args from stack in case ptrace changed it */
 	RESTORE_REST
 	xchgl %ebp,%r9d
 	movl RSP-ARGOFFSET(%rsp), %r8d
+	cmpl $(IA32_NR_syscalls-1),%eax
+	ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
 	jmp cstar_do_call
 END(ia32_cstar_target)
 				
@@ -325,7 +329,7 @@
 	jnz ia32_tracesys
 ia32_do_syscall:	
 	cmpl $(IA32_NR_syscalls-1),%eax
-	ja  ia32_badsys
+	ja  int_ret_from_sys_call	/* ia32_tracesys has set RAX(%rsp) */
 	IA32_ARG_FIXUP
 	call *ia32_sys_call_table(,%rax,8) # xxx: rip relative
 ia32_sysret:
@@ -335,7 +339,7 @@
 ia32_tracesys:			 
 	SAVE_REST
 	CLEAR_RREGS
-	movq $-ENOSYS,RAX(%rsp)	/* really needed? */
+	movq $-ENOSYS,RAX(%rsp)	/* ptrace can change this for a bad syscall */
 	movq %rsp,%rdi        /* &pt_regs -> arg1 */
 	call syscall_trace_enter
 	LOAD_ARGS32 ARGOFFSET  /* reload args from stack in case ptrace changed it */
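
The point of the added compare-and-branch in each traced path: syscall_trace_enter() gives the tracer a chance to rewrite the syscall number, so the bounds check must be redone before %eax indexes the call table, and a bad number falls through to the -ENOSYS already staged in RAX(%rsp). A C rendering of the control flow (a sketch of the logic, not the actual dispatcher):

	long traced_dispatch(struct pt_regs *regs)
	{
		regs->ax = -ENOSYS;		/* default, visible to ptrace */
		syscall_trace_enter(regs);	/* tracer may rewrite orig_ax */

		if (regs->orig_ax > IA32_NR_syscalls - 1)
			return regs->ax;	/* out of range: keep -ENOSYS */

		return ia32_sys_call_table[regs->orig_ax](regs);
	}
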
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index abf71d2..7cede7a 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -26,51 +26,27 @@
 #include <linux/file.h>
 #include <linux/signal.h>
 #include <linux/syscalls.h>
-#include <linux/resource.h>
 #include <linux/times.h>
 #include <linux/utsname.h>
-#include <linux/smp.h>
 #include <linux/smp_lock.h>
-#include <linux/sem.h>
-#include <linux/msg.h>
 #include <linux/mm.h>
-#include <linux/shm.h>
-#include <linux/slab.h>
 #include <linux/uio.h>
-#include <linux/nfs_fs.h>
-#include <linux/quota.h>
-#include <linux/module.h>
-#include <linux/sunrpc/svc.h>
-#include <linux/nfsd/nfsd.h>
-#include <linux/nfsd/cache.h>
-#include <linux/nfsd/xdr.h>
-#include <linux/nfsd/syscall.h>
 #include <linux/poll.h>
 #include <linux/personality.h>
 #include <linux/stat.h>
-#include <linux/ipc.h>
 #include <linux/rwsem.h>
-#include <linux/binfmts.h>
-#include <linux/init.h>
-#include <linux/aio_abi.h>
-#include <linux/aio.h>
 #include <linux/compat.h>
 #include <linux/vfs.h>
 #include <linux/ptrace.h>
 #include <linux/highuid.h>
-#include <linux/vmalloc.h>
-#include <linux/fsnotify.h>
 #include <linux/sysctl.h>
 #include <asm/mman.h>
 #include <asm/types.h>
 #include <asm/uaccess.h>
 #include <asm/semaphore.h>
 #include <asm/atomic.h>
-#include <asm/ldt.h>
-
-#include <net/scm.h>
-#include <net/sock.h>
 #include <asm/ia32.h>
+#include <asm/vgtod.h>
 
 #define AA(__x)		((unsigned long)(__x))
 
@@ -804,11 +780,6 @@
 	if (IS_ERR(filename))
 		return error;
 	error = compat_do_execve(filename, argv, envp, regs);
-	if (error == 0) {
-		task_lock(current);
-		current->ptrace &= ~PT_DTRACE;
-		task_unlock(current);
-	}
 	putname(filename);
 	return error;
 }
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 4eb5ce8..c3920ea 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -2,8 +2,7 @@
 # Makefile for the linux kernel.
 #
 
-extra-y                := head_$(BITS).o init_task.o vmlinux.lds
-extra-$(CONFIG_X86_64) += head64.o
+extra-y                := head_$(BITS).o head$(BITS).o init_task.o vmlinux.lds
 
 CPPFLAGS_vmlinux.lds += -U$(UTS_MACHINE)
 
@@ -19,7 +18,7 @@
 obj-y			:= process_$(BITS).o signal_$(BITS).o entry_$(BITS).o
 obj-y			+= traps_$(BITS).o irq_$(BITS).o
 obj-y			+= time_$(BITS).o ioport.o ldt.o
-obj-y			+= setup_$(BITS).o i8259_$(BITS).o
+obj-y			+= setup_$(BITS).o i8259_$(BITS).o setup.o
 obj-$(CONFIG_X86_32)	+= sys_i386_32.o i386_ksyms_32.o
 obj-$(CONFIG_X86_64)	+= sys_x86_64.o x8664_ksyms_64.o
 obj-$(CONFIG_X86_64)	+= syscall_64.o vsyscall_64.o setup64.o
@@ -29,6 +28,7 @@
 obj-$(CONFIG_X86_64)	+= pci-nommu_64.o bugs_64.o
 obj-y			+= tsc_$(BITS).o io_delay.o rtc.o
 
+obj-$(CONFIG_X86_TRAMPOLINE)	+= trampoline.o
 obj-y				+= i387.o
 obj-y				+= ptrace.o
 obj-y				+= ds.o
@@ -47,11 +47,12 @@
 obj-$(CONFIG_PCI)		+= early-quirks.o
 apm-y				:= apm_32.o
 obj-$(CONFIG_APM)		+= apm.o
-obj-$(CONFIG_X86_SMP)		+= smp_$(BITS).o smpboot_$(BITS).o tsc_sync.o
-obj-$(CONFIG_X86_32_SMP)	+= smpcommon_32.o
-obj-$(CONFIG_X86_64_SMP)	+= smp_64.o smpboot_64.o tsc_sync.o
+obj-$(CONFIG_X86_SMP)		+= smp.o
+obj-$(CONFIG_X86_SMP)		+= smpboot.o tsc_sync.o ipi.o tlb_$(BITS).o
+obj-$(CONFIG_X86_32_SMP)	+= smpcommon.o
+obj-$(CONFIG_X86_64_SMP)	+= tsc_sync.o smpcommon.o
 obj-$(CONFIG_X86_TRAMPOLINE)	+= trampoline_$(BITS).o
-obj-$(CONFIG_X86_MPPARSE)	+= mpparse_$(BITS).o
+obj-$(CONFIG_X86_MPPARSE)	+= mpparse.o
 obj-$(CONFIG_X86_LOCAL_APIC)	+= apic_$(BITS).o nmi_$(BITS).o
 obj-$(CONFIG_X86_IO_APIC)	+= io_apic_$(BITS).o
 obj-$(CONFIG_X86_REBOOTFIXUPS)	+= reboot_fixups_32.o
@@ -60,12 +61,13 @@
 obj-$(CONFIG_CRASH_DUMP)	+= crash_dump_$(BITS).o
 obj-$(CONFIG_X86_NUMAQ)		+= numaq_32.o
 obj-$(CONFIG_X86_SUMMIT_NUMA)	+= summit_32.o
-obj-$(CONFIG_X86_VSMP)		+= vsmp_64.o
+obj-y				+= vsmp_64.o
 obj-$(CONFIG_KPROBES)		+= kprobes.o
 obj-$(CONFIG_MODULES)		+= module_$(BITS).o
 obj-$(CONFIG_ACPI_SRAT) 	+= srat_32.o
 obj-$(CONFIG_EFI) 		+= efi.o efi_$(BITS).o efi_stub_$(BITS).o
 obj-$(CONFIG_DOUBLEFAULT) 	+= doublefault_32.o
+obj-$(CONFIG_KGDB)		+= kgdb.o
 obj-$(CONFIG_VM86)		+= vm86_32.o
 obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
 
@@ -89,7 +91,7 @@
 ###
 # 64 bit specific files
 ifeq ($(CONFIG_X86_64),y)
-        obj-y				+= genapic_64.o genapic_flat_64.o
+        obj-y				+= genapic_64.o genapic_flat_64.o genx2apic_uv_x.o
         obj-$(CONFIG_X86_PM_TIMER)	+= pmtimer_64.o
         obj-$(CONFIG_AUDIT)		+= audit_64.o
 
diff --git a/arch/x86/kernel/acpi/Makefile b/arch/x86/kernel/acpi/Makefile
index 19d3d6e..7335959 100644
--- a/arch/x86/kernel/acpi/Makefile
+++ b/arch/x86/kernel/acpi/Makefile
@@ -1,7 +1,14 @@
+subdir-				:= realmode
+
 obj-$(CONFIG_ACPI)		+= boot.o
-obj-$(CONFIG_ACPI_SLEEP)	+= sleep.o wakeup_$(BITS).o
+obj-$(CONFIG_ACPI_SLEEP)	+= sleep.o wakeup_rm.o wakeup_$(BITS).o
 
 ifneq ($(CONFIG_ACPI_PROCESSOR),)
 obj-y				+= cstate.o processor.o
 endif
 
+$(obj)/wakeup_rm.o:    $(obj)/realmode/wakeup.bin
+
+$(obj)/realmode/wakeup.bin: FORCE
+	$(Q)$(MAKE) $(build)=$(obj)/realmode $@
+
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 2cdc9de..057ccf1 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -39,6 +39,11 @@
 #include <asm/apic.h>
 #include <asm/io.h>
 #include <asm/mpspec.h>
+#include <asm/smp.h>
+
+#ifdef CONFIG_X86_LOCAL_APIC
+# include <mach_apic.h>
+#endif
 
 static int __initdata acpi_force = 0;
 
@@ -52,9 +57,7 @@
 #ifdef	CONFIG_X86_64
 
 #include <asm/proto.h>
-
-static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return 0; }
-
+#include <asm/genapic.h>
 
 #else				/* X86 */
 
@@ -111,7 +114,7 @@
 	if (!phys_addr || !size)
 		return NULL;
 
-	if (phys_addr+size <= (end_pfn_map << PAGE_SHIFT) + PAGE_SIZE)
+	if (phys_addr+size <= (max_pfn_mapped << PAGE_SHIFT) + PAGE_SIZE)
 		return __va(phys_addr);
 
 	return NULL;
@@ -237,6 +240,16 @@
 	return 0;
 }
 
+static void __cpuinit acpi_register_lapic(int id, u8 enabled)
+{
+	if (!enabled) {
+		++disabled_cpus;
+		return;
+	}
+
+	generic_processor_info(id, 0);
+}
+
 static int __init
 acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end)
 {
@@ -256,8 +269,26 @@
 	 * to not preallocating memory for all NR_CPUS
 	 * when we use CPU hotplug.
 	 */
-	mp_register_lapic(processor->id,	/* APIC ID */
-			  processor->lapic_flags & ACPI_MADT_ENABLED);	/* Enabled? */
+	acpi_register_lapic(processor->id,	/* APIC ID */
+			    processor->lapic_flags & ACPI_MADT_ENABLED);
+
+	return 0;
+}
+
+static int __init
+acpi_parse_sapic(struct acpi_subtable_header *header, const unsigned long end)
+{
+	struct acpi_madt_local_sapic *processor = NULL;
+
+	processor = (struct acpi_madt_local_sapic *)header;
+
+	if (BAD_MADT_ENTRY(processor, end))
+		return -EINVAL;
+
+	acpi_table_print_madt_entry(header);
+
+	acpi_register_lapic((processor->id << 8) | processor->eid, /* APIC ID */
+			    processor->lapic_flags & ACPI_MADT_ENABLED);
 
 	return 0;
 }
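
Local SAPIC entries carry an 8-bit id plus an 8-bit eid; the value handed to acpi_register_lapic() is their concatenation, so id 0x02 with eid 0x10 yields APIC ID (0x02 << 8) | 0x10 = 0x0210. As a sketch:

	/* Sketch of the ID composition used above. */
	static inline unsigned int sapic_apic_id(u8 id, u8 eid)
	{
		return (id << 8) | eid;	/* sapic_apic_id(0x02, 0x10) == 0x0210 */
	}
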
@@ -300,6 +331,8 @@
 
 #ifdef CONFIG_X86_IO_APIC
 
+struct mp_ioapic_routing mp_ioapic_routing[MAX_IO_APICS];
+
 static int __init
 acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end)
 {
@@ -532,7 +565,7 @@
 	buffer.pointer = NULL;
 
 	tmp_map = cpu_present_map;
-	mp_register_lapic(physid, lapic->lapic_flags & ACPI_MADT_ENABLED);
+	acpi_register_lapic(physid, lapic->lapic_flags & ACPI_MADT_ENABLED);
 
 	/*
 	 * If mp_register_lapic successfully generates a new logical cpu
@@ -732,6 +765,16 @@
  * Parse LAPIC entries in MADT
  * returns 0 on success, < 0 on error
  */
+
+static void __init acpi_register_lapic_address(unsigned long address)
+{
+	mp_lapic_addr = address;
+
+	set_fixmap_nocache(FIX_APIC_BASE, address);
+	if (boot_cpu_physical_apicid == -1U)
+		boot_cpu_physical_apicid  = GET_APIC_ID(read_apic_id());
+}
+
 static int __init acpi_parse_madt_lapic_entries(void)
 {
 	int count;
@@ -753,10 +796,14 @@
 		return count;
 	}
 
-	mp_register_lapic_address(acpi_lapic_addr);
+	acpi_register_lapic_address(acpi_lapic_addr);
 
-	count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC, acpi_parse_lapic,
-				      MAX_APICS);
+	count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC,
+				      acpi_parse_sapic, MAX_APICS);
+
+	if (!count)
+		count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC,
+					      acpi_parse_lapic, MAX_APICS);
 	if (!count) {
 		printk(KERN_ERR PREFIX "No LAPIC entries present\n");
 		/* TBD: Cleanup to allow fallback to MPS */
diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
new file mode 100644
index 0000000..0929008
--- /dev/null
+++ b/arch/x86/kernel/acpi/realmode/Makefile
@@ -0,0 +1,57 @@
+#
+# arch/x86/kernel/acpi/realmode/Makefile
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+
+targets		:= wakeup.bin wakeup.elf
+
+wakeup-y	+= wakeup.o wakemain.o video-mode.o copy.o
+
+# The link order of the video-*.o modules can matter.  In particular,
+# video-vga.o *must* be listed first, followed by video-vesa.o.
+# Hardware-specific drivers should follow in the order they should be
+# probed, and video-bios.o should typically be last.
+wakeup-y	+= video-vga.o
+wakeup-y	+= video-vesa.o
+wakeup-y	+= video-bios.o
+
+targets		+= $(wakeup-y)
+
+bootsrc		:= $(src)/../../../boot
+
+# ---------------------------------------------------------------------------
+
+# How to compile the 16-bit code.  Note we always compile for -march=i386,
+# that way we can complain to the user if the CPU is insufficient.
+# Compile with _SETUP since this is similar to the boot-time setup code.
+KBUILD_CFLAGS	:= $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
+		   -I$(srctree)/$(bootsrc) \
+		   $(cflags-y) \
+		   -Wall -Wstrict-prototypes \
+		   -march=i386 -mregparm=3 \
+		   -include $(srctree)/$(bootsrc)/code16gcc.h \
+		   -fno-strict-aliasing -fomit-frame-pointer \
+		   $(call cc-option, -ffreestanding) \
+		   $(call cc-option, -fno-toplevel-reorder,\
+			$(call cc-option, -fno-unit-at-a-time)) \
+		   $(call cc-option, -fno-stack-protector) \
+		   $(call cc-option, -mpreferred-stack-boundary=2)
+KBUILD_CFLAGS	+= $(call cc-option, -m32)
+KBUILD_AFLAGS	:= $(KBUILD_CFLAGS) -D__ASSEMBLY__
+
+WAKEUP_OBJS = $(addprefix $(obj)/,$(wakeup-y))
+
+LDFLAGS_wakeup.elf	:= -T
+
+CPPFLAGS_wakeup.lds += -P -C
+
+$(obj)/wakeup.elf: $(src)/wakeup.lds $(WAKEUP_OBJS) FORCE
+	$(call if_changed,ld)
+
+OBJCOPYFLAGS_wakeup.bin	:= -O binary
+
+$(obj)/wakeup.bin: $(obj)/wakeup.elf FORCE
+	$(call if_changed,objcopy)
diff --git a/arch/x86/kernel/acpi/realmode/copy.S b/arch/x86/kernel/acpi/realmode/copy.S
new file mode 100644
index 0000000..dc59ebe
--- /dev/null
+++ b/arch/x86/kernel/acpi/realmode/copy.S
@@ -0,0 +1 @@
+#include "../../../boot/copy.S"
diff --git a/arch/x86/kernel/acpi/realmode/video-bios.c b/arch/x86/kernel/acpi/realmode/video-bios.c
new file mode 100644
index 0000000..7deabc1
--- /dev/null
+++ b/arch/x86/kernel/acpi/realmode/video-bios.c
@@ -0,0 +1 @@
+#include "../../../boot/video-bios.c"
diff --git a/arch/x86/kernel/acpi/realmode/video-mode.c b/arch/x86/kernel/acpi/realmode/video-mode.c
new file mode 100644
index 0000000..328ad20
--- /dev/null
+++ b/arch/x86/kernel/acpi/realmode/video-mode.c
@@ -0,0 +1 @@
+#include "../../../boot/video-mode.c"
diff --git a/arch/x86/kernel/acpi/realmode/video-vesa.c b/arch/x86/kernel/acpi/realmode/video-vesa.c
new file mode 100644
index 0000000..9dbb967
--- /dev/null
+++ b/arch/x86/kernel/acpi/realmode/video-vesa.c
@@ -0,0 +1 @@
+#include "../../../boot/video-vesa.c"
diff --git a/arch/x86/kernel/acpi/realmode/video-vga.c b/arch/x86/kernel/acpi/realmode/video-vga.c
new file mode 100644
index 0000000..bcc8125
--- /dev/null
+++ b/arch/x86/kernel/acpi/realmode/video-vga.c
@@ -0,0 +1 @@
+#include "../../../boot/video-vga.c"
diff --git a/arch/x86/kernel/acpi/realmode/wakemain.c b/arch/x86/kernel/acpi/realmode/wakemain.c
new file mode 100644
index 0000000..883962d
--- /dev/null
+++ b/arch/x86/kernel/acpi/realmode/wakemain.c
@@ -0,0 +1,81 @@
+#include "wakeup.h"
+#include "boot.h"
+
+static void udelay(int loops)
+{
+	while (loops--)
+		io_delay();	/* Approximately 1 us */
+}
+
+static void beep(unsigned int hz)
+{
+	u8 enable;
+
+	if (!hz) {
+		enable = 0x00;		/* Turn off speaker */
+	} else {
+		u16 div = 1193181/hz;
+
+		outb(0xb6, 0x43);	/* Ctr 2, squarewave, load, binary */
+		io_delay();
+		outb(div, 0x42);	/* LSB of counter */
+		io_delay();
+		outb(div >> 8, 0x42);	/* MSB of counter */
+		io_delay();
+
+		enable = 0x03;		/* Turn on speaker */
+	}
+	inb(0x61);		/* Dummy read of System Control Port B */
+	io_delay();
+	outb(enable, 0x61);	/* Enable timer 2 output to speaker */
+	io_delay();
+}
+
+#define DOT_HZ		880
+#define DASH_HZ		587
+#define US_PER_DOT	125000
+
+/* Okay, this is totally silly, but it's kind of fun. */
+static void send_morse(const char *pattern)
+{
+	char s;
+
+	while ((s = *pattern++)) {
+		switch (s) {
+		case '.':
+			beep(DOT_HZ);
+			udelay(US_PER_DOT);
+			beep(0);
+			udelay(US_PER_DOT);
+			break;
+		case '-':
+			beep(DASH_HZ);
+			udelay(US_PER_DOT * 3);
+			beep(0);
+			udelay(US_PER_DOT);
+			break;
+		default:	/* Assume it's a space */
+			udelay(US_PER_DOT * 3);
+			break;
+		}
+	}
+}
+
+void main(void)
+{
+	/* Kill machine if structures are wrong */
+	if (wakeup_header.real_magic != 0x12345678)
+		while (1);
+
+	if (wakeup_header.realmode_flags & 4)
+		send_morse("...-");
+
+	if (wakeup_header.realmode_flags & 1)
+		asm volatile("lcallw   $0xc000,$3");
+
+	if (wakeup_header.realmode_flags & 2) {
+		/* Need to call BIOS */
+		probe_cards(0);
+		set_mode(wakeup_header.video_mode);
+	}
+}
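
The beep() divisor math programs PIT channel 2: the i8254 input clock is nominally 1193181 Hz, so for the 880 Hz morse "dot" tone the counter is 1193181 / 880 = 1355 (0x054b), of which the LSB 0x4b and then the MSB 0x05 are written to port 0x42; the actual tone comes out at 1193181 / 1355, roughly 880.6 Hz. As a sketch:

	/* Illustrative helper; wakemain.c computes this inline in beep(). */
	static u16 pit_divisor(unsigned int hz)
	{
		return 1193181 / hz;	/* i8254 input clock / target frequency */
	}
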
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
new file mode 100644
index 0000000..f9b77fb
--- /dev/null
+++ b/arch/x86/kernel/acpi/realmode/wakeup.S
@@ -0,0 +1,113 @@
+/*
+ * ACPI wakeup real mode startup stub
+ */
+#include <asm/segment.h>
+#include <asm/msr-index.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+	.code16
+	.section ".header", "a"
+
+/* This should match the structure in wakeup.h */
+		.globl	wakeup_header
+wakeup_header:
+video_mode:	.short	0	/* Video mode number */
+pmode_return:	.byte	0x66, 0xea	/* ljmpl */
+		.long	0	/* offset goes here */
+		.short	__KERNEL_CS
+pmode_cr0:	.long	0	/* Saved %cr0 */
+pmode_cr3:	.long	0	/* Saved %cr3 */
+pmode_cr4:	.long	0	/* Saved %cr4 */
+pmode_efer:	.quad	0	/* Saved EFER */
+pmode_gdt:	.quad	0
+realmode_flags:	.long	0
+real_magic:	.long	0
+trampoline_segment:	.word 0
+signature:	.long	0x51ee1111
+
+	.text
+	.globl	_start
+	.code16
+wakeup_code:
+_start:
+	cli
+	cld
+
+	/* Set up segments */
+	movw	%cs, %ax
+	movw	%ax, %ds
+	movw	%ax, %es
+	movw	%ax, %ss
+
+	movl	$wakeup_stack_end, %esp
+
+	/* Clear the EFLAGS */
+	pushl	$0
+	popfl
+
+	/* Check header signature... */
+	movl	signature, %eax
+	cmpl	$0x51ee1111, %eax
+	jne	bogus_real_magic
+
+	/* Check we really have everything... */
+	movl	end_signature, %eax
+	cmpl	$0x65a22c82, %eax
+	jne	bogus_real_magic
+
+	/* Call the C code */
+	calll	main
+
+	/* Do any other stuff... */
+
+#ifndef CONFIG_64BIT
+	/* This could also be done in C code... */
+	movl	pmode_cr3, %eax
+	movl	%eax, %cr3
+
+	movl	pmode_cr4, %ecx
+	jecxz	1f
+	movl	%ecx, %cr4
+1:
+	movl	pmode_efer, %eax
+	movl	pmode_efer + 4, %edx
+	movl	%eax, %ecx
+	orl	%edx, %ecx
+	jz	1f
+	movl	$0xc0000080, %ecx
+	wrmsr
+1:
+
+	lgdtl	pmode_gdt
+
+	/* This really couldn't... */
+	movl	pmode_cr0, %eax
+	movl	%eax, %cr0
+	jmp	pmode_return
+#else
+	pushw	$0
+	pushw	trampoline_segment
+	pushw	$0
+	lret
+#endif
+
+bogus_real_magic:
+1:
+	hlt
+	jmp	1b
+
+	.data
+	.balign	4
+	.globl	HEAP, heap_end
+HEAP:
+	.long	wakeup_heap
+heap_end:
+	.long	wakeup_stack
+
+	.bss
+wakeup_heap:
+	.space	2048
+wakeup_stack:
+	.space	2048
+wakeup_stack_end:
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.h b/arch/x86/kernel/acpi/realmode/wakeup.h
new file mode 100644
index 0000000..ef8166f
--- /dev/null
+++ b/arch/x86/kernel/acpi/realmode/wakeup.h
@@ -0,0 +1,36 @@
+/*
+ * Definitions for the wakeup data structure at the head of the
+ * wakeup code.
+ */
+
+#ifndef ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H
+#define ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+
+/* This must match data at wakeup.S */
+struct wakeup_header {
+	u16 video_mode;		/* Video mode number */
+	u16 _jmp1;		/* ljmpl opcode, 32-bit only */
+	u32 pmode_entry;	/* Protected mode resume point, 32-bit only */
+	u16 _jmp2;		/* CS value, 32-bit only */
+	u32 pmode_cr0;		/* Protected mode cr0 */
+	u32 pmode_cr3;		/* Protected mode cr3 */
+	u32 pmode_cr4;		/* Protected mode cr4 */
+	u32 pmode_efer_low;	/* Protected mode EFER */
+	u32 pmode_efer_high;
+	u64 pmode_gdt;
+	u32 realmode_flags;
+	u32 real_magic;
+	u16 trampoline_segment;	/* segment with trampoline code, 64-bit only */
+	u32 signature;		/* To check we have correct structure */
+} __attribute__((__packed__));
+
+extern struct wakeup_header wakeup_header;
+#endif
+
+#define HEADER_OFFSET 0x3f00
+#define WAKEUP_SIZE   0x4000
+
+#endif /* ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H */
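
Since the C struct has to track the hand-written layout in wakeup.S byte for byte, it is worth pinning the offsets at compile time. A sketch using a local static-assert macro (needs <stddef.h>; the offsets follow from the packed field list above):

	#define WAKEUP_CHECK(expr) extern char wakeup_layout_check[(expr) ? 1 : -1]

	WAKEUP_CHECK(offsetof(struct wakeup_header, pmode_entry) == 4);
	WAKEUP_CHECK(offsetof(struct wakeup_header, pmode_cr0)   == 10);
	WAKEUP_CHECK(offsetof(struct wakeup_header, pmode_gdt)   == 30);
	WAKEUP_CHECK(offsetof(struct wakeup_header, signature)   == 48);
	WAKEUP_CHECK(sizeof(struct wakeup_header) == 52);
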
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.lds.S b/arch/x86/kernel/acpi/realmode/wakeup.lds.S
new file mode 100644
index 0000000..22fab6c
--- /dev/null
+++ b/arch/x86/kernel/acpi/realmode/wakeup.lds.S
@@ -0,0 +1,61 @@
+/*
+ * wakeup.ld
+ *
+ * Linker script for the real-mode wakeup code
+ */
+#undef i386
+#include "wakeup.h"
+
+OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
+OUTPUT_ARCH(i386)
+ENTRY(_start)
+
+SECTIONS
+{
+	. = HEADER_OFFSET;
+	.header : {
+		 *(.header)
+	}
+
+	. = 0;
+	.text : {
+		 *(.text*)
+	}
+
+	. = ALIGN(16);
+	.rodata : {
+		*(.rodata*)
+	}
+
+	.videocards : {
+		video_cards = .;
+		*(.videocards)
+		video_cards_end = .;
+	}
+
+	. = ALIGN(16);
+	.data : {
+		 *(.data*)
+	}
+
+	.signature : {
+		end_signature = .;
+		LONG(0x65a22c82)
+	}
+
+	. = ALIGN(16);
+	.bss :	{
+		__bss_start = .;
+		*(.bss)
+		__bss_end = .;
+	}
+
+	. = ALIGN(16);
+	_end = .;
+
+	/DISCARD/ : {
+		*(.note*)
+	}
+
+	. = ASSERT(_end <= WAKEUP_SIZE, "Wakeup too big!");
+}
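
The script lays the blob out as a flat image no larger than WAKEUP_SIZE (0x4000, enforced by the ASSERT), with code linked at offset 0 and the header pinned at HEADER_OFFSET (0x3f00), so the kernel can always find the header at a fixed displacement after copying the binary to low memory. A sketch mirroring sleep.c further down ('realmode_base' stands in for wherever the blob was copied):

	static struct wakeup_header *find_wakeup_header(unsigned long realmode_base)
	{
		struct wakeup_header *hdr;

		hdr = (struct wakeup_header *)(realmode_base + HEADER_OFFSET);
		if (hdr->signature != 0x51ee1111)
			return NULL;	/* wrong or corrupted wakeup binary */
		return hdr;
	}
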
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 6bc815c..afc25ee 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -10,30 +10,72 @@
 #include <linux/dmi.h>
 #include <linux/cpumask.h>
 
-#include <asm/smp.h>
+#include "realmode/wakeup.h"
+#include "sleep.h"
+
+unsigned long acpi_wakeup_address;
+unsigned long acpi_realmode_flags;
 
 /* address in low memory of the wakeup routine. */
-unsigned long acpi_wakeup_address = 0;
-unsigned long acpi_realmode_flags;
-extern char wakeup_start, wakeup_end;
+static unsigned long acpi_realmode;
 
-extern unsigned long acpi_copy_wakeup_routine(unsigned long);
+#ifdef CONFIG_64BIT
+static char temp_stack[10240];
+#endif
 
 /**
  * acpi_save_state_mem - save kernel state
  *
  * Create an identity mapped page table and copy the wakeup routine to
  * low memory.
+ *
+ * Note that this is too late to change acpi_wakeup_address.
  */
 int acpi_save_state_mem(void)
 {
-	if (!acpi_wakeup_address) {
-		printk(KERN_ERR "Could not allocate memory during boot, S3 disabled\n");
+	struct wakeup_header *header;
+
+	if (!acpi_realmode) {
+		printk(KERN_ERR "Could not allocate memory during boot, "
+		       "S3 disabled\n");
 		return -ENOMEM;
 	}
-	memcpy((void *)acpi_wakeup_address, &wakeup_start,
-	       &wakeup_end - &wakeup_start);
-	acpi_copy_wakeup_routine(acpi_wakeup_address);
+	memcpy((void *)acpi_realmode, &wakeup_code_start, WAKEUP_SIZE);
+
+	header = (struct wakeup_header *)(acpi_realmode + HEADER_OFFSET);
+	if (header->signature != 0x51ee1111) {
+		printk(KERN_ERR "wakeup header does not match\n");
+		return -EINVAL;
+	}
+
+	header->video_mode = saved_video_mode;
+
+#ifndef CONFIG_64BIT
+	store_gdt((struct desc_ptr *)&header->pmode_gdt);
+
+	header->pmode_efer_low = nx_enabled;
+	if (header->pmode_efer_low & 1) {
+		/* This is strange; why not always save EFER? */
+		rdmsr(MSR_EFER, header->pmode_efer_low,
+			header->pmode_efer_high);
+	}
+#endif /* !CONFIG_64BIT */
+
+	header->pmode_cr0 = read_cr0();
+	header->pmode_cr4 = read_cr4();
+	header->realmode_flags = acpi_realmode_flags;
+	header->real_magic = 0x12345678;
+
+#ifndef CONFIG_64BIT
+	header->pmode_entry = (u32)&wakeup_pmode_return;
+	header->pmode_cr3 = (u32)(swsusp_pg_dir - __PAGE_OFFSET);
+	saved_magic = 0x12345678;
+#else /* CONFIG_64BIT */
+	header->trampoline_segment = setup_trampoline() >> 4;
+	init_rsp = (unsigned long)temp_stack + 4096;
+	initial_code = (unsigned long)wakeup_long64;
+	saved_magic = 0x123456789abcdef0;
+#endif /* CONFIG_64BIT */
 
 	return 0;
 }
@@ -56,15 +98,20 @@
  */
 void __init acpi_reserve_bootmem(void)
 {
-	if ((&wakeup_end - &wakeup_start) > PAGE_SIZE*2) {
+	if ((&wakeup_code_end - &wakeup_code_start) > WAKEUP_SIZE) {
 		printk(KERN_ERR
 		       "ACPI: Wakeup code way too big, S3 disabled.\n");
 		return;
 	}
 
-	acpi_wakeup_address = (unsigned long)alloc_bootmem_low(PAGE_SIZE*2);
-	if (!acpi_wakeup_address)
+	acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
+
+	if (!acpi_realmode) {
 		printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
+		return;
+	}
+
+	acpi_wakeup_address = acpi_realmode;
 }
 
 
diff --git a/arch/x86/kernel/acpi/sleep.h b/arch/x86/kernel/acpi/sleep.h
new file mode 100644
index 0000000..adbcbaa
--- /dev/null
+++ b/arch/x86/kernel/acpi/sleep.h
@@ -0,0 +1,16 @@
+/*
+ *	Variables and functions used by the code in sleep.c
+ */
+
+#include <asm/trampoline.h>
+
+extern char wakeup_code_start, wakeup_code_end;
+
+extern unsigned long saved_video_mode;
+extern long saved_magic;
+
+extern int wakeup_pmode_return;
+extern char swsusp_pg_dir[PAGE_SIZE];
+
+extern unsigned long acpi_copy_wakeup_routine(unsigned long);
+extern void wakeup_long64(void);
diff --git a/arch/x86/kernel/acpi/sleep_32.c b/arch/x86/kernel/acpi/sleep_32.c
deleted file mode 100644
index 63fe552..0000000
--- a/arch/x86/kernel/acpi/sleep_32.c
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * sleep.c - x86-specific ACPI sleep support.
- *
- *  Copyright (C) 2001-2003 Patrick Mochel
- *  Copyright (C) 2001-2003 Pavel Machek <pavel@suse.cz>
- */
-
-#include <linux/acpi.h>
-#include <linux/bootmem.h>
-#include <linux/dmi.h>
-#include <linux/cpumask.h>
-
-#include <asm/smp.h>
-
-/* Ouch, we want to delete this. We already have better version in userspace, in
-   s2ram from suspend.sf.net project */
-static __init int reset_videomode_after_s3(const struct dmi_system_id *d)
-{
-	acpi_realmode_flags |= 2;
-	return 0;
-}
-
-static __initdata struct dmi_system_id acpisleep_dmi_table[] = {
-	{			/* Reset video mode after returning from ACPI S3 sleep */
-	 .callback = reset_videomode_after_s3,
-	 .ident = "Toshiba Satellite 4030cdt",
-	 .matches = {
-		     DMI_MATCH(DMI_PRODUCT_NAME, "S4030CDT/4.3"),
-		     },
-	 },
-	{}
-};
-
-static int __init acpisleep_dmi_init(void)
-{
-	dmi_check_system(acpisleep_dmi_table);
-	return 0;
-}
-
-core_initcall(acpisleep_dmi_init);
diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
index f53e327..a12e6a9 100644
--- a/arch/x86/kernel/acpi/wakeup_32.S
+++ b/arch/x86/kernel/acpi/wakeup_32.S
@@ -3,178 +3,12 @@
 #include <asm/segment.h>
 #include <asm/page.h>
 
-#
-# wakeup_code runs in real mode, and at unknown address (determined at run-time).
-# Therefore it must only use relative jumps/calls. 
-#
-# Do we need to deal with A20? It is okay: ACPI specs says A20 must be enabled
-#
-# If physical address of wakeup_code is 0x12345, BIOS should call us with
-# cs = 0x1234, eip = 0x05
-#
-
-#define BEEP \
-	inb	$97, %al; 	\
-	outb	%al, $0x80; 	\
-	movb	$3, %al; 	\
-	outb	%al, $97; 	\
-	outb	%al, $0x80; 	\
-	movb	$-74, %al; 	\
-	outb	%al, $67; 	\
-	outb	%al, $0x80; 	\
-	movb	$-119, %al; 	\
-	outb	%al, $66; 	\
-	outb	%al, $0x80; 	\
-	movb	$15, %al; 	\
-	outb	%al, $66;
-
-ALIGN
-	.align	4096
-ENTRY(wakeup_start)
-wakeup_code:
-	wakeup_code_start = .
-	.code16
-
-	cli
-	cld
-
-	# setup data segment
-	movw	%cs, %ax
-	movw	%ax, %ds					# Make ds:0 point to wakeup_start
-	movw	%ax, %ss
-
-	testl   $4, realmode_flags - wakeup_code
-	jz      1f
-	BEEP
-1:
-	mov	$(wakeup_stack - wakeup_code), %sp		# Private stack is needed for ASUS board
-
-	pushl	$0						# Kill any dangerous flags
-	popfl
-
-	movl	real_magic - wakeup_code, %eax
-	cmpl	$0x12345678, %eax
-	jne	bogus_real_magic
-
-	testl	$1, realmode_flags - wakeup_code
-	jz	1f
-	lcall   $0xc000,$3
-	movw	%cs, %ax
-	movw	%ax, %ds					# Bios might have played with that
-	movw	%ax, %ss
-1:
-
-	testl	$2, realmode_flags - wakeup_code
-	jz	1f
-	mov	video_mode - wakeup_code, %ax
-	call	mode_set
-1:
-
-	# set up page table
-	movl	$swsusp_pg_dir-__PAGE_OFFSET, %eax
-	movl	%eax, %cr3
-
-	testl	$1, real_efer_save_restore - wakeup_code
-	jz	4f
-	# restore efer setting
-	movl	real_save_efer_edx - wakeup_code, %edx
-	movl	real_save_efer_eax - wakeup_code, %eax
-	mov     $0xc0000080, %ecx
-	wrmsr
-4:
-	# make sure %cr4 is set correctly (features, etc)
-	movl	real_save_cr4 - wakeup_code, %eax
-	movl	%eax, %cr4
-	
-	# need a gdt -- use lgdtl to force 32-bit operands, in case
-	# the GDT is located past 16 megabytes.
-	lgdtl	real_save_gdt - wakeup_code
-
-	movl	real_save_cr0 - wakeup_code, %eax
-	movl	%eax, %cr0
-	jmp 1f
-1:
-	movl	real_magic - wakeup_code, %eax
-	cmpl	$0x12345678, %eax
-	jne	bogus_real_magic
-
-	testl   $8, realmode_flags - wakeup_code
-	jz      1f
-	BEEP
-1:
-	ljmpl	$__KERNEL_CS, $wakeup_pmode_return
-
-real_save_gdt:	.word 0
-		.long 0
-real_save_cr0:	.long 0
-real_save_cr3:	.long 0
-real_save_cr4:	.long 0
-real_magic:	.long 0
-video_mode:	.long 0
-realmode_flags:	.long 0
-real_efer_save_restore:	.long 0
-real_save_efer_edx: 	.long 0
-real_save_efer_eax: 	.long 0
-
-bogus_real_magic:
-	jmp bogus_real_magic
-
-/* This code uses an extended set of video mode numbers. These include:
- * Aliases for standard modes
- *	NORMAL_VGA (-1)
- *	EXTENDED_VGA (-2)
- *	ASK_VGA (-3)
- * Video modes numbered by menu position -- NOT RECOMMENDED because of lack
- * of compatibility when extending the table. These are between 0x00 and 0xff.
- */
-#define VIDEO_FIRST_MENU 0x0000
-
-/* Standard BIOS video modes (BIOS number + 0x0100) */
-#define VIDEO_FIRST_BIOS 0x0100
-
-/* VESA BIOS video modes (VESA number + 0x0200) */
-#define VIDEO_FIRST_VESA 0x0200
-
-/* Video7 special modes (BIOS number + 0x0900) */
-#define VIDEO_FIRST_V7 0x0900
-
-# Setting of user mode (AX=mode ID) => CF=success
-
-# For now, we only handle VESA modes (0x0200..0x03ff).  To handle other
-# modes, we should probably compile in the video code from the boot
-# directory.
-mode_set:
-	movw	%ax, %bx
-	subb	$VIDEO_FIRST_VESA>>8, %bh
-	cmpb	$2, %bh
-	jb	check_vesa
-
-setbad:
-	clc
-	ret
-
-check_vesa:
-	orw	$0x4000, %bx			# Use linear frame buffer
-	movw	$0x4f02, %ax			# VESA BIOS mode set call
-	int	$0x10
-	cmpw	$0x004f, %ax			# AL=4f if implemented
-	jnz	setbad				# AH=0 if OK
-
-	stc
-	ret
+# Copyright 2003, 2008 Pavel Machek <pavel@suse.cz>, distribute under GPLv2
 
 	.code32
 	ALIGN
 
-.org	0x800
-wakeup_stack_begin:	# Stack grows down
-
-.org	0xff0		# Just below end of page
-wakeup_stack:
-ENTRY(wakeup_end)
-	
-.org	0x1000
-
+ENTRY(wakeup_pmode_return)
 wakeup_pmode_return:
 	movw	$__KERNEL_DS, %ax
 	movw	%ax, %ss
@@ -187,7 +21,7 @@
 	lgdt	saved_gdt
 	lidt	saved_idt
 	lldt	saved_ldt
-	ljmp	$(__KERNEL_CS),$1f
+	ljmp	$(__KERNEL_CS), $1f
 1:
 	movl	%cr3, %eax
 	movl	%eax, %cr3
@@ -201,82 +35,41 @@
 	jne	bogus_magic
 
 	# jump to place where we left off
-	movl	saved_eip,%eax
+	movl	saved_eip, %eax
 	jmp	*%eax
 
 bogus_magic:
 	jmp	bogus_magic
 
 
-##
-# acpi_copy_wakeup_routine
-#
-# Copy the above routine to low memory.
-#
-# Parameters:
-# %eax:	place to copy wakeup routine to
-#
-# Returned address is location of code in low memory (past data and stack)
-#
-ENTRY(acpi_copy_wakeup_routine)
 
-	pushl	%ebx
+save_registers:
 	sgdt	saved_gdt
 	sidt	saved_idt
 	sldt	saved_ldt
 	str	saved_tss
 
-	movl	nx_enabled, %edx
-	movl	%edx, real_efer_save_restore - wakeup_start (%eax)
-	testl	$1, real_efer_save_restore - wakeup_start (%eax)
-	jz	2f
-	# save efer setting
-	pushl	%eax
-	movl	%eax, %ebx
-	mov     $0xc0000080, %ecx
-	rdmsr
-	movl	%edx, real_save_efer_edx - wakeup_start (%ebx)
-	movl	%eax, real_save_efer_eax - wakeup_start (%ebx)
-	popl	%eax
-2:
-
-	movl    %cr3, %edx
-	movl    %edx, real_save_cr3 - wakeup_start (%eax)
-	movl    %cr4, %edx
-	movl    %edx, real_save_cr4 - wakeup_start (%eax)
-	movl	%cr0, %edx
-	movl	%edx, real_save_cr0 - wakeup_start (%eax)
-	sgdt    real_save_gdt - wakeup_start (%eax)
-
-	movl	saved_videomode, %edx
-	movl	%edx, video_mode - wakeup_start (%eax)
-	movl	acpi_realmode_flags, %edx
-	movl	%edx, realmode_flags - wakeup_start (%eax)
-	movl	$0x12345678, real_magic - wakeup_start (%eax)
-	movl	$0x12345678, saved_magic
-	popl	%ebx
-	ret
-
-save_registers:
 	leal	4(%esp), %eax
 	movl	%eax, saved_context_esp
-	movl %ebx, saved_context_ebx
-	movl %ebp, saved_context_ebp
-	movl %esi, saved_context_esi
-	movl %edi, saved_context_edi
-	pushfl ; popl saved_context_eflags
+	movl	%ebx, saved_context_ebx
+	movl	%ebp, saved_context_ebp
+	movl	%esi, saved_context_esi
+	movl	%edi, saved_context_edi
+	pushfl
+	popl	saved_context_eflags
 
-	movl $ret_point, saved_eip
+	movl	$ret_point, saved_eip
 	ret
 
 
 restore_registers:
-	movl saved_context_ebp, %ebp
-	movl saved_context_ebx, %ebx
-	movl saved_context_esi, %esi
-	movl saved_context_edi, %edi
-	pushl saved_context_eflags ; popfl
-	ret	
+	movl	saved_context_ebp, %ebp
+	movl	saved_context_ebx, %ebx
+	movl	saved_context_esi, %esi
+	movl	saved_context_edi, %edi
+	pushl	saved_context_eflags
+	popfl
+	ret
 
 ENTRY(do_suspend_lowlevel)
 	call	save_processor_state
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index 2e1b9e0..bcc2934 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -7,191 +7,18 @@
 #include <asm/asm-offsets.h>
 
 # Copyright 2003 Pavel Machek <pavel@suse.cz>, distribute under GPLv2
-#
-# wakeup_code runs in real mode, and at unknown address (determined at run-time).
-# Therefore it must only use relative jumps/calls. 
-#
-# Do we need to deal with A20? It is okay: ACPI specs says A20 must be enabled
-#
-# If physical address of wakeup_code is 0x12345, BIOS should call us with
-# cs = 0x1234, eip = 0x05
-#
-
-#define BEEP \
-	inb	$97, %al; 	\
-	outb	%al, $0x80; 	\
-	movb	$3, %al; 	\
-	outb	%al, $97; 	\
-	outb	%al, $0x80; 	\
-	movb	$-74, %al; 	\
-	outb	%al, $67; 	\
-	outb	%al, $0x80; 	\
-	movb	$-119, %al; 	\
-	outb	%al, $66; 	\
-	outb	%al, $0x80; 	\
-	movb	$15, %al; 	\
-	outb	%al, $66;
-
-
-ALIGN
-	.align	16
-ENTRY(wakeup_start)
-wakeup_code:
-	wakeup_code_start = .
-	.code16
-
-# Running in *copy* of this code, somewhere in low 1MB.
-
-	cli
-	cld
-	# setup data segment
-	movw	%cs, %ax
-	movw	%ax, %ds		# Make ds:0 point to wakeup_start
-	movw	%ax, %ss
-
-	# Data segment must be set up before we can see whether to beep.
-	testl   $4, realmode_flags - wakeup_code
-	jz      1f
-	BEEP
-1:
-
-					# Private stack is needed for ASUS board
-	mov	$(wakeup_stack - wakeup_code), %sp
-
-	pushl	$0			# Kill any dangerous flags
-	popfl
-
-	movl	real_magic - wakeup_code, %eax
-	cmpl	$0x12345678, %eax
-	jne	bogus_real_magic
-
-	testl	$1, realmode_flags - wakeup_code
-	jz	1f
-	lcall   $0xc000,$3
-	movw	%cs, %ax
-	movw	%ax, %ds		# Bios might have played with that
-	movw	%ax, %ss
-1:
-
-	testl	$2, realmode_flags - wakeup_code
-	jz	1f
-	mov	video_mode - wakeup_code, %ax
-	call	mode_set
-1:
-
-	mov	%ds, %ax			# Find 32bit wakeup_code addr
-	movzx   %ax, %esi			# (Convert %ds:gdt to a liner ptr)
-	shll    $4, %esi
-						# Fix up the vectors
-	addl    %esi, wakeup_32_vector - wakeup_code
-	addl    %esi, wakeup_long64_vector - wakeup_code
-	addl    %esi, gdt_48a + 2 - wakeup_code # Fixup the gdt pointer
-
-	lidtl	%ds:idt_48a - wakeup_code
-	lgdtl	%ds:gdt_48a - wakeup_code	# load gdt with whatever is
-						# appropriate
-
-	movl	$1, %eax			# protected mode (PE) bit
-	lmsw	%ax				# This is it!
-	jmp	1f
-1:
-
-	ljmpl   *(wakeup_32_vector - wakeup_code)
-
-	.balign 4
-wakeup_32_vector:
-	.long   wakeup_32 - wakeup_code
-	.word   __KERNEL32_CS, 0
-
-	.code32
-wakeup_32:
-# Running in this code, but at low address; paging is not yet turned on.
-
-	movl	$__KERNEL_DS, %eax
-	movl	%eax, %ds
-
-	/*
-	 * Prepare for entering 64bits mode
-	 */
-
-	/* Enable PAE */
-	xorl	%eax, %eax
-	btsl	$5, %eax
-	movl	%eax, %cr4
-
-	/* Setup early boot stage 4 level pagetables */
-	leal    (wakeup_level4_pgt - wakeup_code)(%esi), %eax
-	movl	%eax, %cr3
-
-        /* Check if nx is implemented */
-        movl    $0x80000001, %eax
-        cpuid
-        movl    %edx,%edi
-
-	/* Enable Long Mode */
-	xorl    %eax, %eax
-	btsl	$_EFER_LME, %eax
-
-	/* No Execute supported? */
-	btl	$20,%edi
-	jnc     1f
-	btsl	$_EFER_NX, %eax
-				
-	/* Make changes effective */
-1:	movl    $MSR_EFER, %ecx
-	xorl    %edx, %edx
-	wrmsr
-
-	xorl	%eax, %eax
-	btsl	$31, %eax			/* Enable paging and in turn activate Long Mode */
-	btsl	$0, %eax			/* Enable protected mode */
-
-	/* Make changes effective */
-	movl	%eax, %cr0
-
-	/* At this point:
-		CR4.PAE must be 1
-		CS.L must be 0
-		CR3 must point to PML4
-		Next instruction must be a branch
-		This must be on identity-mapped page
-	*/
-	/*
-	 * At this point we're in long mode but in 32bit compatibility mode
-	 * with EFER.LME = 1, CS.L = 0, CS.D = 1 (and in turn
-	 * EFER.LMA = 1). Now we want to jump in 64bit mode, to do that we load
-	 * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
-	 */
-
-	/* Finally jump in 64bit mode */
-        ljmp    *(wakeup_long64_vector - wakeup_code)(%esi)
-
-	.balign 4
-wakeup_long64_vector:
-	.long   wakeup_long64 - wakeup_code
-	.word   __KERNEL_CS, 0
 
 .code64
-
-	/* Hooray, we are in Long 64-bit mode (but still running in
-	 * low memory)
-	 */
-wakeup_long64:
 	/*
-	 * We must switch to a new descriptor in kernel space for the GDT
-	 * because soon the kernel won't have access anymore to the userspace
-	 * addresses where we're currently running on. We have to do that here
-	 * because in 32bit we couldn't load a 64bit linear address.
+	 * Hooray, we are in Long 64-bit mode (but still running in low memory)
 	 */
-	lgdt	cpu_gdt_descr
+ENTRY(wakeup_long64)
+wakeup_long64:
+	movq	saved_magic, %rax
+	movq	$0x123456789abcdef0, %rdx
+	cmpq	%rdx, %rax
+	jne	bogus_64_magic
 
-	movq    saved_magic, %rax
-	movq    $0x123456789abcdef0, %rdx
-	cmpq    %rdx, %rax
-	jne     bogus_64_magic
-
-	nop
-	nop
 	movw	$__KERNEL_DS, %ax
 	movw	%ax, %ss	
 	movw	%ax, %ds
@@ -208,130 +35,8 @@
 	movq	saved_rip, %rax
 	jmp	*%rax
 
-.code32
-
-	.align	64	
-gdta:
-	/* Its good to keep gdt in sync with one in trampoline.S */
-	.word	0, 0, 0, 0			# dummy
-	/* ??? Why I need the accessed bit set in order for this to work? */
-	.quad   0x00cf9b000000ffff              # __KERNEL32_CS
-	.quad   0x00af9b000000ffff              # __KERNEL_CS
-	.quad   0x00cf93000000ffff              # __KERNEL_DS
-
-idt_48a:
-	.word	0				# idt limit = 0
-	.word	0, 0				# idt base = 0L
-
-gdt_48a:
-	.word	0x800				# gdt limit=2048,
-						#  256 GDT entries
-	.long   gdta - wakeup_code              # gdt base (relocated in later)
-	
-real_magic:	.quad 0
-video_mode:	.quad 0
-realmode_flags:	.quad 0
-
-.code16
-bogus_real_magic:
-	jmp bogus_real_magic
-
-.code64
 bogus_64_magic:
-	jmp bogus_64_magic
-
-/* This code uses an extended set of video mode numbers. These include:
- * Aliases for standard modes
- *	NORMAL_VGA (-1)
- *	EXTENDED_VGA (-2)
- *	ASK_VGA (-3)
- * Video modes numbered by menu position -- NOT RECOMMENDED because of lack
- * of compatibility when extending the table. These are between 0x00 and 0xff.
- */
-#define VIDEO_FIRST_MENU 0x0000
-
-/* Standard BIOS video modes (BIOS number + 0x0100) */
-#define VIDEO_FIRST_BIOS 0x0100
-
-/* VESA BIOS video modes (VESA number + 0x0200) */
-#define VIDEO_FIRST_VESA 0x0200
-
-/* Video7 special modes (BIOS number + 0x0900) */
-#define VIDEO_FIRST_V7 0x0900
-
-# Setting of user mode (AX=mode ID) => CF=success
-
-# For now, we only handle VESA modes (0x0200..0x03ff).  To handle other
-# modes, we should probably compile in the video code from the boot
-# directory.
-.code16
-mode_set:
-	movw	%ax, %bx
-	subb	$VIDEO_FIRST_VESA>>8, %bh
-	cmpb	$2, %bh
-	jb	check_vesa
-
-setbad:
-	clc
-	ret
-
-check_vesa:
-	orw	$0x4000, %bx			# Use linear frame buffer
-	movw	$0x4f02, %ax			# VESA BIOS mode set call
-	int	$0x10
-	cmpw	$0x004f, %ax			# AL=4f if implemented
-	jnz	setbad				# AH=0 if OK
-
-	stc
-	ret
-
-wakeup_stack_begin:	# Stack grows down
-
-.org	0xff0
-wakeup_stack:		# Just below end of page
-
-.org   0x1000
-ENTRY(wakeup_level4_pgt)
-	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
-	.fill   510,8,0
-	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
-	.quad   level3_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
-
-ENTRY(wakeup_end)
-	
-##
-# acpi_copy_wakeup_routine
-#
-# Copy the above routine to low memory.
-#
-# Parameters:
-# %rdi:	place to copy wakeup routine to
-#
-# Returned address is location of code in low memory (past data and stack)
-#
-	.code64
-ENTRY(acpi_copy_wakeup_routine)
-	pushq	%rax
-	pushq	%rdx
-
-	movl	saved_video_mode, %edx
-	movl	%edx, video_mode - wakeup_start (,%rdi)
-	movl	acpi_realmode_flags, %edx
-	movl	%edx, realmode_flags - wakeup_start (,%rdi)
-	movq	$0x12345678, real_magic - wakeup_start (,%rdi)
-	movq	$0x123456789abcdef0, %rdx
-	movq	%rdx, saved_magic
-
-	movq    saved_magic, %rax
-	movq    $0x123456789abcdef0, %rdx
-	cmpq    %rdx, %rax
-	jne     bogus_64_magic
-
-	# restore the regs we used
-	popq	%rdx
-	popq	%rax
-ENTRY(do_suspend_lowlevel_s4bios)
-	ret
+	jmp	bogus_64_magic
 
 	.align 2
 	.p2align 4,,15
@@ -414,7 +119,7 @@
 	jmp	restore_processor_state
 .LFE5:
 .Lfe5:
-	.size	do_suspend_lowlevel,.Lfe5-do_suspend_lowlevel
+	.size	do_suspend_lowlevel, .Lfe5-do_suspend_lowlevel
 	
 .data
 ALIGN
diff --git a/arch/x86/kernel/acpi/wakeup_rm.S b/arch/x86/kernel/acpi/wakeup_rm.S
new file mode 100644
index 0000000..6ff3b57
--- /dev/null
+++ b/arch/x86/kernel/acpi/wakeup_rm.S
@@ -0,0 +1,10 @@
+/*
+ * Wrapper that carries the real-mode wakeup binary as an ordinary linked
+ * object until it is copied to low memory.
+ */
+	.section ".rodata","a"
+	.globl	wakeup_code_start, wakeup_code_end
+wakeup_code_start:
+	.incbin	"arch/x86/kernel/acpi/realmode/wakeup.bin"
+wakeup_code_end:
+	.size	wakeup_code_start, .-wakeup_code_start
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 5fed98c..df4099d 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -11,6 +11,8 @@
 #include <asm/mce.h>
 #include <asm/nmi.h>
 #include <asm/vsyscall.h>
+#include <asm/cacheflush.h>
+#include <asm/io.h>
 
 #define MAX_PATCH_LEN (255-1)
 
@@ -177,7 +179,7 @@
 #endif /* CONFIG_X86_64 */
 
 /* Use this to add nops to a buffer, then text_poke the whole buffer. */
-static void add_nops(void *insns, unsigned int len)
+void add_nops(void *insns, unsigned int len)
 {
 	const unsigned char *const *noptable = find_nop_table();
 
@@ -190,6 +192,7 @@
 		len -= noplen;
 	}
 }
+EXPORT_SYMBOL_GPL(add_nops);
 
 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
 extern u8 *__smp_locks[], *__smp_locks_end[];
@@ -205,7 +208,7 @@
 	struct alt_instr *a;
 	char insnbuf[MAX_PATCH_LEN];
 
-	DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
+	DPRINTK("%s: alt table %p -> %p\n", __func__, start, end);
 	for (a = start; a < end; a++) {
 		u8 *instr = a->instr;
 		BUG_ON(a->replacementlen > a->instrlen);
@@ -217,13 +220,13 @@
 		if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) {
 			instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
 			DPRINTK("%s: vsyscall fixup: %p => %p\n",
-				__FUNCTION__, a->instr, instr);
+				__func__, a->instr, instr);
 		}
 #endif
 		memcpy(insnbuf, a->replacement, a->replacementlen);
 		add_nops(insnbuf + a->replacementlen,
 			 a->instrlen - a->replacementlen);
-		text_poke(instr, insnbuf, a->instrlen);
+		text_poke_early(instr, insnbuf, a->instrlen);
 	}
 }
 
@@ -284,7 +287,6 @@
 				 void *text,  void *text_end)
 {
 	struct smp_alt_module *smp;
-	unsigned long flags;
 
 	if (noreplace_smp)
 		return;
@@ -307,42 +309,40 @@
 	smp->text	= text;
 	smp->text_end	= text_end;
 	DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
-		__FUNCTION__, smp->locks, smp->locks_end,
+		__func__, smp->locks, smp->locks_end,
 		smp->text, smp->text_end, smp->name);
 
-	spin_lock_irqsave(&smp_alt, flags);
+	spin_lock(&smp_alt);
 	list_add_tail(&smp->next, &smp_alt_modules);
 	if (boot_cpu_has(X86_FEATURE_UP))
 		alternatives_smp_unlock(smp->locks, smp->locks_end,
 					smp->text, smp->text_end);
-	spin_unlock_irqrestore(&smp_alt, flags);
+	spin_unlock(&smp_alt);
 }
 
 void alternatives_smp_module_del(struct module *mod)
 {
 	struct smp_alt_module *item;
-	unsigned long flags;
 
 	if (smp_alt_once || noreplace_smp)
 		return;
 
-	spin_lock_irqsave(&smp_alt, flags);
+	spin_lock(&smp_alt);
 	list_for_each_entry(item, &smp_alt_modules, next) {
 		if (mod != item->mod)
 			continue;
 		list_del(&item->next);
-		spin_unlock_irqrestore(&smp_alt, flags);
-		DPRINTK("%s: %s\n", __FUNCTION__, item->name);
+		spin_unlock(&smp_alt);
+		DPRINTK("%s: %s\n", __func__, item->name);
 		kfree(item);
 		return;
 	}
-	spin_unlock_irqrestore(&smp_alt, flags);
+	spin_unlock(&smp_alt);
 }
 
 void alternatives_smp_switch(int smp)
 {
 	struct smp_alt_module *mod;
-	unsigned long flags;
 
 #ifdef CONFIG_LOCKDEP
 	/*
@@ -359,7 +359,7 @@
 		return;
 	BUG_ON(!smp && (num_online_cpus() > 1));
 
-	spin_lock_irqsave(&smp_alt, flags);
+	spin_lock(&smp_alt);
 
 	/*
 	 * Avoid unnecessary switches because it forces JIT based VMs to
@@ -383,7 +383,7 @@
 						mod->text, mod->text_end);
 	}
 	smp_mode = smp;
-	spin_unlock_irqrestore(&smp_alt, flags);
+	spin_unlock(&smp_alt);
 }
 
 #endif
@@ -411,7 +411,7 @@
 
 		/* Pad the rest with nops */
 		add_nops(insnbuf + used, p->len - used);
-		text_poke(p->instr, insnbuf, p->len);
+		text_poke_early(p->instr, insnbuf, p->len);
 	}
 }
 extern struct paravirt_patch_site __start_parainstructions[],
@@ -420,8 +420,6 @@
 
 void __init alternative_instructions(void)
 {
-	unsigned long flags;
-
 	/* The patching is not fully atomic, so try to avoid local interruptions
 	   that might execute the to be patched code.
 	   Other CPUs are not running. */
@@ -430,7 +428,6 @@
 	stop_mce();
 #endif
 
-	local_irq_save(flags);
 	apply_alternatives(__alt_instructions, __alt_instructions_end);
 
 	/* switch to patch-once-at-boottime-only mode and free the
@@ -462,7 +459,6 @@
 	}
 #endif
  	apply_paravirt(__parainstructions, __parainstructions_end);
-	local_irq_restore(flags);
 
 	if (smp_alt_once)
 		free_init_pages("SMP alternatives",
@@ -475,18 +471,71 @@
 #endif
 }
 
-/*
- * Warning:
+/**
+ * text_poke_early - Update instructions on a live kernel at boot time
+ * @addr: address to modify
+ * @opcode: source of the copy
+ * @len: length to copy
+ *
  * When you use this code to patch more than one byte of an instruction
  * you need to make sure that other CPUs cannot execute this code in parallel.
- * Also no thread must be currently preempted in the middle of these instructions.
- * And on the local CPU you need to be protected again NMI or MCE handlers
- * seeing an inconsistent instruction while you patch.
+ * Also no thread must be currently preempted in the middle of these
+ * instructions. And on the local CPU you need to be protected against NMI or MCE
+ * handlers seeing an inconsistent instruction while you patch.
  */
-void __kprobes text_poke(void *addr, unsigned char *opcode, int len)
+void *text_poke_early(void *addr, const void *opcode, size_t len)
 {
+	unsigned long flags;
+	local_irq_save(flags);
 	memcpy(addr, opcode, len);
+	local_irq_restore(flags);
 	sync_core();
 	/* Could also do a CLFLUSH here to speed up CPU recovery; but
 	   that causes hangs on some VIA CPUs. */
+	return addr;
+}
+
+/**
+ * text_poke - Update instructions on a live kernel
+ * @addr: address to modify
+ * @opcode: source of the copy
+ * @len: length to copy
+ *
+ * Only atomic text poke/set should be allowed when not doing early patching.
+ * It means the size must be writable atomically and the address must be aligned
+ * in a way that permits an atomic write. It also makes sure we fit on a single
+ * page.
+ */
+void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
+{
+	unsigned long flags;
+	char *vaddr;
+	int nr_pages = 2;
+
+	BUG_ON(len > sizeof(long));
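+	/* and the write must not straddle an aligned machine word */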
+	BUG_ON((((long)addr + len - 1) & ~(sizeof(long) - 1))
+		- ((long)addr & ~(sizeof(long) - 1)));
+	if (kernel_text_address((unsigned long)addr)) {
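+		/*
+		 * Core kernel text may be mapped read-only (e.g. with
+		 * CONFIG_DEBUG_RODATA), so write through a temporary
+		 * writable alias instead.
+		 */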
+		struct page *pages[2] = { virt_to_page(addr),
+			virt_to_page(addr + PAGE_SIZE) };
+		if (!pages[1])
+			nr_pages = 1;
+		vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
+		BUG_ON(!vaddr);
+		local_irq_save(flags);
+		memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
+		local_irq_restore(flags);
+		vunmap(vaddr);
+	} else {
+		/*
+		 * modules are in vmalloc'ed memory, always writable.
+		 */
+		local_irq_save(flags);
+		memcpy(addr, opcode, len);
+		local_irq_restore(flags);
+	}
+	sync_core();
+	/* Could also do a CLFLUSH here to speed up CPU recovery; but
+	   that causes hangs on some VIA CPUs. */
+	return addr;
 }
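A minimal usage sketch of the split above (illustrative only; `site' is a
hypothetical patch target inside kernel text):

	unsigned char nop = 0x90;	/* one-byte NOP */

	text_poke_early(site, &nop, 1);	/* boot: plain memcpy under IRQ-off */
	text_poke(site, &nop, 1);	/* live kernel: word-sized, via page alias */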
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 00df126..479926d 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -27,11 +27,11 @@
 #include <asm/k8.h>
 
 int gart_iommu_aperture;
-int gart_iommu_aperture_disabled __initdata = 0;
-int gart_iommu_aperture_allowed __initdata = 0;
+int gart_iommu_aperture_disabled __initdata;
+int gart_iommu_aperture_allowed __initdata;
 
 int fallback_aper_order __initdata = 1; /* 64MB */
-int fallback_aper_force __initdata = 0;
+int fallback_aper_force __initdata;
 
 int fix_aperture __initdata = 1;
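The dropped initializers are not a behavior change: objects with static
storage duration are zero-initialized by the C language, so kernel style
treats an explicit `= 0' as redundant. A sketch of the rule (names
hypothetical):

	static int frobbed __initdata;		/* implicitly 0 */
	static int order __initdata = 1;	/* non-zero stays explicit */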
 
diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c
index 35a568e..6872081 100644
--- a/arch/x86/kernel/apic_32.c
+++ b/arch/x86/kernel/apic_32.c
@@ -50,6 +50,11 @@
 # error SPURIOUS_APIC_VECTOR definition error
 #endif
 
+unsigned long mp_lapic_addr;
+
+DEFINE_PER_CPU(u16, x86_bios_cpu_apicid) = BAD_APICID;
+EXPORT_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
+
 /*
  * Knob to control our willingness to enable the local APIC.
  *
@@ -621,6 +626,35 @@
 }
 
 /*
+ * Setup extended LVT, AMD specific (K8, family 10h)
+ *
+ * Vector mappings are hard coded. On K8 only offset 0 (APIC500) is
+ * supported, carrying the MCE interrupt, so the MCE offset must be 0.
+ */
+
+#define APIC_EILVT_LVTOFF_MCE 0
+#define APIC_EILVT_LVTOFF_IBS 1
+
+static void setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask)
+{
+	unsigned long reg = (lvt_off << 4) + APIC_EILVT0;
+	unsigned int  v   = (mask << 16) | (msg_type << 8) | vector;
+	apic_write(reg, v);
+}
+
+u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask)
+{
+	setup_APIC_eilvt(APIC_EILVT_LVTOFF_MCE, vector, msg_type, mask);
+	return APIC_EILVT_LVTOFF_MCE;
+}
+
+u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask)
+{
+	setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask);
+	return APIC_EILVT_LVTOFF_IBS;
+}
+
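+/*
+ * Layout sketch (illustrative): the extended-LVT register for offset n
+ * is APIC_EILVT0 + n*0x10, and its value packs up as
+ *
+ *	(mask << 16) | (msg_type << 8) | vector
+ *
+ * so an unmasked NMI-type IBS entry with vector 0 would be
+ * (0 << 16) | (4 << 8) | 0.
+ */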
+/*
  * Local APIC start and shutdown
  */
 
@@ -868,12 +902,50 @@
 	apic_write_around(APIC_LVT1, value);
 }
 
+void __cpuinit lapic_setup_esr(void)
+{
+	unsigned long oldvalue, value, maxlvt;
+	if (lapic_is_integrated() && !esr_disable) {
+		/* !82489DX */
+		maxlvt = lapic_get_maxlvt();
+		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
+			apic_write(APIC_ESR, 0);
+		oldvalue = apic_read(APIC_ESR);
+
+		/* enables sending errors */
+		value = ERROR_APIC_VECTOR;
+		apic_write_around(APIC_LVTERR, value);
+		/*
+		 * spec says clear errors after enabling vector.
+		 */
+		if (maxlvt > 3)
+			apic_write(APIC_ESR, 0);
+		value = apic_read(APIC_ESR);
+		if (value != oldvalue)
+			apic_printk(APIC_VERBOSE, "ESR value before enabling "
+				"vector: 0x%08lx  after: 0x%08lx\n",
+				oldvalue, value);
+	} else {
+		if (esr_disable)
+			/*
+			 * Something untraceable is creating bad interrupts on
+			 * secondary quads ... for the moment, just leave the
+			 * ESR disabled - we can't do anything useful with the
+			 * errors anyway - mbligh
+			 */
+			printk(KERN_INFO "Leaving ESR disabled.\n");
+		else
+			printk(KERN_INFO "No ESR for 82489DX.\n");
+	}
+}
+
+
 /**
  * setup_local_APIC - setup the local APIC
  */
 void __cpuinit setup_local_APIC(void)
 {
-	unsigned long oldvalue, value, maxlvt, integrated;
+	unsigned long value, integrated;
 	int i, j;
 
 	/* Pound the ESR really hard over the head with a big hammer - mbligh */
@@ -997,40 +1069,13 @@
 	if (!integrated)		/* 82489DX */
 		value |= APIC_LVT_LEVEL_TRIGGER;
 	apic_write_around(APIC_LVT1, value);
+}
 
-	if (integrated && !esr_disable) {
-		/* !82489DX */
-		maxlvt = lapic_get_maxlvt();
-		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
-			apic_write(APIC_ESR, 0);
-		oldvalue = apic_read(APIC_ESR);
+void __cpuinit end_local_APIC_setup(void)
+{
+	unsigned long value;
 
-		/* enables sending errors */
-		value = ERROR_APIC_VECTOR;
-		apic_write_around(APIC_LVTERR, value);
-		/*
-		 * spec says clear errors after enabling vector.
-		 */
-		if (maxlvt > 3)
-			apic_write(APIC_ESR, 0);
-		value = apic_read(APIC_ESR);
-		if (value != oldvalue)
-			apic_printk(APIC_VERBOSE, "ESR value before enabling "
-				"vector: 0x%08lx  after: 0x%08lx\n",
-				oldvalue, value);
-	} else {
-		if (esr_disable)
-			/*
-			 * Something untraceable is creating bad interrupts on
-			 * secondary quads ... for the moment, just leave the
-			 * ESR disabled - we can't do anything useful with the
-			 * errors anyway - mbligh
-			 */
-			printk(KERN_INFO "Leaving ESR disabled.\n");
-		else
-			printk(KERN_INFO "No ESR for 82489DX.\n");
-	}
-
+	lapic_setup_esr();
 	/* Disable the local apic timer */
 	value = apic_read(APIC_LVTT);
 	value |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
@@ -1147,7 +1192,7 @@
 	 * default configuration (or the MP table is broken).
 	 */
 	if (boot_cpu_physical_apicid == -1U)
-		boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
+		boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
 
 #ifdef CONFIG_X86_IO_APIC
 	{
@@ -1185,6 +1230,9 @@
  * This initializes the IO-APIC and APIC hardware if this is
  * a UP kernel.
  */
+
+int apic_version[MAX_APICS];
+
 int __init APIC_init_uniprocessor(void)
 {
 	if (enable_local_apic < 0)
@@ -1214,12 +1262,13 @@
 	 * might be zero if read from MP tables. Get it from LAPIC.
 	 */
 #ifdef CONFIG_CRASH_DUMP
-	boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
+	boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
 #endif
 	phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid);
 
 	setup_local_APIC();
 
+	end_local_APIC_setup();
 #ifdef CONFIG_X86_IO_APIC
 	if (smp_found_config)
 		if (!skip_ioapic_setup && nr_ioapics)
@@ -1288,6 +1337,29 @@
 	irq_exit();
 }
 
+#ifdef CONFIG_SMP
+void __init smp_intr_init(void)
+{
+	/*
+	 * IRQ0 must be given a fixed assignment and initialized,
+	 * because it's used before the IO-APIC is set up.
+	 */
+	set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]);
+
+	/*
+	 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
+	 * IPI, driven by wakeup.
+	 */
+	set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
+
+	/* IPI for invalidation */
+	set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);
+
+	/* IPI for generic function call */
+	set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
+}
+#endif
+
 /*
  * Initialize APIC interrupts
  */
@@ -1394,6 +1466,88 @@
 	}
 }
 
+unsigned int __cpuinitdata maxcpus = NR_CPUS;
+
+void __cpuinit generic_processor_info(int apicid, int version)
+{
+	int cpu;
+	cpumask_t tmp_map;
+	physid_mask_t phys_cpu;
+
+	/*
+	 * Validate version
+	 */
+	if (version == 0x0) {
+		printk(KERN_WARNING "BIOS bug, APIC version is 0 for CPU#%d! "
+				"fixing up to 0x10. (tell your hw vendor)\n",
+				apicid);
+		version = 0x10;
+	}
+	apic_version[apicid] = version;
+
+	phys_cpu = apicid_to_cpu_present(apicid);
+	physids_or(phys_cpu_present_map, phys_cpu_present_map, phys_cpu);
+
+	if (num_processors >= NR_CPUS) {
+		printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
+			"  Processor ignored.\n", NR_CPUS);
+		return;
+	}
+
+	if (num_processors >= maxcpus) {
+		printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
+			" Processor ignored.\n", maxcpus);
+		return;
+	}
+
+	num_processors++;
+	cpus_complement(tmp_map, cpu_present_map);
+	cpu = first_cpu(tmp_map);
+
+	if (apicid == boot_cpu_physical_apicid)
+		/*
+		 * x86_bios_cpu_apicid is required to have processors listed
+		 * in same order as logical cpu numbers. Hence the first
+		 * entry is BSP, and so on.
+		 */
+		cpu = 0;
+
+	/*
+	 * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y
+	 * but we need to work out other dependencies like SMP_SUSPEND etc.
+	 * before this can be done without some confusion.
+	 * if (CPU_HOTPLUG_ENABLED || num_processors > 8)
+	 *       - Ashok Raj <ashok.raj@intel.com>
+	 */
+	if (num_processors > 8) {
+		switch (boot_cpu_data.x86_vendor) {
+		case X86_VENDOR_INTEL:
+			if (!APIC_XAPIC(version)) {
+				def_to_bigsmp = 0;
+				break;
+			}
+			/* If P4 and above fall through */
+		case X86_VENDOR_AMD:
+			def_to_bigsmp = 1;
+		}
+	}
+#ifdef CONFIG_SMP
+	/* are we being called early in kernel startup? */
+	if (x86_cpu_to_apicid_early_ptr) {
+		u16 *cpu_to_apicid = x86_cpu_to_apicid_early_ptr;
+		u16 *bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr;
+
+		cpu_to_apicid[cpu] = apicid;
+		bios_cpu_apicid[cpu] = apicid;
+	} else {
+		per_cpu(x86_cpu_to_apicid, cpu) = apicid;
+		per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
+	}
+#endif
+	cpu_set(cpu, cpu_possible_map);
+	cpu_set(cpu, cpu_present_map);
+}
+
 /*
  * Power management
  */
diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c
index d8d03e0..9e8e5c0 100644
--- a/arch/x86/kernel/apic_64.c
+++ b/arch/x86/kernel/apic_64.c
@@ -34,13 +34,15 @@
 #include <asm/mpspec.h>
 #include <asm/hpet.h>
 #include <asm/pgalloc.h>
-#include <asm/mach_apic.h>
 #include <asm/nmi.h>
 #include <asm/idle.h>
 #include <asm/proto.h>
 #include <asm/timex.h>
 #include <asm/apic.h>
 
+#include <mach_ipi.h>
+#include <mach_apic.h>
+
 int disable_apic_timer __cpuinitdata;
 static int apic_calibrate_pmtmr __initdata;
 int disable_apic;
@@ -83,6 +85,12 @@
 
 static unsigned long apic_phys;
 
+unsigned long mp_lapic_addr;
+
+DEFINE_PER_CPU(u16, x86_bios_cpu_apicid) = BAD_APICID;
+EXPORT_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
+
+unsigned int __cpuinitdata maxcpus = NR_CPUS;
 /*
  * Get the LAPIC version
  */
@@ -431,7 +439,8 @@
 	lapic_clockevent.features |= CLOCK_EVT_FEAT_DUMMY;
 
 	local_irq_enable();
-	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE, &boot_cpu_id);
+	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
+			   &boot_cpu_physical_apicid);
 	local_irq_disable();
 }
 
@@ -640,10 +649,10 @@
 	/*
 	 * The ID register is read/write in a real APIC.
 	 */
-	reg0 = apic_read(APIC_ID);
+	reg0 = read_apic_id();
 	apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0);
 	apic_write(APIC_ID, reg0 ^ APIC_ID_MASK);
-	reg1 = apic_read(APIC_ID);
+	reg1 = read_apic_id();
 	apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1);
 	apic_write(APIC_ID, reg0);
 	if (reg1 != (reg0 ^ APIC_ID_MASK))
@@ -728,6 +737,7 @@
 	unsigned int value;
 	int i, j;
 
+	preempt_disable();
 	value = apic_read(APIC_LVR);
 
 	BUILD_BUG_ON((SPURIOUS_APIC_VECTOR & 0x0f) != 0x0f);
@@ -821,6 +831,7 @@
 	else
 		value = APIC_DM_NMI | APIC_LVT_MASKED;
 	apic_write(APIC_LVT1, value);
+	preempt_enable();
 }
 
 void __cpuinit lapic_setup_esr(void)
@@ -857,10 +868,34 @@
 	}
 
 	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
-	boot_cpu_id = 0;
+	boot_cpu_physical_apicid = 0;
 	return 0;
 }
 
+void __init early_init_lapic_mapping(void)
+{
+	unsigned long apic_phys;
+
+	/*
+	 * If no local APIC can be found then bail out;
+	 * it means there is neither an MP table nor an MADT.
+	 */
+	if (!smp_found_config)
+		return;
+
+	apic_phys = mp_lapic_addr;
+
+	set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
+	apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
+				 APIC_BASE, apic_phys);
+
+	/*
+	 * Fetch the APIC ID of the BSP in case we have a
+	 * default configuration (or the MP table is broken).
+	 */
+	boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
+}
+
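+/*
+ * Sketch of what the fixmap buys us: after set_fixmap_nocache() the
+ * local APIC registers are plain uncached MMIO accesses, roughly
+ *
+ *	val = *(volatile u32 *)(APIC_BASE + reg);
+ */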
 /**
  * init_apic_mappings - initialize APIC mappings
  */
@@ -881,16 +916,11 @@
 	apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
 				APIC_BASE, apic_phys);
 
-	/* Put local APIC into the resource map. */
-	lapic_resource.start = apic_phys;
-	lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1;
-	insert_resource(&iomem_resource, &lapic_resource);
-
 	/*
 	 * Fetch the APIC ID of the BSP in case we have a
 	 * default configuration (or the MP table is broken).
 	 */
-	boot_cpu_id = GET_APIC_ID(apic_read(APIC_ID));
+	boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
 }
 
 /*
@@ -911,8 +941,8 @@
 
 	verify_local_APIC();
 
-	phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id);
-	apic_write(APIC_ID, SET_APIC_ID(boot_cpu_id));
+	phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid);
+	apic_write(APIC_ID, SET_APIC_ID(boot_cpu_physical_apicid));
 
 	setup_local_APIC();
 
@@ -1029,6 +1059,52 @@
 	apic_write(APIC_LVT1, value);
 }
 
+void __cpuinit generic_processor_info(int apicid, int version)
+{
+	int cpu;
+	cpumask_t tmp_map;
+
+	if (num_processors >= NR_CPUS) {
+		printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
+		       " Processor ignored.\n", NR_CPUS);
+		return;
+	}
+
+	if (num_processors >= maxcpus) {
+		printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
+		       " Processor ignored.\n", maxcpus);
+		return;
+	}
+
+	num_processors++;
+	cpus_complement(tmp_map, cpu_present_map);
+	cpu = first_cpu(tmp_map);
+
+	physid_set(apicid, phys_cpu_present_map);
+	if (apicid == boot_cpu_physical_apicid) {
+		/*
+		 * x86_bios_cpu_apicid is required to have processors listed
+		 * in same order as logical cpu numbers. Hence the first
+		 * entry is BSP, and so on.
+		 */
+		cpu = 0;
+	}
+	/* are we being called early in kernel startup? */
+	if (x86_cpu_to_apicid_early_ptr) {
+		u16 *cpu_to_apicid = x86_cpu_to_apicid_early_ptr;
+		u16 *bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr;
+
+		cpu_to_apicid[cpu] = apicid;
+		bios_cpu_apicid[cpu] = apicid;
+	} else {
+		per_cpu(x86_cpu_to_apicid, cpu) = apicid;
+		per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
+	}
+
+	cpu_set(cpu, cpu_possible_map);
+	cpu_set(cpu, cpu_present_map);
+}
+
 /*
  * Power management
  */
@@ -1065,7 +1141,7 @@
 
 	maxlvt = lapic_get_maxlvt();
 
-	apic_pm_state.apic_id = apic_read(APIC_ID);
+	apic_pm_state.apic_id = read_apic_id();
 	apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
 	apic_pm_state.apic_ldr = apic_read(APIC_LDR);
 	apic_pm_state.apic_dfr = apic_read(APIC_DFR);
@@ -1180,9 +1256,19 @@
 {
 	int i, clusters, zeros;
 	unsigned id;
-	u16 *bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr;
+	u16 *bios_cpu_apicid;
 	DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
 
+	/*
+	 * There is no such kind of box with AMD CPUs yet. Some AMD
+	 * boxes with quad-core CPUs and 8 sockets have APIC IDs of
+	 * [4, 0x23] or [8, 0x27] and could be mistaken for a vSMP
+	 * box; this still needs checking...
+	 */
+	if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && !is_vsmp_box())
+		return 0;
+
+	bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr;
 	bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
 
 	for (i = 0; i < NR_CPUS; i++) {
@@ -1219,6 +1305,12 @@
 			++zeros;
 	}
 
+	/* ScaleMP vSMPowered boxes have one cluster per board and TSCs are
+	 * not guaranteed to be synced between boards.
+	 */
+	if (is_vsmp_box() && clusters > 1)
+		return 1;
+
 	/*
 	 * If clusters > 2, then should be multi-chassis.
 	 * May have to revisit this when multi-core + hyperthreaded CPUs come
@@ -1290,3 +1382,21 @@
 }
 __setup("apicpmtimer", setup_apicpmtimer);
 
+static int __init lapic_insert_resource(void)
+{
+	if (!apic_phys)
+		return -1;
+
+	/* Put local APIC into the resource map. */
+	lapic_resource.start = apic_phys;
+	lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1;
+	insert_resource(&iomem_resource, &lapic_resource);
+
+	return 0;
+}
+
+/*
+ * The insertion needs to happen after e820_reserve_resources(),
+ * which uses request_resource().
+ */
+late_initcall(lapic_insert_resource);
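The ordering falls out of the initcall levels; a rough sketch of the
relevant sequence:

	core_initcall(fn);	/* level 1, early */
	device_initcall(fn);	/* level 6, what module_init() maps to in-kernel */
	late_initcall(fn);	/* level 7, after e820_reserve_resources() has run */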
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index d4438ef..f0030a0 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -2217,7 +2217,6 @@
  */
 static int __init apm_init(void)
 {
-	struct proc_dir_entry *apm_proc;
 	struct desc_struct *gdt;
 	int err;
 
@@ -2322,9 +2321,7 @@
 	set_base(gdt[APM_DS >> 3],
 		 __va((unsigned long)apm_info.bios.dseg << 4));
 
-	apm_proc = create_proc_entry("apm", 0, NULL);
-	if (apm_proc)
-		apm_proc->proc_fops = &apm_file_ops;
+	proc_create("apm", 0, NULL, &apm_file_ops);
 
 	kapmd_task = kthread_create(apm, NULL, "kapmd");
 	if (IS_ERR(kapmd_task)) {
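proc_create() collapses the old two-step create_proc_entry()-plus-fops
assignment into a single call that returns the new entry, or NULL on
failure. A sketch with explicit error handling (the patch itself ignores
the return value, as the old code effectively did):

	if (!proc_create("apm", 0, NULL, &apm_file_ops))
		printk(KERN_WARNING "apm: can't create /proc/apm\n");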
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index 8ea0401..670c3c3 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -10,7 +10,7 @@
 #include <linux/personality.h>
 #include <linux/suspend.h>
 #include <asm/ucontext.h>
-#include "sigframe_32.h"
+#include "sigframe.h"
 #include <asm/pgtable.h>
 #include <asm/fixmap.h>
 #include <asm/processor.h>
diff --git a/arch/x86/kernel/bugs_64.c b/arch/x86/kernel/bugs_64.c
index 8f520f9..9a3ed06 100644
--- a/arch/x86/kernel/bugs_64.c
+++ b/arch/x86/kernel/bugs_64.c
@@ -9,13 +9,25 @@
 #include <asm/bugs.h>
 #include <asm/processor.h>
 #include <asm/mtrr.h>
+#include <asm/cacheflush.h>
 
 void __init check_bugs(void)
 {
-	identify_cpu(&boot_cpu_data);
+	identify_boot_cpu();
 #if !defined(CONFIG_SMP)
 	printk("CPU: ");
 	print_cpu_info(&boot_cpu_data);
 #endif
 	alternative_instructions();
+
+	/*
+	 * Make sure the first 2MB area is not mapped by huge pages.
+	 * There are typically fixed-size MTRRs in there and overlapping
+	 * MTRRs with large pages causes slowdowns.
+	 *
+	 * Right now we don't do that with gbpages because there seems
+	 * to be very little benefit for that case.
+	 */
+	if (!direct_gbpages)
+		set_memory_4k((unsigned long)__va(0), 1);
 }
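For reference, the helper declared in <asm/cacheflush.h> (hence the new
include above) has the signature

	int set_memory_4k(unsigned long addr, int numpages);

and splitting even a single 4k page out of a 2MB kernel mapping forces
the whole large page to be remapped with 4k entries, which is exactly
the effect wanted for the MTRR-laden low range.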
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index a0c4d7c..ee7c452 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -3,9 +3,9 @@
 #
 
 obj-y			:= intel_cacheinfo.o addon_cpuid_features.o
-obj-y			+= feature_names.o
+obj-y			+= proc.o feature_names.o
 
-obj-$(CONFIG_X86_32)	+= common.o proc.o bugs.o
+obj-$(CONFIG_X86_32)	+= common.o bugs.o
 obj-$(CONFIG_X86_32)	+= amd.o
 obj-$(CONFIG_X86_32)	+= cyrix.o
 obj-$(CONFIG_X86_32)	+= centaur.o
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 693e353..0173065 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -4,8 +4,8 @@
 #include <asm/io.h>
 #include <asm/processor.h>
 #include <asm/apic.h>
-#include <asm/mach_apic.h>
 
+#include <mach_apic.h>
 #include "cpu.h"
 
 /*
@@ -20,7 +20,7 @@
  *	the chip setting when fixing the bug but they also tweaked some
  *	performance at the same time..
  */
- 
+
 extern void vide(void);
 __asm__(".align 4\nvide: ret");
 
@@ -63,12 +63,12 @@
 
 int force_mwait __cpuinitdata;
 
-void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
+static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
 {
 	if (cpuid_eax(0x80000000) >= 0x80000007) {
 		c->x86_power = cpuid_edx(0x80000007);
 		if (c->x86_power & (1<<8))
-			set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
+			set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 	}
 }
 
@@ -81,7 +81,8 @@
 #ifdef CONFIG_SMP
 	unsigned long long value;
 
-	/* Disable TLB flush filter by setting HWCR.FFDIS on K8
+	/*
+	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
 	 * bit 6 of msr C001_0015
 	 *
 	 * Errata 63 for SH-B3 steppings
@@ -102,15 +103,16 @@
 	 *	no bus pipeline)
 	 */
 
-	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
-	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
-	clear_bit(0*32+31, c->x86_capability);
-	
+	/*
+	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
+	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
+	 */
+	clear_cpu_cap(c, 0*32+31);
+
 	r = get_model_name(c);
 
-	switch(c->x86)
-	{
-		case 4:
+	switch (c->x86) {
+	case 4:
 		/*
 		 * General Systems BIOSen alias the cpu frequency registers
		 * of the Elan at 0x000df000. Unfortunately, one of the Linux
@@ -120,61 +122,60 @@
 #define CBAR		(0xfffc) /* Configuration Base Address  (32-bit) */
 #define CBAR_ENB	(0x80000000)
 #define CBAR_KEY	(0X000000CB)
-			if (c->x86_model==9 || c->x86_model == 10) {
+			if (c->x86_model == 9 || c->x86_model == 10) {
 				if (inl (CBAR) & CBAR_ENB)
 					outl (0 | CBAR_KEY, CBAR);
 			}
 			break;
-		case 5:
-			if( c->x86_model < 6 )
-			{
+	case 5:
+			if (c->x86_model < 6) {
 				/* Based on AMD doc 20734R - June 2000 */
-				if ( c->x86_model == 0 ) {
-					clear_bit(X86_FEATURE_APIC, c->x86_capability);
-					set_bit(X86_FEATURE_PGE, c->x86_capability);
+				if (c->x86_model == 0) {
+					clear_cpu_cap(c, X86_FEATURE_APIC);
+					set_cpu_cap(c, X86_FEATURE_PGE);
 				}
 				break;
 			}
-			
-			if ( c->x86_model == 6 && c->x86_mask == 1 ) {
+
+			if (c->x86_model == 6 && c->x86_mask == 1) {
 				const int K6_BUG_LOOP = 1000000;
 				int n;
 				void (*f_vide)(void);
 				unsigned long d, d2;
-				
+
 				printk(KERN_INFO "AMD K6 stepping B detected - ");
-				
+
 				/*
-				 * It looks like AMD fixed the 2.6.2 bug and improved indirect 
+				 * It looks like AMD fixed the 2.6.2 bug and improved indirect
 				 * calls at the same time.
 				 */
 
 				n = K6_BUG_LOOP;
 				f_vide = vide;
 				rdtscl(d);
-				while (n--) 
+				while (n--)
 					f_vide();
 				rdtscl(d2);
 				d = d2-d;
 
-				if (d > 20*K6_BUG_LOOP) 
+				if (d > 20*K6_BUG_LOOP)
 					printk("system stability may be impaired when more than 32 MB are used.\n");
-				else 
+				else
 					printk("probably OK (after B9730xxxx).\n");
 				printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
 			}
 
 			/* K6 with old style WHCR */
 			if (c->x86_model < 8 ||
-			   (c->x86_model== 8 && c->x86_mask < 8)) {
+			   (c->x86_model == 8 && c->x86_mask < 8)) {
 				/* We can only write allocate on the low 508Mb */
-				if(mbytes>508)
-					mbytes=508;
+				if (mbytes > 508)
+					mbytes = 508;
 
 				rdmsr(MSR_K6_WHCR, l, h);
-				if ((l&0x0000FFFF)==0) {
+				if ((l&0x0000FFFF) == 0) {
 					unsigned long flags;
-					l=(1<<0)|((mbytes/4)<<1);
+					l = (1<<0)|((mbytes/4)<<1);
 					local_irq_save(flags);
 					wbinvd();
 					wrmsr(MSR_K6_WHCR, l, h);
@@ -185,17 +186,17 @@
 				break;
 			}
 
-			if ((c->x86_model == 8 && c->x86_mask >7) ||
+			if ((c->x86_model == 8 && c->x86_mask > 7) ||
 			     c->x86_model == 9 || c->x86_model == 13) {
 				/* The more serious chips .. */
 
-				if(mbytes>4092)
-					mbytes=4092;
+				if (mbytes > 4092)
+					mbytes = 4092;
 
 				rdmsr(MSR_K6_WHCR, l, h);
-				if ((l&0xFFFF0000)==0) {
+				if ((l&0xFFFF0000) == 0) {
 					unsigned long flags;
-					l=((mbytes>>2)<<22)|(1<<16);
+					l = ((mbytes>>2)<<22)|(1<<16);
 					local_irq_save(flags);
 					wbinvd();
 					wrmsr(MSR_K6_WHCR, l, h);
@@ -207,7 +208,7 @@
 				/*  Set MTRR capability flag if appropriate */
 				if (c->x86_model == 13 || c->x86_model == 9 ||
 				   (c->x86_model == 8 && c->x86_mask >= 8))
-					set_bit(X86_FEATURE_K6_MTRR, c->x86_capability);
+					set_cpu_cap(c, X86_FEATURE_K6_MTRR);
 				break;
 			}
 
@@ -217,10 +218,11 @@
 				break;
 			}
 			break;
-		case 6: /* An Athlon/Duron */
- 
-			/* Bit 15 of Athlon specific MSR 15, needs to be 0
- 			 * to enable SSE on Palomino/Morgan/Barton CPU's.
+	case 6: /* An Athlon/Duron */
+
+			/*
+			 * Bit 15 of Athlon-specific MSR 15 needs to be 0
+			 * to enable SSE on Palomino/Morgan/Barton CPUs.
 			 * If the BIOS didn't enable it already, enable it here.
 			 */
 			if (c->x86_model >= 6 && c->x86_model <= 10) {
@@ -229,15 +231,16 @@
 					rdmsr(MSR_K7_HWCR, l, h);
 					l &= ~0x00008000;
 					wrmsr(MSR_K7_HWCR, l, h);
-					set_bit(X86_FEATURE_XMM, c->x86_capability);
+					set_cpu_cap(c, X86_FEATURE_XMM);
 				}
 			}
 
-			/* It's been determined by AMD that Athlons since model 8 stepping 1
+			/*
+			 * It's been determined by AMD that Athlons since model 8 stepping 1
 			 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
 			 * As per AMD technical note 27212 0.2
 			 */
-			if ((c->x86_model == 8 && c->x86_mask>=1) || (c->x86_model > 8)) {
+			if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
 				rdmsr(MSR_K7_CLK_CTL, l, h);
 				if ((l & 0xfff00000) != 0x20000000) {
 					printk ("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l,
@@ -253,20 +256,19 @@
 	/* Use K8 tuning for Fam10h and Fam11h */
 	case 0x10:
 	case 0x11:
-		set_bit(X86_FEATURE_K8, c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_K8);
 		break;
 	case 6:
-		set_bit(X86_FEATURE_K7, c->x86_capability); 
+		set_cpu_cap(c, X86_FEATURE_K7);
 		break;
 	}
 	if (c->x86 >= 6)
-		set_bit(X86_FEATURE_FXSAVE_LEAK, c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);
 
 	display_cacheinfo(c);
 
-	if (cpuid_eax(0x80000000) >= 0x80000008) {
+	if (cpuid_eax(0x80000000) >= 0x80000008)
 		c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
-	}
 
 #ifdef CONFIG_X86_HT
 	/*
@@ -302,20 +304,20 @@
 
 	/* K6s reports MCEs but don't actually have all the MSRs */
 	if (c->x86 < 6)
-		clear_bit(X86_FEATURE_MCE, c->x86_capability);
+		clear_cpu_cap(c, X86_FEATURE_MCE);
 
 	if (cpu_has_xmm2)
-		set_bit(X86_FEATURE_MFENCE_RDTSC, c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
 }
 
-static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 * c, unsigned int size)
+static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 {
 	/* AMD errata T13 (order #21922) */
 	if ((c->x86 == 6)) {
 		if (c->x86_model == 3 && c->x86_mask == 0)	/* Duron Rev A0 */
 			size = 64;
 		if (c->x86_model == 4 &&
-		    (c->x86_mask==0 || c->x86_mask==1))	/* Tbird rev A1/A2 */
+		    (c->x86_mask == 0 || c->x86_mask == 1))	/* Tbird rev A1/A2 */
 			size = 256;
 	}
 	return size;
@@ -323,19 +325,20 @@
 
 static struct cpu_dev amd_cpu_dev __cpuinitdata = {
 	.c_vendor	= "AMD",
-	.c_ident 	= { "AuthenticAMD" },
+	.c_ident	= { "AuthenticAMD" },
 	.c_models = {
 		{ .vendor = X86_VENDOR_AMD, .family = 4, .model_names =
 		  {
 			  [3] = "486 DX/2",
 			  [7] = "486 DX/2-WB",
-			  [8] = "486 DX/4", 
-			  [9] = "486 DX/4-WB", 
+			  [8] = "486 DX/4",
+			  [9] = "486 DX/4-WB",
 			  [14] = "Am5x86-WT",
-			  [15] = "Am5x86-WB" 
+			  [15] = "Am5x86-WB"
 		  }
 		},
 	},
+	.c_early_init   = early_init_amd,
 	.c_init		= init_amd,
 	.c_size_cache	= amd_size_cache,
 };
@@ -345,3 +348,5 @@
 	cpu_devs[X86_VENDOR_AMD] = &amd_cpu_dev;
 	return 0;
 }
+
+cpu_vendor_dev_register(X86_VENDOR_AMD, &amd_cpu_dev);
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index 9681fa1..e0f45ed 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -1,31 +1,34 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/bitops.h>
+
 #include <asm/processor.h>
 #include <asm/msr.h>
 #include <asm/e820.h>
 #include <asm/mtrr.h>
+
 #include "cpu.h"
 
 #ifdef CONFIG_X86_OOSTORE
 
 static u32 __cpuinit power2(u32 x)
 {
-	u32 s=1;
-	while(s<=x)
-		s<<=1;
-	return s>>=1;
+	u32 s = 1;
+
+	while (s <= x)
+		s <<= 1;
+
+	return s >>= 1;
 }
 
 
 /*
- *	Set up an actual MCR
+ * Set up an actual MCR
  */
- 
 static void __cpuinit centaur_mcr_insert(int reg, u32 base, u32 size, int key)
 {
 	u32 lo, hi;
-	
+
 	hi = base & ~0xFFF;
 	lo = ~(size-1);		/* Size is a power of 2 so this makes a mask */
 	lo &= ~0xFFF;		/* Remove the ctrl value bits */
@@ -35,30 +38,28 @@
 }
 
 /*
- *	Figure what we can cover with MCR's
+ * Figure out what we can cover with MCRs
  *
- *	Shortcut: We know you can't put 4Gig of RAM on a winchip
+ * Shortcut: We know you can't put 4Gig of RAM on a winchip
  */
-
-static u32 __cpuinit ramtop(void)		/* 16388 */
+static u32 __cpuinit ramtop(void)
 {
-	int i;
-	u32 top = 0;
 	u32 clip = 0xFFFFFFFFUL;
-	
+	u32 top = 0;
+	int i;
+
 	for (i = 0; i < e820.nr_map; i++) {
 		unsigned long start, end;
 
 		if (e820.map[i].addr > 0xFFFFFFFFUL)
 			continue;
 		/*
-		 *	Don't MCR over reserved space. Ignore the ISA hole
-		 *	we frob around that catastrophe already
+		 * Don't MCR over reserved space. Ignore the ISA hole
+		 * we frob around that catastrophe already
 		 */
-		 			
-		if (e820.map[i].type == E820_RESERVED)
-		{
-			if(e820.map[i].addr >= 0x100000UL && e820.map[i].addr < clip)
+		if (e820.map[i].type == E820_RESERVED) {
+			if (e820.map[i].addr >= 0x100000UL &&
+			    e820.map[i].addr < clip)
 				clip = e820.map[i].addr;
 			continue;
 		}
@@ -69,28 +70,27 @@
 		if (end > top)
 			top = end;
 	}
-	/* Everything below 'top' should be RAM except for the ISA hole.
-	   Because of the limited MCR's we want to map NV/ACPI into our
-	   MCR range for gunk in RAM 
-	   
-	   Clip might cause us to MCR insufficient RAM but that is an
-	   acceptable failure mode and should only bite obscure boxes with
-	   a VESA hole at 15Mb
-	   
-	   The second case Clip sometimes kicks in is when the EBDA is marked
-	   as reserved. Again we fail safe with reasonable results
-	*/
-	
-	if(top>clip)
-		top=clip;
-		
+	/*
+	 * Everything below 'top' should be RAM except for the ISA hole.
+	 * Because of the limited MCR's we want to map NV/ACPI into our
+	 * MCR range for gunk in RAM
+	 *
+	 * Clip might cause us to MCR insufficient RAM but that is an
+	 * acceptable failure mode and should only bite obscure boxes with
+	 * a VESA hole at 15Mb
+	 *
+	 * The second case Clip sometimes kicks in is when the EBDA is marked
+	 * as reserved. Again we fail safe with reasonable results
+	 */
+	if (top > clip)
+		top = clip;
+
 	return top;
 }
 
 /*
- *	Compute a set of MCR's to give maximum coverage
+ * Compute a set of MCRs to give maximum coverage
  */
-
 static int __cpuinit centaur_mcr_compute(int nr, int key)
 {
 	u32 mem = ramtop();
@@ -99,141 +99,131 @@
 	u32 top = root;
 	u32 floor = 0;
 	int ct = 0;
-	
-	while(ct<nr)
-	{
+
+	while (ct < nr) {
 		u32 fspace = 0;
+		u32 high;
+		u32 low;
 
 		/*
-		 *	Find the largest block we will fill going upwards
+		 * Find the largest block we will fill going upwards
 		 */
-
-		u32 high = power2(mem-top);	
+		high = power2(mem-top);
 
 		/*
-		 *	Find the largest block we will fill going downwards
+		 * Find the largest block we will fill going downwards
 		 */
-
-		u32 low = base/2;
+		low = base/2;
 
 		/*
-		 *	Don't fill below 1Mb going downwards as there
-		 *	is an ISA hole in the way.
-		 */		
-		 
-		if(base <= 1024*1024)
+		 * Don't fill below 1Mb going downwards as there
+		 * is an ISA hole in the way.
+		 */
+		if (base <= 1024*1024)
 			low = 0;
-			
+
 		/*
-		 *	See how much space we could cover by filling below
-		 *	the ISA hole
+		 * See how much space we could cover by filling below
+		 * the ISA hole
 		 */
-		 
-		if(floor == 0)
+
+		if (floor == 0)
 			fspace = 512*1024;
-		else if(floor ==512*1024)
+		else if (floor == 512*1024)
 			fspace = 128*1024;
 
 		/* And forget ROM space */
-		
+
 		/*
-		 *	Now install the largest coverage we get
+		 * Now install the largest coverage we get
 		 */
-		 
-		if(fspace > high && fspace > low)
-		{
+		if (fspace > high && fspace > low) {
 			centaur_mcr_insert(ct, floor, fspace, key);
 			floor += fspace;
-		}
-		else if(high > low)
-		{
+		} else if (high > low) {
 			centaur_mcr_insert(ct, top, high, key);
 			top += high;
-		}
-		else if(low > 0)
-		{
+		} else if (low > 0) {
 			base -= low;
 			centaur_mcr_insert(ct, base, low, key);
-		}
-		else break;
+		} else
+			break;
 		ct++;
 	}
 	/*
-	 *	We loaded ct values. We now need to set the mask. The caller
-	 *	must do this bit.
+	 * We loaded ct values. We now need to set the mask. The caller
+	 * must do this bit.
 	 */
-	 
 	return ct;
 }
 
 static void __cpuinit centaur_create_optimal_mcr(void)
 {
+	int used;
 	int i;
-	/*
-	 *	Allocate up to 6 mcrs to mark as much of ram as possible
-	 *	as write combining and weak write ordered.
-	 *
-	 *	To experiment with: Linux never uses stack operations for 
-	 *	mmio spaces so we could globally enable stack operation wc
-	 *
-	 *	Load the registers with type 31 - full write combining, all
-	 *	writes weakly ordered.
-	 */
-	int used = centaur_mcr_compute(6, 31);
 
 	/*
-	 *	Wipe unused MCRs
+	 * Allocate up to 6 mcrs to mark as much of ram as possible
+	 * as write combining and weak write ordered.
+	 *
+	 * To experiment with: Linux never uses stack operations for
+	 * mmio spaces so we could globally enable stack operation wc
+	 *
+	 * Load the registers with type 31 - full write combining, all
+	 * writes weakly ordered.
 	 */
-	 
-	for(i=used;i<8;i++)
+	used = centaur_mcr_compute(6, 31);
+
+	/*
+	 * Wipe unused MCRs
+	 */
+	for (i = used; i < 8; i++)
 		wrmsr(MSR_IDT_MCR0+i, 0, 0);
 }
 
 static void __cpuinit winchip2_create_optimal_mcr(void)
 {
 	u32 lo, hi;
+	int used;
 	int i;
 
 	/*
-	 *	Allocate up to 6 mcrs to mark as much of ram as possible
-	 *	as write combining, weak store ordered.
+	 * Allocate up to 6 mcrs to mark as much of ram as possible
+	 * as write combining, weak store ordered.
 	 *
-	 *	Load the registers with type 25
-	 *		8	-	weak write ordering
-	 *		16	-	weak read ordering
-	 *		1	-	write combining
+	 * Load the registers with type 25
+	 *	8	-	weak write ordering
+	 *	16	-	weak read ordering
+	 *	1	-	write combining
+	 */
+	used = centaur_mcr_compute(6, 25);
+
+	/*
+	 * Mark the registers we are using.
+	 */
+	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
+	for (i = 0; i < used; i++)
+		lo |= 1<<(9+i);
+	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
+
+	/*
+	 * Wipe unused MCRs
 	 */
 
-	int used = centaur_mcr_compute(6, 25);
-	
-	/*
-	 *	Mark the registers we are using.
-	 */
-	 
-	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
-	for(i=0;i<used;i++)
-		lo|=1<<(9+i);
-	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
-	
-	/*
-	 *	Wipe unused MCRs
-	 */
-	 
-	for(i=used;i<8;i++)
+	for (i = used; i < 8; i++)
 		wrmsr(MSR_IDT_MCR0+i, 0, 0);
 }
 
 /*
- *	Handle the MCR key on the Winchip 2.
+ * Handle the MCR key on the Winchip 2.
  */
-
 static void __cpuinit winchip2_unprotect_mcr(void)
 {
 	u32 lo, hi;
 	u32 key;
-	
+
 	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
-	lo&=~0x1C0;	/* blank bits 8-6 */
+	lo &= ~0x1C0;	/* blank bits 8-6 */
 	key = (lo>>17) & 7;
 	lo |= key<<6;	/* replace with unlock key */
 	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
@@ -242,9 +232,9 @@
 static void __cpuinit winchip2_protect_mcr(void)
 {
 	u32 lo, hi;
-	
+
 	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
-	lo&=~0x1C0;	/* blank bits 8-6 */
+	lo &= ~0x1C0;	/* blank bits 8-6 */
 	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
 }
 #endif /* CONFIG_X86_OOSTORE */
@@ -267,17 +257,17 @@
 
 		/* enable ACE unit, if present and disabled */
 		if ((tmp & (ACE_PRESENT | ACE_ENABLED)) == ACE_PRESENT) {
-			rdmsr (MSR_VIA_FCR, lo, hi);
+			rdmsr(MSR_VIA_FCR, lo, hi);
 			lo |= ACE_FCR;		/* enable ACE unit */
-			wrmsr (MSR_VIA_FCR, lo, hi);
+			wrmsr(MSR_VIA_FCR, lo, hi);
 			printk(KERN_INFO "CPU: Enabled ACE h/w crypto\n");
 		}
 
 		/* enable RNG unit, if present and disabled */
 		if ((tmp & (RNG_PRESENT | RNG_ENABLED)) == RNG_PRESENT) {
-			rdmsr (MSR_VIA_RNG, lo, hi);
+			rdmsr(MSR_VIA_RNG, lo, hi);
 			lo |= RNG_ENABLE;	/* enable RNG unit */
-			wrmsr (MSR_VIA_RNG, lo, hi);
+			wrmsr(MSR_VIA_RNG, lo, hi);
 			printk(KERN_INFO "CPU: Enabled h/w RNG\n");
 		}
 
@@ -288,171 +278,183 @@
 	}
 
 	/* Cyrix III family needs CX8 & PGE explicitly enabled. */
-	if (c->x86_model >=6 && c->x86_model <= 9) {
-		rdmsr (MSR_VIA_FCR, lo, hi);
+	if (c->x86_model >= 6 && c->x86_model <= 9) {
+		rdmsr(MSR_VIA_FCR, lo, hi);
 		lo |= (1<<1 | 1<<7);
-		wrmsr (MSR_VIA_FCR, lo, hi);
-		set_bit(X86_FEATURE_CX8, c->x86_capability);
+		wrmsr(MSR_VIA_FCR, lo, hi);
+		set_cpu_cap(c, X86_FEATURE_CX8);
 	}
 
 	/* Before Nehemiah, the C3's had 3dNOW! */
-	if (c->x86_model >=6 && c->x86_model <9)
-		set_bit(X86_FEATURE_3DNOW, c->x86_capability);
+	if (c->x86_model >= 6 && c->x86_model < 9)
+		set_cpu_cap(c, X86_FEATURE_3DNOW);
 
 	get_model_name(c);
 	display_cacheinfo(c);
 }
 
+enum {
+	ECX8		= 1<<1,
+	EIERRINT	= 1<<2,
+	DPM		= 1<<3,
+	DMCE		= 1<<4,
+	DSTPCLK		= 1<<5,
+	ELINEAR		= 1<<6,
+	DSMC		= 1<<7,
+	DTLOCK		= 1<<8,
+	EDCTLB		= 1<<8,
+	EMMX		= 1<<9,
+	DPDC		= 1<<11,
+	EBRPRED		= 1<<12,
+	DIC		= 1<<13,
+	DDC		= 1<<14,
+	DNA		= 1<<15,
+	ERETSTK		= 1<<16,
+	E2MMX		= 1<<19,
+	EAMD3D		= 1<<20,
+};
+
 static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
 {
-	enum {
-		ECX8=1<<1,
-		EIERRINT=1<<2,
-		DPM=1<<3,
-		DMCE=1<<4,
-		DSTPCLK=1<<5,
-		ELINEAR=1<<6,
-		DSMC=1<<7,
-		DTLOCK=1<<8,
-		EDCTLB=1<<8,
-		EMMX=1<<9,
-		DPDC=1<<11,
-		EBRPRED=1<<12,
-		DIC=1<<13,
-		DDC=1<<14,
-		DNA=1<<15,
-		ERETSTK=1<<16,
-		E2MMX=1<<19,
-		EAMD3D=1<<20,
-	};
 
 	char *name;
-	u32  fcr_set=0;
-	u32  fcr_clr=0;
-	u32  lo,hi,newlo;
-	u32  aa,bb,cc,dd;
+	u32  fcr_set = 0;
+	u32  fcr_clr = 0;
+	u32  lo, hi, newlo;
+	u32  aa, bb, cc, dd;
 
-	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
-	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
-	clear_bit(0*32+31, c->x86_capability);
+	/*
+	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
+	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
+	 */
+	clear_cpu_cap(c, 0*32+31);
 
 	switch (c->x86) {
-
-		case 5:
-			switch(c->x86_model) {
-			case 4:
-				name="C6";
-				fcr_set=ECX8|DSMC|EDCTLB|EMMX|ERETSTK;
-				fcr_clr=DPDC;
-				printk(KERN_NOTICE "Disabling bugged TSC.\n");
-				clear_bit(X86_FEATURE_TSC, c->x86_capability);
+	case 5:
+		switch (c->x86_model) {
+		case 4:
+			name = "C6";
+			fcr_set = ECX8|DSMC|EDCTLB|EMMX|ERETSTK;
+			fcr_clr = DPDC;
+			printk(KERN_NOTICE "Disabling bugged TSC.\n");
+			clear_cpu_cap(c, X86_FEATURE_TSC);
 #ifdef CONFIG_X86_OOSTORE
-				centaur_create_optimal_mcr();
-				/* Enable
-					write combining on non-stack, non-string
-					write combining on string, all types
-					weak write ordering 
-					
-				   The C6 original lacks weak read order 
-				   
-				   Note 0x120 is write only on Winchip 1 */
-				   
-				wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0);
-#endif				
-				break;
-			case 8:
-				switch(c->x86_mask) {
-				default:
-					name="2";
-					break;
-				case 7 ... 9:
-					name="2A";
-					break;
-				case 10 ... 15:
-					name="2B";
-					break;
-				}
-				fcr_set=ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D;
-				fcr_clr=DPDC;
-#ifdef CONFIG_X86_OOSTORE
-				winchip2_unprotect_mcr();
-				winchip2_create_optimal_mcr();
-				rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
-				/* Enable
-					write combining on non-stack, non-string
-					write combining on string, all types
-					weak write ordering 
-				*/
-				lo|=31;				
-				wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
-				winchip2_protect_mcr();
+			centaur_create_optimal_mcr();
+			/*
+			 * Enable:
+			 *	write combining on non-stack, non-string
+			 *	write combining on string, all types
+			 *	weak write ordering
+			 *
+			 * The C6 original lacks weak read order
+			 *
+			 * Note 0x120 is write only on Winchip 1
+			 */
+			wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0);
 #endif
-				break;
-			case 9:
-				name="3";
-				fcr_set=ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D;
-				fcr_clr=DPDC;
-#ifdef CONFIG_X86_OOSTORE
-				winchip2_unprotect_mcr();
-				winchip2_create_optimal_mcr();
-				rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
-				/* Enable
-					write combining on non-stack, non-string
-					write combining on string, all types
-					weak write ordering 
-				*/
-				lo|=31;				
-				wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
-				winchip2_protect_mcr();
-#endif
-				break;
+			break;
+		case 8:
+			switch (c->x86_mask) {
 			default:
-				name="??";
+			name = "2";
+				break;
+			case 7 ... 9:
+				name = "2A";
+				break;
+			case 10 ... 15:
+				name = "2B";
+				break;
 			}
-
-			rdmsr(MSR_IDT_FCR1, lo, hi);
-			newlo=(lo|fcr_set) & (~fcr_clr);
-
-			if (newlo!=lo) {
-				printk(KERN_INFO "Centaur FCR was 0x%X now 0x%X\n", lo, newlo );
-				wrmsr(MSR_IDT_FCR1, newlo, hi );
-			} else {
-				printk(KERN_INFO "Centaur FCR is 0x%X\n",lo);
-			}
-			/* Emulate MTRRs using Centaur's MCR. */
-			set_bit(X86_FEATURE_CENTAUR_MCR, c->x86_capability);
-			/* Report CX8 */
-			set_bit(X86_FEATURE_CX8, c->x86_capability);
-			/* Set 3DNow! on Winchip 2 and above. */
-			if (c->x86_model >=8)
-				set_bit(X86_FEATURE_3DNOW, c->x86_capability);
-			/* See if we can find out some more. */
-			if ( cpuid_eax(0x80000000) >= 0x80000005 ) {
-				/* Yes, we can. */
-				cpuid(0x80000005,&aa,&bb,&cc,&dd);
-				/* Add L1 data and code cache sizes. */
-				c->x86_cache_size = (cc>>24)+(dd>>24);
-			}
-			sprintf( c->x86_model_id, "WinChip %s", name );
+			fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|
+				  E2MMX|EAMD3D;
+			fcr_clr = DPDC;
+#ifdef CONFIG_X86_OOSTORE
+			winchip2_unprotect_mcr();
+			winchip2_create_optimal_mcr();
+			rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
+			/*
+			 * Enable:
+			 *	write combining on non-stack, non-string
+			 *	write combining on string, all types
+			 *	weak write ordering
+			 */
+			lo |= 31;
+			wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
+			winchip2_protect_mcr();
+#endif
 			break;
-
-		case 6:
-			init_c3(c);
+		case 9:
+			name = "3";
+			fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|
+				  E2MMX|EAMD3D;
+			fcr_clr = DPDC;
+#ifdef CONFIG_X86_OOSTORE
+			winchip2_unprotect_mcr();
+			winchip2_create_optimal_mcr();
+			rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
+			/*
+			 * Enable:
+			 *	write combining on non-stack, non-string
+			 *	write combining on string, all types
+			 *	weak write ordering
+			 */
+			lo |= 31;
+			wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
+			winchip2_protect_mcr();
+#endif
 			break;
+		default:
+			name = "??";
+		}
+
+		rdmsr(MSR_IDT_FCR1, lo, hi);
+		newlo = (lo|fcr_set) & (~fcr_clr);
+
+		if (newlo != lo) {
+			printk(KERN_INFO "Centaur FCR was 0x%X now 0x%X\n",
+				lo, newlo);
+			wrmsr(MSR_IDT_FCR1, newlo, hi);
+		} else {
+			printk(KERN_INFO "Centaur FCR is 0x%X\n", lo);
+		}
+		/* Emulate MTRRs using Centaur's MCR. */
+		set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR);
+		/* Report CX8 */
+		set_cpu_cap(c, X86_FEATURE_CX8);
+		/* Set 3DNow! on Winchip 2 and above. */
+		if (c->x86_model >= 8)
+			set_cpu_cap(c, X86_FEATURE_3DNOW);
+		/* See if we can find out some more. */
+		if (cpuid_eax(0x80000000) >= 0x80000005) {
+			/* Yes, we can. */
+			cpuid(0x80000005, &aa, &bb, &cc, &dd);
+			/* Add L1 data and code cache sizes. */
+			c->x86_cache_size = (cc>>24)+(dd>>24);
+		}
+		sprintf(c->x86_model_id, "WinChip %s", name);
+		break;
+
+	case 6:
+		init_c3(c);
+		break;
 	}
 }
 
-static unsigned int __cpuinit centaur_size_cache(struct cpuinfo_x86 * c, unsigned int size)
+static unsigned int __cpuinit
+centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 {
 	/* VIA C3 CPUs (670-68F) need further shifting. */
 	if ((c->x86 == 6) && ((c->x86_model == 7) || (c->x86_model == 8)))
 		size >>= 8;
 
-	/* VIA also screwed up Nehemiah stepping 1, and made
-	   it return '65KB' instead of '64KB'
-	   - Note, it seems this may only be in engineering samples. */
-	if ((c->x86==6) && (c->x86_model==9) && (c->x86_mask==1) && (size==65))
-		size -=1;
+	/*
+	 * There's also an erratum in Nehemiah stepping 1, which
+	 * returns '65KB' instead of '64KB'
+	 *  - Note, it seems this may only be in engineering samples.
+	 */
+	if ((c->x86 == 6) && (c->x86_model == 9) &&
+				(c->x86_mask == 1) && (size == 65))
+		size -= 1;
 
 	return size;
 }
@@ -464,8 +466,4 @@
 	.c_size_cache	= centaur_size_cache,
 };
 
-int __init centaur_init_cpu(void)
-{
-	cpu_devs[X86_VENDOR_CENTAUR] = &centaur_cpu_dev;
-	return 0;
-}
+cpu_vendor_dev_register(X86_VENDOR_CENTAUR, &centaur_cpu_dev);
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index a38aafa..d999d78 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -62,9 +62,9 @@
 static int cachesize_override __cpuinitdata = -1;
 static int disable_x86_serial_nr __cpuinitdata = 1;
 
-struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
+struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
 
-static void __cpuinit default_init(struct cpuinfo_x86 * c)
+static void __cpuinit default_init(struct cpuinfo_x86 *c)
 {
 	/* Not much we can do here... */
 	/* Check if at least it has cpuid */
@@ -81,11 +81,11 @@
 	.c_init	= default_init,
 	.c_vendor = "Unknown",
 };
-static struct cpu_dev * this_cpu __cpuinitdata = &default_cpu;
+static struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
 
 static int __init cachesize_setup(char *str)
 {
-	get_option (&str, &cachesize_override);
+	get_option(&str, &cachesize_override);
 	return 1;
 }
 __setup("cachesize=", cachesize_setup);
@@ -107,12 +107,12 @@
 	/* Intel chips right-justify this string for some dumb reason;
 	   undo that brain damage */
 	p = q = &c->x86_model_id[0];
-	while ( *p == ' ' )
+	while (*p == ' ')
 	     p++;
-	if ( p != q ) {
-	     while ( *p )
+	if (p != q) {
+	     while (*p)
 		  *q++ = *p++;
-	     while ( q <= &c->x86_model_id[48] )
+	     while (q <= &c->x86_model_id[48])
 		  *q++ = '\0';	/* Zero-pad the rest */
 	}
 
@@ -130,7 +130,7 @@
 		cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
 		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
 			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
-		c->x86_cache_size=(ecx>>24)+(edx>>24);	
+		c->x86_cache_size = (ecx>>24)+(edx>>24);
 	}
 
 	if (n < 0x80000006)	/* Some chips just have a large L1. */
@@ -138,16 +138,16 @@
 
 	ecx = cpuid_ecx(0x80000006);
 	l2size = ecx >> 16;
-	
+
 	/* do processor-specific cache resizing */
 	if (this_cpu->c_size_cache)
-		l2size = this_cpu->c_size_cache(c,l2size);
+		l2size = this_cpu->c_size_cache(c, l2size);
 
 	/* Allow user to override all this if necessary. */
 	if (cachesize_override != -1)
 		l2size = cachesize_override;
 
-	if ( l2size == 0 )
+	if (l2size == 0)
 		return;		/* Again, no L2 cache is possible */
 
 	c->x86_cache_size = l2size;
@@ -156,16 +156,19 @@
 	       l2size, ecx & 0xFF);
 }
 
-/* Naming convention should be: <Name> [(<Codename>)] */
-/* This table only is used unless init_<vendor>() below doesn't set it; */
-/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
+/*
+ * Naming convention should be: <Name> [(<Codename>)]
+ * This table is only used if init_<vendor>() below doesn't set it;
+ * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used.
+ */
 
 /* Look up CPU names by table lookup. */
 static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
 {
 	struct cpu_model_info *info;
 
-	if ( c->x86_model >= 16 )
+	if (c->x86_model >= 16)
 		return NULL;	/* Range check */
 
 	if (!this_cpu)
@@ -190,9 +193,9 @@
 
 	for (i = 0; i < X86_VENDOR_NUM; i++) {
 		if (cpu_devs[i]) {
-			if (!strcmp(v,cpu_devs[i]->c_ident[0]) ||
-			    (cpu_devs[i]->c_ident[1] && 
-			     !strcmp(v,cpu_devs[i]->c_ident[1]))) {
+			if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
+			    (cpu_devs[i]->c_ident[1] &&
+			     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
 				c->x86_vendor = i;
 				if (!early)
 					this_cpu = cpu_devs[i];
@@ -210,7 +213,7 @@
 }
 
 
-static int __init x86_fxsr_setup(char * s)
+static int __init x86_fxsr_setup(char *s)
 {
 	setup_clear_cpu_cap(X86_FEATURE_FXSR);
 	setup_clear_cpu_cap(X86_FEATURE_XMM);
@@ -219,7 +222,7 @@
 __setup("nofxsr", x86_fxsr_setup);
 
 
-static int __init x86_sep_setup(char * s)
+static int __init x86_sep_setup(char *s)
 {
 	setup_clear_cpu_cap(X86_FEATURE_SEP);
 	return 1;
@@ -306,14 +309,30 @@
 
 	}
 
+	clear_cpu_cap(c, X86_FEATURE_PAT);
+
+	switch (c->x86_vendor) {
+	case X86_VENDOR_AMD:
+		if (c->x86 >= 0xf && c->x86 <= 0x11)
+			set_cpu_cap(c, X86_FEATURE_PAT);
+		break;
+	case X86_VENDOR_INTEL:
+		if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15))
+			set_cpu_cap(c, X86_FEATURE_PAT);
+		break;
+	}
+
 }
 
-/* Do minimum CPU detection early.
-   Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
-   The others are not touched to avoid unwanted side effects.
-
-   WARNING: this function is only called on the BP.  Don't add code here
-   that is supposed to run on all CPUs. */
+/*
+ * Do minimum CPU detection early.
+ * Fields really needed: vendor, cpuid_level, family, model, mask,
+ * cache alignment.
+ * The others are not touched to avoid unwanted side effects.
+ *
+ * WARNING: this function is only called on the BP.  Don't add code here
+ * that is supposed to run on all CPUs.
+ */
 static void __init early_cpu_detect(void)
 {
 	struct cpuinfo_x86 *c = &boot_cpu_data;
@@ -328,19 +347,14 @@
 
 	get_cpu_vendor(c, 1);
 
-	switch (c->x86_vendor) {
-	case X86_VENDOR_AMD:
-		early_init_amd(c);
-		break;
-	case X86_VENDOR_INTEL:
-		early_init_intel(c);
-		break;
-	}
+	if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
+	    cpu_devs[c->x86_vendor]->c_early_init)
+		cpu_devs[c->x86_vendor]->c_early_init(c);
 
 	early_get_cap(c);
 }
 
-static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
+static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
 {
 	u32 tfms, xlvl;
 	unsigned int ebx;
@@ -351,13 +365,12 @@
 		      (unsigned int *)&c->x86_vendor_id[0],
 		      (unsigned int *)&c->x86_vendor_id[8],
 		      (unsigned int *)&c->x86_vendor_id[4]);
-		
+
 		get_cpu_vendor(c, 0);
 		/* Initialize the standard set of capabilities */
 		/* Note that the vendor-specific code below might override */
-	
 		/* Intel-defined flags: level 0x00000001 */
-		if ( c->cpuid_level >= 0x00000001 ) {
+		if (c->cpuid_level >= 0x00000001) {
 			u32 capability, excap;
 			cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
 			c->x86_capability[0] = capability;
@@ -369,12 +382,14 @@
 			if (c->x86 >= 0x6)
 				c->x86_model += ((tfms >> 16) & 0xF) << 4;
 			c->x86_mask = tfms & 15;
+			c->initial_apicid = (ebx >> 24) & 0xFF;
 #ifdef CONFIG_X86_HT
-			c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
+			c->apicid = phys_pkg_id(c->initial_apicid, 0);
+			c->phys_proc_id = c->initial_apicid;
 #else
-			c->apicid = (ebx >> 24) & 0xFF;
+			c->apicid = c->initial_apicid;
 #endif
-			if (c->x86_capability[0] & (1<<19))
+			if (test_cpu_cap(c, X86_FEATURE_CLFLSH))
 				c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8;
 		} else {
 			/* Have CPUID level 0 only - unheard of */
@@ -383,33 +398,42 @@
 
 		/* AMD-defined flags: level 0x80000001 */
 		xlvl = cpuid_eax(0x80000000);
-		if ( (xlvl & 0xffff0000) == 0x80000000 ) {
-			if ( xlvl >= 0x80000001 ) {
+		if ((xlvl & 0xffff0000) == 0x80000000) {
+			if (xlvl >= 0x80000001) {
 				c->x86_capability[1] = cpuid_edx(0x80000001);
 				c->x86_capability[6] = cpuid_ecx(0x80000001);
 			}
-			if ( xlvl >= 0x80000004 )
+			if (xlvl >= 0x80000004)
 				get_model_name(c); /* Default name */
 		}
 
 		init_scattered_cpuid_features(c);
 	}
 
-#ifdef CONFIG_X86_HT
-	c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
-#endif
+	clear_cpu_cap(c, X86_FEATURE_PAT);
+
+	switch (c->x86_vendor) {
+	case X86_VENDOR_AMD:
+		if (c->x86 >= 0xf && c->x86 <= 0x11)
+			set_cpu_cap(c, X86_FEATURE_PAT);
+		break;
+	case X86_VENDOR_INTEL:
+		if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15))
+			set_cpu_cap(c, X86_FEATURE_PAT);
+		break;
+	}
 }
 
 static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 {
-	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
+	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
 		/* Disable processor serial number */
-		unsigned long lo,hi;
-		rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
+		unsigned long lo, hi;
+		rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
 		lo |= 0x200000;
-		wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
+		wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
 		printk(KERN_NOTICE "CPU serial number disabled.\n");
-		clear_bit(X86_FEATURE_PN, c->x86_capability);
+		clear_cpu_cap(c, X86_FEATURE_PN);
 
 		/* Disabling the serial number may affect the cpuid level */
 		c->cpuid_level = cpuid_eax(0);
@@ -444,9 +468,11 @@
 	memset(&c->x86_capability, 0, sizeof c->x86_capability);
 
 	if (!have_cpuid_p()) {
-		/* First of all, decide if this is a 486 or higher */
-		/* It's a 486 if we can modify the AC flag */
-		if ( flag_is_changeable_p(X86_EFLAGS_AC) )
+		/*
+		 * First of all, decide if this is a 486 or higher
+		 * It's a 486 if we can modify the AC flag
+		 */
+		if (flag_is_changeable_p(X86_EFLAGS_AC))
 			c->x86 = 4;
 		else
 			c->x86 = 3;
@@ -479,10 +505,10 @@
 	 */
 
 	/* If the model name is still unset, do table lookup. */
-	if ( !c->x86_model_id[0] ) {
+	if (!c->x86_model_id[0]) {
 		char *p;
 		p = table_lookup_model(c);
-		if ( p )
+		if (p)
 			strcpy(c->x86_model_id, p);
 		else
 			/* Last resort... */
@@ -496,9 +522,9 @@
 	 * common between the CPUs.  The first time this routine gets
 	 * executed, c == &boot_cpu_data.
 	 */
-	if ( c != &boot_cpu_data ) {
+	if (c != &boot_cpu_data) {
 		/* AND the already accumulated flags with these */
-		for ( i = 0 ; i < NCAPINTS ; i++ )
+		for (i = 0 ; i < NCAPINTS ; i++)
 			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
 	}
 
@@ -542,7 +568,7 @@
 
 	if (smp_num_siblings == 1) {
 		printk(KERN_INFO  "CPU: Hyper-Threading is disabled\n");
-	} else if (smp_num_siblings > 1 ) {
+	} else if (smp_num_siblings > 1) {
 
 		if (smp_num_siblings > NR_CPUS) {
 			printk(KERN_WARNING "CPU: Unsupported number of the "
@@ -552,7 +578,7 @@
 		}
 
 		index_msb = get_count_order(smp_num_siblings);
-		c->phys_proc_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
+		c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb);
 
 		printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
 		       c->phys_proc_id);
@@ -563,7 +589,7 @@
 
 		core_bits = get_count_order(c->x86_max_cores);
 
-		c->cpu_core_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
+		c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
 					       ((1 << core_bits) - 1);
 
 		if (c->x86_max_cores > 1)
@@ -597,7 +623,7 @@
 	else
 		printk("%s", c->x86_model_id);
 
-	if (c->x86_mask || c->cpuid_level >= 0) 
+	if (c->x86_mask || c->cpuid_level >= 0)
 		printk(" stepping %02x\n", c->x86_mask);
 	else
 		printk("\n");
@@ -616,23 +642,15 @@
 
 cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
 
-/* This is hacky. :)
- * We're emulating future behavior.
- * In the future, the cpu-specific init functions will be called implicitly
- * via the magic of initcalls.
- * They will insert themselves into the cpu_devs structure.
- * Then, when cpu_init() is called, we can just iterate over that array.
- */
 void __init early_cpu_init(void)
 {
-	intel_cpu_init();
-	cyrix_init_cpu();
-	nsc_init_cpu();
-	amd_init_cpu();
-	centaur_init_cpu();
-	transmeta_init_cpu();
-	nexgen_init_cpu();
-	umc_init_cpu();
+	struct cpu_vendor_dev *cvdev;
+
+	for (cvdev = __x86cpuvendor_start; cvdev < __x86cpuvendor_end; cvdev++)
+		cpu_devs[cvdev->vendor] = cvdev->cpu_dev;
+
 	early_cpu_detect();
 }
 
@@ -666,7 +684,7 @@
 {
 	int cpu = smp_processor_id();
 	struct task_struct *curr = current;
-	struct tss_struct * t = &per_cpu(init_tss, cpu);
+	struct tss_struct *t = &per_cpu(init_tss, cpu);
 	struct thread_struct *thread = &curr->thread;
 
 	if (cpu_test_and_set(cpu, cpu_initialized)) {
@@ -692,7 +710,7 @@
 	enter_lazy_tlb(&init_mm, curr);
 
 	load_sp0(t, thread);
-	set_tss_desc(cpu,t);
+	set_tss_desc(cpu, t);
 	load_TR_desc();
 	load_LDT(&init_mm.context);
 
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index e0b38c3..783691b 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -14,6 +14,7 @@
 
 	struct		cpu_model_info c_models[4];
 
+	void		(*c_early_init)(struct cpuinfo_x86 *c);
 	void		(*c_init)(struct cpuinfo_x86 * c);
 	void		(*c_identify)(struct cpuinfo_x86 * c);
 	unsigned int	(*c_size_cache)(struct cpuinfo_x86 * c, unsigned int size);
@@ -21,18 +22,17 @@
 
 extern struct cpu_dev * cpu_devs [X86_VENDOR_NUM];
 
+struct cpu_vendor_dev {
+	int vendor;
+	struct cpu_dev *cpu_dev;
+};
+
+#define cpu_vendor_dev_register(cpu_vendor_id, cpu_dev) \
+	static struct cpu_vendor_dev __cpu_vendor_dev_##cpu_vendor_id __used \
+	__attribute__((__section__(".x86cpuvendor.init"))) = \
+	{ cpu_vendor_id, cpu_dev }
+
+extern struct cpu_vendor_dev __x86cpuvendor_start[], __x86cpuvendor_end[];
+
 extern int get_model_name(struct cpuinfo_x86 *c);
 extern void display_cacheinfo(struct cpuinfo_x86 *c);
-
-extern void early_init_intel(struct cpuinfo_x86 *c);
-extern void early_init_amd(struct cpuinfo_x86 *c);
-
-/* Specific CPU type init functions */
-int intel_cpu_init(void);
-int amd_init_cpu(void);
-int cyrix_init_cpu(void);
-int nsc_init_cpu(void);
-int centaur_init_cpu(void);
-int transmeta_init_cpu(void);
-int nexgen_init_cpu(void);
-int umc_init_cpu(void);
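The cpu.h hunk above replaces the eight per-vendor init prototypes with a link-time table: each vendor file plants a struct cpu_vendor_dev in the .x86cpuvendor.init section via cpu_vendor_dev_register(), and early_cpu_init() just walks the __x86cpuvendor_start..__x86cpuvendor_end window the linker script exports. A minimal user-space sketch of the same pattern (GNU ld on ELF assumed, where __start_<sec>/__stop_<sec> symbols are synthesized for any section whose name is a valid C identifier; the "cpuvendor" section and names here are illustrative):

#include <stdio.h>

struct cpu_vendor_dev {
	int vendor;
	const char *name;	/* stands in for struct cpu_dev * */
};

/* Each "driver" drops one record into the dedicated section. */
#define cpu_vendor_register(id, n) \
	static struct cpu_vendor_dev __dev_##id \
	__attribute__((used, section("cpuvendor"))) = { id, n }

cpu_vendor_register(1, "Intel");
cpu_vendor_register(2, "Cyrix");

/* ld synthesizes these for sections named like C identifiers. */
extern struct cpu_vendor_dev __start_cpuvendor[], __stop_cpuvendor[];

int main(void)
{
	struct cpu_vendor_dev *p;

	for (p = __start_cpuvendor; p < __stop_cpuvendor; p++)
		printf("vendor %d -> %s\n", p->vendor, p->name);
	return 0;
}

Adding a vendor then means adding one object file; nothing has to edit a central init list, which is exactly what the new early_cpu_init() loop in common.c exploits.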
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index 7139b02..3fd7a67 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -19,7 +19,7 @@
 {
 	unsigned char ccr2, ccr3;
 	unsigned long flags;
-	
+
 	/* we test for DEVID by checking whether CCR3 is writable */
 	local_irq_save(flags);
 	ccr3 = getCx86(CX86_CCR3);
@@ -37,8 +37,7 @@
 			setCx86(CX86_CCR2, ccr2);
 			*dir0 = 0xfe;
 		}
-	}
-	else {
+	} else {
 		setCx86(CX86_CCR3, ccr3);  /* restore CCR3 */
 
 		/* read DIR0 and DIR1 CPU registers */
@@ -86,7 +85,7 @@
 static void __cpuinit check_cx686_slop(struct cpuinfo_x86 *c)
 {
 	unsigned long flags;
-	
+
 	if (Cx86_dir0_msb == 3) {
 		unsigned char ccr3, ccr5;
 
@@ -132,7 +131,7 @@
 	/* set 'Not Write-through' */
 	write_cr0(read_cr0() | X86_CR0_NW);
 	/* CCR2 bit 2: lock NW bit and set WT1 */
-	setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14 );
+	setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14);
 }
 
 static void __cpuinit set_cx86_inc(void)
@@ -148,7 +147,7 @@
 	setCx86(CX86_PCR1, getCx86(CX86_PCR1) | 0x02);
 	/* PCR0 -- Performance Control */
 	/* Incrementor Margin 10 */
-	setCx86(CX86_PCR0, getCx86(CX86_PCR0) | 0x04); 
+	setCx86(CX86_PCR0, getCx86(CX86_PCR0) | 0x04);
 	setCx86(CX86_CCR3, ccr3);	/* disable MAPEN */
 }
 
@@ -167,16 +166,16 @@
 
 	ccr3 = getCx86(CX86_CCR3);
 	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* enable MAPEN */
-	
+
 
 	/* FPU fast, DTE cache, Mem bypass */
 	setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x38);
 	setCx86(CX86_CCR3, ccr3);			/* disable MAPEN */
-	
+
 	set_cx86_memwb();
-	set_cx86_reorder();	
+	set_cx86_reorder();
 	set_cx86_inc();
-	
+
 	local_irq_restore(flags);
 }
 
@@ -187,14 +186,16 @@
 	char *buf = c->x86_model_id;
 	const char *p = NULL;
 
-	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
-	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
-	clear_bit(0*32+31, c->x86_capability);
+	/*
+	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
+	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
+	 */
+	clear_cpu_cap(c, 0*32+31);
 
 	/* Cyrix used bit 24 in extended (AMD) CPUID for Cyrix MMX extensions */
-	if ( test_bit(1*32+24, c->x86_capability) ) {
-		clear_bit(1*32+24, c->x86_capability);
-		set_bit(X86_FEATURE_CXMMX, c->x86_capability);
+	if (test_cpu_cap(c, 1*32+24)) {
+		clear_cpu_cap(c, 1*32+24);
+		set_cpu_cap(c, X86_FEATURE_CXMMX);
 	}
 
 	do_cyrix_devid(&dir0, &dir1);
@@ -213,7 +214,7 @@
 	 * the model, multiplier and stepping.  Black magic included,
 	 * to make the silicon step/rev numbers match the printed ones.
 	 */
-	 
+
 	switch (dir0_msn) {
 		unsigned char tmp;
 
@@ -241,7 +242,7 @@
 		} else             /* 686 */
 			p = Cx86_cb+1;
 		/* Emulate MTRRs using Cyrix's ARRs. */
-		set_bit(X86_FEATURE_CYRIX_ARR, c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
 		/* 6x86's contain this bug */
 		c->coma_bug = 1;
 		break;
@@ -250,17 +251,18 @@
 #ifdef CONFIG_PCI
 	{
 		u32 vendor, device;
-		/* It isn't really a PCI quirk directly, but the cure is the
-		   same. The MediaGX has deep magic SMM stuff that handles the
-		   SB emulation. It throws away the fifo on disable_dma() which
-		   is wrong and ruins the audio. 
-
-		   Bug2: VSA1 has a wrap bug so that using maximum sized DMA 
-		   causes bad things. According to NatSemi VSA2 has another
-		   bug to do with 'hlt'. I've not seen any boards using VSA2
-		   and X doesn't seem to support it either so who cares 8).
-		   VSA1 we work around however.
-		*/
+		/*
+		 * It isn't really a PCI quirk directly, but the cure is the
+		 * same. The MediaGX has deep magic SMM stuff that handles the
+		 * SB emulation. It throws away the fifo on disable_dma() which
+		 * is wrong and ruins the audio.
+		 *
+		 * Bug2: VSA1 has a wrap bug so that using maximum sized DMA
+		 * causes bad things. According to NatSemi VSA2 has another
+		 * bug to do with 'hlt'. I've not seen any boards using VSA2
+		 * and X doesn't seem to support it either so who cares 8).
+		 * VSA1 we work around however.
+		 */
 
 		printk(KERN_INFO "Working around Cyrix MediaGX virtual DMA bugs.\n");
 		isa_dma_bridge_buggy = 2;
@@ -273,55 +275,51 @@
 
 		/*
 		 *  The 5510/5520 companion chips have a funky PIT.
-		 */  
+		 */
 		if (vendor == PCI_VENDOR_ID_CYRIX &&
 	 (device == PCI_DEVICE_ID_CYRIX_5510 || device == PCI_DEVICE_ID_CYRIX_5520))
 			mark_tsc_unstable("cyrix 5510/5520 detected");
 	}
 #endif
-		c->x86_cache_size=16;	/* Yep 16K integrated cache thats it */
+		c->x86_cache_size = 16;	/* Yep 16K integrated cache, that's it */
 
 		/* GXm supports extended cpuid levels 'ala' AMD */
 		if (c->cpuid_level == 2) {
 			/* Enable cxMMX extensions (GX1 Datasheet 54) */
 			setCx86(CX86_CCR7, getCx86(CX86_CCR7) | 1);
-			
+
 			/*
 			 * GXm : 0x30 ... 0x5f GXm  datasheet 51
 			 * GXlv: 0x6x          GXlv datasheet 54
 			 *  ?  : 0x7x
 			 * GX1 : 0x8x          GX1  datasheet 56
 			 */
-			if((0x30 <= dir1 && dir1 <= 0x6f) || (0x80 <=dir1 && dir1 <= 0x8f))
+			if ((0x30 <= dir1 && dir1 <= 0x6f) || (0x80 <= dir1 && dir1 <= 0x8f))
 				geode_configure();
 			get_model_name(c);  /* get CPU marketing name */
 			return;
-		}
-		else {  /* MediaGX */
+		} else { /* MediaGX */
 			Cx86_cb[2] = (dir0_lsn & 1) ? '3' : '4';
 			p = Cx86_cb+2;
 			c->x86_model = (dir1 & 0x20) ? 1 : 2;
 		}
 		break;
 
-        case 5: /* 6x86MX/M II */
-		if (dir1 > 7)
-		{
+	case 5: /* 6x86MX/M II */
+		if (dir1 > 7) {
 			dir0_msn++;  /* M II */
 			/* Enable MMX extensions (App note 108) */
 			setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1);
-		}
-		else
-		{
+		} else {
 			c->coma_bug = 1;      /* 6x86MX, it has the bug. */
 		}
 		tmp = (!(dir0_lsn & 7) || dir0_lsn & 1) ? 2 : 0;
 		Cx86_cb[tmp] = cyrix_model_mult2[dir0_lsn & 7];
 		p = Cx86_cb+tmp;
-        	if (((dir1 & 0x0f) > 4) || ((dir1 & 0xf0) == 0x20))
+		if (((dir1 & 0x0f) > 4) || ((dir1 & 0xf0) == 0x20))
 			(c->x86_model)++;
 		/* Emulate MTRRs using Cyrix's ARRs. */
-		set_bit(X86_FEATURE_CYRIX_ARR, c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
 		break;
 
 	case 0xf:  /* Cyrix 486 without DEVID registers */
@@ -343,7 +341,8 @@
 		break;
 	}
 	strcpy(buf, Cx86_model[dir0_msn & 7]);
-	if (p) strcat(buf, p);
+	if (p)
+		strcat(buf, p);
 	return;
 }
 
@@ -352,7 +351,8 @@
  */
 static void __cpuinit init_nsc(struct cpuinfo_x86 *c)
 {
-	/* There may be GX1 processors in the wild that are branded
+	/*
+	 * There may be GX1 processors in the wild that are branded
 	 * NSC and not Cyrix.
 	 *
 	 * This function only handles the GX processor, and kicks every
@@ -377,7 +377,7 @@
  * by the fact that they preserve the flags across the division of 5/2.
  * PII and PPro exhibit this behavior too, but they have cpuid available.
  */
- 
+
 /*
  * Perform the Cyrix 5/2 test. A Cyrix won't change
  * the flags, while other 486 chips will.
@@ -398,27 +398,26 @@
 	return (unsigned char) (test >> 8) == 0x02;
 }
 
-static void __cpuinit cyrix_identify(struct cpuinfo_x86 * c)
+static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c)
 {
 	/* Detect Cyrix with disabled CPUID */
-	if ( c->x86 == 4 && test_cyrix_52div() ) {
+	if (c->x86 == 4 && test_cyrix_52div()) {
 		unsigned char dir0, dir1;
-		
+
 		strcpy(c->x86_vendor_id, "CyrixInstead");
-	        c->x86_vendor = X86_VENDOR_CYRIX;
-	        
-	        /* Actually enable cpuid on the older cyrix */
-	    
-	    	/* Retrieve CPU revisions */
-	    	
+		c->x86_vendor = X86_VENDOR_CYRIX;
+
+		/* Actually enable cpuid on the older cyrix */
+
+		/* Retrieve CPU revisions */
+
 		do_cyrix_devid(&dir0, &dir1);
 
-		dir0>>=4;		
-		
+		dir0 >>= 4;
+
 		/* Check it is an affected model */
-		
-   	        if (dir0 == 5 || dir0 == 3)
-   	        {
+
+		if (dir0 == 5 || dir0 == 3) {
 			unsigned char ccr3;
 			unsigned long flags;
 			printk(KERN_INFO "Enabling CPUID on Cyrix processor.\n");
@@ -434,26 +433,17 @@
 
 static struct cpu_dev cyrix_cpu_dev __cpuinitdata = {
 	.c_vendor	= "Cyrix",
-	.c_ident 	= { "CyrixInstead" },
+	.c_ident	= { "CyrixInstead" },
 	.c_init		= init_cyrix,
 	.c_identify	= cyrix_identify,
 };
 
-int __init cyrix_init_cpu(void)
-{
-	cpu_devs[X86_VENDOR_CYRIX] = &cyrix_cpu_dev;
-	return 0;
-}
+cpu_vendor_dev_register(X86_VENDOR_CYRIX, &cyrix_cpu_dev);
 
 static struct cpu_dev nsc_cpu_dev __cpuinitdata = {
 	.c_vendor	= "NSC",
-	.c_ident 	= { "Geode by NSC" },
+	.c_ident	= { "Geode by NSC" },
 	.c_init		= init_nsc,
 };
 
-int __init nsc_init_cpu(void)
-{
-	cpu_devs[X86_VENDOR_NSC] = &nsc_cpu_dev;
-	return 0;
-}
-
+cpu_vendor_dev_register(X86_VENDOR_NSC, &nsc_cpu_dev);
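This file (and intel.c further down) also converts open-coded set_bit()/clear_bit()/test_bit() on c->x86_capability to the set_cpu_cap()/clear_cpu_cap()/test_cpu_cap() wrappers, keeping the word*32+bit numbering for feature bits. A user-space sketch of that bitmap convention (helper and struct names are illustrative, not the kernel's):

#include <stdio.h>

#define NCAPINTS 8

struct cpuinfo {
	unsigned int x86_capability[NCAPINTS];
};

static void set_cap(struct cpuinfo *c, int f)
{ c->x86_capability[f / 32] |= 1u << (f % 32); }

static void clear_cap(struct cpuinfo *c, int f)
{ c->x86_capability[f / 32] &= ~(1u << (f % 32)); }

static int test_cap(const struct cpuinfo *c, int f)
{ return (c->x86_capability[f / 32] >> (f % 32)) & 1; }

int main(void)
{
	struct cpuinfo c = { { 0 } };

	set_cap(&c, 1*32 + 24);		/* vendor bit, as in cyrix.c */
	if (test_cap(&c, 1*32 + 24)) {
		clear_cap(&c, 1*32 + 24);
		set_cap(&c, 3*32 + 2);	/* remap to a Linux-defined bit */
	}
	printf("remapped bit set: %d\n", test_cap(&c, 3*32 + 2));
	return 0;
}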
diff --git a/arch/x86/kernel/cpu/feature_names.c b/arch/x86/kernel/cpu/feature_names.c
index ee975ac..e43ad4a 100644
--- a/arch/x86/kernel/cpu/feature_names.c
+++ b/arch/x86/kernel/cpu/feature_names.c
@@ -4,7 +4,7 @@
  * This file must not contain any executable code.
  */
 
-#include "asm/cpufeature.h"
+#include <asm/cpufeature.h>
 
 /*
  * These flag bits must match the definitions in <asm/cpufeature.h>.
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index fae31ce..fe9224c 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -30,7 +30,7 @@
 struct movsl_mask movsl_mask __read_mostly;
 #endif
 
-void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
+static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 {
 	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
 	if (c->x86 == 15 && c->x86_cache_alignment == 64)
@@ -45,7 +45,7 @@
  *
  *	This is called before we do cpu ident work
  */
- 
+
 int __cpuinit ppro_with_ram_bug(void)
 {
 	/* Uses data from early_cpu_detect now */
@@ -58,7 +58,7 @@
 	}
 	return 0;
 }
-	
+
 
 /*
  * P4 Xeon errata 037 workaround.
@@ -69,7 +69,7 @@
 	unsigned long lo, hi;
 
 	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
-		rdmsr (MSR_IA32_MISC_ENABLE, lo, hi);
+		rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
 		if ((lo & (1<<9)) == 0) {
 			printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
 			printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
@@ -127,10 +127,10 @@
 	 */
 	c->f00f_bug = 0;
 	if (!paravirt_enabled() && c->x86 == 5) {
-		static int f00f_workaround_enabled = 0;
+		static int f00f_workaround_enabled;
 
 		c->f00f_bug = 1;
-		if ( !f00f_workaround_enabled ) {
+		if (!f00f_workaround_enabled) {
 			trap_init_f00f_bug();
 			printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
 			f00f_workaround_enabled = 1;
@@ -139,20 +139,22 @@
 #endif
 
 	l2 = init_intel_cacheinfo(c);
-	if (c->cpuid_level > 9 ) {
+	if (c->cpuid_level > 9) {
 		unsigned eax = cpuid_eax(10);
 		/* Check for version and the number of counters */
 		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
-			set_bit(X86_FEATURE_ARCH_PERFMON, c->x86_capability);
+			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
 	}
 
 	/* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until model 3 mask 3 */
 	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
-		clear_bit(X86_FEATURE_SEP, c->x86_capability);
+		clear_cpu_cap(c, X86_FEATURE_SEP);
 
-	/* Names for the Pentium II/Celeron processors 
-	   detectable only by also checking the cache size.
-	   Dixon is NOT a Celeron. */
+	/*
+	 * Names for the Pentium II/Celeron processors
+	 * detectable only by also checking the cache size.
+	 * Dixon is NOT a Celeron.
+	 */
 	if (c->x86 == 6) {
 		switch (c->x86_model) {
 		case 5:
@@ -163,14 +165,14 @@
 					p = "Mobile Pentium II (Dixon)";
 			}
 			break;
-			
+
 		case 6:
 			if (l2 == 128)
 				p = "Celeron (Mendocino)";
 			else if (c->x86_mask == 0 || c->x86_mask == 5)
 				p = "Celeron-A";
 			break;
-			
+
 		case 8:
 			if (l2 == 128)
 				p = "Celeron (Coppermine)";
@@ -178,9 +180,9 @@
 		}
 	}
 
-	if ( p )
+	if (p)
 		strcpy(c->x86_model_id, p);
-	
+
 	c->x86_max_cores = num_cpu_cores(c);
 
 	detect_ht(c);
@@ -207,28 +209,29 @@
 #endif
 
 	if (cpu_has_xmm2)
-		set_bit(X86_FEATURE_LFENCE_RDTSC, c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
 	if (c->x86 == 15) {
-		set_bit(X86_FEATURE_P4, c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_P4);
 	}
-	if (c->x86 == 6) 
-		set_bit(X86_FEATURE_P3, c->x86_capability);
+	if (c->x86 == 6)
+		set_cpu_cap(c, X86_FEATURE_P3);
 	if (cpu_has_ds) {
 		unsigned int l1;
 		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
 		if (!(l1 & (1<<11)))
-			set_bit(X86_FEATURE_BTS, c->x86_capability);
+			set_cpu_cap(c, X86_FEATURE_BTS);
 		if (!(l1 & (1<<12)))
-			set_bit(X86_FEATURE_PEBS, c->x86_capability);
+			set_cpu_cap(c, X86_FEATURE_PEBS);
 	}
 
 	if (cpu_has_bts)
 		ds_init_intel(c);
 }
 
-static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 * c, unsigned int size)
+static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 {
-	/* Intel PIII Tualatin. This comes in two flavours.
+	/*
+	 * Intel PIII Tualatin. This comes in two flavours.
 	 * One has 256kb of cache, the other 512. We have no way
 	 * to determine which, so we use a boottime override
 	 * for the 512kb model, and assume 256 otherwise.
@@ -240,42 +243,42 @@
 
 static struct cpu_dev intel_cpu_dev __cpuinitdata = {
 	.c_vendor	= "Intel",
-	.c_ident 	= { "GenuineIntel" },
+	.c_ident	= { "GenuineIntel" },
 	.c_models = {
-		{ .vendor = X86_VENDOR_INTEL, .family = 4, .model_names = 
-		  { 
-			  [0] = "486 DX-25/33", 
-			  [1] = "486 DX-50", 
-			  [2] = "486 SX", 
-			  [3] = "486 DX/2", 
-			  [4] = "486 SL", 
-			  [5] = "486 SX/2", 
-			  [7] = "486 DX/2-WB", 
-			  [8] = "486 DX/4", 
+		{ .vendor = X86_VENDOR_INTEL, .family = 4, .model_names =
+		  {
+			  [0] = "486 DX-25/33",
+			  [1] = "486 DX-50",
+			  [2] = "486 SX",
+			  [3] = "486 DX/2",
+			  [4] = "486 SL",
+			  [5] = "486 SX/2",
+			  [7] = "486 DX/2-WB",
+			  [8] = "486 DX/4",
 			  [9] = "486 DX/4-WB"
 		  }
 		},
 		{ .vendor = X86_VENDOR_INTEL, .family = 5, .model_names =
-		  { 
-			  [0] = "Pentium 60/66 A-step", 
-			  [1] = "Pentium 60/66", 
+		  {
+			  [0] = "Pentium 60/66 A-step",
+			  [1] = "Pentium 60/66",
 			  [2] = "Pentium 75 - 200",
-			  [3] = "OverDrive PODP5V83", 
+			  [3] = "OverDrive PODP5V83",
 			  [4] = "Pentium MMX",
-			  [7] = "Mobile Pentium 75 - 200", 
+			  [7] = "Mobile Pentium 75 - 200",
 			  [8] = "Mobile Pentium MMX"
 		  }
 		},
 		{ .vendor = X86_VENDOR_INTEL, .family = 6, .model_names =
-		  { 
+		  {
 			  [0] = "Pentium Pro A-step",
-			  [1] = "Pentium Pro", 
-			  [3] = "Pentium II (Klamath)", 
-			  [4] = "Pentium II (Deschutes)", 
-			  [5] = "Pentium II (Deschutes)", 
+			  [1] = "Pentium Pro",
+			  [3] = "Pentium II (Klamath)",
+			  [4] = "Pentium II (Deschutes)",
+			  [5] = "Pentium II (Deschutes)",
 			  [6] = "Mobile Pentium II",
-			  [7] = "Pentium III (Katmai)", 
-			  [8] = "Pentium III (Coppermine)", 
+			  [7] = "Pentium III (Katmai)",
+			  [8] = "Pentium III (Coppermine)",
 			  [10] = "Pentium III (Cascades)",
 			  [11] = "Pentium III (Tualatin)",
 		  }
@@ -290,15 +293,12 @@
 		  }
 		},
 	},
+	.c_early_init   = early_init_intel,
 	.c_init		= init_intel,
 	.c_size_cache	= intel_size_cache,
 };
 
-__init int intel_cpu_init(void)
-{
-	cpu_devs[X86_VENDOR_INTEL] = &intel_cpu_dev;
-	return 0;
-}
+cpu_vendor_dev_register(X86_VENDOR_INTEL, &intel_cpu_dev);
 
 #ifndef CONFIG_X86_CMPXCHG
 unsigned long cmpxchg_386_u8(volatile void *ptr, u8 old, u8 new)
@@ -364,5 +364,5 @@
 EXPORT_SYMBOL(cmpxchg_486_u64);
 #endif
 
-// arch_initcall(intel_cpu_init);
+/* arch_initcall(intel_cpu_init); */
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce_32.c b/arch/x86/kernel/cpu/mcheck/mce_32.c
index a5182dc..774d87c 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_32.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_32.c
@@ -10,20 +10,20 @@
 #include <linux/smp.h>
 #include <linux/thread_info.h>
 
-#include <asm/processor.h> 
+#include <asm/processor.h>
 #include <asm/system.h>
 #include <asm/mce.h>
 
 #include "mce.h"
 
-int mce_disabled = 0;
+int mce_disabled;
 int nr_mce_banks;
 
 EXPORT_SYMBOL_GPL(nr_mce_banks);	/* non-fatal.o */
 
 /* Handle unconfigured int18 (should never happen) */
-static void unexpected_machine_check(struct pt_regs * regs, long error_code)
-{	
+static void unexpected_machine_check(struct pt_regs *regs, long error_code)
+{
 	printk(KERN_ERR "CPU#%d: Unexpected int18 (Machine Check).\n", smp_processor_id());
 }
 
@@ -33,30 +33,30 @@
 /* This has to be run for each processor */
 void mcheck_init(struct cpuinfo_x86 *c)
 {
-	if (mce_disabled==1)
+	if (mce_disabled == 1)
 		return;
 
 	switch (c->x86_vendor) {
-		case X86_VENDOR_AMD:
-			amd_mcheck_init(c);
-			break;
+	case X86_VENDOR_AMD:
+		amd_mcheck_init(c);
+		break;
 
-		case X86_VENDOR_INTEL:
-			if (c->x86==5)
-				intel_p5_mcheck_init(c);
-			if (c->x86==6)
-				intel_p6_mcheck_init(c);
-			if (c->x86==15)
-				intel_p4_mcheck_init(c);
-			break;
+	case X86_VENDOR_INTEL:
+		if (c->x86 == 5)
+			intel_p5_mcheck_init(c);
+		if (c->x86 == 6)
+			intel_p6_mcheck_init(c);
+		if (c->x86 == 15)
+			intel_p4_mcheck_init(c);
+		break;
 
-		case X86_VENDOR_CENTAUR:
-			if (c->x86==5)
-				winchip_mcheck_init(c);
-			break;
+	case X86_VENDOR_CENTAUR:
+		if (c->x86 == 5)
+			winchip_mcheck_init(c);
+		break;
 
-		default:
-			break;
+	default:
+		break;
 	}
 }
 
diff --git a/arch/x86/kernel/cpu/mcheck/non-fatal.c b/arch/x86/kernel/cpu/mcheck/non-fatal.c
index bf39409..00ccb6c 100644
--- a/arch/x86/kernel/cpu/mcheck/non-fatal.c
+++ b/arch/x86/kernel/cpu/mcheck/non-fatal.c
@@ -16,7 +16,7 @@
 #include <linux/smp.h>
 #include <linux/module.h>
 
-#include <asm/processor.h> 
+#include <asm/processor.h>
 #include <asm/system.h>
 #include <asm/msr.h>
 
@@ -26,23 +26,26 @@
 
 #define MCE_RATE	15*HZ	/* timer rate is 15s */
 
-static void mce_checkregs (void *info)
+static void mce_checkregs(void *info)
 {
 	u32 low, high;
 	int i;
 
-	for (i=firstbank; i<nr_mce_banks; i++) {
-		rdmsr (MSR_IA32_MC0_STATUS+i*4, low, high);
+	for (i = firstbank; i < nr_mce_banks; i++) {
+		rdmsr(MSR_IA32_MC0_STATUS+i*4, low, high);
 
 		if (high & (1<<31)) {
 			printk(KERN_INFO "MCE: The hardware reports a non "
 				"fatal, correctable incident occurred on "
 				"CPU %d.\n",
 				smp_processor_id());
-			printk (KERN_INFO "Bank %d: %08x%08x\n", i, high, low);
+			printk(KERN_INFO "Bank %d: %08x%08x\n", i, high, low);
 
-			/* Scrub the error so we don't pick it up in MCE_RATE seconds time. */
-			wrmsr (MSR_IA32_MC0_STATUS+i*4, 0UL, 0UL);
+			/*
+			 * Scrub the error so we don't pick it up in MCE_RATE
+			 * seconds time.
+			 */
+			wrmsr(MSR_IA32_MC0_STATUS+i*4, 0UL, 0UL);
 
 			/* Serialize */
 			wmb();
@@ -55,10 +58,10 @@
 static DECLARE_DELAYED_WORK(mce_work, mce_work_fn);
 
 static void mce_work_fn(struct work_struct *work)
-{ 
+{
 	on_each_cpu(mce_checkregs, NULL, 1, 1);
 	schedule_delayed_work(&mce_work, round_jiffies_relative(MCE_RATE));
-} 
+}
 
 static int __init init_nonfatal_mce_checker(void)
 {
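init_nonfatal_mce_checker() (truncated above) arms mce_work once, and mce_work_fn() then re-arms itself every MCE_RATE jiffies, giving a cheap periodic poll without a dedicated timer. A minimal sketch of that self-rearming delayed-work pattern (hypothetical "demo" module; workqueue API as of ~2.6.26 assumed):

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define DEMO_RATE	(15*HZ)		/* same period as MCE_RATE */

static void demo_poll(struct work_struct *work);
static DECLARE_DELAYED_WORK(demo_work, demo_poll);

static void demo_poll(struct work_struct *work)
{
	printk(KERN_INFO "demo: poll tick\n"); /* stand-in for the MSR scan */
	schedule_delayed_work(&demo_work, round_jiffies_relative(DEMO_RATE));
}

static int __init demo_init(void)
{
	schedule_delayed_work(&demo_work, round_jiffies_relative(DEMO_RATE));
	return 0;
}

static void __exit demo_exit(void)
{
	/* copes with the work item re-arming itself while we cancel */
	cancel_rearming_delayed_work(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");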
diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
index a18310a..bfa5817 100644
--- a/arch/x86/kernel/cpu/mcheck/p5.c
+++ b/arch/x86/kernel/cpu/mcheck/p5.c
@@ -9,20 +9,20 @@
 #include <linux/interrupt.h>
 #include <linux/smp.h>
 
-#include <asm/processor.h> 
+#include <asm/processor.h>
 #include <asm/system.h>
 #include <asm/msr.h>
 
 #include "mce.h"
 
 /* Machine check handler for Pentium class Intel */
-static void pentium_machine_check(struct pt_regs * regs, long error_code)
+static void pentium_machine_check(struct pt_regs *regs, long error_code)
 {
 	u32 loaddr, hi, lotype;
 	rdmsr(MSR_IA32_P5_MC_ADDR, loaddr, hi);
 	rdmsr(MSR_IA32_P5_MC_TYPE, lotype, hi);
 	printk(KERN_EMERG "CPU#%d: Machine Check Exception:  0x%8X (type 0x%8X).\n", smp_processor_id(), loaddr, lotype);
-	if(lotype&(1<<5))
+	if (lotype&(1<<5))
 		printk(KERN_EMERG "CPU#%d: Possible thermal failure (CPU on fire ?).\n", smp_processor_id());
 	add_taint(TAINT_MACHINE_CHECK);
 }
@@ -31,13 +31,13 @@
 void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
 {
 	u32 l, h;
-	
+
 	/*Check for MCE support */
-	if( !cpu_has(c, X86_FEATURE_MCE) )
-		return;	
+	if (!cpu_has(c, X86_FEATURE_MCE))
+		return;
 
 	/* Default P5 to off as its often misconnected */
-	if(mce_disabled != -1)
+	if (mce_disabled != -1)
 		return;
 	machine_check_vector = pentium_machine_check;
 	wmb();
@@ -47,7 +47,7 @@
 	rdmsr(MSR_IA32_P5_MC_TYPE, l, h);
 	printk(KERN_INFO "Intel old style machine check architecture supported.\n");
 
- 	/* Enable MCE */
+	/* Enable MCE */
 	set_in_cr4(X86_CR4_MCE);
 	printk(KERN_INFO "Intel old style machine check reporting enabled on CPU#%d.\n", smp_processor_id());
 }
diff --git a/arch/x86/kernel/cpu/mcheck/p6.c b/arch/x86/kernel/cpu/mcheck/p6.c
index 7434260..62efc9c 100644
--- a/arch/x86/kernel/cpu/mcheck/p6.c
+++ b/arch/x86/kernel/cpu/mcheck/p6.c
@@ -9,23 +9,23 @@
 #include <linux/interrupt.h>
 #include <linux/smp.h>
 
-#include <asm/processor.h> 
+#include <asm/processor.h>
 #include <asm/system.h>
 #include <asm/msr.h>
 
 #include "mce.h"
 
 /* Machine Check Handler For PII/PIII */
-static void intel_machine_check(struct pt_regs * regs, long error_code)
+static void intel_machine_check(struct pt_regs *regs, long error_code)
 {
-	int recover=1;
+	int recover = 1;
 	u32 alow, ahigh, high, low;
 	u32 mcgstl, mcgsth;
 	int i;
 
-	rdmsr (MSR_IA32_MCG_STATUS, mcgstl, mcgsth);
+	rdmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth);
 	if (mcgstl & (1<<0))	/* Recoverable ? */
-		recover=0;
+		recover = 0;
 
 	printk(KERN_EMERG "CPU %d: Machine Check Exception: %08x%08x\n",
 		smp_processor_id(), mcgsth, mcgstl);
@@ -55,30 +55,30 @@
 	}
 
 	if (recover & 2)
-		panic ("CPU context corrupt");
+		panic("CPU context corrupt");
 	if (recover & 1)
-		panic ("Unable to continue");
+		panic("Unable to continue");
 
-	printk (KERN_EMERG "Attempting to continue.\n");
-	/* 
-	 * Do not clear the MSR_IA32_MCi_STATUS if the error is not 
+	printk(KERN_EMERG "Attempting to continue.\n");
+	/*
+	 * Do not clear the MSR_IA32_MCi_STATUS if the error is not
 	 * recoverable/continuable.This will allow BIOS to look at the MSRs
 	 * for errors if the OS could not log the error.
 	 */
-	for (i=0; i<nr_mce_banks; i++) {
+	for (i = 0; i < nr_mce_banks; i++) {
 		unsigned int msr;
 		msr = MSR_IA32_MC0_STATUS+i*4;
-		rdmsr (msr,low, high);
+		rdmsr(msr, low, high);
 		if (high & (1<<31)) {
 			/* Clear it */
-			wrmsr (msr, 0UL, 0UL);
+			wrmsr(msr, 0UL, 0UL);
 			/* Serialize */
 			wmb();
 			add_taint(TAINT_MACHINE_CHECK);
 		}
 	}
 	mcgstl &= ~(1<<2);
-	wrmsr (MSR_IA32_MCG_STATUS,mcgstl, mcgsth);
+	wrmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth);
 }
 
 /* Set up machine check reporting for processors with Intel style MCE */
@@ -86,21 +86,21 @@
 {
 	u32 l, h;
 	int i;
-	
+
 	/* Check for MCE support */
 	if (!cpu_has(c, X86_FEATURE_MCE))
 		return;
 
 	/* Check for PPro style MCA */
- 	if (!cpu_has(c, X86_FEATURE_MCA))
+	if (!cpu_has(c, X86_FEATURE_MCA))
 		return;
 
 	/* Ok machine check is available */
 	machine_check_vector = intel_machine_check;
 	wmb();
 
-	printk (KERN_INFO "Intel machine check architecture supported.\n");
-	rdmsr (MSR_IA32_MCG_CAP, l, h);
+	printk(KERN_INFO "Intel machine check architecture supported.\n");
+	rdmsr(MSR_IA32_MCG_CAP, l, h);
 	if (l & (1<<8))	/* Control register present ? */
 		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
 	nr_mce_banks = l & 0xff;
@@ -110,13 +110,13 @@
 	 * - MC0_CTL should not be written
 	 * - Status registers on all banks should be cleared on reset
 	 */
-	for (i=1; i<nr_mce_banks; i++)
-		wrmsr (MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff);
+	for (i = 1; i < nr_mce_banks; i++)
+		wrmsr(MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff);
 
-	for (i=0; i<nr_mce_banks; i++)
-		wrmsr (MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0);
+	for (i = 0; i < nr_mce_banks; i++)
+		wrmsr(MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0);
 
-	set_in_cr4 (X86_CR4_MCE);
-	printk (KERN_INFO "Intel machine check reporting enabled on CPU#%d.\n",
+	set_in_cr4(X86_CR4_MCE);
+	printk(KERN_INFO "Intel machine check reporting enabled on CPU#%d.\n",
 		smp_processor_id());
 }
diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
index 3d428d5..f2be3e1 100644
--- a/arch/x86/kernel/cpu/mcheck/winchip.c
+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
@@ -8,14 +8,14 @@
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
 
-#include <asm/processor.h> 
+#include <asm/processor.h>
 #include <asm/system.h>
 #include <asm/msr.h>
 
 #include "mce.h"
 
 /* Machine check handler for WinChip C6 */
-static void winchip_machine_check(struct pt_regs * regs, long error_code)
+static void winchip_machine_check(struct pt_regs *regs, long error_code)
 {
 	printk(KERN_EMERG "CPU0: Machine Check Exception.\n");
 	add_taint(TAINT_MACHINE_CHECK);
@@ -28,8 +28,8 @@
 	machine_check_vector = winchip_machine_check;
 	wmb();
 	rdmsr(MSR_IDT_FCR1, lo, hi);
-	lo|= (1<<2);	/* Enable EIERRINT (int 18 MCE) */
-	lo&= ~(1<<4);	/* Enable MCE */
+	lo |= (1<<2);	/* Enable EIERRINT (int 18 MCE) */
+	lo &= ~(1<<4);	/* Enable MCE */
 	wrmsr(MSR_IDT_FCR1, lo, hi);
 	set_in_cr4(X86_CR4_MCE);
 	printk(KERN_INFO "Winchip machine check reporting enabled on CPU#0.\n");
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 3e18db4..353efe4 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -11,6 +11,7 @@
 #include <asm/cpufeature.h>
 #include <asm/processor-flags.h>
 #include <asm/tlbflush.h>
+#include <asm/pat.h>
 #include "mtrr.h"
 
 struct mtrr_state {
@@ -35,6 +36,8 @@
 
 static unsigned long smp_changes_mask;
 static struct mtrr_state mtrr_state = {};
+static int mtrr_state_set;
+static u64 tom2;
 
 #undef MODULE_PARAM_PREFIX
 #define MODULE_PARAM_PREFIX "mtrr."
@@ -42,6 +45,111 @@
 static int mtrr_show;
 module_param_named(show, mtrr_show, bool, 0);
 
+/*
+ * Returns the effective MTRR type for the region
+ * Error returns:
+ * - 0xFE - when the range is "not entirely covered" by _any_ var range MTRR
+ * - 0xFF - when MTRR is not enabled
+ */
+u8 mtrr_type_lookup(u64 start, u64 end)
+{
+	int i;
+	u64 base, mask;
+	u8 prev_match, curr_match;
+
+	if (!mtrr_state_set)
+		return 0xFF;
+
+	if (!mtrr_state.enabled)
+		return 0xFF;
+
+	/* Make end inclusive, instead of exclusive */
+	end--;
+
+	/* Look in fixed ranges. Just return the type as per start */
+	if (mtrr_state.have_fixed && (start < 0x100000)) {
+		int idx;
+
+		if (start < 0x80000) {
+			idx = 0;
+			idx += (start >> 16);
+			return mtrr_state.fixed_ranges[idx];
+		} else if (start < 0xC0000) {
+			idx = 1 * 8;
+			idx += ((start - 0x80000) >> 14);
+			return mtrr_state.fixed_ranges[idx];
+		} else if (start < 0x1000000) {
+			idx = 3 * 8;
+			idx += ((start - 0xC0000) >> 12);
+			return mtrr_state.fixed_ranges[idx];
+		}
+	}
+
+	/*
+	 * Look in variable ranges
+	 * Look for multiple ranges matching this address and pick the type
+	 * as per MTRR precedence
+	 */
+	if (!(mtrr_state.enabled & 2)) {
+		return mtrr_state.def_type;
+	}
+
+	prev_match = 0xFF;
+	for (i = 0; i < num_var_ranges; ++i) {
+		unsigned short start_state, end_state;
+
+		if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11)))
+			continue;
+
+		base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) +
+		       (mtrr_state.var_ranges[i].base_lo & PAGE_MASK);
+		mask = (((u64)mtrr_state.var_ranges[i].mask_hi) << 32) +
+		       (mtrr_state.var_ranges[i].mask_lo & PAGE_MASK);
+
+		start_state = ((start & mask) == (base & mask));
+		end_state = ((end & mask) == (base & mask));
+		if (start_state != end_state)
+			return 0xFE;
+
+		if ((start & mask) != (base & mask)) {
+			continue;
+		}
+
+		curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
+		if (prev_match == 0xFF) {
+			prev_match = curr_match;
+			continue;
+		}
+
+		if (prev_match == MTRR_TYPE_UNCACHABLE ||
+		    curr_match == MTRR_TYPE_UNCACHABLE) {
+			return MTRR_TYPE_UNCACHABLE;
+		}
+
+		if ((prev_match == MTRR_TYPE_WRBACK &&
+		     curr_match == MTRR_TYPE_WRTHROUGH) ||
+		    (prev_match == MTRR_TYPE_WRTHROUGH &&
+		     curr_match == MTRR_TYPE_WRBACK)) {
+			prev_match = MTRR_TYPE_WRTHROUGH;
+			curr_match = MTRR_TYPE_WRTHROUGH;
+		}
+
+		if (prev_match != curr_match) {
+			return MTRR_TYPE_UNCACHABLE;
+		}
+	}
+
+	if (tom2) {
+		if (start >= (1ULL<<32) && (end < tom2))
+			return MTRR_TYPE_WRBACK;
+	}
+
+	if (prev_match != 0xFF)
+		return prev_match;
+
+	return mtrr_state.def_type;
+}
+
 /*  Get the MSR pair relating to a var range  */
 static void
 get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
@@ -79,12 +187,16 @@
 			base, base + step - 1, mtrr_attrib_to_str(*types));
 }
 
+static void prepare_set(void);
+static void post_set(void);
+
 /*  Grab all of the MTRR state for this CPU into *state  */
 void __init get_mtrr_state(void)
 {
 	unsigned int i;
 	struct mtrr_var_range *vrs;
 	unsigned lo, dummy;
+	unsigned long flags;
 
 	vrs = mtrr_state.var_ranges;
 
@@ -100,6 +212,15 @@
 	mtrr_state.def_type = (lo & 0xff);
 	mtrr_state.enabled = (lo & 0xc00) >> 10;
 
+	if (amd_special_default_mtrr()) {
+		unsigned lo, hi;
+		/* TOP_MEM2 */
+		rdmsr(MSR_K8_TOP_MEM2, lo, hi);
+		tom2 = hi;
+		tom2 <<= 32;
+		tom2 |= lo;
+		tom2 &= 0xffffff8000000ULL;
+	}
 	if (mtrr_show) {
 		int high_width;
 
@@ -130,7 +251,22 @@
 			else
 				printk(KERN_INFO "MTRR %u disabled\n", i);
 		}
+		if (tom2) {
+			printk(KERN_INFO "TOM2: %016llx aka %lldM\n",
+					  tom2, tom2>>20);
+		}
 	}
+	mtrr_state_set = 1;
+
+	/* PAT setup for BP. We need to go through sync steps here */
+	local_irq_save(flags);
+	prepare_set();
+
+	pat_init();
+
+	post_set();
+	local_irq_restore(flags);
+
 }
 
 /*  Some BIOS's are fucked and don't set all MTRRs the same!  */
@@ -397,6 +533,9 @@
 	/* Actually set the state */
 	mask = set_mtrr_state();
 
+	/* also set PAT */
+	pat_init();
+
 	post_set();
 	local_irq_restore(flags);
 
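The new mtrr_type_lookup() above combines two lookups. Sub-1MB addresses index the 88-entry fixed_ranges[] array at three granularities (8 x 64K below 0x80000, 16 x 16K up to 0xC0000, 64 x 4K up to 0x100000); above that, every variable range covering the address is folded together by MTRR precedence: UC beats everything, WT+WB combine to WT, and any other mismatch degrades to UC. A user-space sketch of both calculations (function names and the bare type enum are illustrative):

#include <stdio.h>
#include <stdint.h>

enum { UC = 0, WC = 1, WT = 4, WP = 5, WB = 6 };	/* MTRR type codes */

/* Index into the 88-entry fixed_ranges[]: 8 x 64K, 16 x 16K, 64 x 4K. */
static int fixed_mtrr_index(uint32_t start)
{
	if (start < 0x80000)
		return start >> 16;
	if (start < 0xC0000)
		return 8 + ((start - 0x80000) >> 14);
	return 24 + ((start - 0xC0000) >> 12);
}

/* MTRR precedence for two variable ranges covering the same address. */
static int combine_types(int prev, int curr)
{
	if (prev == UC || curr == UC)
		return UC;
	if ((prev == WB && curr == WT) || (prev == WT && curr == WB))
		return WT;
	return prev == curr ? prev : UC;
}

int main(void)
{
	printf("idx(0xA0000) = %d\n", fixed_mtrr_index(0xA0000));	/* 16 */
	printf("idx(0xC8000) = %d\n", fixed_mtrr_index(0xC8000));	/* 32 */
	printf("WB+WT -> %d (WT)\n", combine_types(WB, WT));
	return 0;
}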
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
index 91e150a..1960f19 100644
--- a/arch/x86/kernel/cpu/mtrr/if.c
+++ b/arch/x86/kernel/cpu/mtrr/if.c
@@ -424,11 +424,10 @@
 		return -ENODEV;
 
 	proc_root_mtrr =
-	    create_proc_entry("mtrr", S_IWUSR | S_IRUGO, &proc_root);
-	if (proc_root_mtrr) {
+		proc_create("mtrr", S_IWUSR | S_IRUGO, &proc_root, &mtrr_fops);
+
+	if (proc_root_mtrr)
 		proc_root_mtrr->owner = THIS_MODULE;
-		proc_root_mtrr->proc_fops = &mtrr_fops;
-	}
 	return 0;
 }
 
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index a6450b3..6a1e278 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -627,7 +627,7 @@
 #define Tom2Enabled (1U << 21)
 #define Tom2ForceMemTypeWB (1U << 22)
 
-static __init int amd_special_default_mtrr(void)
+int __init amd_special_default_mtrr(void)
 {
 	u32 l, h;
 
diff --git a/arch/x86/kernel/cpu/mtrr/state.c b/arch/x86/kernel/cpu/mtrr/state.c
index 9f8ba92..7f7e275 100644
--- a/arch/x86/kernel/cpu/mtrr/state.c
+++ b/arch/x86/kernel/cpu/mtrr/state.c
@@ -19,13 +19,15 @@
 	if (use_intel() || is_cpu(CYRIX)) {
 
 		/*  Save value of CR4 and clear Page Global Enable (bit 7)  */
-		if ( cpu_has_pge ) {
+		if (cpu_has_pge) {
 			ctxt->cr4val = read_cr4();
 			write_cr4(ctxt->cr4val & ~X86_CR4_PGE);
 		}
 
-		/*  Disable and flush caches. Note that wbinvd flushes the TLBs as
-		    a side-effect  */
+		/*
+		 * Disable and flush caches. Note that wbinvd flushes the TLBs
+		 * as a side-effect
+		 */
 		cr0 = read_cr0() | X86_CR0_CD;
 		wbinvd();
 		write_cr0(cr0);
@@ -42,7 +44,7 @@
 
 void set_mtrr_cache_disable(struct set_mtrr_context *ctxt)
 {
-	if (use_intel()) 
+	if (use_intel())
 		/*  Disable MTRRs, and set the default type to uncached  */
 		mtrr_wrmsr(MTRRdefType_MSR, ctxt->deftype_lo & 0xf300UL,
 		      ctxt->deftype_hi);
@@ -66,12 +68,12 @@
 		else
 			/* Cyrix ARRs - everything else was excluded at the top */
 			setCx86(CX86_CCR3, ctxt->ccr3);
-		
+
 		/*  Enable caches  */
 		write_cr0(read_cr0() & 0xbfffffff);
 
 		/*  Restore value of CR4  */
-		if ( cpu_has_pge )
+		if (cpu_has_pge)
 			write_cr4(ctxt->cr4val);
 	}
 	/*  Re-enable interrupts locally (if enabled previously)  */
diff --git a/arch/x86/kernel/cpu/nexgen.c b/arch/x86/kernel/cpu/nexgen.c
index 961fbe1..5d5e1c1 100644
--- a/arch/x86/kernel/cpu/nexgen.c
+++ b/arch/x86/kernel/cpu/nexgen.c
@@ -9,11 +9,11 @@
  *	Detect a NexGen CPU running without BIOS hypercode new enough
  *	to have CPUID. (Thanks to Herbert Oppmann)
  */
- 
+
 static int __cpuinit deep_magic_nexgen_probe(void)
 {
 	int ret;
-	
+
 	__asm__ __volatile__ (
 		"	movw	$0x5555, %%ax\n"
 		"	xorw	%%dx,%%dx\n"
@@ -22,22 +22,21 @@
 		"	movl	$0, %%eax\n"
 		"	jnz	1f\n"
 		"	movl	$1, %%eax\n"
-		"1:\n" 
-		: "=a" (ret) : : "cx", "dx" );
+		"1:\n"
+		: "=a" (ret) : : "cx", "dx");
 	return  ret;
 }
 
-static void __cpuinit init_nexgen(struct cpuinfo_x86 * c)
+static void __cpuinit init_nexgen(struct cpuinfo_x86 *c)
 {
 	c->x86_cache_size = 256; /* A few had 1 MB... */
 }
 
-static void __cpuinit nexgen_identify(struct cpuinfo_x86 * c)
+static void __cpuinit nexgen_identify(struct cpuinfo_x86 *c)
 {
 	/* Detect NexGen with old hypercode */
-	if ( deep_magic_nexgen_probe() ) {
+	if (deep_magic_nexgen_probe())
 		strcpy(c->x86_vendor_id, "NexGenDriven");
-	}
 }
 
 static struct cpu_dev nexgen_cpu_dev __cpuinitdata = {
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index af11d31..0978a4a 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -8,78 +8,139 @@
 /*
  *	Get CPU information for use by the procfs.
  */
+#ifdef CONFIG_X86_32
+static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
+			      unsigned int cpu)
+{
+#ifdef CONFIG_X86_HT
+	if (c->x86_max_cores * smp_num_siblings > 1) {
+		seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
+		seq_printf(m, "siblings\t: %d\n",
+			   cpus_weight(per_cpu(cpu_core_map, cpu)));
+		seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
+		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
+		seq_printf(m, "apicid\t\t: %d\n", c->apicid);
+		seq_printf(m, "initial apicid\t: %d\n", c->initial_apicid);
+	}
+#endif
+}
+
+static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
+{
+	/*
+	 * We use exception 16 if we have hardware math and we've either seen
+	 * it or the CPU claims it is internal
+	 */
+	int fpu_exception = c->hard_math && (ignore_fpu_irq || cpu_has_fpu);
+	seq_printf(m,
+		   "fdiv_bug\t: %s\n"
+		   "hlt_bug\t\t: %s\n"
+		   "f00f_bug\t: %s\n"
+		   "coma_bug\t: %s\n"
+		   "fpu\t\t: %s\n"
+		   "fpu_exception\t: %s\n"
+		   "cpuid level\t: %d\n"
+		   "wp\t\t: %s\n",
+		   c->fdiv_bug ? "yes" : "no",
+		   c->hlt_works_ok ? "no" : "yes",
+		   c->f00f_bug ? "yes" : "no",
+		   c->coma_bug ? "yes" : "no",
+		   c->hard_math ? "yes" : "no",
+		   fpu_exception ? "yes" : "no",
+		   c->cpuid_level,
+		   c->wp_works_ok ? "yes" : "no");
+}
+#else
+static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
+			      unsigned int cpu)
+{
+#ifdef CONFIG_SMP
+	if (c->x86_max_cores * smp_num_siblings > 1) {
+		seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
+		seq_printf(m, "siblings\t: %d\n",
+			   cpus_weight(per_cpu(cpu_core_map, cpu)));
+		seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
+		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
+		seq_printf(m, "apicid\t\t: %d\n", c->apicid);
+		seq_printf(m, "initial apicid\t: %d\n", c->initial_apicid);
+	}
+#endif
+}
+
+static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
+{
+	seq_printf(m,
+		   "fpu\t\t: yes\n"
+		   "fpu_exception\t: yes\n"
+		   "cpuid level\t: %d\n"
+		   "wp\t\t: yes\n",
+		   c->cpuid_level);
+}
+#endif
+
 static int show_cpuinfo(struct seq_file *m, void *v)
 {
 	struct cpuinfo_x86 *c = v;
-	int i, n = 0;
-	int fpu_exception;
+	unsigned int cpu = 0;
+	int i;
 
 #ifdef CONFIG_SMP
-	n = c->cpu_index;
+	cpu = c->cpu_index;
 #endif
-	seq_printf(m, "processor\t: %d\n"
-		"vendor_id\t: %s\n"
-		"cpu family\t: %d\n"
-		"model\t\t: %d\n"
-		"model name\t: %s\n",
-		n,
-		c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
-		c->x86,
-		c->x86_model,
-		c->x86_model_id[0] ? c->x86_model_id : "unknown");
+	seq_printf(m, "processor\t: %u\n"
+		   "vendor_id\t: %s\n"
+		   "cpu family\t: %d\n"
+		   "model\t\t: %u\n"
+		   "model name\t: %s\n",
+		   cpu,
+		   c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
+		   c->x86,
+		   c->x86_model,
+		   c->x86_model_id[0] ? c->x86_model_id : "unknown");
 
 	if (c->x86_mask || c->cpuid_level >= 0)
 		seq_printf(m, "stepping\t: %d\n", c->x86_mask);
 	else
 		seq_printf(m, "stepping\t: unknown\n");
 
-	if ( cpu_has(c, X86_FEATURE_TSC) ) {
-		unsigned int freq = cpufreq_quick_get(n);
+	if (cpu_has(c, X86_FEATURE_TSC)) {
+		unsigned int freq = cpufreq_quick_get(cpu);
+
 		if (!freq)
 			freq = cpu_khz;
 		seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
-			freq / 1000, (freq % 1000));
+			   freq / 1000, (freq % 1000));
 	}
 
 	/* Cache size */
 	if (c->x86_cache_size >= 0)
 		seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
-#ifdef CONFIG_X86_HT
-	if (c->x86_max_cores * smp_num_siblings > 1) {
-		seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
-		seq_printf(m, "siblings\t: %d\n",
-				cpus_weight(per_cpu(cpu_core_map, n)));
-		seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
-		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
-	}
-#endif
-	
-	/* We use exception 16 if we have hardware math and we've either seen it or the CPU claims it is internal */
-	fpu_exception = c->hard_math && (ignore_fpu_irq || cpu_has_fpu);
-	seq_printf(m, "fdiv_bug\t: %s\n"
-			"hlt_bug\t\t: %s\n"
-			"f00f_bug\t: %s\n"
-			"coma_bug\t: %s\n"
-			"fpu\t\t: %s\n"
-			"fpu_exception\t: %s\n"
-			"cpuid level\t: %d\n"
-			"wp\t\t: %s\n"
-			"flags\t\t:",
-		     c->fdiv_bug ? "yes" : "no",
-		     c->hlt_works_ok ? "no" : "yes",
-		     c->f00f_bug ? "yes" : "no",
-		     c->coma_bug ? "yes" : "no",
-		     c->hard_math ? "yes" : "no",
-		     fpu_exception ? "yes" : "no",
-		     c->cpuid_level,
-		     c->wp_works_ok ? "yes" : "no");
 
-	for ( i = 0 ; i < 32*NCAPINTS ; i++ )
-		if ( test_bit(i, c->x86_capability) &&
-		     x86_cap_flags[i] != NULL )
+	show_cpuinfo_core(m, c, cpu);
+	show_cpuinfo_misc(m, c);
+
+	seq_printf(m, "flags\t\t:");
+	for (i = 0; i < 32*NCAPINTS; i++)
+		if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
 			seq_printf(m, " %s", x86_cap_flags[i]);
 
-	for (i = 0; i < 32; i++)
+	seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
+		   c->loops_per_jiffy/(500000/HZ),
+		   (c->loops_per_jiffy/(5000/HZ)) % 100);
+
+#ifdef CONFIG_X86_64
+	if (c->x86_tlbsize > 0)
+		seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
+#endif
+	seq_printf(m, "clflush size\t: %u\n", c->x86_clflush_size);
+#ifdef CONFIG_X86_64
+	seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
+	seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
+		   c->x86_phys_bits, c->x86_virt_bits);
+#endif
+
+	seq_printf(m, "power management:");
+	for (i = 0; i < 32; i++) {
 		if (c->x86_power & (1 << i)) {
 			if (i < ARRAY_SIZE(x86_power_flags) &&
 			    x86_power_flags[i])
@@ -89,11 +150,9 @@
 			else
 				seq_printf(m, " [%d]", i);
 		}
+	}
 
-	seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
-		     c->loops_per_jiffy/(500000/HZ),
-		     (c->loops_per_jiffy/(5000/HZ)) % 100);
-	seq_printf(m, "clflush size\t: %u\n\n", c->x86_clflush_size);
+	seq_printf(m, "\n\n");
 
 	return 0;
 }
@@ -106,14 +165,17 @@
 		return &cpu_data(*pos);
 	return NULL;
 }
+
 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
 {
 	*pos = next_cpu(*pos, cpu_online_map);
 	return c_start(m, pos);
 }
+
 static void c_stop(struct seq_file *m, void *v)
 {
 }
+
 const struct seq_operations cpuinfo_op = {
 	.start	= c_start,
 	.next	= c_next,
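The cpuinfo_op table registered above (truncated) drives show_cpuinfo() through the seq_file contract: start() positions on the element at *pos, next() advances *pos and repositions, show() prints one record, stop() cleans up. Emulating the calling sequence in user space over a plain array (names are illustrative) makes the shape obvious:

#include <stdio.h>

static int items[] = { 10, 20, 30 };
#define NITEMS	(sizeof(items)/sizeof(items[0]))

static void *c_start(long *pos)
{
	return *pos < NITEMS ? &items[*pos] : NULL;
}

static void *c_next(void *v, long *pos)
{
	++*pos;
	return c_start(pos);
}

static void c_stop(void *v) { }

static int c_show(void *v)
{
	printf("item\t: %d\n", *(int *)v);
	return 0;
}

int main(void)
{
	long pos = 0;
	void *v;

	/* the seq_file core runs this loop: start, show, next, ..., stop */
	for (v = c_start(&pos); v; v = c_next(v, &pos))
		c_show(v);
	c_stop(v);
	return 0;
}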
diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c
index e8b422c..b911a2c 100644
--- a/arch/x86/kernel/cpu/transmeta.c
+++ b/arch/x86/kernel/cpu/transmeta.c
@@ -18,8 +18,8 @@
 	/* Print CMS and CPU revision */
 	max = cpuid_eax(0x80860000);
 	cpu_rev = 0;
-	if ( max >= 0x80860001 ) {
-		cpuid(0x80860001, &dummy, &cpu_rev, &cpu_freq, &cpu_flags); 
+	if (max >= 0x80860001) {
+		cpuid(0x80860001, &dummy, &cpu_rev, &cpu_freq, &cpu_flags);
 		if (cpu_rev != 0x02000000) {
 			printk(KERN_INFO "CPU: Processor revision %u.%u.%u.%u, %u MHz\n",
 				(cpu_rev >> 24) & 0xff,
@@ -29,7 +29,7 @@
 				cpu_freq);
 		}
 	}
-	if ( max >= 0x80860002 ) {
+	if (max >= 0x80860002) {
 		cpuid(0x80860002, &new_cpu_rev, &cms_rev1, &cms_rev2, &dummy);
 		if (cpu_rev == 0x02000000) {
 			printk(KERN_INFO "CPU: Processor revision %08X, %u MHz\n",
@@ -42,7 +42,7 @@
 		       cms_rev1 & 0xff,
 		       cms_rev2);
 	}
-	if ( max >= 0x80860006 ) {
+	if (max >= 0x80860006) {
 		cpuid(0x80860003,
 		      (void *)&cpu_info[0],
 		      (void *)&cpu_info[4],
@@ -74,23 +74,25 @@
 	wrmsr(0x80860004, cap_mask, uk);
 
 	/* All Transmeta CPUs have a constant TSC */
-	set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
-	
+	set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+
 #ifdef CONFIG_SYSCTL
-	/* randomize_va_space slows us down enormously;
-	   it probably triggers retranslation of x86->native bytecode */
+	/*
+	 * randomize_va_space slows us down enormously;
+	 * it probably triggers retranslation of x86->native bytecode
+	 */
 	randomize_va_space = 0;
 #endif
 }
 
-static void __cpuinit transmeta_identify(struct cpuinfo_x86 * c)
+static void __cpuinit transmeta_identify(struct cpuinfo_x86 *c)
 {
 	u32 xlvl;
 
 	/* Transmeta-defined flags: level 0x80860001 */
 	xlvl = cpuid_eax(0x80860000);
-	if ( (xlvl & 0xffff0000) == 0x80860000 ) {
-		if (  xlvl >= 0x80860001 )
+	if ((xlvl & 0xffff0000) == 0x80860000) {
+		if (xlvl >= 0x80860001)
 			c->x86_capability[2] = cpuid_edx(0x80860001);
 	}
 }
@@ -102,8 +104,4 @@
 	.c_identify	= transmeta_identify,
 };
 
-int __init transmeta_init_cpu(void)
-{
-	cpu_devs[X86_VENDOR_TRANSMETA] = &transmeta_cpu_dev;
-	return 0;
-}
+cpu_vendor_dev_register(X86_VENDOR_TRANSMETA, &transmeta_cpu_dev);
diff --git a/arch/x86/kernel/cpu/umc.c b/arch/x86/kernel/cpu/umc.c
index a7a4e75..b1fc909 100644
--- a/arch/x86/kernel/cpu/umc.c
+++ b/arch/x86/kernel/cpu/umc.c
@@ -3,24 +3,23 @@
 #include <asm/processor.h>
 #include "cpu.h"
 
-/* UMC chips appear to be only either 386 or 486, so no special init takes place.
+/*
+ * UMC chips appear to be only either 386 or 486,
+ * so no special init takes place.
  */
 
 static struct cpu_dev umc_cpu_dev __cpuinitdata = {
 	.c_vendor	= "UMC",
-	.c_ident 	= { "UMC UMC UMC" },
+	.c_ident	= { "UMC UMC UMC" },
 	.c_models = {
 		{ .vendor = X86_VENDOR_UMC, .family = 4, .model_names =
-		  { 
-			  [1] = "U5D", 
-			  [2] = "U5S", 
+		  {
+			  [1] = "U5D",
+			  [2] = "U5S",
 		  }
 		},
 	},
 };
 
-int __init umc_init_cpu(void)
-{
-	cpu_devs[X86_VENDOR_UMC] = &umc_cpu_dev;
-	return 0;
-}
+cpu_vendor_dev_register(X86_VENDOR_UMC, &umc_cpu_dev);
+
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 9a5fa0a..2251d0a 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -26,11 +26,7 @@
 #include <linux/kdebug.h>
 #include <asm/smp.h>
 
-#ifdef CONFIG_X86_32
 #include <mach_ipi.h>
-#else
-#include <asm/mach_apic.h>
-#endif
 
 /* This keeps a track of which one is crashing cpu. */
 static int crashing_cpu;
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c
index dcd918c..11c11b8 100644
--- a/arch/x86/kernel/ds.c
+++ b/arch/x86/kernel/ds.c
@@ -220,11 +220,11 @@
 
 int ds_free(void **dsp)
 {
-	if (*dsp)
+	if (*dsp) {
 		kfree((void *)get_bts_buffer_base(*dsp));
-	kfree(*dsp);
-	*dsp = NULL;
-
+		kfree(*dsp);
+		*dsp = NULL;
+	}
 	return 0;
 }
 
diff --git a/arch/x86/kernel/e820_32.c b/arch/x86/kernel/e820_32.c
index 80444c5..0240cd7 100644
--- a/arch/x86/kernel/e820_32.c
+++ b/arch/x86/kernel/e820_32.c
@@ -450,38 +450,25 @@
  * thinkpad 560x, for example, does not cooperate with the memory
  * detection code.)
  */
-int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
+int __init copy_e820_map(struct e820entry *biosmap, int nr_map)
 {
 	/* Only one memory region (or negative)? Ignore it */
 	if (nr_map < 2)
 		return -1;
 
 	do {
-		unsigned long long start = biosmap->addr;
-		unsigned long long size = biosmap->size;
-		unsigned long long end = start + size;
-		unsigned long type = biosmap->type;
+		u64 start = biosmap->addr;
+		u64 size = biosmap->size;
+		u64 end = start + size;
+		u32 type = biosmap->type;
 
 		/* Overflow in 64 bits? Ignore the memory map. */
 		if (start > end)
 			return -1;
 
-		/*
-		 * Some BIOSes claim RAM in the 640k - 1M region.
-		 * Not right. Fix it up.
-		 */
-		if (type == E820_RAM) {
-			if (start < 0x100000ULL && end > 0xA0000ULL) {
-				if (start < 0xA0000ULL)
-					add_memory_region(start, 0xA0000ULL-start, type);
-				if (end <= 0x100000ULL)
-					continue;
-				start = 0x100000ULL;
-				size = end - start;
-			}
-		}
 		add_memory_region(start, size, type);
-	} while (biosmap++,--nr_map);
+	} while (biosmap++, --nr_map);
+
 	return 0;
 }
 
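With start, size, and end now u64, a bogus BIOS entry whose addr + size wraps past 2^64 is caught by the start > end test and the whole map is discarded. A tiny illustration of the guard:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t start = 0xFFFFFFFFFFFFF000ULL;
	uint64_t size  = 0x2000;
	uint64_t end   = start + size;		/* wraps to 0x1000 */

	if (start > end)
		printf("bogus entry ignored (end wrapped to %#llx)\n",
		       (unsigned long long)end);
	return 0;
}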
diff --git a/arch/x86/kernel/e820_64.c b/arch/x86/kernel/e820_64.c
index 9be6971..7f6c0c8 100644
--- a/arch/x86/kernel/e820_64.c
+++ b/arch/x86/kernel/e820_64.c
@@ -27,6 +27,7 @@
 #include <asm/setup.h>
 #include <asm/sections.h>
 #include <asm/kdebug.h>
+#include <asm/trampoline.h>
 
 struct e820map e820;
 
@@ -36,11 +37,11 @@
 unsigned long end_pfn;
 
 /*
- * end_pfn only includes RAM, while end_pfn_map includes all e820 entries.
- * The direct mapping extends to end_pfn_map, so that we can directly access
+ * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries.
+ * The direct mapping extends to max_pfn_mapped, so that we can directly access
  * apertures, ACPI and other tables without having to play with fixmaps.
  */
-unsigned long end_pfn_map;
+unsigned long max_pfn_mapped;
 
 /*
  * Last pfn which the user wants to use.
@@ -58,8 +59,8 @@
 };
 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
 	{ 0, PAGE_SIZE, "BIOS data page" },			/* BIOS data page */
-#ifdef CONFIG_SMP
-	{ SMP_TRAMPOLINE_BASE, SMP_TRAMPOLINE_BASE + 2*PAGE_SIZE, "SMP_TRAMPOLINE" },
+#ifdef CONFIG_X86_TRAMPOLINE
+	{ TRAMPOLINE_BASE, TRAMPOLINE_BASE + 2 * PAGE_SIZE, "TRAMPOLINE" },
 #endif
 	{}
 };
@@ -95,7 +96,8 @@
 }
 
 /* Check for already reserved areas */
-static inline int bad_addr(unsigned long *addrp, unsigned long size)
+static inline int
+bad_addr(unsigned long *addrp, unsigned long size, unsigned long align)
 {
 	int i;
 	unsigned long addr = *addrp, last;
@@ -105,7 +107,7 @@
 	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
 		struct early_res *r = &early_res[i];
 		if (last >= r->start && addr < r->end) {
-			*addrp = addr = r->end;
+			*addrp = addr = round_up(r->end, align);
 			changed = 1;
 			goto again;
 		}
@@ -113,6 +115,40 @@
 	return changed;
 }
 
+/* Check for already reserved areas */
+static inline int
+bad_addr_size(unsigned long *addrp, unsigned long *sizep, unsigned long align)
+{
+	int i;
+	unsigned long addr = *addrp, last;
+	unsigned long size = *sizep;
+	int changed = 0;
+again:
+	last = addr + size;
+	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
+		struct early_res *r = &early_res[i];
+		if (last > r->start && addr < r->start) {
+			size = r->start - addr;
+			changed = 1;
+			goto again;
+		}
+		if (last > r->end && addr < r->end) {
+			addr = round_up(r->end, align);
+			size = last - addr;
+			changed = 1;
+			goto again;
+		}
+		if (last <= r->end && addr >= r->start) {
+			(*sizep)++;
+			return 0;
+		}
+	}
+	if (changed) {
+		*addrp = addr;
+		*sizep = size;
+	}
+	return changed;
+}
 /*
  * This function checks if any part of the range <start,end> is mapped
  * with type.
@@ -174,26 +210,27 @@
  * Find a free area with specified alignment in a specific range.
  */
 unsigned long __init find_e820_area(unsigned long start, unsigned long end,
-				    unsigned size, unsigned long align)
+				    unsigned long size, unsigned long align)
 {
 	int i;
-	unsigned long mask = ~(align - 1);
 
 	for (i = 0; i < e820.nr_map; i++) {
 		struct e820entry *ei = &e820.map[i];
-		unsigned long addr = ei->addr, last;
+		unsigned long addr, last;
+		unsigned long ei_last;
 
 		if (ei->type != E820_RAM)
 			continue;
+		addr = round_up(ei->addr, align);
+		ei_last = ei->addr + ei->size;
 		if (addr < start)
-			addr = start;
-		if (addr > ei->addr + ei->size)
+			addr = round_up(start, align);
+		if (addr >= ei_last)
 			continue;
-		while (bad_addr(&addr, size) && addr+size <= ei->addr+ei->size)
+		while (bad_addr(&addr, size, align) && addr+size <= ei_last)
 			;
-		addr = (addr + align - 1) & mask;
 		last = addr + size;
-		if (last > ei->addr + ei->size)
+		if (last > ei_last)
 			continue;
 		if (last > end)
 			continue;
@@ -203,6 +240,40 @@
 }
 
 /*
+ * Find next free range after *start
+ */
+unsigned long __init find_e820_area_size(unsigned long start,
+					 unsigned long *sizep,
+					 unsigned long align)
+{
+	int i;
+
+	for (i = 0; i < e820.nr_map; i++) {
+		struct e820entry *ei = &e820.map[i];
+		unsigned long addr, last;
+		unsigned long ei_last;
+
+		if (ei->type != E820_RAM)
+			continue;
+		addr = round_up(ei->addr, align);
+		ei_last = ei->addr + ei->size;
+		if (addr < start)
+			addr = round_up(start, align);
+		if (addr >= ei_last)
+			continue;
+		*sizep = ei_last - addr;
+		while (bad_addr_size(&addr, sizep, align) &&
+			addr + *sizep <= ei_last)
+			;
+		last = addr + *sizep;
+		if (last > ei_last)
+			continue;
+		return addr;
+	}
+	return -1UL;
+
+}
+/*
  * Find the highest page frame number we have available
  */
 unsigned long __init e820_end_of_ram(void)
@@ -211,29 +282,29 @@
 
 	end_pfn = find_max_pfn_with_active_regions();
 
-	if (end_pfn > end_pfn_map)
-		end_pfn_map = end_pfn;
-	if (end_pfn_map > MAXMEM>>PAGE_SHIFT)
-		end_pfn_map = MAXMEM>>PAGE_SHIFT;
+	if (end_pfn > max_pfn_mapped)
+		max_pfn_mapped = end_pfn;
+	if (max_pfn_mapped > MAXMEM>>PAGE_SHIFT)
+		max_pfn_mapped = MAXMEM>>PAGE_SHIFT;
 	if (end_pfn > end_user_pfn)
 		end_pfn = end_user_pfn;
-	if (end_pfn > end_pfn_map)
-		end_pfn = end_pfn_map;
+	if (end_pfn > max_pfn_mapped)
+		end_pfn = max_pfn_mapped;
 
-	printk(KERN_INFO "end_pfn_map = %lu\n", end_pfn_map);
+	printk(KERN_INFO "max_pfn_mapped = %lu\n", max_pfn_mapped);
 	return end_pfn;
 }
 
 /*
  * Mark e820 reserved areas as busy for the resource manager.
  */
-void __init e820_reserve_resources(struct resource *code_resource,
-		struct resource *data_resource, struct resource *bss_resource)
+void __init e820_reserve_resources(void)
 {
 	int i;
+	struct resource *res;
+
+	res = alloc_bootmem_low(sizeof(struct resource) * e820.nr_map);
 	for (i = 0; i < e820.nr_map; i++) {
-		struct resource *res;
-		res = alloc_bootmem_low(sizeof(struct resource));
 		switch (e820.map[i].type) {
 		case E820_RAM:	res->name = "System RAM"; break;
 		case E820_ACPI:	res->name = "ACPI Tables"; break;
@@ -243,21 +314,8 @@
 		res->start = e820.map[i].addr;
 		res->end = res->start + e820.map[i].size - 1;
 		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
-		request_resource(&iomem_resource, res);
-		if (e820.map[i].type == E820_RAM) {
-			/*
-			 * We don't know which RAM region contains kernel data,
-			 * so we try it repeatedly and let the resource manager
-			 * test it.
-			 */
-			request_resource(res, code_resource);
-			request_resource(res, data_resource);
-			request_resource(res, bss_resource);
-#ifdef CONFIG_KEXEC
-			if (crashk_res.start != crashk_res.end)
-				request_resource(res, &crashk_res);
-#endif
-		}
+		insert_resource(&iomem_resource, res);
+		res++;
 	}
 }
 
@@ -309,9 +367,9 @@
 	if (*ei_startpfn >= *ei_endpfn)
 		return 0;
 
-	/* Check if end_pfn_map should be updated */
-	if (ei->type != E820_RAM && *ei_endpfn > end_pfn_map)
-		end_pfn_map = *ei_endpfn;
+	/* Check if max_pfn_mapped should be updated */
+	if (ei->type != E820_RAM && *ei_endpfn > max_pfn_mapped)
+		max_pfn_mapped = *ei_endpfn;
 
 	/* Skip if map is outside the node */
 	if (ei->type != E820_RAM || *ei_endpfn <= start_pfn ||
@@ -634,10 +692,10 @@
 		return -1;
 
 	do {
-		unsigned long start = biosmap->addr;
-		unsigned long size = biosmap->size;
-		unsigned long end = start + size;
-		unsigned long type = biosmap->type;
+		u64 start = biosmap->addr;
+		u64 size = biosmap->size;
+		u64 end = start + size;
+		u32 type = biosmap->type;
 
 		/* Overflow in 64 bits? Ignore the memory map. */
 		if (start > end)
@@ -702,7 +760,7 @@
 		saved_max_pfn = e820_end_of_ram();
 		remove_all_active_ranges();
 #endif
-		end_pfn_map = 0;
+		max_pfn_mapped = 0;
 		e820.nr_map = 0;
 		userdef = 1;
 		return 0;
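The reworked find_e820_area()/bad_addr() round the candidate address up to the requested alignment both at the start of each e820 entry and every time the candidate is bumped past a reserved early_res range; the old code aligned only once at the end, which could slide the block back over a reservation. A user-space sketch of that scan (one RAM range, two reserved ranges, illustrative names):

#include <stdio.h>
#include <stdint.h>

#define ROUND_UP(x, a)	(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

struct range { uint64_t start, end; };		/* [start, end) */

/* Reserved blocks to skip, like the early_res[] table in the patch. */
static const struct range reserved[] = {
	{ 0x0000, 0x1000 },	/* "BIOS data page" */
	{ 0x6000, 0x8000 },	/* "TRAMPOLINE" */
};
#define NRES	(sizeof(reserved)/sizeof(reserved[0]))

/* Find an aligned size-byte block in ram, re-aligning after every bump. */
static uint64_t find_area(struct range ram, uint64_t size, uint64_t align)
{
	uint64_t addr = ROUND_UP(ram.start, align);
	unsigned int i;
again:
	for (i = 0; i < NRES; i++) {
		const struct range *r = &reserved[i];
		if (addr + size > r->start && addr < r->end) {
			addr = ROUND_UP(r->end, align);
			goto again;
		}
	}
	return addr + size <= ram.end ? addr : (uint64_t)-1;
}

int main(void)
{
	struct range ram = { 0, 0x100000 };

	/* skips the BIOS data page, lands at 0x1000 */
	printf("placed at %#llx\n",
	       (unsigned long long)find_area(ram, 0x2000, 0x1000));
	return 0;
}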
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index cff84cd..643fd86 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -13,7 +13,7 @@
 #define VGABASE		(__ISA_IO_base + 0xb8000)
 
 static int max_ypos = 25, max_xpos = 80;
-static int current_ypos = 25, current_xpos = 0;
+static int current_ypos = 25, current_xpos;
 
 static void early_vga_write(struct console *con, const char *str, unsigned n)
 {
@@ -108,12 +108,12 @@
 
 	if (*s) {
 		unsigned port;
-		if (!strncmp(s,"0x",2)) {
+		if (!strncmp(s, "0x", 2)) {
 			early_serial_base = simple_strtoul(s, &e, 16);
 		} else {
 			static int bases[] = { 0x3f8, 0x2f8 };
 
-			if (!strncmp(s,"ttyS",4))
+			if (!strncmp(s, "ttyS", 4))
 				s += 4;
 			port = simple_strtoul(s, &e, 10);
 			if (port > 1 || s == e)
@@ -194,7 +194,7 @@
 
 /* Direct interface for emergencies */
 static struct console *early_console = &early_vga_console;
-static int early_console_initialized = 0;
+static int early_console_initialized;
 
 void early_printk(const char *fmt, ...)
 {
@@ -202,9 +202,9 @@
 	int n;
 	va_list ap;
 
-	va_start(ap,fmt);
-	n = vscnprintf(buf,512,fmt,ap);
-	early_console->write(early_console,buf,n);
+	va_start(ap, fmt);
+	n = vscnprintf(buf, 512, fmt, ap);
+	early_console->write(early_console, buf, n);
 	va_end(ap);
 }
 
@@ -229,15 +229,15 @@
 		early_serial_init(buf);
 		early_console = &early_serial_console;
 	} else if (!strncmp(buf, "vga", 3)
-	           && boot_params.screen_info.orig_video_isVGA == 1) {
+		&& boot_params.screen_info.orig_video_isVGA == 1) {
 		max_xpos = boot_params.screen_info.orig_video_cols;
 		max_ypos = boot_params.screen_info.orig_video_lines;
 		current_ypos = boot_params.screen_info.orig_y;
 		early_console = &early_vga_console;
- 	} else if (!strncmp(buf, "simnow", 6)) {
- 		simnow_init(buf + 6);
- 		early_console = &simnow_console;
- 		keep_early = 1;
+	} else if (!strncmp(buf, "simnow", 6)) {
+		simnow_init(buf + 6);
+		early_console = &simnow_console;
+		keep_early = 1;
 #ifdef CONFIG_HVC_XEN
 	} else if (!strncmp(buf, "xen", 3)) {
 		early_console = &xenboot_console;
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 4b87c32..9ba49a2 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -51,6 +51,7 @@
 #include <asm/desc.h>
 #include <asm/percpu.h>
 #include <asm/dwarf2.h>
+#include <asm/processor-flags.h>
 #include "irq_vectors.h"
 
 /*
@@ -68,13 +69,6 @@
 
 #define nr_syscalls ((syscall_table_size)/4)
 
-CF_MASK		= 0x00000001
-TF_MASK		= 0x00000100
-IF_MASK		= 0x00000200
-DF_MASK		= 0x00000400 
-NT_MASK		= 0x00004000
-VM_MASK		= 0x00020000
-
 #ifdef CONFIG_PREEMPT
 #define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
 #else
@@ -84,7 +78,7 @@
 
 .macro TRACE_IRQS_IRET
 #ifdef CONFIG_TRACE_IRQFLAGS
-	testl $IF_MASK,PT_EFLAGS(%esp)     # interrupts off?
+	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)     # interrupts off?
 	jz 1f
 	TRACE_IRQS_ON
 1:
@@ -246,7 +240,7 @@
 check_userspace:
 	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS and CS
 	movb PT_CS(%esp), %al
-	andl $(VM_MASK | SEGMENT_RPL_MASK), %eax
+	andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
 	cmpl $USER_RPL, %eax
 	jb resume_kernel		# not returning to v8086 or userspace
 
@@ -271,7 +265,7 @@
 	movl TI_flags(%ebp), %ecx	# need_resched set ?
 	testb $_TIF_NEED_RESCHED, %cl
 	jz restore_all
-	testl $IF_MASK,PT_EFLAGS(%esp)	# interrupts off (exception path) ?
+	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off (exception path) ?
 	jz restore_all
 	call preempt_schedule_irq
 	jmp need_resched
@@ -291,10 +285,10 @@
 	movl TSS_sysenter_sp0(%esp),%esp
 sysenter_past_esp:
 	/*
-	 * No need to follow this irqs on/off section: the syscall
-	 * disabled irqs and here we enable it straight after entry:
+	 * Interrupts are disabled here, but we can't trace that until
+	 * enough kernel state has been set up for TRACE_IRQS_OFF to be
+	 * safe - and we enable interrupts again right afterwards anyway.
 	 */
-	ENABLE_INTERRUPTS(CLBR_NONE)
 	pushl $(__USER_DS)
 	CFI_ADJUST_CFA_OFFSET 4
 	/*CFI_REL_OFFSET ss, 0*/
@@ -302,6 +296,7 @@
 	CFI_ADJUST_CFA_OFFSET 4
 	CFI_REL_OFFSET esp, 0
 	pushfl
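+	/* SYSENTER cleared IF; show interrupts enabled in the saved EFLAGS */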
+	orl $X86_EFLAGS_IF, (%esp)
 	CFI_ADJUST_CFA_OFFSET 4
 	pushl $(__USER_CS)
 	CFI_ADJUST_CFA_OFFSET 4
@@ -315,6 +310,11 @@
 	CFI_ADJUST_CFA_OFFSET 4
 	CFI_REL_OFFSET eip, 0
 
+	pushl %eax
+	CFI_ADJUST_CFA_OFFSET 4
+	SAVE_ALL
+	ENABLE_INTERRUPTS(CLBR_NONE)
+
 /*
  * Load the potential sixth argument from user stack.
  * Careful about security.
@@ -322,14 +322,12 @@
 	cmpl $__PAGE_OFFSET-3,%ebp
 	jae syscall_fault
 1:	movl (%ebp),%ebp
+	movl %ebp,PT_EBP(%esp)
 .section __ex_table,"a"
 	.align 4
 	.long 1b,syscall_fault
 .previous
 
-	pushl %eax
-	CFI_ADJUST_CFA_OFFSET 4
-	SAVE_ALL
 	GET_THREAD_INFO(%ebp)
 
 	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
@@ -384,7 +382,7 @@
 					# setting need_resched or sigpending
 					# between sampling and the iret
 	TRACE_IRQS_OFF
-	testl $TF_MASK,PT_EFLAGS(%esp)	# If tracing set singlestep flag on exit
+	testl $X86_EFLAGS_TF,PT_EFLAGS(%esp)	# If tracing set singlestep flag on exit
 	jz no_singlestep
 	orl $_TIF_SINGLESTEP,TI_flags(%ebp)
 no_singlestep:
@@ -399,7 +397,7 @@
 	# See comments in process.c:copy_thread() for details.
 	movb PT_OLDSS(%esp), %ah
 	movb PT_CS(%esp), %al
-	andl $(VM_MASK | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
+	andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
 	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
 	CFI_REMEMBER_STATE
 	je ldt_ss			# returning to user-space with LDT SS
@@ -486,7 +484,7 @@
 work_notifysig:				# deal with pending signals and
 					# notify-resume requests
 #ifdef CONFIG_VM86
-	testl $VM_MASK, PT_EFLAGS(%esp)
+	testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
 	movl %esp, %eax
 	jne work_notifysig_v86		# returning to kernel-space or
 					# vm86-space
@@ -543,9 +541,6 @@
 
 	RING0_INT_FRAME			# can't unwind into user space anyway
 syscall_fault:
-	pushl %eax			# save orig_eax
-	CFI_ADJUST_CFA_OFFSET 4
-	SAVE_ALL
 	GET_THREAD_INFO(%ebp)
 	movl $-EFAULT,PT_EAX(%esp)
 	jmp resume_userspace
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index c20c9e7..556a8df 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -319,19 +319,17 @@
 	/* Do syscall tracing */
 tracesys:			 
 	SAVE_REST
-	movq $-ENOSYS,RAX(%rsp)
+	movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
 	FIXUP_TOP_OF_STACK %rdi
 	movq %rsp,%rdi
 	call syscall_trace_enter
 	LOAD_ARGS ARGOFFSET  /* reload args from stack in case ptrace changed it */
 	RESTORE_REST
 	cmpq $__NR_syscall_max,%rax
-	movq $-ENOSYS,%rcx
-	cmova %rcx,%rax
-	ja  1f
+	ja   int_ret_from_sys_call	/* RAX(%rsp) set to -ENOSYS above */
 	movq %r10,%rcx	/* fixup for C */
 	call *sys_call_table(,%rax,8)
-1:	movq %rax,RAX-ARGOFFSET(%rsp)
+	movq %rax,RAX-ARGOFFSET(%rsp)
 	/* Use IRET because user could have changed frame */
 		
 /* 
diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c
index 4ae7b64..9546ef4 100644
--- a/arch/x86/kernel/genapic_64.c
+++ b/arch/x86/kernel/genapic_64.c
@@ -15,6 +15,7 @@
 #include <linux/kernel.h>
 #include <linux/ctype.h>
 #include <linux/init.h>
+#include <linux/hardirq.h>
 
 #include <asm/smp.h>
 #include <asm/ipi.h>
@@ -24,20 +25,20 @@
 #include <acpi/acpi_bus.h>
 #endif
 
-/* which logical CPU number maps to which CPU (physical APIC ID) */
-u16 x86_cpu_to_apicid_init[NR_CPUS] __initdata
-					= { [0 ... NR_CPUS-1] = BAD_APICID };
-void *x86_cpu_to_apicid_early_ptr;
-DEFINE_PER_CPU(u16, x86_cpu_to_apicid) = BAD_APICID;
-EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid);
+DEFINE_PER_CPU(int, x2apic_extra_bits);
 
 struct genapic __read_mostly *genapic = &apic_flat;
 
+static enum uv_system_type uv_system_type;
+
 /*
  * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode.
  */
 void __init setup_apic_routing(void)
 {
+	if (uv_system_type == UV_NON_UNIQUE_APIC)
+		genapic = &apic_x2apic_uv_x;
+	else
 #ifdef CONFIG_ACPI
 	/*
 	 * Quirk: some x86_64 machines can only use physical APIC mode
@@ -64,3 +65,37 @@
 {
 	__send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
 }
+
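+/*
+ * Detect SGI UV systems from the ACPI MADT OEM fields; the OEM table
+ * id selects which APIC addressing mode the platform uses.
+ */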
+int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+{
+	if (!strcmp(oem_id, "SGI")) {
+		if (!strcmp(oem_table_id, "UVL"))
+			uv_system_type = UV_LEGACY_APIC;
+		else if (!strcmp(oem_table_id, "UVX"))
+			uv_system_type = UV_X2APIC;
+		else if (!strcmp(oem_table_id, "UVH"))
+			uv_system_type = UV_NON_UNIQUE_APIC;
+	}
+	return 0;
+}
+
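+/*
+ * Read the local APIC ID; on UV systems in x2apic mode the node-unique
+ * upper bits are folded back in from x2apic_extra_bits.
+ */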
+unsigned int read_apic_id(void)
+{
+	unsigned int id;
+
+	WARN_ON(preemptible());
+	id = apic_read(APIC_ID);
+	if (uv_system_type >= UV_X2APIC)
+		id  |= __get_cpu_var(x2apic_extra_bits);
+	return id;
+}
+
+enum uv_system_type get_uv_system_type(void)
+{
+	return uv_system_type;
+}
+
+int is_uv_system(void)
+{
+	return uv_system_type != UV_NONE;
+}
diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c
index 07352b7..1a9c688 100644
--- a/arch/x86/kernel/genapic_flat_64.c
+++ b/arch/x86/kernel/genapic_flat_64.c
@@ -97,7 +97,7 @@
 
 static int flat_apic_id_registered(void)
 {
-	return physid_isset(GET_APIC_ID(apic_read(APIC_ID)), phys_cpu_present_map);
+	return physid_isset(GET_APIC_ID(read_apic_id()), phys_cpu_present_map);
 }
 
 static unsigned int flat_cpu_mask_to_apicid(cpumask_t cpumask)
@@ -138,12 +138,9 @@
 
 static cpumask_t physflat_vector_allocation_domain(int cpu)
 {
-	cpumask_t domain = CPU_MASK_NONE;
-	cpu_set(cpu, domain);
-	return domain;
+	return cpumask_of_cpu(cpu);
 }
 
-
 static void physflat_send_IPI_mask(cpumask_t cpumask, int vector)
 {
 	send_IPI_mask_sequence(cpumask, vector);
diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c
new file mode 100644
index 0000000..5d77c9c
--- /dev/null
+++ b/arch/x86/kernel/genx2apic_uv_x.c
@@ -0,0 +1,245 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * SGI UV APIC functions (note: not an Intel compatible APIC)
+ *
+ * Copyright (C) 2007 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/ctype.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/bootmem.h>
+#include <linux/module.h>
+#include <asm/smp.h>
+#include <asm/ipi.h>
+#include <asm/genapic.h>
+#include <asm/uv/uv_mmrs.h>
+#include <asm/uv/uv_hub.h>
+
+DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
+EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info);
+
+struct uv_blade_info *uv_blade_info;
+EXPORT_SYMBOL_GPL(uv_blade_info);
+
+short *uv_node_to_blade;
+EXPORT_SYMBOL_GPL(uv_node_to_blade);
+
+short *uv_cpu_to_blade;
+EXPORT_SYMBOL_GPL(uv_cpu_to_blade);
+
+short uv_possible_blades;
+EXPORT_SYMBOL_GPL(uv_possible_blades);
+
+/* Start with all IRQs pointing to boot CPU.  IRQ balancing will shift them. */
+
+static cpumask_t uv_target_cpus(void)
+{
+	return cpumask_of_cpu(0);
+}
+
+static cpumask_t uv_vector_allocation_domain(int cpu)
+{
+	return cpumask_of_cpu(cpu);
+}
+
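+/*
+ * Wake a secondary CPU by writing a startup request into the target
+ * node's IPI interrupt MMR (delivery mode 6 = startup; the vector
+ * encodes the 4K-aligned start_rip).
+ */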
+int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip)
+{
+	unsigned long val;
+	int nasid;
+
+	nasid = uv_apicid_to_nasid(phys_apicid);
+	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
+	    (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
+	    (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
+	    (6 << UVH_IPI_INT_DELIVERY_MODE_SHFT);
+	uv_write_global_mmr64(nasid, UVH_IPI_INT, val);
+	return 0;
+}
+
+static void uv_send_IPI_one(int cpu, int vector)
+{
+	unsigned long val, apicid;
+	int nasid;
+
+	apicid = per_cpu(x86_cpu_to_apicid, cpu); /* ZZZ - cache node-local ? */
+	nasid = uv_apicid_to_nasid(apicid);
+	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
+	    (apicid << UVH_IPI_INT_APIC_ID_SHFT) |
+	    (vector << UVH_IPI_INT_VECTOR_SHFT);
+	uv_write_global_mmr64(nasid, UVH_IPI_INT, val);
+	printk(KERN_DEBUG
+	     "UV: IPI to cpu %d, apicid 0x%lx, vec %d, nasid%d, val 0x%lx\n",
+	     cpu, apicid, vector, nasid, val);
+}
+
+static void uv_send_IPI_mask(cpumask_t mask, int vector)
+{
+	unsigned int cpu;
+
+	for (cpu = 0; cpu < NR_CPUS; ++cpu)
+		if (cpu_isset(cpu, mask))
+			uv_send_IPI_one(cpu, vector);
+}
+
+static void uv_send_IPI_allbutself(int vector)
+{
+	cpumask_t mask = cpu_online_map;
+
+	cpu_clear(smp_processor_id(), mask);
+
+	if (!cpus_empty(mask))
+		uv_send_IPI_mask(mask, vector);
+}
+
+static void uv_send_IPI_all(int vector)
+{
+	uv_send_IPI_mask(cpu_online_map, vector);
+}
+
+static int uv_apic_id_registered(void)
+{
+	return 1;
+}
+
+static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask)
+{
+	int cpu;
+
+	/*
+	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
+	 * May as well be the first.
+	 */
+	cpu = first_cpu(cpumask);
+	if ((unsigned)cpu < NR_CPUS)
+		return per_cpu(x86_cpu_to_apicid, cpu);
+	else
+		return BAD_APICID;
+}
+
+static unsigned int phys_pkg_id(int index_msb)
+{
+	return GET_APIC_ID(read_apic_id()) >> index_msb;
+}
+
+#ifdef ZZZ		/* Needs x2apic patch */
+static void uv_send_IPI_self(int vector)
+{
+	apic_write(APIC_SELF_IPI, vector);
+}
+#endif
+
+struct genapic apic_x2apic_uv_x = {
+	.name = "UV large system",
+	.int_delivery_mode = dest_Fixed,
+	.int_dest_mode = (APIC_DEST_PHYSICAL != 0),
+	.target_cpus = uv_target_cpus,
+	.vector_allocation_domain = uv_vector_allocation_domain, /* Fixme ZZZ */
+	.apic_id_registered = uv_apic_id_registered,
+	.send_IPI_all = uv_send_IPI_all,
+	.send_IPI_allbutself = uv_send_IPI_allbutself,
+	.send_IPI_mask = uv_send_IPI_mask,
+	/* ZZZ.send_IPI_self = uv_send_IPI_self, */
+	.cpu_mask_to_apicid = uv_cpu_mask_to_apicid,
+	.phys_pkg_id = phys_pkg_id,	/* Fixme ZZZ */
+};
+
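+/* The upper APIC ID bits on UV are derived from the node id (nasid) */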
+static __cpuinit void set_x2apic_extra_bits(int nasid)
+{
+	__get_cpu_var(x2apic_extra_bits) = ((nasid >> 1) << 6);
+}
+
+/*
+ * Called on boot cpu.
+ */
+static __init void uv_system_init(void)
+{
+	union uvh_si_addr_map_config_u m_n_config;
+	int bytes, nid, cpu, lcpu, nasid, last_nasid, blade;
+	unsigned long mmr_base;
+
+	m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG);
+	mmr_base = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
+		   ~UV_MMR_ENABLE;
+	printk(KERN_DEBUG "UV: global MMR base 0x%lx\n", mmr_base);
+
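+	/* count the blades; assumes possible CPUs are enumerated grouped by nasid */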
+	last_nasid = -1;
+	for_each_possible_cpu(cpu) {
+		nid = cpu_to_node(cpu);
+		nasid = uv_apicid_to_nasid(per_cpu(x86_cpu_to_apicid, cpu));
+		if (nasid != last_nasid)
+			uv_possible_blades++;
+		last_nasid = nasid;
+	}
+	printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());
+
+	bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
+	uv_blade_info = alloc_bootmem_pages(bytes);
+
+	bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes();
+	uv_node_to_blade = alloc_bootmem_pages(bytes);
+	memset(uv_node_to_blade, 255, bytes);
+
+	bytes = sizeof(uv_cpu_to_blade[0]) * num_possible_cpus();
+	uv_cpu_to_blade = alloc_bootmem_pages(bytes);
+	memset(uv_cpu_to_blade, 255, bytes);
+
+	last_nasid = -1;
+	blade = -1;
+	lcpu = -1;
+	for_each_possible_cpu(cpu) {
+		nid = cpu_to_node(cpu);
+		nasid = uv_apicid_to_nasid(per_cpu(x86_cpu_to_apicid, cpu));
+		if (nasid != last_nasid) {
+			blade++;
+			lcpu = -1;
+			uv_blade_info[blade].nr_posible_cpus = 0;
+			uv_blade_info[blade].nr_online_cpus = 0;
+		}
+		last_nasid = nasid;
+		lcpu++;
+
+		uv_cpu_hub_info(cpu)->m_val = m_n_config.s.m_skt;
+		uv_cpu_hub_info(cpu)->n_val = m_n_config.s.n_skt;
+		uv_cpu_hub_info(cpu)->numa_blade_id = blade;
+		uv_cpu_hub_info(cpu)->blade_processor_id = lcpu;
+		uv_cpu_hub_info(cpu)->local_nasid = nasid;
+		uv_cpu_hub_info(cpu)->gnode_upper =
+		    nasid & ~((1 << uv_hub_info->n_val) - 1);
+		uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
+		uv_cpu_hub_info(cpu)->coherency_domain_number = 0; /* ZZZ */
+		uv_blade_info[blade].nasid = nasid;
+		uv_blade_info[blade].nr_posible_cpus++;
+		uv_node_to_blade[nid] = blade;
+		uv_cpu_to_blade[cpu] = blade;
+
+		printk(KERN_DEBUG "UV cpu %d, apicid 0x%x, nasid %d, nid %d\n",
+		       cpu, per_cpu(x86_cpu_to_apicid, cpu), nasid, nid);
+		printk(KERN_DEBUG "UV   lcpu %d, blade %d\n", lcpu, blade);
+	}
+}
+
+/*
+ * Called on each cpu to initialize the per_cpu UV data area.
+ */
+void __cpuinit uv_cpu_init(void)
+{
+	if (!uv_node_to_blade)
+		uv_system_init();
+
+	uv_blade_info[uv_numa_blade_id()].nr_online_cpus++;
+
+	if (get_uv_system_type() == UV_NON_UNIQUE_APIC)
+		set_x2apic_extra_bits(uv_hub_info->local_nasid);
+}
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
new file mode 100644
index 0000000..3db0590
--- /dev/null
+++ b/arch/x86/kernel/head32.c
@@ -0,0 +1,14 @@
+/*
+ *  linux/arch/i386/kernel/head32.c -- prepare to run common code
+ *
+ *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
+ *  Copyright (C) 2007 Eric Biederman <ebiederm@xmission.com>
+ */
+
+#include <linux/init.h>
+#include <linux/start_kernel.h>
+
+void __init i386_start_kernel(void)
+{
+	start_kernel();
+}
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index ad24408..d6d54fa 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -49,39 +49,75 @@
 	}
 }
 
-#define EBDA_ADDR_POINTER 0x40E
+#define BIOS_EBDA_SEGMENT 0x40E
+#define BIOS_LOWMEM_KILOBYTES 0x413
 
-static __init void reserve_ebda(void)
+/*
+ * The BIOS places the EBDA/XBDA at the top of conventional
+ * memory, and usually decreases the reported amount of
+ * conventional memory (int 0x12) too. This also contains a
+ * workaround for Dell systems that neglect to reserve EBDA.
+ * The same workaround also avoids a problem with the AMD768MPX
+ * chipset: reserve a page before VGA to prevent PCI prefetch
+ * into it (errata #56). Usually the page is reserved anyways,
+ * unless you have no PS/2 mouse plugged in.
+ */
+static void __init reserve_ebda_region(void)
 {
-	unsigned ebda_addr, ebda_size;
+	unsigned int lowmem, ebda_addr;
 
-	/*
-	 * there is a real-mode segmented pointer pointing to the
-	 * 4K EBDA area at 0x40E
-	 */
-	ebda_addr = *(unsigned short *)__va(EBDA_ADDR_POINTER);
-	ebda_addr <<= 4;
-
-	if (!ebda_addr)
+	/*
+	 * To determine the position of the EBDA and the end of
+	 * conventional memory, we need to look at the BIOS data area.
+	 * In a paravirtual environment that area is absent. We'll just
+	 * have to assume that the paravirt case can handle memory setup
+	 * correctly, without our help.
+	 */
+	if (paravirt_enabled())
 		return;
 
-	ebda_size = *(unsigned short *)__va(ebda_addr);
+	/* end of low (conventional) memory */
+	lowmem = *(unsigned short *)__va(BIOS_LOWMEM_KILOBYTES);
+	lowmem <<= 10;
 
-	/* Round EBDA up to pages */
-	if (ebda_size == 0)
-		ebda_size = 1;
-	ebda_size <<= 10;
-	ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
-	if (ebda_size > 64*1024)
-		ebda_size = 64*1024;
+	/* start of EBDA area */
+	ebda_addr = *(unsigned short *)__va(BIOS_EBDA_SEGMENT);
+	ebda_addr <<= 4;
 
-	reserve_early(ebda_addr, ebda_addr + ebda_size, "EBDA");
+	/*
+	 * Fixup: bios puts an EBDA in the top 64K segment of
+	 * conventional memory, but does not adjust lowmem.
+	 */
+	if ((lowmem - ebda_addr) <= 0x10000)
+		lowmem = ebda_addr;
+
+	/*
+	 * Fixup: bios does not report an EBDA at all.
+	 * Some old Dells seem to need 4k anyhow (bugzilla 2990).
+	 */
+	if ((ebda_addr == 0) && (lowmem >= 0x9f000))
+		lowmem = 0x9f000;
+
+	/* Paranoia: should never happen, but... */
+	if ((lowmem == 0) || (lowmem >= 0x100000))
+		lowmem = 0x9f000;
+
+	/* reserve all memory between lowmem and the 1MB mark */
+	reserve_early(lowmem, 0x100000, "BIOS reserved");
 }
 
 void __init x86_64_start_kernel(char * real_mode_data)
 {
 	int i;
 
+	/*
+	 * Build-time sanity checks on the kernel image and module
+	 * area mappings. (these are purely build-time and produce no code)
+	 */
+	BUILD_BUG_ON(MODULES_VADDR < KERNEL_IMAGE_START);
+	BUILD_BUG_ON(MODULES_VADDR-KERNEL_IMAGE_START < KERNEL_IMAGE_SIZE);
+	BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
+	BUILD_BUG_ON((KERNEL_IMAGE_START & ~PMD_MASK) != 0);
+	BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0);
+	BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
+	BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
+				(__START_KERNEL & PGDIR_MASK)));
+
 	/* clear bss before set_intr_gate with early_idt_handler */
 	clear_bss();
 
@@ -91,7 +127,7 @@
 	/* Cleanup the over mapped high alias */
 	cleanup_highmap();
 
-	for (i = 0; i < IDT_ENTRIES; i++) {
+	for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) {
 #ifdef CONFIG_EARLY_PRINTK
 		set_intr_gate(i, &early_idt_handlers[i]);
 #else
@@ -118,7 +154,7 @@
 		reserve_early(ramdisk_image, ramdisk_end, "RAMDISK");
 	}
 
-	reserve_ebda();
+	reserve_ebda_region();
 
 	/*
 	 * At this point everything still needed from the boot loader
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 74d87ea..826988a 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -450,7 +450,7 @@
 	jmp initialize_secondary # all other CPUs call initialize_secondary
 1:
 #endif /* CONFIG_SMP */
-	jmp start_kernel
+	jmp i386_start_kernel
 
 /*
  * We depend on ET to be correct. This checks for 287/387.
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index a007454..10a1955 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -132,10 +132,6 @@
 	addq	%rbp, trampoline_level4_pgt + 0(%rip)
 	addq	%rbp, trampoline_level4_pgt + (511*8)(%rip)
 #endif
-#ifdef CONFIG_ACPI_SLEEP
-	addq	%rbp, wakeup_level4_pgt + 0(%rip)
-	addq	%rbp, wakeup_level4_pgt + (511*8)(%rip)
-#endif
 
 	/* Due to ENTRY(), sometimes the empty space gets filled with
 	 * zeros. Better take a jmp than relying on empty space being
@@ -267,21 +263,16 @@
 bad_address:
 	jmp bad_address
 
+	.section ".init.text","ax"
 #ifdef CONFIG_EARLY_PRINTK
-.macro early_idt_tramp first, last
-	.ifgt \last-\first
-	early_idt_tramp \first, \last-1
-	.endif
-	movl $\last,%esi
-	jmp early_idt_handler
-.endm
-
 	.globl early_idt_handlers
 early_idt_handlers:
-	early_idt_tramp 0, 63
-	early_idt_tramp 64, 127
-	early_idt_tramp 128, 191
-	early_idt_tramp 192, 255
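+	/*
+	 * Build NUM_EXCEPTION_VECTORS stubs: each loads its vector number
+	 * into %esi and jumps to the common early_idt_handler.
+	 */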
+	i = 0
+	.rept NUM_EXCEPTION_VECTORS
+	movl $i, %esi
+	jmp early_idt_handler
+	i = i + 1
+	.endr
 #endif
 
 ENTRY(early_idt_handler)
@@ -327,6 +318,7 @@
 early_idt_ripmsg:
 	.asciz "RIP %s\n"
 #endif /* CONFIG_EARLY_PRINTK */
+	.previous
 
 .balign PAGE_SIZE
 
@@ -383,12 +375,12 @@
 
 NEXT_PAGE(level2_kernel_pgt)
 	/*
-	 * 128 MB kernel mapping. We spend a full page on this pagetable
+	 * 512 MB kernel mapping. We spend a full page on this pagetable
 	 * anyway.
 	 *
 	 * The kernel code+data+bss must not be bigger than that.
 	 *
-	 * (NOTE: at +128MB starts the module area, see MODULES_VADDR.
+	 * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
 	 *  If you want to increase this then increase MODULES_VADDR
 	 *  too.)
 	 */
diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
index 0616278..deb4378 100644
--- a/arch/x86/kernel/i386_ksyms_32.c
+++ b/arch/x86/kernel/i386_ksyms_32.c
@@ -1,13 +1,8 @@
 #include <linux/module.h>
-#include <asm/semaphore.h>
 #include <asm/checksum.h>
 #include <asm/desc.h>
 #include <asm/pgtable.h>
 
-EXPORT_SYMBOL(__down_failed);
-EXPORT_SYMBOL(__down_failed_interruptible);
-EXPORT_SYMBOL(__down_failed_trylock);
-EXPORT_SYMBOL(__up_wakeup);
 /* Networking helper routines. */
 EXPORT_SYMBOL(csum_partial_copy_generic);
 
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index d2e39e6..8f8102d 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -5,45 +5,41 @@
  *  General FPU state handling cleanups
  *	Gareth Hughes <gareth@valinux.com>, May 2000
  */
-
-#include <linux/sched.h>
 #include <linux/module.h>
 #include <linux/regset.h>
-#include <asm/processor.h>
-#include <asm/i387.h>
-#include <asm/math_emu.h>
+#include <linux/sched.h>
+
 #include <asm/sigcontext.h>
-#include <asm/user.h>
-#include <asm/ptrace.h>
+#include <asm/processor.h>
+#include <asm/math_emu.h>
 #include <asm/uaccess.h>
+#include <asm/ptrace.h>
+#include <asm/i387.h>
+#include <asm/user.h>
 
 #ifdef CONFIG_X86_64
-
-#include <asm/sigcontext32.h>
-#include <asm/user32.h>
-
+# include <asm/sigcontext32.h>
+# include <asm/user32.h>
 #else
-
-#define	save_i387_ia32		save_i387
-#define	restore_i387_ia32	restore_i387
-
-#define _fpstate_ia32 		_fpstate
-#define user_i387_ia32_struct	user_i387_struct
-#define user32_fxsr_struct	user_fxsr_struct
-
+# define save_i387_ia32		save_i387
+# define restore_i387_ia32	restore_i387
+# define _fpstate_ia32		_fpstate
+# define user_i387_ia32_struct	user_i387_struct
+# define user32_fxsr_struct	user_fxsr_struct
 #endif
 
 #ifdef CONFIG_MATH_EMULATION
-#define HAVE_HWFP (boot_cpu_data.hard_math)
+# define HAVE_HWFP		(boot_cpu_data.hard_math)
 #else
-#define HAVE_HWFP 1
+# define HAVE_HWFP		1
 #endif
 
-static unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
+static unsigned int		mxcsr_feature_mask __read_mostly = 0xffffffffu;
 
 void mxcsr_feature_mask_init(void)
 {
 	unsigned long mask = 0;
+
 	clts();
 	if (cpu_has_fxsr) {
 		memset(&current->thread.i387.fxsave, 0,
@@ -69,10 +65,11 @@
 
 	if (offsetof(struct task_struct, thread.i387.fxsave) & 15)
 		__bad_fxsave_alignment();
+
 	set_in_cr4(X86_CR4_OSFXSR);
 	set_in_cr4(X86_CR4_OSXMMEXCPT);
 
-	write_cr0(oldcr0 & ~((1UL<<3)|(1UL<<2))); /* clear TS and EM */
+	write_cr0(oldcr0 & ~(X86_CR0_TS|X86_CR0_EM)); /* clear TS and EM */
 
 	mxcsr_feature_mask_init();
 	/* clean state in init */
@@ -178,6 +175,7 @@
 	tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
 	tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
 	tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */
+
 	return tmp;
 }
 
@@ -232,8 +230,8 @@
  * FXSR floating point environment conversions.
  */
 
-static void convert_from_fxsr(struct user_i387_ia32_struct *env,
-			      struct task_struct *tsk)
+static void
+convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
 {
 	struct i387_fxsave_struct *fxsave = &tsk->thread.i387.fxsave;
 	struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
@@ -252,10 +250,11 @@
 		 * should be actually ds/cs at fpu exception time, but
 		 * that information is not available in 64bit mode.
 		 */
-		asm("mov %%ds,%0" : "=r" (env->fos));
-		asm("mov %%cs,%0" : "=r" (env->fcs));
+		asm("mov %%ds, %[fos]" : [fos] "=r" (env->fos));
+		asm("mov %%cs, %[fcs]" : [fcs] "=r" (env->fcs));
 	} else {
 		struct pt_regs *regs = task_pt_regs(tsk);
+
 		env->fos = 0xffff0000 | tsk->thread.ds;
 		env->fcs = regs->cs;
 	}
@@ -309,9 +308,10 @@
 
 	init_fpu(target);
 
-	if (!cpu_has_fxsr)
+	if (!cpu_has_fxsr) {
 		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 					   &target->thread.i387.fsave, 0, -1);
+	}
 
 	if (kbuf && pos == 0 && count == sizeof(env)) {
 		convert_from_fxsr(kbuf, target);
@@ -319,6 +319,7 @@
 	}
 
 	convert_from_fxsr(&env, target);
+
 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
 }
 
@@ -335,9 +336,10 @@
 	init_fpu(target);
 	set_stopped_child_used_math(target);
 
-	if (!cpu_has_fxsr)
+	if (!cpu_has_fxsr) {
 		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 					  &target->thread.i387.fsave, 0, -1);
+	}
 
 	if (pos > 0 || count < sizeof(env))
 		convert_from_fxsr(&env, target);
@@ -392,28 +394,28 @@
 {
 	if (!used_math())
 		return 0;
-
-	/* This will cause a "finit" to be triggered by the next
+	/*
+	 * This will cause a "finit" to be triggered by the next
 	 * attempted FPU operation by the 'current' process.
 	 */
 	clear_used_math();
 
-	if (HAVE_HWFP) {
-		if (cpu_has_fxsr) {
-			return save_i387_fxsave(buf);
-		} else {
-			return save_i387_fsave(buf);
-		}
-	} else {
+	if (!HAVE_HWFP) {
 		return fpregs_soft_get(current, NULL,
 				       0, sizeof(struct user_i387_ia32_struct),
 				       NULL, buf) ? -1 : 1;
 	}
+
+	if (cpu_has_fxsr)
+		return save_i387_fxsave(buf);
+	else
+		return save_i387_fsave(buf);
 }
 
 static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf)
 {
 	struct task_struct *tsk = current;
+
 	clear_fpu(tsk);
 	return __copy_from_user(&tsk->thread.i387.fsave, buf,
 				sizeof(struct i387_fsave_struct));
@@ -421,9 +423,10 @@
 
 static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf)
 {
-	int err;
 	struct task_struct *tsk = current;
 	struct user_i387_ia32_struct env;
+	int err;
+
 	clear_fpu(tsk);
 	err = __copy_from_user(&tsk->thread.i387.fxsave, &buf->_fxsr_env[0],
 			       sizeof(struct i387_fxsave_struct));
@@ -432,6 +435,7 @@
 	if (err || __copy_from_user(&env, buf, sizeof(env)))
 		return 1;
 	convert_to_fxsr(tsk, &env);
+
 	return 0;
 }
 
@@ -440,17 +444,17 @@
 	int err;
 
 	if (HAVE_HWFP) {
-		if (cpu_has_fxsr) {
+		if (cpu_has_fxsr)
 			err = restore_i387_fxsave(buf);
-		} else {
+		else
 			err = restore_i387_fsave(buf);
-		}
 	} else {
 		err = fpregs_soft_set(current, NULL,
 				      0, sizeof(struct user_i387_ia32_struct),
 				      NULL, buf) != 0;
 	}
 	set_used_math();
+
 	return err;
 }
 
@@ -463,8 +467,8 @@
  */
 int dump_fpu(struct pt_regs *regs, struct user_i387_struct *fpu)
 {
-	int fpvalid;
 	struct task_struct *tsk = current;
+	int fpvalid;
 
 	fpvalid = !!used_math();
 	if (fpvalid)
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
index 4ca5486..2e2f420 100644
--- a/arch/x86/kernel/io_apic_32.c
+++ b/arch/x86/kernel/io_apic_32.c
@@ -71,6 +71,16 @@
  */
 int nr_ioapic_registers[MAX_IO_APICS];
 
+/* I/O APIC entries */
+struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
+int nr_ioapics;
+
+/* MP IRQ source entries */
+struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
+
+/* # of MP IRQ source entries */
+int mp_irq_entries;
+
 static int disable_timer_pin_1 __initdata;
 
 /*
@@ -810,10 +820,7 @@
 	for (i = 0; i < mp_irq_entries; i++) {
 		int lbus = mp_irqs[i].mpc_srcbus;
 
-		if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
-		     mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
-		     mp_bus_id_to_type[lbus] == MP_BUS_MCA
-		    ) &&
+		if (test_bit(lbus, mp_bus_not_pci) &&
 		    (mp_irqs[i].mpc_irqtype == type) &&
 		    (mp_irqs[i].mpc_srcbusirq == irq))
 
@@ -829,10 +836,7 @@
 	for (i = 0; i < mp_irq_entries; i++) {
 		int lbus = mp_irqs[i].mpc_srcbus;
 
-		if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
-		     mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
-		     mp_bus_id_to_type[lbus] == MP_BUS_MCA
-		    ) &&
+		if (test_bit(lbus, mp_bus_not_pci) &&
 		    (mp_irqs[i].mpc_irqtype == type) &&
 		    (mp_irqs[i].mpc_srcbusirq == irq))
 			break;
@@ -872,7 +876,7 @@
 			    mp_irqs[i].mpc_dstapic == MP_APIC_ALL)
 				break;
 
-		if ((mp_bus_id_to_type[lbus] == MP_BUS_PCI) &&
+		if (!test_bit(lbus, mp_bus_not_pci) &&
 		    !mp_irqs[i].mpc_irqtype &&
 		    (bus == lbus) &&
 		    (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) {
@@ -921,6 +925,7 @@
 }
 #endif
 
+#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
 /*
  * EISA Edge/Level control register, ELCR
  */
@@ -934,6 +939,13 @@
 			"Broken MPtable reports ISA irq %d\n", irq);
 	return 0;
 }
+#endif
+
+/* ISA interrupts are always polarity zero edge triggered,
+ * when listed as conforming in the MP table. */
+
+#define default_ISA_trigger(idx)	(0)
+#define default_ISA_polarity(idx)	(0)
 
 /* EISA interrupts are always polarity zero and can be edge or level
  * trigger depending on the ELCR value.  If an interrupt is listed as
@@ -941,13 +953,7 @@
  * be read in from the ELCR */
 
 #define default_EISA_trigger(idx)	(EISA_ELCR(mp_irqs[idx].mpc_srcbusirq))
-#define default_EISA_polarity(idx)	(0)
-
-/* ISA interrupts are always polarity zero edge triggered,
- * when listed as conforming in the MP table. */
-
-#define default_ISA_trigger(idx)	(0)
-#define default_ISA_polarity(idx)	(0)
+#define default_EISA_polarity(idx)	default_ISA_polarity(idx)
 
 /* PCI interrupts are always polarity one level triggered,
  * when listed as conforming in the MP table. */
@@ -959,7 +965,7 @@
  * when listed as conforming in the MP table. */
 
 #define default_MCA_trigger(idx)	(1)
-#define default_MCA_polarity(idx)	(0)
+#define default_MCA_polarity(idx)	default_ISA_polarity(idx)
 
 static int MPBIOS_polarity(int idx)
 {
@@ -973,35 +979,9 @@
 	{
 		case 0: /* conforms, ie. bus-type dependent polarity */
 		{
-			switch (mp_bus_id_to_type[bus])
-			{
-				case MP_BUS_ISA: /* ISA pin */
-				{
-					polarity = default_ISA_polarity(idx);
-					break;
-				}
-				case MP_BUS_EISA: /* EISA pin */
-				{
-					polarity = default_EISA_polarity(idx);
-					break;
-				}
-				case MP_BUS_PCI: /* PCI pin */
-				{
-					polarity = default_PCI_polarity(idx);
-					break;
-				}
-				case MP_BUS_MCA: /* MCA pin */
-				{
-					polarity = default_MCA_polarity(idx);
-					break;
-				}
-				default:
-				{
-					printk(KERN_WARNING "broken BIOS!!\n");
-					polarity = 1;
-					break;
-				}
-			}
+			polarity = test_bit(bus, mp_bus_not_pci) ?
+				default_ISA_polarity(idx) :
+				default_PCI_polarity(idx);
 			break;
 		}
 		case 1: /* high active */
@@ -1042,11 +1022,15 @@
 	{
 		case 0: /* conforms, ie. bus-type dependent */
 		{
+			trigger = test_bit(bus, mp_bus_not_pci) ?
+					default_ISA_trigger(idx) :
+					default_PCI_trigger(idx);
+#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
 			switch (mp_bus_id_to_type[bus])
 			{
 				case MP_BUS_ISA: /* ISA pin */
 				{
-					trigger = default_ISA_trigger(idx);
+					/* set before the switch */
 					break;
 				}
 				case MP_BUS_EISA: /* EISA pin */
@@ -1056,7 +1040,7 @@
 				}
 				case MP_BUS_PCI: /* PCI pin */
 				{
-					trigger = default_PCI_trigger(idx);
+					/* set before the switch */
 					break;
 				}
 				case MP_BUS_MCA: /* MCA pin */
@@ -1071,6 +1055,7 @@
 					break;
 				}
 			}
+#endif
 			break;
 		}
 		case 1: /* edge */
@@ -1120,39 +1105,22 @@
 	if (mp_irqs[idx].mpc_dstirq != pin)
 		printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
 
-	switch (mp_bus_id_to_type[bus])
-	{
-		case MP_BUS_ISA: /* ISA pin */
-		case MP_BUS_EISA:
-		case MP_BUS_MCA:
-		{
-			irq = mp_irqs[idx].mpc_srcbusirq;
-			break;
-		}
-		case MP_BUS_PCI: /* PCI pin */
-		{
-			/*
-			 * PCI IRQs are mapped in order
-			 */
-			i = irq = 0;
-			while (i < apic)
-				irq += nr_ioapic_registers[i++];
-			irq += pin;
+	if (test_bit(bus, mp_bus_not_pci)) {
+		irq = mp_irqs[idx].mpc_srcbusirq;
+	} else {
+		/*
+		 * PCI IRQs are mapped in order
+		 */
+		i = irq = 0;
+		while (i < apic)
+			irq += nr_ioapic_registers[i++];
+		irq += pin;
 
-			/*
-			 * For MPS mode, so far only needed by ES7000 platform
-			 */
-			if (ioapic_renumber_irq)
-				irq = ioapic_renumber_irq(apic, irq);
-
-			break;
-		}
-		default:
-		{
-			printk(KERN_ERR "unknown bus type %d.\n",bus); 
-			irq = 0;
-			break;
-		}
+		/*
+		 * For MPS mode, so far only needed by ES7000 platform
+		 */
+		if (ioapic_renumber_irq)
+			irq = ioapic_renumber_irq(apic, irq);
 	}
 
 	/*
@@ -1260,7 +1228,6 @@
 {
 	struct IO_APIC_route_entry entry;
 	int apic, pin, idx, irq, first_notcon = 1, vector;
-	unsigned long flags;
 
 	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
 
@@ -1326,9 +1293,7 @@
 			if (!apic && (irq < 16))
 				disable_8259A_irq(irq);
 		}
-		spin_lock_irqsave(&ioapic_lock, flags);
-		__ioapic_write_entry(apic, pin, entry);
-		spin_unlock_irqrestore(&ioapic_lock, flags);
+		ioapic_write_entry(apic, pin, entry);
 	}
 	}
 
@@ -1524,8 +1489,8 @@
 
 	printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
 		smp_processor_id(), hard_smp_processor_id());
-	v = apic_read(APIC_ID);
-	printk(KERN_INFO "... APIC ID:      %08x (%01x)\n", v, GET_APIC_ID(v));
+	printk(KERN_INFO "... APIC ID:      %08x (%01x)\n", v,
+			GET_APIC_ID(read_apic_id()));
 	v = apic_read(APIC_LVR);
 	printk(KERN_INFO "... APIC VERSION: %08x\n", v);
 	ver = GET_APIC_VERSION(v);
@@ -1734,7 +1699,7 @@
 		entry.delivery_mode   = dest_ExtINT; /* ExtInt */
 		entry.vector          = 0;
 		entry.dest.physical.physical_dest =
-					GET_APIC_ID(apic_read(APIC_ID));
+					GET_APIC_ID(read_apic_id());
 
 		/*
 		 * Add it to the IO-APIC irq-routing table:
@@ -2031,8 +1996,7 @@
 	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
 	 */
 	for (irq = 0; irq < NR_IRQS ; irq++) {
-		int tmp = irq;
-		if (IO_APIC_IRQ(tmp) && !irq_vector[tmp]) {
+		if (IO_APIC_IRQ(irq) && !irq_vector[irq]) {
 			/*
 			 * Hmm.. We don't have an entry for this,
 			 * so default to an old-fashioned 8259
@@ -2156,8 +2120,6 @@
 	ioapic_write_entry(apic, pin, entry0);
 }
 
-int timer_uses_ioapic_pin_0;
-
 /*
  * This code may look a bit paranoid, but it's supposed to cooperate with
  * a wide range of boards and BIOS bugs.  Fortunately only the timer IRQ
@@ -2168,10 +2130,14 @@
 {
 	int apic1, pin1, apic2, pin2;
 	int vector;
+	unsigned int ver;
 	unsigned long flags;
 
 	local_irq_save(flags);
 
+	ver = apic_read(APIC_LVR);
+	ver = GET_APIC_VERSION(ver);
+
 	/*
 	 * get/set the timer IRQ vector:
 	 */
@@ -2184,11 +2150,15 @@
 	 * mode for the 8259A whenever interrupts are routed
 	 * through I/O APICs.  Also IRQ0 has to be enabled in
 	 * the 8259A which implies the virtual wire has to be
-	 * disabled in the local APIC.
+	 * disabled in the local APIC.  Finally timer interrupts
+	 * need to be acknowledged manually in the 8259A for
+	 * timer_interrupt() and for the i82489DX when using
+	 * the NMI watchdog.
 	 */
 	apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
 	init_8259A(1);
-	timer_ack = 1;
+	timer_ack = !cpu_has_tsc;
+	timer_ack |= (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));
 	if (timer_over_8254 > 0)
 		enable_8259A_irq(0);
 
@@ -2197,9 +2167,6 @@
 	pin2  = ioapic_i8259.pin;
 	apic2 = ioapic_i8259.apic;
 
-	if (pin1 == 0)
-		timer_uses_ioapic_pin_0 = 1;
-
 	printk(KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
 		vector, apic1, pin1, apic2, pin2);
 
@@ -2789,7 +2756,6 @@
 int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low)
 {
 	struct IO_APIC_route_entry entry;
-	unsigned long flags;
 
 	if (!IO_APIC_IRQ(irq)) {
 		printk(KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
@@ -2830,9 +2796,7 @@
 	if (!ioapic && (irq < 16))
 		disable_8259A_irq(irq);
 
-	spin_lock_irqsave(&ioapic_lock, flags);
-	__ioapic_write_entry(ioapic, pin, entry);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	ioapic_write_entry(ioapic, pin, entry);
 
 	return 0;
 }
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
index 1627c0d..b54464b 100644
--- a/arch/x86/kernel/io_apic_64.c
+++ b/arch/x86/kernel/io_apic_64.c
@@ -43,13 +43,15 @@
 #include <asm/smp.h>
 #include <asm/desc.h>
 #include <asm/proto.h>
-#include <asm/mach_apic.h>
 #include <asm/acpi.h>
 #include <asm/dma.h>
 #include <asm/nmi.h>
 #include <asm/msidef.h>
 #include <asm/hypertransport.h>
 
+#include <mach_ipi.h>
+#include <mach_apic.h>
+
 struct irq_cfg {
 	cpumask_t domain;
 	cpumask_t old_domain;
@@ -101,6 +103,16 @@
  */
 int nr_ioapic_registers[MAX_IO_APICS];
 
+/* I/O APIC entries */
+struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
+int nr_ioapics;
+
+/* MP IRQ source entries */
+struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
+
+/* # of MP IRQ source entries */
+int mp_irq_entries;
+
 /*
  * Rough estimation of how many shared IRQs there are, can
  * be changed anytime.
@@ -155,11 +167,10 @@
 	writel(value, &io_apic->data);
 }
 
-static int io_apic_level_ack_pending(unsigned int irq)
+static bool io_apic_level_ack_pending(unsigned int irq)
 {
 	struct irq_pin_list *entry;
 	unsigned long flags;
-	int pending = 0;
 
 	spin_lock_irqsave(&ioapic_lock, flags);
 	entry = irq_2_pin + irq;
@@ -172,13 +183,17 @@
 			break;
 		reg = io_apic_read(entry->apic, 0x10 + pin*2);
 		/* Is the remote IRR bit set? */
-		pending |= (reg >> 14) & 1;
+		if ((reg >> 14) & 1) {
+			spin_unlock_irqrestore(&ioapic_lock, flags);
+			return true;
+		}
 		if (!entry->next)
 			break;
 		entry = irq_2_pin + entry->next;
 	}
 	spin_unlock_irqrestore(&ioapic_lock, flags);
-	return pending;
+
+	return false;
 }
 
 /*
@@ -902,9 +917,8 @@
 static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector)
 {
 	struct IO_APIC_route_entry entry;
-	unsigned long flags;
 
-	memset(&entry,0,sizeof(entry));
+	memset(&entry, 0, sizeof(entry));
 
 	disable_8259A_irq(0);
 
@@ -932,10 +946,7 @@
 	/*
 	 * Add it to the IO-APIC irq-routing table:
 	 */
-	spin_lock_irqsave(&ioapic_lock, flags);
-	io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
-	io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	ioapic_write_entry(apic, pin, entry);
 
 	enable_8259A_irq(0);
 }
@@ -1066,8 +1077,7 @@
 
 	printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
 		smp_processor_id(), hard_smp_processor_id());
-	v = apic_read(APIC_ID);
-	printk(KERN_INFO "... APIC ID:      %08x (%01x)\n", v, GET_APIC_ID(v));
+	printk(KERN_INFO "... APIC ID:      %08x (%01x)\n", v, GET_APIC_ID(read_apic_id()));
 	v = apic_read(APIC_LVR);
 	printk(KERN_INFO "... APIC VERSION: %08x\n", v);
 	ver = GET_APIC_VERSION(v);
@@ -1261,7 +1271,7 @@
 		entry.dest_mode       = 0; /* Physical */
 		entry.delivery_mode   = dest_ExtINT; /* ExtInt */
 		entry.vector          = 0;
-		entry.dest          = GET_APIC_ID(apic_read(APIC_ID));
+		entry.dest          = GET_APIC_ID(read_apic_id());
 
 		/*
 		 * Add it to the IO-APIC irq-routing table:
@@ -1352,9 +1362,7 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&vector_lock, flags);
-	cpus_clear(mask);
-	cpu_set(first_cpu(cfg->domain), mask);
-
+	mask = cpumask_of_cpu(first_cpu(cfg->domain));
 	send_IPI_mask(mask, cfg->vector);
 	spin_unlock_irqrestore(&vector_lock, flags);
 
@@ -1517,8 +1525,7 @@
 	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
 	 */
 	for (irq = 0; irq < NR_IRQS ; irq++) {
-		int tmp = irq;
-		if (IO_APIC_IRQ(tmp) && !irq_cfg[tmp].vector) {
+		if (IO_APIC_IRQ(irq) && !irq_cfg[irq].vector) {
 			/*
 			 * Hmm.. We don't have an entry for this,
 			 * so default to an old-fashioned 8259
@@ -1597,17 +1604,14 @@
 	int apic, pin, i;
 	struct IO_APIC_route_entry entry0, entry1;
 	unsigned char save_control, save_freq_select;
-	unsigned long flags;
 
 	pin  = find_isa_irq_pin(8, mp_INT);
 	apic = find_isa_irq_apic(8, mp_INT);
 	if (pin == -1)
 		return;
 
-	spin_lock_irqsave(&ioapic_lock, flags);
-	*(((int *)&entry0) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
-	*(((int *)&entry0) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	entry0 = ioapic_read_entry(apic, pin);
+
 	clear_IO_APIC_pin(apic, pin);
 
 	memset(&entry1, 0, sizeof(entry1));
@@ -1620,10 +1624,7 @@
 	entry1.trigger = 0;
 	entry1.vector = 0;
 
-	spin_lock_irqsave(&ioapic_lock, flags);
-	io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry1) + 1));
-	io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry1) + 0));
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	ioapic_write_entry(apic, pin, entry1);
 
 	save_control = CMOS_READ(RTC_CONTROL);
 	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
@@ -1642,10 +1643,7 @@
 	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
 	clear_IO_APIC_pin(apic, pin);
 
-	spin_lock_irqsave(&ioapic_lock, flags);
-	io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry0) + 1));
-	io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry0) + 0));
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	ioapic_write_entry(apic, pin, entry0);
 }
 
 /*
@@ -2314,7 +2312,6 @@
 	res = (void *)mem;
 
 	if (mem != NULL) {
-		memset(mem, 0, n);
 		mem += sizeof(struct resource) * nr_ioapics;
 
 		for (i = 0; i < nr_ioapics; i++) {
diff --git a/arch/x86/kernel/ipi.c b/arch/x86/kernel/ipi.c
new file mode 100644
index 0000000..c0df7b8
--- /dev/null
+++ b/arch/x86/kernel/ipi.c
@@ -0,0 +1,178 @@
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/kernel_stat.h>
+#include <linux/mc146818rtc.h>
+#include <linux/cache.h>
+#include <linux/interrupt.h>
+#include <linux/cpu.h>
+#include <linux/module.h>
+
+#include <asm/smp.h>
+#include <asm/mtrr.h>
+#include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
+#include <asm/apic.h>
+#include <asm/proto.h>
+
+#ifdef CONFIG_X86_32
+#include <mach_apic.h>
+/*
+ * The following functions deal with sending IPIs between CPUs.
+ *
+ * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
+ */
+
+static inline int __prepare_ICR(unsigned int shortcut, int vector)
+{
+	unsigned int icr = shortcut | APIC_DEST_LOGICAL;
+
+	switch (vector) {
+	default:
+		icr |= APIC_DM_FIXED | vector;
+		break;
+	case NMI_VECTOR:
+		icr |= APIC_DM_NMI;
+		break;
+	}
+	return icr;
+}
+
+static inline int __prepare_ICR2(unsigned int mask)
+{
+	return SET_APIC_DEST_FIELD(mask);
+}
+
+void __send_IPI_shortcut(unsigned int shortcut, int vector)
+{
+	/*
+	 * Subtle. In the case of the 'never do double writes' workaround
+	 * we have to lock out interrupts to be safe.  As we don't care
+	 * about the value read, we use an atomic rmw access to avoid costly
+	 * cli/sti.  Otherwise we use an even cheaper single atomic write
+	 * to the APIC.
+	 */
+	unsigned int cfg;
+
+	/*
+	 * Wait for idle.
+	 */
+	apic_wait_icr_idle();
+
+	/*
+	 * No need to touch the target chip field
+	 */
+	cfg = __prepare_ICR(shortcut, vector);
+
+	/*
+	 * Send the IPI. The write to APIC_ICR fires this off.
+	 */
+	apic_write_around(APIC_ICR, cfg);
+}
+
+void send_IPI_self(int vector)
+{
+	__send_IPI_shortcut(APIC_DEST_SELF, vector);
+}
+
+/*
+ * This is used to send an IPI with no shorthand notation (the destination is
+ * specified in bits 56 to 63 of the ICR).
+ */
+static inline void __send_IPI_dest_field(unsigned long mask, int vector)
+{
+	unsigned long cfg;
+
+	/*
+	 * Wait for idle.
+	 */
+	if (unlikely(vector == NMI_VECTOR))
+		safe_apic_wait_icr_idle();
+	else
+		apic_wait_icr_idle();
+
+	/*
+	 * prepare target chip field
+	 */
+	cfg = __prepare_ICR2(mask);
+	apic_write_around(APIC_ICR2, cfg);
+
+	/*
+	 * program the ICR
+	 */
+	cfg = __prepare_ICR(0, vector);
+
+	/*
+	 * Send the IPI. The write to APIC_ICR fires this off.
+	 */
+	apic_write_around(APIC_ICR, cfg);
+}
+
+/*
+ * This is only used on smaller machines.
+ */
+void send_IPI_mask_bitmask(cpumask_t cpumask, int vector)
+{
+	unsigned long mask = cpus_addr(cpumask)[0];
+	unsigned long flags;
+
+	local_irq_save(flags);
+	WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]);
+	__send_IPI_dest_field(mask, vector);
+	local_irq_restore(flags);
+}
+
+void send_IPI_mask_sequence(cpumask_t mask, int vector)
+{
+	unsigned long flags;
+	unsigned int query_cpu;
+
+	/*
+	 * Hack. The clustered APIC addressing mode doesn't allow us to send
+ * to an arbitrary mask, so we do a unicast to each CPU instead. This
+	 * should be modified to do 1 message per cluster ID - mbligh
+	 */
+
+	local_irq_save(flags);
+	for_each_possible_cpu(query_cpu) {
+		if (cpu_isset(query_cpu, mask)) {
+			__send_IPI_dest_field(cpu_to_logical_apicid(query_cpu),
+					      vector);
+		}
+	}
+	local_irq_restore(flags);
+}
+
+/* must come after the send_IPI functions above for inlining */
+#include <mach_ipi.h>
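+
+/* Reverse-map a hardware APIC ID to its logical CPU number, or -1 if none */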
+static int convert_apicid_to_cpu(int apic_id)
+{
+	int i;
+
+	for_each_possible_cpu(i) {
+		if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
+			return i;
+	}
+	return -1;
+}
+
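+/*
+ * Resolve the current CPU from its hardware APIC ID, falling back to
+ * CPU 0 whenever that is not possible.
+ */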
+int safe_smp_processor_id(void)
+{
+	int apicid, cpuid;
+
+	if (!boot_cpu_has(X86_FEATURE_APIC))
+		return 0;
+
+	apicid = hard_smp_processor_id();
+	if (apicid == BAD_APICID)
+		return 0;
+
+	cpuid = convert_apicid_to_cpu(apicid);
+
+	return cpuid >= 0 ? cpuid : 0;
+}
+#endif
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index cef054b..6ea67b7 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -79,7 +79,7 @@
 
 	if (unlikely((unsigned)irq >= NR_IRQS)) {
 		printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
-					__FUNCTION__, irq);
+					__func__, irq);
 		BUG();
 	}
 
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
new file mode 100644
index 0000000..24362ec
--- /dev/null
+++ b/arch/x86/kernel/kgdb.c
@@ -0,0 +1,571 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ */
+
+/*
+ * Copyright (C) 2004 Amit S. Kale <amitkale@linsyssoft.com>
+ * Copyright (C) 2000-2001 VERITAS Software Corporation.
+ * Copyright (C) 2002 Andi Kleen, SuSE Labs
+ * Copyright (C) 2004 LinSysSoft Technologies Pvt. Ltd.
+ * Copyright (C) 2007 MontaVista Software, Inc.
+ * Copyright (C) 2007-2008 Jason Wessel, Wind River Systems, Inc.
+ */
+/****************************************************************************
+ *  Contributor:     Lake Stevens Instrument Division
+ *  Written by:      Glenn Engel
+ *  Updated by:	     Amit Kale <akale@veritas.com>
+ *  Updated by:	     Tom Rini <trini@kernel.crashing.org>
+ *  Updated by:	     Jason Wessel <jason.wessel@windriver.com>
+ *  Modified for 386 by Jim Kingdon, Cygnus Support.
+ *  Original kgdb, compatibility with 2.1.xx kernel by
+ *  David Grothe <dave@gcom.com>
+ *  Integrated into 2.2.5 kernel by Tigran Aivazian <tigran@sco.com>
+ *  X86_64 changes from Andi Kleen's patch merged by Jim Houston
+ */
+#include <linux/spinlock.h>
+#include <linux/kdebug.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/kgdb.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/nmi.h>
+
+#include <asm/apicdef.h>
+#include <asm/system.h>
+
+#ifdef CONFIG_X86_32
+# include <mach_ipi.h>
+#else
+# include <asm/mach_apic.h>
+#endif
+
+/*
+ * Put the error code here just in case the user cares:
+ */
+static int gdb_x86errcode;
+
+/*
+ * Likewise, the vector number here (since GDB only gets the signal
+ * number through the usual means, and that's not very specific):
+ */
+static int gdb_x86vector = -1;
+
+/**
+ *	pt_regs_to_gdb_regs - Convert ptrace regs to GDB regs
+ *	@gdb_regs: A pointer to hold the registers in the order GDB wants.
+ *	@regs: The &struct pt_regs of the current process.
+ *
+ *	Convert the pt_regs in @regs into the format for registers that
+ *	GDB expects, stored in @gdb_regs.
+ */
+void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
+{
+	gdb_regs[GDB_AX]	= regs->ax;
+	gdb_regs[GDB_BX]	= regs->bx;
+	gdb_regs[GDB_CX]	= regs->cx;
+	gdb_regs[GDB_DX]	= regs->dx;
+	gdb_regs[GDB_SI]	= regs->si;
+	gdb_regs[GDB_DI]	= regs->di;
+	gdb_regs[GDB_BP]	= regs->bp;
+	gdb_regs[GDB_PS]	= regs->flags;
+	gdb_regs[GDB_PC]	= regs->ip;
+#ifdef CONFIG_X86_32
+	gdb_regs[GDB_DS]	= regs->ds;
+	gdb_regs[GDB_ES]	= regs->es;
+	gdb_regs[GDB_CS]	= regs->cs;
+	gdb_regs[GDB_SS]	= __KERNEL_DS;
+	gdb_regs[GDB_FS]	= 0xFFFF;
+	gdb_regs[GDB_GS]	= 0xFFFF;
+#else
+	gdb_regs[GDB_R8]	= regs->r8;
+	gdb_regs[GDB_R9]	= regs->r9;
+	gdb_regs[GDB_R10]	= regs->r10;
+	gdb_regs[GDB_R11]	= regs->r11;
+	gdb_regs[GDB_R12]	= regs->r12;
+	gdb_regs[GDB_R13]	= regs->r13;
+	gdb_regs[GDB_R14]	= regs->r14;
+	gdb_regs[GDB_R15]	= regs->r15;
+#endif
+	gdb_regs[GDB_SP]	= regs->sp;
+}
+
+/**
+ *	sleeping_thread_to_gdb_regs - Convert ptrace regs to GDB regs
+ *	@gdb_regs: A pointer to hold the registers in the order GDB wants.
+ *	@p: The &struct task_struct of the desired process.
+ *
+ *	Convert the register values of the sleeping process in @p to
+ *	the format that GDB expects.
+ *	This function is called when kgdb does not have access to the
+ *	&struct pt_regs and therefore it should fill the gdb registers
+ *	@gdb_regs with what has	been saved in &struct thread_struct
+ *	thread field during switch_to.
+ */
+void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
+{
+	gdb_regs[GDB_AX]	= 0;
+	gdb_regs[GDB_BX]	= 0;
+	gdb_regs[GDB_CX]	= 0;
+	gdb_regs[GDB_DX]	= 0;
+	gdb_regs[GDB_SI]	= 0;
+	gdb_regs[GDB_DI]	= 0;
+	gdb_regs[GDB_BP]	= *(unsigned long *)p->thread.sp;
+#ifdef CONFIG_X86_32
+	gdb_regs[GDB_DS]	= __KERNEL_DS;
+	gdb_regs[GDB_ES]	= __KERNEL_DS;
+	gdb_regs[GDB_PS]	= 0;
+	gdb_regs[GDB_CS]	= __KERNEL_CS;
+	gdb_regs[GDB_PC]	= p->thread.ip;
+	gdb_regs[GDB_SS]	= __KERNEL_DS;
+	gdb_regs[GDB_FS]	= 0xFFFF;
+	gdb_regs[GDB_GS]	= 0xFFFF;
+#else
+	gdb_regs[GDB_PS]	= *(unsigned long *)(p->thread.sp + 8);
+	gdb_regs[GDB_PC]	= 0;
+	gdb_regs[GDB_R8]	= 0;
+	gdb_regs[GDB_R9]	= 0;
+	gdb_regs[GDB_R10]	= 0;
+	gdb_regs[GDB_R11]	= 0;
+	gdb_regs[GDB_R12]	= 0;
+	gdb_regs[GDB_R13]	= 0;
+	gdb_regs[GDB_R14]	= 0;
+	gdb_regs[GDB_R15]	= 0;
+#endif
+	gdb_regs[GDB_SP]	= p->thread.sp;
+}
+
+/**
+ *	gdb_regs_to_pt_regs - Convert GDB regs to ptrace regs.
+ *	@gdb_regs: A pointer to hold the registers we've received from GDB.
+ *	@regs: A pointer to a &struct pt_regs to hold these values in.
+ *
+ *	Convert the GDB regs in @gdb_regs into the pt_regs, and store them
+ *	in @regs.
+ */
+void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
+{
+	regs->ax		= gdb_regs[GDB_AX];
+	regs->bx		= gdb_regs[GDB_BX];
+	regs->cx		= gdb_regs[GDB_CX];
+	regs->dx		= gdb_regs[GDB_DX];
+	regs->si		= gdb_regs[GDB_SI];
+	regs->di		= gdb_regs[GDB_DI];
+	regs->bp		= gdb_regs[GDB_BP];
+	regs->flags		= gdb_regs[GDB_PS];
+	regs->ip		= gdb_regs[GDB_PC];
+#ifdef CONFIG_X86_32
+	regs->ds		= gdb_regs[GDB_DS];
+	regs->es		= gdb_regs[GDB_ES];
+	regs->cs		= gdb_regs[GDB_CS];
+#else
+	regs->r8		= gdb_regs[GDB_R8];
+	regs->r9		= gdb_regs[GDB_R9];
+	regs->r10		= gdb_regs[GDB_R10];
+	regs->r11		= gdb_regs[GDB_R11];
+	regs->r12		= gdb_regs[GDB_R12];
+	regs->r13		= gdb_regs[GDB_R13];
+	regs->r14		= gdb_regs[GDB_R14];
+	regs->r15		= gdb_regs[GDB_R15];
+#endif
+}
+
+static struct hw_breakpoint {
+	unsigned		enabled;
+	unsigned		type;
+	unsigned		len;
+	unsigned long		addr;
+} breakinfo[4];
+
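+/*
+ * Make the debug registers match the breakinfo[] table: enable slots
+ * marked enabled, disable the rest, and (re)write breakpoint addresses.
+ */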
+static void kgdb_correct_hw_break(void)
+{
+	unsigned long dr7;
+	int correctit = 0;
+	int breakbit;
+	int breakno;
+
+	get_debugreg(dr7, 7);
+	for (breakno = 0; breakno < 4; breakno++) {
+		breakbit = 2 << (breakno << 1);
+		if (!(dr7 & breakbit) && breakinfo[breakno].enabled) {
+			correctit = 1;
+			dr7 |= breakbit;
+			dr7 &= ~(0xf0000 << (breakno << 2));
+			dr7 |= ((breakinfo[breakno].len << 2) |
+				 breakinfo[breakno].type) <<
+			       ((breakno << 2) + 16);
+			if (breakno >= 0 && breakno <= 3)
+				set_debugreg(breakinfo[breakno].addr, breakno);
+
+		} else {
+			if ((dr7 & breakbit) && !breakinfo[breakno].enabled) {
+				correctit = 1;
+				dr7 &= ~breakbit;
+				dr7 &= ~(0xf0000 << (breakno << 2));
+			}
+		}
+	}
+	if (correctit)
+		set_debugreg(dr7, 7);
+}
+
+static int
+kgdb_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
+{
+	int i;
+
+	for (i = 0; i < 4; i++)
+		if (breakinfo[i].addr == addr && breakinfo[i].enabled)
+			break;
+	if (i == 4)
+		return -1;
+
+	breakinfo[i].enabled = 0;
+
+	return 0;
+}
+
+static void kgdb_remove_all_hw_break(void)
+{
+	int i;
+
+	for (i = 0; i < 4; i++)
+		memset(&breakinfo[i], 0, sizeof(struct hw_breakpoint));
+}
+
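+/*
+ * Record the breakpoint in a free breakinfo[] slot; the debug registers
+ * themselves are programmed later by kgdb_correct_hw_break().
+ */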
+static int
+kgdb_set_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
+{
+	unsigned type;
+	int i;
+
+	for (i = 0; i < 4; i++)
+		if (!breakinfo[i].enabled)
+			break;
+	if (i == 4)
+		return -1;
+
+	switch (bptype) {
+	case BP_HARDWARE_BREAKPOINT:
+		type = 0;
+		len  = 1;
+		break;
+	case BP_WRITE_WATCHPOINT:
+		type = 1;
+		break;
+	case BP_ACCESS_WATCHPOINT:
+		type = 3;
+		break;
+	default:
+		return -1;
+	}
+
+	if (len == 1 || len == 2 || len == 4)
+		breakinfo[i].len  = len - 1;
+	else
+		return -1;
+
+	breakinfo[i].enabled = 1;
+	breakinfo[i].addr = addr;
+	breakinfo[i].type = type;
+
+	return 0;
+}
+
+/**
+ *	kgdb_disable_hw_debug - Disable hardware debugging while we are in kgdb.
+ *	@regs: Current &struct pt_regs.
+ *
+ *	This function will be called if the particular architecture must
+ *	disable hardware debugging while it is processing gdb packets or
+ *	handling exception.
+ */
+void kgdb_disable_hw_debug(struct pt_regs *regs)
+{
+	/* Disable hardware debugging while we are in kgdb: */
+	set_debugreg(0UL, 7);
+}
+
+/**
+ *	kgdb_post_primary_code - Save error vector/code numbers.
+ *	@regs: Original pt_regs.
+ *	@e_vector: Original error vector.
+ *	@err_code: Original error code.
+ *
+ *	This is needed on architectures which support SMP and KGDB.
+ *	This function is called after all the slave CPUs have been put
+ *	into a known spin state and the primary CPU has control over KGDB.
+ */
+void kgdb_post_primary_code(struct pt_regs *regs, int e_vector, int err_code)
+{
+	/* primary processor is completely in the debugger */
+	gdb_x86vector = e_vector;
+	gdb_x86errcode = err_code;
+}
+
+#ifdef CONFIG_SMP
+/**
+ *	kgdb_roundup_cpus - Get other CPUs into a holding pattern
+ *	@flags: Current IRQ state
+ *
+ *	On SMP systems, we need to get the attention of the other CPUs
+ *	and get them be in a known state.  This should do what is needed
+ *	to get the other CPUs to call kgdb_wait(). Note that on some arches,
+ *	the NMI approach is not used for rounding up all the CPUs. For example,
+ *	in case of MIPS, smp_call_function() is used to roundup CPUs. In
+ *	this case, we have to make sure that interrupts are enabled before
+ *	calling smp_call_function(). The argument to this function is
+ *	the flags that will be used when restoring the interrupts. There is
+ *	local_irq_save() call before kgdb_roundup_cpus().
+ *
+ *	On non-SMP systems, this is not called.
+ */
+void kgdb_roundup_cpus(unsigned long flags)
+{
+	send_IPI_allbutself(APIC_DM_NMI);
+}
+#endif
+
+/**
+ *	kgdb_arch_handle_exception - Handle architecture specific GDB packets.
+ *	@vector: The error vector of the exception that happened.
+ *	@signo: The signal number of the exception that happened.
+ *	@err_code: The error code of the exception that happened.
+ *	@remcom_in_buffer: The buffer of the packet we have read.
+ *	@remcom_out_buffer: The buffer of %BUFMAX bytes to write a packet into.
+ *	@regs: The &struct pt_regs of the current process.
+ *
+ *	This function MUST handle the 'c' and 's' command packets,
+ *	as well as packets to set / remove a hardware breakpoint, if used.
+ *	If there are additional packets which the hardware needs to handle,
+ *	they are handled here.  The code should return -1 if it wants to
+ *	process more packets, and a %0 or %1 if it wants to exit from the
+ *	kgdb callback.
+ */
+int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
+			       char *remcomInBuffer, char *remcomOutBuffer,
+			       struct pt_regs *linux_regs)
+{
+	unsigned long addr;
+	unsigned long dr6;
+	char *ptr;
+	int newPC;
+
+	switch (remcomInBuffer[0]) {
+	case 'c':
+	case 's':
+		/* try to read optional parameter, pc unchanged if no parm */
+		ptr = &remcomInBuffer[1];
+		if (kgdb_hex2long(&ptr, &addr))
+			linux_regs->ip = addr;
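+		/* fall through - 'D' (detach) and 'k' (kill) resume execution too */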
+	case 'D':
+	case 'k':
+		newPC = linux_regs->ip;
+
+		/* clear the trace bit */
+		linux_regs->flags &= ~X86_EFLAGS_TF;
+		atomic_set(&kgdb_cpu_doing_single_step, -1);
+
+		/* set the trace bit if we're stepping */
+		if (remcomInBuffer[0] == 's') {
+			linux_regs->flags |= X86_EFLAGS_TF;
+			kgdb_single_step = 1;
+			if (kgdb_contthread) {
+				atomic_set(&kgdb_cpu_doing_single_step,
+					   raw_smp_processor_id());
+			}
+		}
+
+		get_debugreg(dr6, 6);
+		if (!(dr6 & 0x4000)) {
+			int breakno;
+
+			for (breakno = 0; breakno < 4; breakno++) {
+				if (dr6 & (1 << breakno) &&
+				    breakinfo[breakno].type == 0) {
+					/* Set restore flag: */
+					linux_regs->flags |= X86_EFLAGS_RF;
+					break;
+				}
+			}
+		}
+		set_debugreg(0UL, 6);
+		kgdb_correct_hw_break();
+
+		return 0;
+	}
+
+	/* this means that we do not want to exit from the handler: */
+	return -1;
+}
+
+static inline int
+single_step_cont(struct pt_regs *regs, struct die_args *args)
+{
+	/*
+	 * Single step exception from kernel space to user space so
+	 * eat the exception and continue the process:
+	 */
+	printk(KERN_ERR "KGDB: trap/step from kernel to user space, "
+			"resuming...\n");
+	kgdb_arch_handle_exception(args->trapnr, args->signr,
+				   args->err, "c", "", regs);
+
+	return NOTIFY_STOP;
+}
+
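+/* set while handling the debugger roundup NMI; swallows the matching unknown NMI */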
+static int was_in_debug_nmi[NR_CPUS];
+
+static int __kgdb_notify(struct die_args *args, unsigned long cmd)
+{
+	struct pt_regs *regs = args->regs;
+
+	switch (cmd) {
+	case DIE_NMI:
+		if (atomic_read(&kgdb_active) != -1) {
+			/* KGDB CPU roundup */
+			kgdb_nmicallback(raw_smp_processor_id(), regs);
+			was_in_debug_nmi[raw_smp_processor_id()] = 1;
+			touch_nmi_watchdog();
+			return NOTIFY_STOP;
+		}
+		return NOTIFY_DONE;
+
+	case DIE_NMI_IPI:
+		if (atomic_read(&kgdb_active) != -1) {
+			/* KGDB CPU roundup */
+			kgdb_nmicallback(raw_smp_processor_id(), regs);
+			was_in_debug_nmi[raw_smp_processor_id()] = 1;
+			touch_nmi_watchdog();
+		}
+		return NOTIFY_DONE;
+
+	case DIE_NMIUNKNOWN:
+		if (was_in_debug_nmi[raw_smp_processor_id()]) {
+			was_in_debug_nmi[raw_smp_processor_id()] = 0;
+			return NOTIFY_STOP;
+		}
+		return NOTIFY_DONE;
+
+	case DIE_NMIWATCHDOG:
+		if (atomic_read(&kgdb_active) != -1) {
+			/* KGDB CPU roundup: */
+			kgdb_nmicallback(raw_smp_processor_id(), regs);
+			return NOTIFY_STOP;
+		}
+		/* Enter debugger: */
+		break;
+
+	case DIE_DEBUG:
+		if (atomic_read(&kgdb_cpu_doing_single_step) ==
+			raw_smp_processor_id() &&
+			user_mode(regs))
+			return single_step_cont(regs, args);
+		/* fall through */
+	default:
+		if (user_mode(regs))
+			return NOTIFY_DONE;
+	}
+
+	if (kgdb_handle_exception(args->trapnr, args->signr, args->err, regs))
+		return NOTIFY_DONE;
+
+	/* Must touch watchdog before return to normal operation */
+	touch_nmi_watchdog();
+	return NOTIFY_STOP;
+}
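
The NMI cases above implement a small handshake; restated as a comment (a
summary of the code, not additional behavior):

/*
 * Roundup handshake in __kgdb_notify():
 *
 *   DIE_NMI / DIE_NMI_IPI  while kgdb is active: park this CPU via
 *                          kgdb_nmicallback() and mark it in
 *                          was_in_debug_nmi[].
 *   DIE_NMIUNKNOWN         swallow exactly one "unknown" NMI on a
 *                          CPU that was parked, so the debug NMI is
 *                          not reported as spurious.
 */
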
+
+static int
+kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
+{
+	unsigned long flags;
+	int ret;
+
+	local_irq_save(flags);
+	ret = __kgdb_notify(ptr, cmd);
+	local_irq_restore(flags);
+
+	return ret;
+}
+
+static struct notifier_block kgdb_notifier = {
+	.notifier_call	= kgdb_notify,
+
+	/*
+	 * Lowest notifier priority: we want to be notified last.
+	 */
+	.priority	= -INT_MAX,
+};
+
+/**
+ *	kgdb_arch_init - Perform any architecture specific initialization.
+ *
+ *	This function will handle the initialization of any architecture
+ *	specific callbacks.
+ */
+int kgdb_arch_init(void)
+{
+	return register_die_notifier(&kgdb_notifier);
+}
+
+/**
+ *	kgdb_arch_exit - Perform any architecture specific uninitialization.
+ *
+ *	This function will handle the uninitialization of any architecture
+ *	specific callbacks, for dynamic registration and unregistration.
+ */
+void kgdb_arch_exit(void)
+{
+	unregister_die_notifier(&kgdb_notifier);
+}
+
+/**
+ *	kgdb_skipexception - Bail out of KGDB when we've been triggered.
+ *	@exception: Exception vector number
+ *	@regs: Current &struct pt_regs.
+ *
+ *	On some architectures we need to skip a breakpoint exception when
+ *	it occurs after a breakpoint has been removed.
+ *
+ * Skip an int3 exception when it occurs after a breakpoint has been
+ * removed. Backtrack eip by 1 since the int3 would have caused it to
+ * increment by 1.
+ */
+int kgdb_skipexception(int exception, struct pt_regs *regs)
+{
+	if (exception == 3 && kgdb_isremovedbreak(regs->ip - 1)) {
+		regs->ip -= 1;
+		return 1;
+	}
+	return 0;
+}
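
Why the -1: int3 (0xcc, the gdb_bpt_instr used below) is a single-byte
instruction and the #BP trap reports the address after it. A worked picture
(descriptive only):

/*
 * addr:     0xcc           <- software breakpoint planted by kgdb
 * addr + 1: original code...
 *
 * On the trap, regs->ip == addr + 1.  If kgdb has since removed the
 * breakpoint at addr, the trap is stale: rewind ip to addr and report
 * the exception as handled (return 1).
 */
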
+
+unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
+{
+	if (exception == 3)
+		return instruction_pointer(regs) - 1;
+	return instruction_pointer(regs);
+}
+
+struct kgdb_arch arch_kgdb_ops = {
+	/* Breakpoint instruction: */
+	.gdb_bpt_instr		= { 0xcc },
+	.flags			= KGDB_HW_BREAKPOINT,
+	.set_hw_breakpoint	= kgdb_set_hw_break,
+	.remove_hw_breakpoint	= kgdb_remove_hw_break,
+	.remove_all_hw_break	= kgdb_remove_all_hw_break,
+	.correct_hw_break	= kgdb_correct_hw_break,
+};
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index 34a5912..b8c6743 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -410,13 +410,13 @@
 static void __kprobes clear_btf(void)
 {
 	if (test_thread_flag(TIF_DEBUGCTLMSR))
-		wrmsrl(MSR_IA32_DEBUGCTLMSR, 0);
+		update_debugctlmsr(0);
 }
 
 static void __kprobes restore_btf(void)
 {
 	if (test_thread_flag(TIF_DEBUGCTLMSR))
-		wrmsrl(MSR_IA32_DEBUGCTLMSR, current->thread.debugctlmsr);
+		update_debugctlmsr(current->thread.debugctlmsr);
 }
 
 static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
@@ -489,7 +489,7 @@
 		break;
 	case KPROBE_HIT_SS:
 		if (p == kprobe_running()) {
-			regs->flags &= ~TF_MASK;
+			regs->flags &= ~X86_EFLAGS_TF;
 			regs->flags |= kcb->kprobe_saved_flags;
 			return 0;
 		} else {
@@ -858,15 +858,15 @@
 	if (!cur)
 		return 0;
 
+	resume_execution(cur, regs, kcb);
+	regs->flags |= kcb->kprobe_saved_flags;
+	trace_hardirqs_fixup_flags(regs->flags);
+
 	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
 		kcb->kprobe_status = KPROBE_HIT_SSDONE;
 		cur->post_handler(cur, regs, 0);
 	}
 
-	resume_execution(cur, regs, kcb);
-	regs->flags |= kcb->kprobe_saved_flags;
-	trace_hardirqs_fixup_flags(regs->flags);
-
 	/* Restore back the original saved kprobes variables and continue. */
 	if (kcb->kprobe_status == KPROBE_REENTER) {
 		restore_previous_kprobe(kcb);
diff --git a/arch/x86/kernel/mca_32.c b/arch/x86/kernel/mca_32.c
index 9482033..2dc1837 100644
--- a/arch/x86/kernel/mca_32.c
+++ b/arch/x86/kernel/mca_32.c
@@ -53,9 +53,9 @@
 #include <linux/init.h>
 #include <asm/arch_hooks.h>
 
-static unsigned char which_scsi = 0;
+static unsigned char which_scsi;
 
-int MCA_bus = 0;
+int MCA_bus;
 EXPORT_SYMBOL(MCA_bus);
 
 /*
@@ -68,15 +68,17 @@
 
 /* Build the status info for the adapter */
 
-static void mca_configure_adapter_status(struct mca_device *mca_dev) {
+static void mca_configure_adapter_status(struct mca_device *mca_dev)
+{
 	mca_dev->status = MCA_ADAPTER_NONE;
 
 	mca_dev->pos_id = mca_dev->pos[0]
 		+ (mca_dev->pos[1] << 8);
 
-	if(!mca_dev->pos_id && mca_dev->slot < MCA_MAX_SLOT_NR) {
+	if (!mca_dev->pos_id && mca_dev->slot < MCA_MAX_SLOT_NR) {
 
-		/* id = 0x0000 usually indicates hardware failure,
+		/*
+		 * id = 0x0000 usually indicates hardware failure,
 		 * however, ZP Gu (zpg@castle.net) reports that his 9556
 		 * has 0x0000 as id and everything still works. There
 		 * also seems to be an adapter with id = 0x0000; the
@@ -87,9 +89,10 @@
 		mca_dev->status = MCA_ADAPTER_ERROR;
 
 		return;
-	} else if(mca_dev->pos_id != 0xffff) {
+	} else if (mca_dev->pos_id != 0xffff) {
 
-		/* 0xffff usually indicates that there's no adapter,
+		/*
+		 * 0xffff usually indicates that there's no adapter,
 		 * however, some integrated adapters may have 0xffff as
 		 * their id and still be valid. Examples are on-board
 		 * VGA of the 55sx, the integrated SCSI of the 56 & 57,
@@ -99,19 +102,19 @@
 		mca_dev->status = MCA_ADAPTER_NORMAL;
 	}
 
-	if((mca_dev->pos_id == 0xffff ||
+	if ((mca_dev->pos_id == 0xffff ||
 	    mca_dev->pos_id == 0x0000) && mca_dev->slot >= MCA_MAX_SLOT_NR) {
 		int j;
 
-		for(j = 2; j < 8; j++) {
-			if(mca_dev->pos[j] != 0xff) {
+		for (j = 2; j < 8; j++) {
+			if (mca_dev->pos[j] != 0xff) {
 				mca_dev->status = MCA_ADAPTER_NORMAL;
 				break;
 			}
 		}
 	}
 
-	if(!(mca_dev->pos[2] & MCA_ENABLED)) {
+	if (!(mca_dev->pos[2] & MCA_ENABLED)) {
 
 		/* enabled bit is in POS 2 */
 
@@ -133,7 +136,7 @@
 
 #define MCA_STANDARD_RESOURCES	ARRAY_SIZE(mca_standard_resources)
 
-/**
+/*
  *	mca_read_and_store_pos - read the POS registers into a memory buffer
  *      @pos: a char pointer to 8 bytes, contains the POS register value on
  *            successful return
@@ -141,12 +144,14 @@
  *	Returns 1 if a card actually exists (i.e. the pos isn't
  *	all 0xff) or 0 otherwise
  */
-static int mca_read_and_store_pos(unsigned char *pos) {
+static int mca_read_and_store_pos(unsigned char *pos)
+{
 	int j;
 	int found = 0;
 
-	for(j=0; j<8; j++) {
-		if((pos[j] = inb_p(MCA_POS_REG(j))) != 0xff) {
+	for (j = 0; j < 8; j++) {
+		pos[j] = inb_p(MCA_POS_REG(j));
+		if (pos[j] != 0xff) {
 			/* 0xff all across means no device. 0x00 means
 			 * something's broken, but a device is
 			 * probably there.  However, if you get 0x00
@@ -167,11 +172,11 @@
 	unsigned char byte;
 	unsigned long flags;
 
-	if(reg < 0 || reg >= 8)
+	if (reg < 0 || reg >= 8)
 		return 0;
 
 	spin_lock_irqsave(&mca_lock, flags);
-	if(mca_dev->pos_register) {
+	if (mca_dev->pos_register) {
 		/* Disable adapter setup, enable motherboard setup */
 
 		outb_p(0, MCA_ADAPTER_SETUP_REG);
@@ -203,7 +208,7 @@
 {
 	unsigned long flags;
 
-	if(reg < 0 || reg >= 8)
+	if (reg < 0 || reg >= 8)
 		return;
 
 	spin_lock_irqsave(&mca_lock, flags);
@@ -227,17 +232,17 @@
 }
 
 /* for the primary MCA bus, we have identity transforms */
-static int mca_dummy_transform_irq(struct mca_device * mca_dev, int irq)
+static int mca_dummy_transform_irq(struct mca_device *mca_dev, int irq)
 {
 	return irq;
 }
 
-static int mca_dummy_transform_ioport(struct mca_device * mca_dev, int port)
+static int mca_dummy_transform_ioport(struct mca_device *mca_dev, int port)
 {
 	return port;
 }
 
-static void *mca_dummy_transform_memory(struct mca_device * mca_dev, void *mem)
+static void *mca_dummy_transform_memory(struct mca_device *mca_dev, void *mem)
 {
 	return mem;
 }
@@ -251,7 +256,8 @@
 	short mca_builtin_scsi_ports[] = {0xf7, 0xfd, 0x00};
 	struct mca_bus *bus;
 
-	/* WARNING: Be careful when making changes here. Putting an adapter
+	/*
+	 * WARNING: Be careful when making changes here. Putting an adapter
 	 * and the motherboard simultaneously into setup mode may result in
 	 * damage to chips (according to The Indispensable PC Hardware Book
 	 * by Hans-Peter Messmer). Also, we disable system interrupts (so
@@ -283,7 +289,7 @@
 
 	/* get the motherboard device */
 	mca_dev = kzalloc(sizeof(struct mca_device), GFP_KERNEL);
-	if(unlikely(!mca_dev))
+	if (unlikely(!mca_dev))
 		goto out_nomem;
 
 	/*
@@ -309,7 +315,7 @@
 	mca_register_device(MCA_PRIMARY_BUS, mca_dev);
 
 	mca_dev = kzalloc(sizeof(struct mca_device), GFP_ATOMIC);
-	if(unlikely(!mca_dev))
+	if (unlikely(!mca_dev))
 		goto out_unlock_nomem;
 
 	/* Put motherboard into video setup mode, read integrated video
@@ -326,7 +332,8 @@
 	mca_dev->slot = MCA_INTEGVIDEO;
 	mca_register_device(MCA_PRIMARY_BUS, mca_dev);
 
-	/* Put motherboard into scsi setup mode, read integrated scsi
+	/*
+	 * Put motherboard into scsi setup mode, read integrated scsi
 	 * POS registers, and turn motherboard setup off.
 	 *
 	 * It seems there are two possible SCSI registers. Martin says that
@@ -338,18 +345,18 @@
 	 * machine.
 	 */
 
-	for(i = 0; (which_scsi = mca_builtin_scsi_ports[i]) != 0; i++) {
+	for (i = 0; (which_scsi = mca_builtin_scsi_ports[i]) != 0; i++) {
 		outb_p(which_scsi, MCA_MOTHERBOARD_SETUP_REG);
-		if(mca_read_and_store_pos(pos))
+		if (mca_read_and_store_pos(pos))
 			break;
 	}
-	if(which_scsi) {
+	if (which_scsi) {
 		/* found a scsi card */
 		mca_dev = kzalloc(sizeof(struct mca_device), GFP_ATOMIC);
-		if(unlikely(!mca_dev))
+		if (unlikely(!mca_dev))
 			goto out_unlock_nomem;
 
-		for(j = 0; j < 8; j++)
+		for (j = 0; j < 8; j++)
 			mca_dev->pos[j] = pos[j];
 
 		mca_configure_adapter_status(mca_dev);
@@ -364,21 +371,22 @@
 
 	outb_p(0xff, MCA_MOTHERBOARD_SETUP_REG);
 
-	/* Now loop over MCA slots: put each adapter into setup mode, and
+	/*
+	 * Now loop over MCA slots: put each adapter into setup mode, and
 	 * read its POS registers. Then put adapter setup off.
 	 */
 
-	for(i=0; i<MCA_MAX_SLOT_NR; i++) {
+	for (i = 0; i < MCA_MAX_SLOT_NR; i++) {
 		outb_p(0x8|(i&0xf), MCA_ADAPTER_SETUP_REG);
-		if(!mca_read_and_store_pos(pos))
+		if (!mca_read_and_store_pos(pos))
 			continue;
 
 		mca_dev = kzalloc(sizeof(struct mca_device), GFP_ATOMIC);
-		if(unlikely(!mca_dev))
+		if (unlikely(!mca_dev))
 			goto out_unlock_nomem;
 
-		for(j=0; j<8; j++)
-			mca_dev->pos[j]=pos[j];
+		for (j = 0; j < 8; j++)
+			mca_dev->pos[j] = pos[j];
 
 		mca_dev->driver_loaded = 0;
 		mca_dev->slot = i;
@@ -414,20 +422,20 @@
 {
 	int slot = mca_dev->slot;
 
-	if(slot == MCA_INTEGSCSI) {
+	if (slot == MCA_INTEGSCSI) {
 		printk(KERN_CRIT "NMI: caused by MCA integrated SCSI adapter (%s)\n",
 			mca_dev->name);
-	} else if(slot == MCA_INTEGVIDEO) {
+	} else if (slot == MCA_INTEGVIDEO) {
 		printk(KERN_CRIT "NMI: caused by MCA integrated video adapter (%s)\n",
 			mca_dev->name);
-	} else if(slot == MCA_MOTHERBOARD) {
+	} else if (slot == MCA_MOTHERBOARD) {
 		printk(KERN_CRIT "NMI: caused by motherboard (%s)\n",
 			mca_dev->name);
 	}
 
 	/* More info available in POS 6 and 7? */
 
-	if(check_flag) {
+	if (check_flag) {
 		unsigned char pos6, pos7;
 
 		pos6 = mca_device_read_pos(mca_dev, 6);
@@ -447,8 +455,9 @@
 
 	pos5 = mca_device_read_pos(mca_dev, 5);
 
-	if(!(pos5 & 0x80)) {
-		/* Bit 7 of POS 5 is reset when this adapter has a hardware
+	if (!(pos5 & 0x80)) {
+		/*
+		 *  Bit 7 of POS 5 is reset when this adapter has a hardware
+		 * error. Bit 6 is reset if there's error information
 		 * available in POS 6 and 7.
 		 */
@@ -460,7 +469,8 @@
 
 void __kprobes mca_handle_nmi(void)
 {
-	/* First try - scan the various adapters and see if a specific
+	/*
+	 *  First try - scan the various adapters and see if a specific
 	 * adapter was responsible for the error.
 	 */
 	bus_for_each_dev(&mca_bus_type, NULL, NULL, mca_handle_nmi_callback);
diff --git a/arch/x86/kernel/microcode.c b/arch/x86/kernel/microcode.c
index f2702d0..25cf6de 100644
--- a/arch/x86/kernel/microcode.c
+++ b/arch/x86/kernel/microcode.c
@@ -290,7 +290,7 @@
 	}
 	return 0;
 find:
-	pr_debug("microcode: CPU %d found a matching microcode update with"
+	pr_debug("microcode: CPU%d found a matching microcode update with"
 		" version 0x%x (current=0x%x)\n", cpu, mc_header->rev,uci->rev);
 	new_mc = vmalloc(total_size);
 	if (!new_mc) {
@@ -336,11 +336,11 @@
 
 	spin_unlock_irqrestore(&microcode_update_lock, flags);
 	if (val[1] != uci->mc->hdr.rev) {
-		printk(KERN_ERR "microcode: CPU%d updated from revision "
+		printk(KERN_ERR "microcode: CPU%d update from revision "
 			"0x%x to 0x%x failed\n", cpu_num, uci->rev, val[1]);
 		return;
 	}
-	pr_debug("microcode: CPU%d updated from revision "
+	printk(KERN_INFO "microcode: CPU%d updated from revision "
 	       "0x%x to 0x%x, date = %08x \n", 
 	       cpu_num, uci->rev, val[1], uci->mc->hdr.date);
 	uci->rev = val[1];
@@ -534,7 +534,7 @@
 		c->x86, c->x86_model, c->x86_mask);
 	error = request_firmware(&firmware, name, &microcode_pdev->dev);
 	if (error) {
-		pr_debug("ucode data file %s load failed\n", name);
+		pr_debug("microcode: ucode data file %s load failed\n", name);
 		return error;
 	}
 	buf = firmware->data;
@@ -709,7 +709,7 @@
 	if (!cpu_online(cpu))
 		return 0;
 
-	pr_debug("Microcode:CPU %d added\n", cpu);
+	pr_debug("microcode: CPU%d added\n", cpu);
 	memset(uci, 0, sizeof(*uci));
 
 	err = sysfs_create_group(&sys_dev->kobj, &mc_attr_group);
@@ -733,7 +733,7 @@
 	if (!cpu_online(cpu))
 		return 0;
 
-	pr_debug("Microcode:CPU %d removed\n", cpu);
+	pr_debug("microcode: CPU%d removed\n", cpu);
 	microcode_fini_cpu(cpu);
 	sysfs_remove_group(&sys_dev->kobj, &mc_attr_group);
 	return 0;
@@ -745,7 +745,7 @@
 
 	if (!cpu_online(cpu))
 		return 0;
-	pr_debug("Microcode:CPU %d resumed\n", cpu);
+	pr_debug("microcode: CPU%d resumed\n", cpu);
 	/* only CPU 0 will apply ucode here */
 	apply_microcode(0);
 	return 0;
@@ -783,7 +783,7 @@
 		}
 	case CPU_DOWN_FAILED_FROZEN:
 		if (sysfs_create_group(&sys_dev->kobj, &mc_attr_group))
-			printk(KERN_ERR "Microcode: Failed to create the sysfs "
+			printk(KERN_ERR "microcode: Failed to create the sysfs "
 				"group for CPU%d\n", cpu);
 		break;
 	case CPU_DOWN_PREPARE:
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
new file mode 100644
index 0000000..70744e3
--- /dev/null
+++ b/arch/x86/kernel/mpparse.c
@@ -0,0 +1,1105 @@
+/*
+ *	Intel Multiprocessor Specification 1.1 and 1.4
+ *	compliant MP-table parsing routines.
+ *
+ *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
+ *	(c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
+ *      (c) 2008 Alexey Starikovskiy <astarikovskiy@suse.de>
+ */
+
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/bootmem.h>
+#include <linux/kernel_stat.h>
+#include <linux/mc146818rtc.h>
+#include <linux/bitops.h>
+#include <linux/acpi.h>
+#include <linux/module.h>
+
+#include <asm/smp.h>
+#include <asm/mtrr.h>
+#include <asm/mpspec.h>
+#include <asm/pgalloc.h>
+#include <asm/io_apic.h>
+#include <asm/proto.h>
+#include <asm/acpi.h>
+#include <asm/bios_ebda.h>
+
+#include <mach_apic.h>
+#ifdef CONFIG_X86_32
+#include <mach_apicdef.h>
+#include <mach_mpparse.h>
+#endif
+
+/* Have we found an MP table */
+int smp_found_config;
+
+/*
+ * Various Linux-internal data structures created from the
+ * MP-table.
+ */
+#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
+int mp_bus_id_to_type[MAX_MP_BUSSES];
+#endif
+
+DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
+int mp_bus_id_to_pci_bus[MAX_MP_BUSSES] = {[0 ... MAX_MP_BUSSES - 1] = -1 };
+
+static int mp_current_pci_id;
+
+int pic_mode;
+
+/*
+ * Intel MP BIOS table parsing routines:
+ */
+
+/*
+ * Checksum an MP configuration block.
+ */
+
+static int __init mpf_checksum(unsigned char *mp, int len)
+{
+	int sum = 0;
+
+	while (len--)
+		sum += *mp++;
+
+	return sum & 0xFF;
+}
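
The table is valid when its bytes sum to zero mod 256, so a table writer
stores the two's complement of the running sum in the checksum byte. A
hedged sketch of that inverse operation (not part of this patch):

/*
 * Sketch: compute the checksum byte an MP-table writer would store
 * so that mpf_checksum() over the finished block returns 0.
 */
static unsigned char mpf_make_checksum(unsigned char *mp, int len)
{
	int sum = 0;

	while (len--)
		sum += *mp++;

	return (unsigned char)(-sum);	/* sum + (-sum) == 0 mod 256 */
}
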
+
+#ifdef CONFIG_X86_NUMAQ
+/*
+ * Have to match translation table entries to main table entries by counter
+ * hence the mpc_record variable .... can't see a less disgusting way of
+ * doing this ....
+ */
+
+static int mpc_record;
+static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY]
+    __cpuinitdata;
+#endif
+
+static void __cpuinit MP_processor_info(struct mpc_config_processor *m)
+{
+	int apicid;
+	char *bootup_cpu = "";
+
+	if (!(m->mpc_cpuflag & CPU_ENABLED)) {
+		disabled_cpus++;
+		return;
+	}
+#ifdef CONFIG_X86_NUMAQ
+	apicid = mpc_apic_id(m, translation_table[mpc_record]);
+#else
+	apicid = m->mpc_apicid;
+#endif
+	if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
+		bootup_cpu = " (Bootup-CPU)";
+		boot_cpu_physical_apicid = m->mpc_apicid;
+	}
+
+	printk(KERN_INFO "Processor #%d%s\n", m->mpc_apicid, bootup_cpu);
+	generic_processor_info(apicid, m->mpc_apicver);
+}
+
+static void __init MP_bus_info(struct mpc_config_bus *m)
+{
+	char str[7];
+
+	memcpy(str, m->mpc_bustype, 6);
+	str[6] = 0;
+
+#ifdef CONFIG_X86_NUMAQ
+	mpc_oem_bus_info(m, str, translation_table[mpc_record]);
+#else
+	Dprintk("Bus #%d is %s\n", m->mpc_busid, str);
+#endif
+
+#if MAX_MP_BUSSES < 256
+	if (m->mpc_busid >= MAX_MP_BUSSES) {
+		printk(KERN_WARNING "MP table busid value (%d) for bustype %s "
+		       " is too large, max. supported is %d\n",
+		       m->mpc_busid, str, MAX_MP_BUSSES - 1);
+		return;
+	}
+#endif
+
+	if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) {
+		 set_bit(m->mpc_busid, mp_bus_not_pci);
+#if defined(CONFIG_EISA) || defined (CONFIG_MCA)
+		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
+#endif
+	} else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) {
+#ifdef CONFIG_X86_NUMAQ
+		mpc_oem_pci_bus(m, translation_table[mpc_record]);
+#endif
+		clear_bit(m->mpc_busid, mp_bus_not_pci);
+		mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
+		mp_current_pci_id++;
+#if defined(CONFIG_EISA) || defined (CONFIG_MCA)
+		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
+	} else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) {
+		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
+	} else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA) - 1) == 0) {
+		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
+#endif
+	} else
+		printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str);
+}
+
+#ifdef CONFIG_X86_IO_APIC
+
+static int bad_ioapic(unsigned long address)
+{
+	if (nr_ioapics >= MAX_IO_APICS) {
+		printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
+		       "(found %d)\n", MAX_IO_APICS, nr_ioapics);
+		panic("Recompile kernel with bigger MAX_IO_APICS!\n");
+	}
+	if (!address) {
+		printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
+		       " found in table, skipping!\n");
+		return 1;
+	}
+	return 0;
+}
+
+static void __init MP_ioapic_info(struct mpc_config_ioapic *m)
+{
+	if (!(m->mpc_flags & MPC_APIC_USABLE))
+		return;
+
+	printk(KERN_INFO "I/O APIC #%d Version %d at 0x%X.\n",
+	       m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);
+
+	if (bad_ioapic(m->mpc_apicaddr))
+		return;
+
+	mp_ioapics[nr_ioapics] = *m;
+	nr_ioapics++;
+}
+
+static void __init MP_intsrc_info(struct mpc_config_intsrc *m)
+{
+	mp_irqs[mp_irq_entries] = *m;
+	Dprintk("Int: type %d, pol %d, trig %d, bus %d,"
+		" IRQ %02x, APIC ID %x, APIC INT %02x\n",
+		m->mpc_irqtype, m->mpc_irqflag & 3,
+		(m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
+		m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
+	if (++mp_irq_entries == MAX_IRQ_SOURCES)
+		panic("Max # of irq sources exceeded!!\n");
+}
+
+#endif
+
+static void __init MP_lintsrc_info(struct mpc_config_lintsrc *m)
+{
+	Dprintk("Lint: type %d, pol %d, trig %d, bus %d,"
+		" IRQ %02x, APIC ID %x, APIC LINT %02x\n",
+		m->mpc_irqtype, m->mpc_irqflag & 3,
+		(m->mpc_irqflag >> 2) & 3, m->mpc_srcbusid,
+		m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
+}
+
+#ifdef CONFIG_X86_NUMAQ
+static void __init MP_translation_info(struct mpc_config_translation *m)
+{
+	printk(KERN_INFO
+	       "Translation: record %d, type %d, quad %d, global %d, local %d\n",
+	       mpc_record, m->trans_type, m->trans_quad, m->trans_global,
+	       m->trans_local);
+
+	if (mpc_record >= MAX_MPC_ENTRY)
+		printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n");
+	else
+		translation_table[mpc_record] = m;	/* stash this for later */
+	if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad))
+		node_set_online(m->trans_quad);
+}
+
+/*
+ * Read/parse the MPC oem tables
+ */
+
+static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable,
+				    unsigned short oemsize)
+{
+	int count = sizeof(*oemtable);	/* the header size */
+	unsigned char *oemptr = ((unsigned char *)oemtable) + count;
+
+	mpc_record = 0;
+	printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... \n",
+	       oemtable);
+	if (memcmp(oemtable->oem_signature, MPC_OEM_SIGNATURE, 4)) {
+		printk(KERN_WARNING
+		       "SMP mpc oemtable: bad signature [%c%c%c%c]!\n",
+		       oemtable->oem_signature[0], oemtable->oem_signature[1],
+		       oemtable->oem_signature[2], oemtable->oem_signature[3]);
+		return;
+	}
+	if (mpf_checksum((unsigned char *)oemtable, oemtable->oem_length)) {
+		printk(KERN_WARNING "SMP oem mptable: checksum error!\n");
+		return;
+	}
+	while (count < oemtable->oem_length) {
+		switch (*oemptr) {
+		case MP_TRANSLATION:
+			{
+				struct mpc_config_translation *m =
+				    (struct mpc_config_translation *)oemptr;
+				MP_translation_info(m);
+				oemptr += sizeof(*m);
+				count += sizeof(*m);
+				++mpc_record;
+				break;
+			}
+		default:
+			{
+				printk(KERN_WARNING
+				       "Unrecognised OEM table entry type! - %d\n",
+				       (int)*oemptr);
+				return;
+			}
+		}
+	}
+}
+
+static inline void mps_oem_check(struct mp_config_table *mpc, char *oem,
+				 char *productid)
+{
+	if (strncmp(oem, "IBM NUMA", 8))
+		printk("Warning!  May not be a NUMA-Q system!\n");
+	if (mpc->mpc_oemptr)
+		smp_read_mpc_oem((struct mp_config_oemtable *)mpc->mpc_oemptr,
+				 mpc->mpc_oemsize);
+}
+#endif /* CONFIG_X86_NUMAQ */
+
+/*
+ * Read/parse the MPC
+ */
+
+static int __init smp_read_mpc(struct mp_config_table *mpc, unsigned early)
+{
+	char str[16];
+	char oem[10];
+	int count = sizeof(*mpc);
+	unsigned char *mpt = ((unsigned char *)mpc) + count;
+
+	if (memcmp(mpc->mpc_signature, MPC_SIGNATURE, 4)) {
+		printk(KERN_ERR "MPTABLE: bad signature [%c%c%c%c]!\n",
+		       mpc->mpc_signature[0], mpc->mpc_signature[1],
+		       mpc->mpc_signature[2], mpc->mpc_signature[3]);
+		return 0;
+	}
+	if (mpf_checksum((unsigned char *)mpc, mpc->mpc_length)) {
+		printk(KERN_ERR "MPTABLE: checksum error!\n");
+		return 0;
+	}
+	if (mpc->mpc_spec != 0x01 && mpc->mpc_spec != 0x04) {
+		printk(KERN_ERR "MPTABLE: bad table version (%d)!!\n",
+		       mpc->mpc_spec);
+		return 0;
+	}
+	if (!mpc->mpc_lapic) {
+		printk(KERN_ERR "MPTABLE: null local APIC address!\n");
+		return 0;
+	}
+	memcpy(oem, mpc->mpc_oem, 8);
+	oem[8] = 0;
+	printk(KERN_INFO "MPTABLE: OEM ID: %s ", oem);
+
+	memcpy(str, mpc->mpc_productid, 12);
+	str[12] = 0;
+	printk("Product ID: %s ", str);
+
+#ifdef CONFIG_X86_32
+	mps_oem_check(mpc, oem, str);
+#endif
+	printk(KERN_INFO "MPTABLE: Product ID: %s ", str);
+
+	printk(KERN_INFO "MPTABLE: APIC at: 0x%X\n", mpc->mpc_lapic);
+
+	/* save the local APIC address, it might be non-default */
+	if (!acpi_lapic)
+		mp_lapic_addr = mpc->mpc_lapic;
+
+	if (early)
+		return 1;
+
+	/*
+	 *      Now process the configuration blocks.
+	 */
+#ifdef CONFIG_X86_NUMAQ
+	mpc_record = 0;
+#endif
+	while (count < mpc->mpc_length) {
+		switch (*mpt) {
+		case MP_PROCESSOR:
+			{
+				struct mpc_config_processor *m =
+				    (struct mpc_config_processor *)mpt;
+				/* ACPI may have already provided this data */
+				if (!acpi_lapic)
+					MP_processor_info(m);
+				mpt += sizeof(*m);
+				count += sizeof(*m);
+				break;
+			}
+		case MP_BUS:
+			{
+				struct mpc_config_bus *m =
+				    (struct mpc_config_bus *)mpt;
+				MP_bus_info(m);
+				mpt += sizeof(*m);
+				count += sizeof(*m);
+				break;
+			}
+		case MP_IOAPIC:
+			{
+#ifdef CONFIG_X86_IO_APIC
+				struct mpc_config_ioapic *m =
+				    (struct mpc_config_ioapic *)mpt;
+				MP_ioapic_info(m);
+#endif
+				mpt += sizeof(struct mpc_config_ioapic);
+				count += sizeof(struct mpc_config_ioapic);
+				break;
+			}
+		case MP_INTSRC:
+			{
+#ifdef CONFIG_X86_IO_APIC
+				struct mpc_config_intsrc *m =
+				    (struct mpc_config_intsrc *)mpt;
+
+				MP_intsrc_info(m);
+#endif
+				mpt += sizeof(struct mpc_config_intsrc);
+				count += sizeof(struct mpc_config_intsrc);
+				break;
+			}
+		case MP_LINTSRC:
+			{
+				struct mpc_config_lintsrc *m =
+				    (struct mpc_config_lintsrc *)mpt;
+				MP_lintsrc_info(m);
+				mpt += sizeof(*m);
+				count += sizeof(*m);
+				break;
+			}
+		default:
+			/* wrong mptable */
+			printk(KERN_ERR "Your mptable is wrong, contact your HW vendor!\n");
+			printk(KERN_ERR "type %x\n", *mpt);
+			print_hex_dump(KERN_ERR, "  ", DUMP_PREFIX_ADDRESS, 16,
+					1, mpc, mpc->mpc_length, 1);
+			count = mpc->mpc_length;
+			break;
+		}
+#ifdef CONFIG_X86_NUMAQ
+		++mpc_record;
+#endif
+	}
+	setup_apic_routing();
+	if (!num_processors)
+		printk(KERN_ERR "MPTABLE: no processors registered!\n");
+	return num_processors;
+}
+
+#ifdef CONFIG_X86_IO_APIC
+
+static int __init ELCR_trigger(unsigned int irq)
+{
+	unsigned int port;
+
+	port = 0x4d0 + (irq >> 3);
+	return (inb(port) >> (irq & 7)) & 1;
+}
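
The ELCR is two one-byte registers: port 0x4d0 covers IRQ0-7 and 0x4d1
covers IRQ8-15, one bit per IRQ (1 = level triggered). A worked example of
the arithmetic above:

/*
 * IRQ 11:  port = 0x4d0 + (11 >> 3) = 0x4d1
 *          bit  = 11 & 7            = 3
 *
 * so IRQ 11 is level triggered iff bit 3 of port 0x4d1 is set.
 */
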
+
+static void __init construct_default_ioirq_mptable(int mpc_default_type)
+{
+	struct mpc_config_intsrc intsrc;
+	int i;
+	int ELCR_fallback = 0;
+
+	intsrc.mpc_type = MP_INTSRC;
+	intsrc.mpc_irqflag = 0;	/* conforming */
+	intsrc.mpc_srcbus = 0;
+	intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid;
+
+	intsrc.mpc_irqtype = mp_INT;
+
+	/*
+	 *  If true, we have an ISA/PCI system with no IRQ entries
+	 *  in the MP table. To prevent the PCI interrupts from being set up
+	 *  incorrectly, we try to use the ELCR. The sanity check to see if
+	 *  there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
+	 *  never be level sensitive, so we simply see if the ELCR agrees.
+	 *  If it does, we assume it's valid.
+	 */
+	if (mpc_default_type == 5) {
+		printk(KERN_INFO "ISA/PCI bus type with no IRQ information... "
+		       "falling back to ELCR\n");
+
+		if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) ||
+		    ELCR_trigger(13))
+			printk(KERN_ERR "ELCR contains invalid data... "
+			       "not using ELCR\n");
+		else {
+			printk(KERN_INFO
+			       "Using ELCR to identify PCI interrupts\n");
+			ELCR_fallback = 1;
+		}
+	}
+
+	for (i = 0; i < 16; i++) {
+		switch (mpc_default_type) {
+		case 2:
+			if (i == 0 || i == 13)
+				continue;	/* IRQ0 & IRQ13 not connected */
+			/* fall through */
+		default:
+			if (i == 2)
+				continue;	/* IRQ2 is never connected */
+		}
+
+		if (ELCR_fallback) {
+			/*
+			 *  If the ELCR indicates a level-sensitive interrupt, we
+			 *  copy that information over to the MP table in the
+			 *  irqflag field (level sensitive, active high polarity).
+			 */
+			if (ELCR_trigger(i))
+				intsrc.mpc_irqflag = 13;
+			else
+				intsrc.mpc_irqflag = 0;
+		}
+
+		intsrc.mpc_srcbusirq = i;
+		intsrc.mpc_dstirq = i ? i : 2;	/* IRQ0 to INTIN2 */
+		MP_intsrc_info(&intsrc);
+	}
+
+	intsrc.mpc_irqtype = mp_ExtINT;
+	intsrc.mpc_srcbusirq = 0;
+	intsrc.mpc_dstirq = 0;	/* 8259A to INTIN0 */
+	MP_intsrc_info(&intsrc);
+}
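
The magic value 13 written to mpc_irqflag for ELCR level-triggered pins
decodes via the MP-spec flag layout (polarity in bits 0-1, trigger in bits
2-3), the same encoding mp_override_legacy_irq() builds explicitly later:

/*
 * irqflag = (trigger << 2) | polarity
 *
 *   13 == (3 << 2) | 1  -> trigger 3 (level), polarity 1 (active high)
 *    0                  -> conforms to the bus default
 */
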
+
+#endif
+
+static inline void __init construct_default_ISA_mptable(int mpc_default_type)
+{
+	struct mpc_config_processor processor;
+	struct mpc_config_bus bus;
+#ifdef CONFIG_X86_IO_APIC
+	struct mpc_config_ioapic ioapic;
+#endif
+	struct mpc_config_lintsrc lintsrc;
+	int linttypes[2] = { mp_ExtINT, mp_NMI };
+	int i;
+
+	/*
+	 * local APIC has default address
+	 */
+	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
+
+	/*
+	 * 2 CPUs, numbered 0 & 1.
+	 */
+	processor.mpc_type = MP_PROCESSOR;
+	/* Either an integrated APIC or a discrete 82489DX. */
+	processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
+	processor.mpc_cpuflag = CPU_ENABLED;
+	processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
+	    (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
+	processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
+	processor.mpc_reserved[0] = 0;
+	processor.mpc_reserved[1] = 0;
+	for (i = 0; i < 2; i++) {
+		processor.mpc_apicid = i;
+		MP_processor_info(&processor);
+	}
+
+	bus.mpc_type = MP_BUS;
+	bus.mpc_busid = 0;
+	switch (mpc_default_type) {
+	default:
+		printk(KERN_ERR "???\nUnknown standard configuration %d\n",
+		       mpc_default_type);
+		/* fall through */
+	case 1:
+	case 5:
+		memcpy(bus.mpc_bustype, "ISA   ", 6);
+		break;
+	case 2:
+	case 6:
+	case 3:
+		memcpy(bus.mpc_bustype, "EISA  ", 6);
+		break;
+	case 4:
+	case 7:
+		memcpy(bus.mpc_bustype, "MCA   ", 6);
+	}
+	MP_bus_info(&bus);
+	if (mpc_default_type > 4) {
+		bus.mpc_busid = 1;
+		memcpy(bus.mpc_bustype, "PCI   ", 6);
+		MP_bus_info(&bus);
+	}
+
+#ifdef CONFIG_X86_IO_APIC
+	ioapic.mpc_type = MP_IOAPIC;
+	ioapic.mpc_apicid = 2;
+	ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
+	ioapic.mpc_flags = MPC_APIC_USABLE;
+	ioapic.mpc_apicaddr = 0xFEC00000;
+	MP_ioapic_info(&ioapic);
+
+	/*
+	 * We set up most of the low 16 IO-APIC pins according to MPS rules.
+	 */
+	construct_default_ioirq_mptable(mpc_default_type);
+#endif
+	lintsrc.mpc_type = MP_LINTSRC;
+	lintsrc.mpc_irqflag = 0;	/* conforming */
+	lintsrc.mpc_srcbusid = 0;
+	lintsrc.mpc_srcbusirq = 0;
+	lintsrc.mpc_destapic = MP_APIC_ALL;
+	for (i = 0; i < 2; i++) {
+		lintsrc.mpc_irqtype = linttypes[i];
+		lintsrc.mpc_destapiclint = i;
+		MP_lintsrc_info(&lintsrc);
+	}
+}
+
+static struct intel_mp_floating *mpf_found;
+
+/*
+ * Scan the memory blocks for an SMP configuration block.
+ */
+static void __init __get_smp_config(unsigned early)
+{
+	struct intel_mp_floating *mpf = mpf_found;
+
+	if (acpi_lapic && early)
+		return;
+	/*
+	 * ACPI supports both logical (e.g. Hyper-Threading) and physical
+	 * processors, where MPS only supports physical.
+	 */
+	if (acpi_lapic && acpi_ioapic) {
+		printk(KERN_INFO "Using ACPI (MADT) for SMP configuration "
+		       "information\n");
+		return;
+	} else if (acpi_lapic)
+		printk(KERN_INFO "Using ACPI for processor (LAPIC) "
+		       "configuration information\n");
+
+	printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n",
+	       mpf->mpf_specification);
+#ifdef CONFIG_X86_32
+	if (mpf->mpf_feature2 & (1 << 7)) {
+		printk(KERN_INFO "    IMCR and PIC compatibility mode.\n");
+		pic_mode = 1;
+	} else {
+		printk(KERN_INFO "    Virtual Wire compatibility mode.\n");
+		pic_mode = 0;
+	}
+#endif
+	/*
+	 * Now see if we need to read further.
+	 */
+	if (mpf->mpf_feature1 != 0) {
+		if (early) {
+			/*
+			 * local APIC has default address
+			 */
+			mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
+			return;
+		}
+
+		printk(KERN_INFO "Default MP configuration #%d\n",
+		       mpf->mpf_feature1);
+		construct_default_ISA_mptable(mpf->mpf_feature1);
+
+	} else if (mpf->mpf_physptr) {
+
+		/*
+		 * Read the physical hardware table.  Anything here will
+		 * override the defaults.
+		 */
+		if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr), early)) {
+			smp_found_config = 0;
+			printk(KERN_ERR
+			       "BIOS bug, MP table errors detected!...\n");
+			printk(KERN_ERR "... disabling SMP support. "
+			       "(tell your hw vendor)\n");
+			return;
+		}
+
+		if (early)
+			return;
+#ifdef CONFIG_X86_IO_APIC
+		/*
+		 * If there are no explicit MP IRQ entries, then we are
+		 * broken.  We set up most of the low 16 IO-APIC pins to
+		 * ISA defaults and hope it will work.
+		 */
+		if (!mp_irq_entries) {
+			struct mpc_config_bus bus;
+
+			printk(KERN_ERR "BIOS bug, no explicit IRQ entries, "
+			       "using default mptable. "
+			       "(tell your hw vendor)\n");
+
+			bus.mpc_type = MP_BUS;
+			bus.mpc_busid = 0;
+			memcpy(bus.mpc_bustype, "ISA   ", 6);
+			MP_bus_info(&bus);
+
+			construct_default_ioirq_mptable(0);
+		}
+#endif
+	} else
+		BUG();
+
+	if (!early)
+		printk(KERN_INFO "Processors: %d\n", num_processors);
+	/*
+	 * Only use the first configuration found.
+	 */
+}
+
+void __init early_get_smp_config(void)
+{
+	__get_smp_config(1);
+}
+
+void __init get_smp_config(void)
+{
+	__get_smp_config(0);
+}
+
+static int __init smp_scan_config(unsigned long base, unsigned long length,
+				  unsigned reserve)
+{
+	extern void __bad_mpf_size(void);
+	unsigned int *bp = phys_to_virt(base);
+	struct intel_mp_floating *mpf;
+
+	Dprintk("Scan SMP from %p for %ld bytes.\n", bp, length);
+	if (sizeof(*mpf) != 16)
+		__bad_mpf_size();
+
+	while (length > 0) {
+		mpf = (struct intel_mp_floating *)bp;
+		if ((*bp == SMP_MAGIC_IDENT) &&
+		    (mpf->mpf_length == 1) &&
+		    !mpf_checksum((unsigned char *)bp, 16) &&
+		    ((mpf->mpf_specification == 1)
+		     || (mpf->mpf_specification == 4))) {
+
+			smp_found_config = 1;
+			mpf_found = mpf;
+#ifdef CONFIG_X86_32
+			printk(KERN_INFO "found SMP MP-table at [%p] %08lx\n",
+			       mpf, virt_to_phys(mpf));
+			reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE,
+					BOOTMEM_DEFAULT);
+			if (mpf->mpf_physptr) {
+				/*
+				 * We cannot access the MPC table to compute
+				 * the table size yet, as only a few megabytes
+				 * from the bottom are mapped now.
+				 * The PC-9800's MPC table sits at the very end
+				 * of physical memory, so simply reserving
+				 * PAGE_SIZE from mpf->mpf_physptr would BUG()
+				 * in reserve_bootmem.
+				 */
+				unsigned long size = PAGE_SIZE;
+				unsigned long end = max_low_pfn * PAGE_SIZE;
+				if (mpf->mpf_physptr + size > end)
+					size = end - mpf->mpf_physptr;
+				reserve_bootmem(mpf->mpf_physptr, size,
+						BOOTMEM_DEFAULT);
+			}
+
+#else
+			if (!reserve)
+				return 1;
+
+			reserve_bootmem_generic(virt_to_phys(mpf), PAGE_SIZE);
+			if (mpf->mpf_physptr)
+				reserve_bootmem_generic(mpf->mpf_physptr,
+							PAGE_SIZE);
+#endif
+		return 1;
+		}
+		bp += 4;
+		length -= 16;
+	}
+	return 0;
+}
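
Spelled out, a candidate floating pointer is accepted only when all four
tests above pass (SMP_MAGIC_IDENT being the 32-bit little-endian encoding
of the "_MP_" signature):

/*
 *   *bp == SMP_MAGIC_IDENT        signature "_MP_"
 *   mpf_length == 1               length in 16-byte paragraphs
 *   mpf_checksum(bp, 16) == 0     bytes sum to 0 mod 256
 *   mpf_specification == 1 or 4   MP spec revision 1.1 or 1.4
 *
 * The scan advances one paragraph at a time: bp += 4 on a u32
 * pointer and length -= 16 both step 16 bytes.
 */
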
+
+static void __init __find_smp_config(unsigned reserve)
+{
+	unsigned int address;
+
+	/*
+	 * FIXME: Linux assumes you have 640K of base RAM;
+	 * this continues the error...
+	 *
+	 * 1) Scan the bottom 1K for a signature
+	 * 2) Scan the top 1K of base RAM
+	 * 3) Scan the 64K of bios
+	 */
+	if (smp_scan_config(0x0, 0x400, reserve) ||
+	    smp_scan_config(639 * 0x400, 0x400, reserve) ||
+	    smp_scan_config(0xF0000, 0x10000, reserve))
+		return;
+	/*
+	 * If it is an SMP machine we should know now, unless the
+	 * configuration is in an EISA/MCA bus machine with an
+	 * extended bios data area.
+	 *
+	 * there is a real-mode segmented pointer pointing to the
+	 * 4K EBDA area at 0x40E, calculate and scan it here.
+	 *
+	 * NOTE! There are Linux loaders that will corrupt the EBDA
+	 * area, and as such this kind of SMP config may be less
+	 * trustworthy, simply because the SMP table may have been
+	 * stomped on during early boot. These loaders are buggy and
+	 * should be fixed.
+	 *
+	 * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
+	 */
+
+	address = get_bios_ebda();
+	if (address)
+		smp_scan_config(address, 0x400, reserve);
+}
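
get_bios_ebda() resolves the real-mode segment word mentioned in the
comment above; roughly (a sketch matching this era's <asm/bios_ebda.h>):

static inline unsigned int get_bios_ebda(void)
{
	/* 0x40E holds the EBDA segment; shift left 4 for the
	 * physical address.  A result of 0 means no EBDA. */
	unsigned int address = *(unsigned short *)phys_to_virt(0x40E);

	return address << 4;
}
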
+
+void __init early_find_smp_config(void)
+{
+	__find_smp_config(0);
+}
+
+void __init find_smp_config(void)
+{
+	__find_smp_config(1);
+}
+
+/* --------------------------------------------------------------------------
+                            ACPI-based MP Configuration
+   -------------------------------------------------------------------------- */
+
+#ifdef CONFIG_ACPI
+
+#ifdef	CONFIG_X86_IO_APIC
+
+#define MP_ISA_BUS		0
+#define MP_MAX_IOAPIC_PIN	127
+
+extern struct mp_ioapic_routing mp_ioapic_routing[MAX_IO_APICS];
+
+static int mp_find_ioapic(int gsi)
+{
+	int i = 0;
+
+	/* Find the IOAPIC that manages this GSI. */
+	for (i = 0; i < nr_ioapics; i++) {
+		if ((gsi >= mp_ioapic_routing[i].gsi_base)
+		    && (gsi <= mp_ioapic_routing[i].gsi_end))
+			return i;
+	}
+
+	printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
+	return -1;
+}
+
+static u8 uniq_ioapic_id(u8 id)
+{
+#ifdef CONFIG_X86_32
+	if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
+	    !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
+		return io_apic_get_unique_id(nr_ioapics, id);
+	else
+		return id;
+#else
+	int i;
+	DECLARE_BITMAP(used, 256);
+	bitmap_zero(used, 256);
+	for (i = 0; i < nr_ioapics; i++) {
+		struct mpc_config_ioapic *ia = &mp_ioapics[i];
+		__set_bit(ia->mpc_apicid, used);
+	}
+	if (!test_bit(id, used))
+		return id;
+	return find_first_zero_bit(used, 256);
+#endif
+}
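
By example, the 64-bit branch above picks the lowest free APIC id on a
collision: with ids {2, 3} already registered,

/*
 *   uniq_ioapic_id(5) -> 5   (not taken, keep it)
 *   uniq_ioapic_id(2) -> 0   (taken; first zero bit in 'used' wins)
 */
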
+
+void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
+{
+	int idx = 0;
+
+	if (bad_ioapic(address))
+		return;
+
+	idx = nr_ioapics;
+
+	mp_ioapics[idx].mpc_type = MP_IOAPIC;
+	mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
+	mp_ioapics[idx].mpc_apicaddr = address;
+
+	set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
+	mp_ioapics[idx].mpc_apicid = uniq_ioapic_id(id);
+#ifdef CONFIG_X86_32
+	mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);
+#else
+	mp_ioapics[idx].mpc_apicver = 0;
+#endif
+	/*
+	 * Build basic GSI lookup table to facilitate gsi->io_apic lookups
+	 * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
+	 */
+	mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid;
+	mp_ioapic_routing[idx].gsi_base = gsi_base;
+	mp_ioapic_routing[idx].gsi_end = gsi_base +
+	    io_apic_get_redir_entries(idx);
+
+	printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
+	       "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid,
+	       mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr,
+	       mp_ioapic_routing[idx].gsi_base, mp_ioapic_routing[idx].gsi_end);
+
+	nr_ioapics++;
+}
+
+void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
+{
+	struct mpc_config_intsrc intsrc;
+	int ioapic = -1;
+	int pin = -1;
+
+	/*
+	 * Convert 'gsi' to 'ioapic.pin'.
+	 */
+	ioapic = mp_find_ioapic(gsi);
+	if (ioapic < 0)
+		return;
+	pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
+
+	/*
+	 * TBD: This check is for faulty timer entries, where the override
+	 *      erroneously sets the trigger to level, resulting in a HUGE
+	 *      increase of timer interrupts!
+	 */
+	if ((bus_irq == 0) && (trigger == 3))
+		trigger = 1;
+
+	intsrc.mpc_type = MP_INTSRC;
+	intsrc.mpc_irqtype = mp_INT;
+	intsrc.mpc_irqflag = (trigger << 2) | polarity;
+	intsrc.mpc_srcbus = MP_ISA_BUS;
+	intsrc.mpc_srcbusirq = bus_irq;	/* IRQ */
+	intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;	/* APIC ID */
+	intsrc.mpc_dstirq = pin;	/* INTIN# */
+
+	Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n",
+		intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
+		(intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
+		intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);
+
+	mp_irqs[mp_irq_entries] = intsrc;
+	if (++mp_irq_entries == MAX_IRQ_SOURCES)
+		panic("Max # of irq sources exceeded!\n");
+}
+
+int es7000_plat;
+
+void __init mp_config_acpi_legacy_irqs(void)
+{
+	struct mpc_config_intsrc intsrc;
+	int i = 0;
+	int ioapic = -1;
+
+#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
+	/*
+	 * Fabricate the legacy ISA bus (bus #31).
+	 */
+	mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
+#endif
+	set_bit(MP_ISA_BUS, mp_bus_not_pci);
+	Dprintk("Bus #%d is ISA\n", MP_ISA_BUS);
+
+	/*
+	 * Older generations of ES7000 have no legacy identity mappings
+	 */
+	if (es7000_plat == 1)
+		return;
+
+	/*
+	 * Locate the IOAPIC that manages the ISA IRQs (0-15).
+	 */
+	ioapic = mp_find_ioapic(0);
+	if (ioapic < 0)
+		return;
+
+	intsrc.mpc_type = MP_INTSRC;
+	intsrc.mpc_irqflag = 0;	/* Conforming */
+	intsrc.mpc_srcbus = MP_ISA_BUS;
+#ifdef CONFIG_X86_IO_APIC
+	intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;
+#endif
+	/*
+	 * Use the default configuration for the IRQs 0-15, unless
+	 * overridden by (MADT) interrupt source override entries.
+	 */
+	for (i = 0; i < 16; i++) {
+		int idx;
+
+		for (idx = 0; idx < mp_irq_entries; idx++) {
+			struct mpc_config_intsrc *irq = mp_irqs + idx;
+
+			/* Do we already have a mapping for this ISA IRQ? */
+			if (irq->mpc_srcbus == MP_ISA_BUS
+			    && irq->mpc_srcbusirq == i)
+				break;
+
+			/* Do we already have a mapping for this IOAPIC pin */
+			if ((irq->mpc_dstapic == intsrc.mpc_dstapic) &&
+			    (irq->mpc_dstirq == i))
+				break;
+		}
+
+		if (idx != mp_irq_entries) {
+			printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
+			continue;	/* IRQ already used */
+		}
+
+		intsrc.mpc_irqtype = mp_INT;
+		intsrc.mpc_srcbusirq = i;	/* Identity mapped */
+		intsrc.mpc_dstirq = i;
+
+		Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
+			"%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
+			(intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
+			intsrc.mpc_srcbusirq, intsrc.mpc_dstapic,
+			intsrc.mpc_dstirq);
+
+		mp_irqs[mp_irq_entries] = intsrc;
+		if (++mp_irq_entries == MAX_IRQ_SOURCES)
+			panic("Max # of irq sources exceeded!\n");
+	}
+}
+
+int mp_register_gsi(u32 gsi, int triggering, int polarity)
+{
+	int ioapic = -1;
+	int ioapic_pin = 0;
+	int idx, bit = 0;
+#ifdef CONFIG_X86_32
+#define MAX_GSI_NUM	4096
+#define IRQ_COMPRESSION_START	64
+
+	static int pci_irq = IRQ_COMPRESSION_START;
+	/*
+	 * Mapping between Global System Interrupts, which
+	 * represent all possible interrupts, and IRQs
+	 * assigned to actual devices.
+	 */
+	static int gsi_to_irq[MAX_GSI_NUM];
+#else
+
+	if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC)
+		return gsi;
+#endif
+
+	/* Don't set up the ACPI SCI because it's already set up */
+	if (acpi_gbl_FADT.sci_interrupt == gsi)
+		return gsi;
+
+	ioapic = mp_find_ioapic(gsi);
+	if (ioapic < 0) {
+		printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
+		return gsi;
+	}
+
+	ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
+
+#ifdef CONFIG_X86_32
+	if (ioapic_renumber_irq)
+		gsi = ioapic_renumber_irq(ioapic, gsi);
+#endif
+
+	/*
+	 * Avoid pin reprogramming.  PRTs typically include entries
+	 * with redundant pin->gsi mappings (but unique PCI devices);
+	 * we only program the IOAPIC on the first.
+	 */
+	bit = ioapic_pin % 32;
+	idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
+	if (idx > 3) {
+		printk(KERN_ERR "Invalid reference to IOAPIC pin "
+		       "%d-%d\n", mp_ioapic_routing[ioapic].apic_id,
+		       ioapic_pin);
+		return gsi;
+	}
+	if ((1 << bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
+		Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
+			mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
+#ifdef CONFIG_X86_32
+		return (gsi < IRQ_COMPRESSION_START ? gsi : gsi_to_irq[gsi]);
+#else
+		return gsi;
+#endif
+	}
+
+	mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1 << bit);
+#ifdef CONFIG_X86_32
+	/*
+	 * For GSI >= 64, use IRQ compression
+	 */
+	if ((gsi >= IRQ_COMPRESSION_START)
+	    && (triggering == ACPI_LEVEL_SENSITIVE)) {
+		/*
+		 * For PCI devices assign IRQs in order, avoiding gaps
+		 * due to unused I/O APIC pins.
+		 */
+		int irq = gsi;
+		if (gsi < MAX_GSI_NUM) {
+			/*
+			 * Retain the VIA chipset work-around (gsi > 15), but
+			 * avoid a problem where the 8254 timer (IRQ0) is set up
+			 * via an override (so it's not on pin 0 of the ioapic),
+			 * and at the same time, the pin 0 interrupt is a PCI
+			 * type.  The gsi > 15 test could cause these two pins
+			 * to be shared as IRQ0, and they are not shareable.
+			 * So test for this condition, and if necessary, avoid
+			 * the pin collision.
+			 */
+			gsi = pci_irq++;
+			/*
+			 * Don't assign IRQ used by ACPI SCI
+			 */
+			if (gsi == acpi_gbl_FADT.sci_interrupt)
+				gsi = pci_irq++;
+			gsi_to_irq[irq] = gsi;
+		} else {
+			printk(KERN_ERR "GSI %u is too high\n", gsi);
+			return gsi;
+		}
+	}
+#endif
+	io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
+				triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
+				polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
+	return gsi;
+}
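
The pin_programmed test above packs one bit per IOAPIC pin into four 32-bit
words, covering pins 0-127 (hence MP_MAX_IOAPIC_PIN of 127 earlier). The
index arithmetic, by example:

/*
 *   idx = ioapic_pin / 32;    which word (0..3)
 *   bit = ioapic_pin % 32;    which bit within it
 *
 *   pin 37 -> word 1, bit 5;  pins with idx > 3 (i.e. >= 128) are
 *   rejected as invalid references.
 */
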
+
+#endif /* CONFIG_X86_IO_APIC */
+#endif /* CONFIG_ACPI */
diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c
deleted file mode 100644
index f349e68..0000000
--- a/arch/x86/kernel/mpparse_32.c
+++ /dev/null
@@ -1,1139 +0,0 @@
-/*
- *	Intel Multiprocessor Specification 1.1 and 1.4
- *	compliant MP-table parsing routines.
- *
- *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
- *	(c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
- *
- *	Fixes
- *		Erich Boleyn	:	MP v1.4 and additional changes.
- *		Alan Cox	:	Added EBDA scanning
- *		Ingo Molnar	:	various cleanups and rewrites
- *		Maciej W. Rozycki:	Bits for default MP configurations
- *		Paul Diefenbaugh:	Added full ACPI support
- */
-
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/acpi.h>
-#include <linux/delay.h>
-#include <linux/bootmem.h>
-#include <linux/kernel_stat.h>
-#include <linux/mc146818rtc.h>
-#include <linux/bitops.h>
-
-#include <asm/smp.h>
-#include <asm/acpi.h>
-#include <asm/mtrr.h>
-#include <asm/mpspec.h>
-#include <asm/io_apic.h>
-
-#include <mach_apic.h>
-#include <mach_apicdef.h>
-#include <mach_mpparse.h>
-#include <bios_ebda.h>
-
-/* Have we found an MP table */
-int smp_found_config;
-unsigned int __cpuinitdata maxcpus = NR_CPUS;
-
-/*
- * Various Linux-internal data structures created from the
- * MP-table.
- */
-int apic_version [MAX_APICS];
-int mp_bus_id_to_type [MAX_MP_BUSSES];
-int mp_bus_id_to_node [MAX_MP_BUSSES];
-int mp_bus_id_to_local [MAX_MP_BUSSES];
-int quad_local_to_mp_bus_id [NR_CPUS/4][4];
-int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
-static int mp_current_pci_id;
-
-/* I/O APIC entries */
-struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
-
-/* # of MP IRQ source entries */
-struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
-
-/* MP IRQ source entries */
-int mp_irq_entries;
-
-int nr_ioapics;
-
-int pic_mode;
-unsigned long mp_lapic_addr;
-
-unsigned int def_to_bigsmp = 0;
-
-/* Processor that is doing the boot up */
-unsigned int boot_cpu_physical_apicid = -1U;
-/* Internal processor count */
-unsigned int num_processors;
-
-/* Bitmask of physically existing CPUs */
-physid_mask_t phys_cpu_present_map;
-
-u8 bios_cpu_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
-
-/*
- * Intel MP BIOS table parsing routines:
- */
-
-
-/*
- * Checksum an MP configuration block.
- */
-
-static int __init mpf_checksum(unsigned char *mp, int len)
-{
-	int sum = 0;
-
-	while (len--)
-		sum += *mp++;
-
-	return sum & 0xFF;
-}
-
-/*
- * Have to match translation table entries to main table entries by counter
- * hence the mpc_record variable .... can't see a less disgusting way of
- * doing this ....
- */
-
-static int mpc_record; 
-static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __cpuinitdata;
-
-static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
-{
- 	int ver, apicid;
-	physid_mask_t phys_cpu;
- 	
-	if (!(m->mpc_cpuflag & CPU_ENABLED))
-		return;
-
-	apicid = mpc_apic_id(m, translation_table[mpc_record]);
-
-	if (m->mpc_featureflag&(1<<0))
-		Dprintk("    Floating point unit present.\n");
-	if (m->mpc_featureflag&(1<<7))
-		Dprintk("    Machine Exception supported.\n");
-	if (m->mpc_featureflag&(1<<8))
-		Dprintk("    64 bit compare & exchange supported.\n");
-	if (m->mpc_featureflag&(1<<9))
-		Dprintk("    Internal APIC present.\n");
-	if (m->mpc_featureflag&(1<<11))
-		Dprintk("    SEP present.\n");
-	if (m->mpc_featureflag&(1<<12))
-		Dprintk("    MTRR  present.\n");
-	if (m->mpc_featureflag&(1<<13))
-		Dprintk("    PGE  present.\n");
-	if (m->mpc_featureflag&(1<<14))
-		Dprintk("    MCA  present.\n");
-	if (m->mpc_featureflag&(1<<15))
-		Dprintk("    CMOV  present.\n");
-	if (m->mpc_featureflag&(1<<16))
-		Dprintk("    PAT  present.\n");
-	if (m->mpc_featureflag&(1<<17))
-		Dprintk("    PSE  present.\n");
-	if (m->mpc_featureflag&(1<<18))
-		Dprintk("    PSN  present.\n");
-	if (m->mpc_featureflag&(1<<19))
-		Dprintk("    Cache Line Flush Instruction present.\n");
-	/* 20 Reserved */
-	if (m->mpc_featureflag&(1<<21))
-		Dprintk("    Debug Trace and EMON Store present.\n");
-	if (m->mpc_featureflag&(1<<22))
-		Dprintk("    ACPI Thermal Throttle Registers  present.\n");
-	if (m->mpc_featureflag&(1<<23))
-		Dprintk("    MMX  present.\n");
-	if (m->mpc_featureflag&(1<<24))
-		Dprintk("    FXSR  present.\n");
-	if (m->mpc_featureflag&(1<<25))
-		Dprintk("    XMM  present.\n");
-	if (m->mpc_featureflag&(1<<26))
-		Dprintk("    Willamette New Instructions  present.\n");
-	if (m->mpc_featureflag&(1<<27))
-		Dprintk("    Self Snoop  present.\n");
-	if (m->mpc_featureflag&(1<<28))
-		Dprintk("    HT  present.\n");
-	if (m->mpc_featureflag&(1<<29))
-		Dprintk("    Thermal Monitor present.\n");
-	/* 30, 31 Reserved */
-
-
-	if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
-		Dprintk("    Bootup CPU\n");
-		boot_cpu_physical_apicid = m->mpc_apicid;
-	}
-
-	ver = m->mpc_apicver;
-
-	/*
-	 * Validate version
-	 */
-	if (ver == 0x0) {
-		printk(KERN_WARNING "BIOS bug, APIC version is 0 for CPU#%d! "
-				"fixing up to 0x10. (tell your hw vendor)\n",
-				m->mpc_apicid);
-		ver = 0x10;
-	}
-	apic_version[m->mpc_apicid] = ver;
-
-	phys_cpu = apicid_to_cpu_present(apicid);
-	physids_or(phys_cpu_present_map, phys_cpu_present_map, phys_cpu);
-
-	if (num_processors >= NR_CPUS) {
-		printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
-			"  Processor ignored.\n", NR_CPUS);
-		return;
-	}
-
-	if (num_processors >= maxcpus) {
-		printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
-			" Processor ignored.\n", maxcpus);
-		return;
-	}
-
-	cpu_set(num_processors, cpu_possible_map);
-	num_processors++;
-
-	/*
-	 * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y
-	 * but we need to work other dependencies like SMP_SUSPEND etc
-	 * before this can be done without some confusion.
-	 * if (CPU_HOTPLUG_ENABLED || num_processors > 8)
-	 *       - Ashok Raj <ashok.raj@intel.com>
-	 */
-	if (num_processors > 8) {
-		switch (boot_cpu_data.x86_vendor) {
-		case X86_VENDOR_INTEL:
-			if (!APIC_XAPIC(ver)) {
-				def_to_bigsmp = 0;
-				break;
-			}
-			/* If P4 and above fall through */
-		case X86_VENDOR_AMD:
-			def_to_bigsmp = 1;
-		}
-	}
-	bios_cpu_apicid[num_processors - 1] = m->mpc_apicid;
-}
-
-static void __init MP_bus_info (struct mpc_config_bus *m)
-{
-	char str[7];
-
-	memcpy(str, m->mpc_bustype, 6);
-	str[6] = 0;
-
-	mpc_oem_bus_info(m, str, translation_table[mpc_record]);
-
-#if MAX_MP_BUSSES < 256
-	if (m->mpc_busid >= MAX_MP_BUSSES) {
-		printk(KERN_WARNING "MP table busid value (%d) for bustype %s "
-			" is too large, max. supported is %d\n",
-			m->mpc_busid, str, MAX_MP_BUSSES - 1);
-		return;
-	}
-#endif
-
-	if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA)-1) == 0) {
-		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
-	} else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA)-1) == 0) {
-		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
-	} else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI)-1) == 0) {
-		mpc_oem_pci_bus(m, translation_table[mpc_record]);
-		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
-		mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
-		mp_current_pci_id++;
-	} else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA)-1) == 0) {
-		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
-	} else {
-		printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str);
-	}
-}
-
-static void __init MP_ioapic_info (struct mpc_config_ioapic *m)
-{
-	if (!(m->mpc_flags & MPC_APIC_USABLE))
-		return;
-
-	printk(KERN_INFO "I/O APIC #%d Version %d at 0x%X.\n",
-		m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);
-	if (nr_ioapics >= MAX_IO_APICS) {
-		printk(KERN_CRIT "Max # of I/O APICs (%d) exceeded (found %d).\n",
-			MAX_IO_APICS, nr_ioapics);
-		panic("Recompile kernel with bigger MAX_IO_APICS!.\n");
-	}
-	if (!m->mpc_apicaddr) {
-		printk(KERN_ERR "WARNING: bogus zero I/O APIC address"
-			" found in MP table, skipping!\n");
-		return;
-	}
-	mp_ioapics[nr_ioapics] = *m;
-	nr_ioapics++;
-}
-
-static void __init MP_intsrc_info (struct mpc_config_intsrc *m)
-{
-	mp_irqs [mp_irq_entries] = *m;
-	Dprintk("Int: type %d, pol %d, trig %d, bus %d,"
-		" IRQ %02x, APIC ID %x, APIC INT %02x\n",
-			m->mpc_irqtype, m->mpc_irqflag & 3,
-			(m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
-			m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
-	if (++mp_irq_entries == MAX_IRQ_SOURCES)
-		panic("Max # of irq sources exceeded!!\n");
-}
-
-static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m)
-{
-	Dprintk("Lint: type %d, pol %d, trig %d, bus %d,"
-		" IRQ %02x, APIC ID %x, APIC LINT %02x\n",
-			m->mpc_irqtype, m->mpc_irqflag & 3,
-			(m->mpc_irqflag >> 2) &3, m->mpc_srcbusid,
-			m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
-}
-
-#ifdef CONFIG_X86_NUMAQ
-static void __init MP_translation_info (struct mpc_config_translation *m)
-{
-	printk(KERN_INFO "Translation: record %d, type %d, quad %d, global %d, local %d\n", mpc_record, m->trans_type, m->trans_quad, m->trans_global, m->trans_local);
-
-	if (mpc_record >= MAX_MPC_ENTRY) 
-		printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n");
-	else
-		translation_table[mpc_record] = m; /* stash this for later */
-	if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad))
-		node_set_online(m->trans_quad);
-}
-
-/*
- * Read/parse the MPC oem tables
- */
-
-static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable, \
-	unsigned short oemsize)
-{
-	int count = sizeof (*oemtable); /* the header size */
-	unsigned char *oemptr = ((unsigned char *)oemtable)+count;
-	
-	mpc_record = 0;
-	printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... \n", oemtable);
-	if (memcmp(oemtable->oem_signature,MPC_OEM_SIGNATURE,4))
-	{
-		printk(KERN_WARNING "SMP mpc oemtable: bad signature [%c%c%c%c]!\n",
-			oemtable->oem_signature[0],
-			oemtable->oem_signature[1],
-			oemtable->oem_signature[2],
-			oemtable->oem_signature[3]);
-		return;
-	}
-	if (mpf_checksum((unsigned char *)oemtable,oemtable->oem_length))
-	{
-		printk(KERN_WARNING "SMP oem mptable: checksum error!\n");
-		return;
-	}
-	while (count < oemtable->oem_length) {
-		switch (*oemptr) {
-			case MP_TRANSLATION:
-			{
-				struct mpc_config_translation *m=
-					(struct mpc_config_translation *)oemptr;
-				MP_translation_info(m);
-				oemptr += sizeof(*m);
-				count += sizeof(*m);
-				++mpc_record;
-				break;
-			}
-			default:
-			{
-				printk(KERN_WARNING "Unrecognised OEM table entry type! - %d\n", (int) *oemptr);
-				return;
-			}
-		}
-       }
-}
-
-static inline void mps_oem_check(struct mp_config_table *mpc, char *oem,
-		char *productid)
-{
-	if (strncmp(oem, "IBM NUMA", 8))
-		printk("Warning!  May not be a NUMA-Q system!\n");
-	if (mpc->mpc_oemptr)
-		smp_read_mpc_oem((struct mp_config_oemtable *) mpc->mpc_oemptr,
-				mpc->mpc_oemsize);
-}
-#endif	/* CONFIG_X86_NUMAQ */
-
-/*
- * Read/parse the MPC
- */
-
-static int __init smp_read_mpc(struct mp_config_table *mpc)
-{
-	char str[16];
-	char oem[10];
-	int count=sizeof(*mpc);
-	unsigned char *mpt=((unsigned char *)mpc)+count;
-
-	if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) {
-		printk(KERN_ERR "SMP mptable: bad signature [0x%x]!\n",
-			*(u32 *)mpc->mpc_signature);
-		return 0;
-	}
-	if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) {
-		printk(KERN_ERR "SMP mptable: checksum error!\n");
-		return 0;
-	}
-	if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) {
-		printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n",
-			mpc->mpc_spec);
-		return 0;
-	}
-	if (!mpc->mpc_lapic) {
-		printk(KERN_ERR "SMP mptable: null local APIC address!\n");
-		return 0;
-	}
-	memcpy(oem,mpc->mpc_oem,8);
-	oem[8]=0;
-	printk(KERN_INFO "OEM ID: %s ",oem);
-
-	memcpy(str,mpc->mpc_productid,12);
-	str[12]=0;
-	printk("Product ID: %s ",str);
-
-	mps_oem_check(mpc, oem, str);
-
-	printk("APIC at: 0x%X\n", mpc->mpc_lapic);
-
-	/*
-	 * Save the local APIC address (it might be non-default) -- but only
-	 * if we're not using ACPI.
-	 */
-	if (!acpi_lapic)
-		mp_lapic_addr = mpc->mpc_lapic;
-
-	/*
-	 *	Now process the configuration blocks.
-	 */
-	mpc_record = 0;
-	while (count < mpc->mpc_length) {
-		switch(*mpt) {
-			case MP_PROCESSOR:
-			{
-				struct mpc_config_processor *m=
-					(struct mpc_config_processor *)mpt;
-				/* ACPI may have already provided this data */
-				if (!acpi_lapic)
-					MP_processor_info(m);
-				mpt += sizeof(*m);
-				count += sizeof(*m);
-				break;
-			}
-			case MP_BUS:
-			{
-				struct mpc_config_bus *m=
-					(struct mpc_config_bus *)mpt;
-				MP_bus_info(m);
-				mpt += sizeof(*m);
-				count += sizeof(*m);
-				break;
-			}
-			case MP_IOAPIC:
-			{
-				struct mpc_config_ioapic *m=
-					(struct mpc_config_ioapic *)mpt;
-				MP_ioapic_info(m);
-				mpt+=sizeof(*m);
-				count+=sizeof(*m);
-				break;
-			}
-			case MP_INTSRC:
-			{
-				struct mpc_config_intsrc *m=
-					(struct mpc_config_intsrc *)mpt;
-
-				MP_intsrc_info(m);
-				mpt+=sizeof(*m);
-				count+=sizeof(*m);
-				break;
-			}
-			case MP_LINTSRC:
-			{
-				struct mpc_config_lintsrc *m=
-					(struct mpc_config_lintsrc *)mpt;
-				MP_lintsrc_info(m);
-				mpt+=sizeof(*m);
-				count+=sizeof(*m);
-				break;
-			}
-			default:
-			{
-				count = mpc->mpc_length;
-				break;
-			}
-		}
-		++mpc_record;
-	}
-	setup_apic_routing();
-	if (!num_processors)
-		printk(KERN_ERR "SMP mptable: no processors registered!\n");
-	return num_processors;
-}
-
-static int __init ELCR_trigger(unsigned int irq)
-{
-	unsigned int port;
-
-	port = 0x4d0 + (irq >> 3);
-	return (inb(port) >> (irq & 7)) & 1;
-}
-
-static void __init construct_default_ioirq_mptable(int mpc_default_type)
-{
-	struct mpc_config_intsrc intsrc;
-	int i;
-	int ELCR_fallback = 0;
-
-	intsrc.mpc_type = MP_INTSRC;
-	intsrc.mpc_irqflag = 0;			/* conforming */
-	intsrc.mpc_srcbus = 0;
-	intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid;
-
-	intsrc.mpc_irqtype = mp_INT;
-
-	/*
-	 *  If true, we have an ISA/PCI system with no IRQ entries
-	 *  in the MP table. To prevent the PCI interrupts from being set up
-	 *  incorrectly, we try to use the ELCR. The sanity check to see if
-	 *  there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
-	 *  never be level sensitive, so we simply see if the ELCR agrees.
-	 *  If it does, we assume it's valid.
-	 */
-	if (mpc_default_type == 5) {
-		printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n");
-
-		if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13))
-			printk(KERN_WARNING "ELCR contains invalid data... not using ELCR\n");
-		else {
-			printk(KERN_INFO "Using ELCR to identify PCI interrupts\n");
-			ELCR_fallback = 1;
-		}
-	}
-
-	for (i = 0; i < 16; i++) {
-		switch (mpc_default_type) {
-		case 2:
-			if (i == 0 || i == 13)
-				continue;	/* IRQ0 & IRQ13 not connected */
-			/* fall through */
-		default:
-			if (i == 2)
-				continue;	/* IRQ2 is never connected */
-		}
-
-		if (ELCR_fallback) {
-			/*
-			 *  If the ELCR indicates a level-sensitive interrupt, we
-			 *  copy that information over to the MP table in the
-			 *  irqflag field (level sensitive, active high polarity).
-			 */
-			if (ELCR_trigger(i))
-				intsrc.mpc_irqflag = 13;
-			else
-				intsrc.mpc_irqflag = 0;
-		}
-
-		intsrc.mpc_srcbusirq = i;
-		intsrc.mpc_dstirq = i ? i : 2;		/* IRQ0 to INTIN2 */
-		MP_intsrc_info(&intsrc);
-	}
-
-	intsrc.mpc_irqtype = mp_ExtINT;
-	intsrc.mpc_srcbusirq = 0;
-	intsrc.mpc_dstirq = 0;				/* 8259A to INTIN0 */
-	MP_intsrc_info(&intsrc);
-}
-
-static inline void __init construct_default_ISA_mptable(int mpc_default_type)
-{
-	struct mpc_config_processor processor;
-	struct mpc_config_bus bus;
-	struct mpc_config_ioapic ioapic;
-	struct mpc_config_lintsrc lintsrc;
-	int linttypes[2] = { mp_ExtINT, mp_NMI };
-	int i;
-
-	/*
-	 * local APIC has default address
-	 */
-	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
-
-	/*
-	 * 2 CPUs, numbered 0 & 1.
-	 */
-	processor.mpc_type = MP_PROCESSOR;
-	/* Either an integrated APIC or a discrete 82489DX. */
-	processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
-	processor.mpc_cpuflag = CPU_ENABLED;
-	processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
-				   (boot_cpu_data.x86_model << 4) |
-				   boot_cpu_data.x86_mask;
-	processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
-	processor.mpc_reserved[0] = 0;
-	processor.mpc_reserved[1] = 0;
-	for (i = 0; i < 2; i++) {
-		processor.mpc_apicid = i;
-		MP_processor_info(&processor);
-	}
-
-	bus.mpc_type = MP_BUS;
-	bus.mpc_busid = 0;
-	switch (mpc_default_type) {
-		default:
-			printk("???\n");
-			printk(KERN_ERR "Unknown standard configuration %d\n",
-				mpc_default_type);
-			/* fall through */
-		case 1:
-		case 5:
-			memcpy(bus.mpc_bustype, "ISA   ", 6);
-			break;
-		case 2:
-		case 6:
-		case 3:
-			memcpy(bus.mpc_bustype, "EISA  ", 6);
-			break;
-		case 4:
-		case 7:
-			memcpy(bus.mpc_bustype, "MCA   ", 6);
-	}
-	MP_bus_info(&bus);
-	if (mpc_default_type > 4) {
-		bus.mpc_busid = 1;
-		memcpy(bus.mpc_bustype, "PCI   ", 6);
-		MP_bus_info(&bus);
-	}
-
-	ioapic.mpc_type = MP_IOAPIC;
-	ioapic.mpc_apicid = 2;
-	ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
-	ioapic.mpc_flags = MPC_APIC_USABLE;
-	ioapic.mpc_apicaddr = 0xFEC00000;
-	MP_ioapic_info(&ioapic);
-
-	/*
-	 * We set up most of the low 16 IO-APIC pins according to MPS rules.
-	 */
-	construct_default_ioirq_mptable(mpc_default_type);
-
-	lintsrc.mpc_type = MP_LINTSRC;
-	lintsrc.mpc_irqflag = 0;		/* conforming */
-	lintsrc.mpc_srcbusid = 0;
-	lintsrc.mpc_srcbusirq = 0;
-	lintsrc.mpc_destapic = MP_APIC_ALL;
-	for (i = 0; i < 2; i++) {
-		lintsrc.mpc_irqtype = linttypes[i];
-		lintsrc.mpc_destapiclint = i;
-		MP_lintsrc_info(&lintsrc);
-	}
-}
-
-static struct intel_mp_floating *mpf_found;
-
-/*
- * Scan the memory blocks for an SMP configuration block.
- */
-void __init get_smp_config (void)
-{
-	struct intel_mp_floating *mpf = mpf_found;
-
-	/*
-	 * ACPI supports both logical (e.g. Hyper-Threading) and physical 
-	 * processors, where MPS only supports physical.
-	 */
-	if (acpi_lapic && acpi_ioapic) {
-		printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n");
-		return;
-	}
-	else if (acpi_lapic)
-		printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n");
-
-	printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
-	if (mpf->mpf_feature2 & (1<<7)) {
-		printk(KERN_INFO "    IMCR and PIC compatibility mode.\n");
-		pic_mode = 1;
-	} else {
-		printk(KERN_INFO "    Virtual Wire compatibility mode.\n");
-		pic_mode = 0;
-	}
-
-	/*
-	 * Now see if we need to read further.
-	 */
-	if (mpf->mpf_feature1 != 0) {
-
-		printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1);
-		construct_default_ISA_mptable(mpf->mpf_feature1);
-
-	} else if (mpf->mpf_physptr) {
-
-		/*
-		 * Read the physical hardware table.  Anything here will
-		 * override the defaults.
-		 */
-		if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr))) {
-			smp_found_config = 0;
-			printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
-			printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
-			return;
-		}
-		/*
-		 * If there are no explicit MP IRQ entries, then we are
-		 * broken.  We set up most of the low 16 IO-APIC pins to
-		 * ISA defaults and hope it will work.
-		 */
-		if (!mp_irq_entries) {
-			struct mpc_config_bus bus;
-
-			printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");
-
-			bus.mpc_type = MP_BUS;
-			bus.mpc_busid = 0;
-			memcpy(bus.mpc_bustype, "ISA   ", 6);
-			MP_bus_info(&bus);
-
-			construct_default_ioirq_mptable(0);
-		}
-
-	} else
-		BUG();
-
-	printk(KERN_INFO "Processors: %d\n", num_processors);
-	/*
-	 * Only use the first configuration found.
-	 */
-}
-
-static int __init smp_scan_config (unsigned long base, unsigned long length)
-{
-	unsigned long *bp = phys_to_virt(base);
-	struct intel_mp_floating *mpf;
-
-	printk(KERN_INFO "Scan SMP from %p for %ld bytes.\n", bp,length);
-	if (sizeof(*mpf) != 16)
-		printk("Error: MPF size\n");
-
-	while (length > 0) {
-		mpf = (struct intel_mp_floating *)bp;
-		if ((*bp == SMP_MAGIC_IDENT) &&
-			(mpf->mpf_length == 1) &&
-			!mpf_checksum((unsigned char *)bp, 16) &&
-			((mpf->mpf_specification == 1)
-				|| (mpf->mpf_specification == 4)) ) {
-
-			smp_found_config = 1;
-			printk(KERN_INFO "found SMP MP-table at [%p] %08lx\n",
-				mpf, virt_to_phys(mpf));
-			reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE,
-					BOOTMEM_DEFAULT);
-			if (mpf->mpf_physptr) {
-				/*
-				 * We cannot access to MPC table to compute
-				 * table size yet, as only few megabytes from
-				 * the bottom is mapped now.
-				 * PC-9800's MPC table places on the very last
-				 * of physical memory; so that simply reserving
-				 * PAGE_SIZE from mpg->mpf_physptr yields BUG()
-				 * in reserve_bootmem.
-				 */
-				unsigned long size = PAGE_SIZE;
-				unsigned long end = max_low_pfn * PAGE_SIZE;
-				if (mpf->mpf_physptr + size > end)
-					size = end - mpf->mpf_physptr;
-				reserve_bootmem(mpf->mpf_physptr, size,
-						BOOTMEM_DEFAULT);
-			}
-
-			mpf_found = mpf;
-			return 1;
-		}
-		bp += 4;
-		length -= 16;
-	}
-	return 0;
-}
-
-void __init find_smp_config (void)
-{
-	unsigned int address;
-
-	/*
-	 * FIXME: Linux assumes you have 640K of base ram..
-	 * this continues the error...
-	 *
-	 * 1) Scan the bottom 1K for a signature
-	 * 2) Scan the top 1K of base RAM
-	 * 3) Scan the 64K of bios
-	 */
-	if (smp_scan_config(0x0,0x400) ||
-		smp_scan_config(639*0x400,0x400) ||
-			smp_scan_config(0xF0000,0x10000))
-		return;
-	/*
-	 * If it is an SMP machine we should know now, unless the
-	 * configuration is in an EISA/MCA bus machine with an
-	 * extended bios data area.
-	 *
-	 * there is a real-mode segmented pointer pointing to the
-	 * 4K EBDA area at 0x40E, calculate and scan it here.
-	 *
-	 * NOTE! There are Linux loaders that will corrupt the EBDA
-	 * area, and as such this kind of SMP config may be less
-	 * trustworthy, simply because the SMP table may have been
-	 * stomped on during early boot. These loaders are buggy and
-	 * should be fixed.
-	 *
-	 * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
-	 */
-
-	address = get_bios_ebda();
-	if (address)
-		smp_scan_config(address, 0x400);
-}
-
-int es7000_plat;
-
-/* --------------------------------------------------------------------------
-                            ACPI-based MP Configuration
-   -------------------------------------------------------------------------- */
-
-#ifdef CONFIG_ACPI
-
-void __init mp_register_lapic_address(u64 address)
-{
-	mp_lapic_addr = (unsigned long) address;
-
-	set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
-
-	if (boot_cpu_physical_apicid == -1U)
-		boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
-
-	Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid);
-}
-
-void __cpuinit mp_register_lapic (u8 id, u8 enabled)
-{
-	struct mpc_config_processor processor;
-	int boot_cpu = 0;
-	
-	if (MAX_APICS - id <= 0) {
-		printk(KERN_WARNING "Processor #%d invalid (max %d)\n",
-			id, MAX_APICS);
-		return;
-	}
-
-	if (id == boot_cpu_physical_apicid)
-		boot_cpu = 1;
-
-	processor.mpc_type = MP_PROCESSOR;
-	processor.mpc_apicid = id;
-	processor.mpc_apicver = GET_APIC_VERSION(apic_read(APIC_LVR));
-	processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0);
-	processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0);
-	processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) | 
-		(boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
-	processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
-	processor.mpc_reserved[0] = 0;
-	processor.mpc_reserved[1] = 0;
-
-	MP_processor_info(&processor);
-}
-
-#ifdef	CONFIG_X86_IO_APIC
-
-#define MP_ISA_BUS		0
-#define MP_MAX_IOAPIC_PIN	127
-
-static struct mp_ioapic_routing {
-	int			apic_id;
-	int			gsi_base;
-	int			gsi_end;
-	u32			pin_programmed[4];
-} mp_ioapic_routing[MAX_IO_APICS];
-
-static int mp_find_ioapic (int gsi)
-{
-	int i = 0;
-
-	/* Find the IOAPIC that manages this GSI. */
-	for (i = 0; i < nr_ioapics; i++) {
-		if ((gsi >= mp_ioapic_routing[i].gsi_base)
-			&& (gsi <= mp_ioapic_routing[i].gsi_end))
-			return i;
-	}
-
-	printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
-
-	return -1;
-}
-
-void __init mp_register_ioapic(u8 id, u32 address, u32 gsi_base)
-{
-	int idx = 0;
-	int tmpid;
-
-	if (nr_ioapics >= MAX_IO_APICS) {
-		printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
-			"(found %d)\n", MAX_IO_APICS, nr_ioapics);
-		panic("Recompile kernel with bigger MAX_IO_APICS!\n");
-	}
-	if (!address) {
-		printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
-			" found in MADT table, skipping!\n");
-		return;
-	}
-
-	idx = nr_ioapics++;
-
-	mp_ioapics[idx].mpc_type = MP_IOAPIC;
-	mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
-	mp_ioapics[idx].mpc_apicaddr = address;
-
-	set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
-	if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
-		&& !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
-		tmpid = io_apic_get_unique_id(idx, id);
-	else
-		tmpid = id;
-	if (tmpid == -1) {
-		nr_ioapics--;
-		return;
-	}
-	mp_ioapics[idx].mpc_apicid = tmpid;
-	mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);
-	
-	/* 
-	 * Build basic GSI lookup table to facilitate gsi->io_apic lookups
-	 * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
-	 */
-	mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid;
-	mp_ioapic_routing[idx].gsi_base = gsi_base;
-	mp_ioapic_routing[idx].gsi_end = gsi_base +
-		io_apic_get_redir_entries(idx);
-
-	printk("IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
-	       "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid,
-	       mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr,
-	       mp_ioapic_routing[idx].gsi_base,
-	       mp_ioapic_routing[idx].gsi_end);
-}
-
-void __init
-mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
-{
-	struct mpc_config_intsrc intsrc;
-	int			ioapic = -1;
-	int			pin = -1;
-
-	/* 
-	 * Convert 'gsi' to 'ioapic.pin'.
-	 */
-	ioapic = mp_find_ioapic(gsi);
-	if (ioapic < 0)
-		return;
-	pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
-
-	/*
-	 * TBD: This check is for faulty timer entries, where the override
-	 *      erroneously sets the trigger to level, resulting in a HUGE 
-	 *      increase of timer interrupts!
-	 */
-	if ((bus_irq == 0) && (trigger == 3))
-		trigger = 1;
-
-	intsrc.mpc_type = MP_INTSRC;
-	intsrc.mpc_irqtype = mp_INT;
-	intsrc.mpc_irqflag = (trigger << 2) | polarity;
-	intsrc.mpc_srcbus = MP_ISA_BUS;
-	intsrc.mpc_srcbusirq = bus_irq;				       /* IRQ */
-	intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;	   /* APIC ID */
-	intsrc.mpc_dstirq = pin;				    /* INTIN# */
-
-	Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n",
-		intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, 
-		(intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, 
-		intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);
-
-	mp_irqs[mp_irq_entries] = intsrc;
-	if (++mp_irq_entries == MAX_IRQ_SOURCES)
-		panic("Max # of irq sources exceeded!\n");
-}
-
-void __init mp_config_acpi_legacy_irqs (void)
-{
-	struct mpc_config_intsrc intsrc;
-	int i = 0;
-	int ioapic = -1;
-
-	/* 
-	 * Fabricate the legacy ISA bus (bus #31).
-	 */
-	mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
-	Dprintk("Bus #%d is ISA\n", MP_ISA_BUS);
-
-	/*
-	 * Older generations of ES7000 have no legacy identity mappings
-	 */
-	if (es7000_plat == 1)
-		return;
-
-	/* 
-	 * Locate the IOAPIC that manages the ISA IRQs (0-15). 
-	 */
-	ioapic = mp_find_ioapic(0);
-	if (ioapic < 0)
-		return;
-
-	intsrc.mpc_type = MP_INTSRC;
-	intsrc.mpc_irqflag = 0;					/* Conforming */
-	intsrc.mpc_srcbus = MP_ISA_BUS;
-	intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;
-
-	/* 
-	 * Use the default configuration for the IRQs 0-15.  Unless
-	 * overridden by (MADT) interrupt source override entries.
-	 */
-	for (i = 0; i < 16; i++) {
-		int idx;
-
-		for (idx = 0; idx < mp_irq_entries; idx++) {
-			struct mpc_config_intsrc *irq = mp_irqs + idx;
-
-			/* Do we already have a mapping for this ISA IRQ? */
-			if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i)
-				break;
-
-			/* Do we already have a mapping for this IOAPIC pin */
-			if ((irq->mpc_dstapic == intsrc.mpc_dstapic) &&
-				(irq->mpc_dstirq == i))
-				break;
-		}
-
-		if (idx != mp_irq_entries) {
-			printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
-			continue;			/* IRQ already used */
-		}
-
-		intsrc.mpc_irqtype = mp_INT;
-		intsrc.mpc_srcbusirq = i;		   /* Identity mapped */
-		intsrc.mpc_dstirq = i;
-
-		Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
-			"%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, 
-			(intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, 
-			intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, 
-			intsrc.mpc_dstirq);
-
-		mp_irqs[mp_irq_entries] = intsrc;
-		if (++mp_irq_entries == MAX_IRQ_SOURCES)
-			panic("Max # of irq sources exceeded!\n");
-	}
-}
-
-#define MAX_GSI_NUM	4096
-#define IRQ_COMPRESSION_START	64
-
-int mp_register_gsi(u32 gsi, int triggering, int polarity)
-{
-	int ioapic = -1;
-	int ioapic_pin = 0;
-	int idx, bit = 0;
-	static int pci_irq = IRQ_COMPRESSION_START;
-	/*
-	 * Mapping between Global System Interrupts, which
-	 * represent all possible interrupts, and IRQs
-	 * assigned to actual devices.
-	 */
-	static int		gsi_to_irq[MAX_GSI_NUM];
-
-	/* Don't set up the ACPI SCI because it's already set up */
-	if (acpi_gbl_FADT.sci_interrupt == gsi)
-		return gsi;
-
-	ioapic = mp_find_ioapic(gsi);
-	if (ioapic < 0) {
-		printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
-		return gsi;
-	}
-
-	ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
-
-	if (ioapic_renumber_irq)
-		gsi = ioapic_renumber_irq(ioapic, gsi);
-
-	/* 
-	 * Avoid pin reprogramming.  PRTs typically include entries  
-	 * with redundant pin->gsi mappings (but unique PCI devices);
-	 * we only program the IOAPIC on the first.
-	 */
-	bit = ioapic_pin % 32;
-	idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
-	if (idx > 3) {
-		printk(KERN_ERR "Invalid reference to IOAPIC pin "
-			"%d-%d\n", mp_ioapic_routing[ioapic].apic_id, 
-			ioapic_pin);
-		return gsi;
-	}
-	if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
-		Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
-			mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
-		return (gsi < IRQ_COMPRESSION_START ? gsi : gsi_to_irq[gsi]);
-	}
-
-	mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
-
-	/*
-	 * For GSI >= 64, use IRQ compression
-	 */
-	if ((gsi >= IRQ_COMPRESSION_START)
-		&& (triggering == ACPI_LEVEL_SENSITIVE)) {
-		/*
-		 * For PCI devices assign IRQs in order, avoiding gaps
-		 * due to unused I/O APIC pins.
-		 */
-		int irq = gsi;
-		if (gsi < MAX_GSI_NUM) {
-			/*
-			 * Retain the VIA chipset work-around (gsi > 15), but
-			 * avoid a problem where the 8254 timer (IRQ0) is setup
-			 * via an override (so it's not on pin 0 of the ioapic),
-			 * and at the same time, the pin 0 interrupt is a PCI
-			 * type.  The gsi > 15 test could cause these two pins
-			 * to be shared as IRQ0, and they are not shareable.
-			 * So test for this condition, and if necessary, avoid
-			 * the pin collision.
-			 */
-			if (gsi > 15 || (gsi == 0 && !timer_uses_ioapic_pin_0))
-				gsi = pci_irq++;
-			/*
-			 * Don't assign IRQ used by ACPI SCI
-			 */
-			if (gsi == acpi_gbl_FADT.sci_interrupt)
-				gsi = pci_irq++;
-			gsi_to_irq[irq] = gsi;
-		} else {
-			printk(KERN_ERR "GSI %u is too high\n", gsi);
-			return gsi;
-		}
-	}
-
-	io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
-		    triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
-		    polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
-	return gsi;
-}
-
-#endif /* CONFIG_X86_IO_APIC */
-#endif /* CONFIG_ACPI */
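Note on the parser removed above (and its 64-bit twin below): both files locate the MP floating pointer structure by stepping through low memory in 16-byte paragraphs. The following is a minimal stand-alone sketch of the acceptance test smp_scan_config() applies at each step, assuming the usual struct intel_mp_floating layout (4-byte "_MP_" signature, 4-byte physptr, then length, specification and checksum bytes); SMP_MAGIC_IDENT is the 32-bit encoding of "_MP_".

    /*
     * Hedged sketch, not the kernel code itself: a candidate is valid
     * when it carries the "_MP_" signature, declares a length of one
     * 16-byte paragraph, its bytes sum to zero mod 256, and it claims
     * MP specification revision 1.1 or 1.4.
     */
    #include <string.h>

    static int mpf_checksum(const unsigned char *p, int len)
    {
    	int sum = 0;

    	while (len--)
    		sum += *p++;
    	return sum & 0xFF;		/* 0 for a valid structure */
    }

    static int is_mp_floating(const unsigned char *p)
    {
    	return !memcmp(p, "_MP_", 4) &&		/* signature */
    	       p[8] == 1 &&			/* length, 16-byte units */
    	       !mpf_checksum(p, 16) &&		/* checksum over 16 bytes */
    	       (p[9] == 1 || p[9] == 4);	/* spec rev 1.1 or 1.4 */
    }

Once a hit is found, reserve_bootmem() protects the structure (and the configuration table it points to) from early allocations, as the deleted smp_scan_config() shows.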
diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c
deleted file mode 100644
index 72ab140..0000000
--- a/arch/x86/kernel/mpparse_64.c
+++ /dev/null
@@ -1,867 +0,0 @@
-/*
- *	Intel Multiprocessor Specification 1.1 and 1.4
- *	compliant MP-table parsing routines.
- *
- *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
- *	(c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
- *
- *	Fixes
- *		Erich Boleyn	:	MP v1.4 and additional changes.
- *		Alan Cox	:	Added EBDA scanning
- *		Ingo Molnar	:	various cleanups and rewrites
- *		Maciej W. Rozycki:	Bits for default MP configurations
- *		Paul Diefenbaugh:	Added full ACPI support
- */
-
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/bootmem.h>
-#include <linux/kernel_stat.h>
-#include <linux/mc146818rtc.h>
-#include <linux/acpi.h>
-#include <linux/module.h>
-
-#include <asm/smp.h>
-#include <asm/mtrr.h>
-#include <asm/mpspec.h>
-#include <asm/pgalloc.h>
-#include <asm/io_apic.h>
-#include <asm/proto.h>
-#include <asm/acpi.h>
-
-/* Have we found an MP table */
-int smp_found_config;
-
-/*
- * Various Linux-internal data structures created from the
- * MP-table.
- */
-DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
-int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
-
-static int mp_current_pci_id = 0;
-/* I/O APIC entries */
-struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
-
-/* # of MP IRQ source entries */
-struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
-
-/* MP IRQ source entries */
-int mp_irq_entries;
-
-int nr_ioapics;
-unsigned long mp_lapic_addr = 0;
-
-
-
-/* Processor that is doing the boot up */
-unsigned int boot_cpu_id = -1U;
-EXPORT_SYMBOL(boot_cpu_id);
-
-/* Internal processor count */
-unsigned int num_processors;
-
-unsigned disabled_cpus __cpuinitdata;
-
-/* Bitmask of physically existing CPUs */
-physid_mask_t phys_cpu_present_map = PHYSID_MASK_NONE;
-
-u16 x86_bios_cpu_apicid_init[NR_CPUS] __initdata
-				= { [0 ... NR_CPUS-1] = BAD_APICID };
-void *x86_bios_cpu_apicid_early_ptr;
-DEFINE_PER_CPU(u16, x86_bios_cpu_apicid) = BAD_APICID;
-EXPORT_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
-
-
-/*
- * Intel MP BIOS table parsing routines:
- */
-
-/*
- * Checksum an MP configuration block.
- */
-
-static int __init mpf_checksum(unsigned char *mp, int len)
-{
-	int sum = 0;
-
-	while (len--)
-		sum += *mp++;
-
-	return sum & 0xFF;
-}
-
-static void __cpuinit MP_processor_info(struct mpc_config_processor *m)
-{
-	int cpu;
-	cpumask_t tmp_map;
-	char *bootup_cpu = "";
-
-	if (!(m->mpc_cpuflag & CPU_ENABLED)) {
-		disabled_cpus++;
-		return;
-	}
-	if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
-		bootup_cpu = " (Bootup-CPU)";
-		boot_cpu_id = m->mpc_apicid;
-	}
-
-	printk(KERN_INFO "Processor #%d%s\n", m->mpc_apicid, bootup_cpu);
-
-	if (num_processors >= NR_CPUS) {
-		printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
-			" Processor ignored.\n", NR_CPUS);
-		return;
-	}
-
-	num_processors++;
-	cpus_complement(tmp_map, cpu_present_map);
-	cpu = first_cpu(tmp_map);
-
-	physid_set(m->mpc_apicid, phys_cpu_present_map);
- 	if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
- 		/*
-		 * x86_bios_cpu_apicid is required to have processors listed
- 		 * in same order as logical cpu numbers. Hence the first
- 		 * entry is BSP, and so on.
- 		 */
-		cpu = 0;
- 	}
-	/* are we being called early in kernel startup? */
-	if (x86_cpu_to_apicid_early_ptr) {
-		u16 *cpu_to_apicid = x86_cpu_to_apicid_early_ptr;
-		u16 *bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr;
-
-		cpu_to_apicid[cpu] = m->mpc_apicid;
-		bios_cpu_apicid[cpu] = m->mpc_apicid;
-	} else {
-		per_cpu(x86_cpu_to_apicid, cpu) = m->mpc_apicid;
-		per_cpu(x86_bios_cpu_apicid, cpu) = m->mpc_apicid;
-	}
-
-	cpu_set(cpu, cpu_possible_map);
-	cpu_set(cpu, cpu_present_map);
-}
-
-static void __init MP_bus_info (struct mpc_config_bus *m)
-{
-	char str[7];
-
-	memcpy(str, m->mpc_bustype, 6);
-	str[6] = 0;
-	Dprintk("Bus #%d is %s\n", m->mpc_busid, str);
-
-	if (strncmp(str, "ISA", 3) == 0) {
-		set_bit(m->mpc_busid, mp_bus_not_pci);
-	} else if (strncmp(str, "PCI", 3) == 0) {
-		clear_bit(m->mpc_busid, mp_bus_not_pci);
-		mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
-		mp_current_pci_id++;
-	} else {
-		printk(KERN_ERR "Unknown bustype %s\n", str);
-	}
-}
-
-static int bad_ioapic(unsigned long address)
-{
-	if (nr_ioapics >= MAX_IO_APICS) {
-		printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
-			"(found %d)\n", MAX_IO_APICS, nr_ioapics);
-		panic("Recompile kernel with bigger MAX_IO_APICS!\n");
-	}
-	if (!address) {
-		printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
-			" found in table, skipping!\n");
-		return 1;
-	}
-	return 0;
-}
-
-static void __init MP_ioapic_info (struct mpc_config_ioapic *m)
-{
-	if (!(m->mpc_flags & MPC_APIC_USABLE))
-		return;
-
-	printk("I/O APIC #%d at 0x%X.\n",
-		m->mpc_apicid, m->mpc_apicaddr);
-
-	if (bad_ioapic(m->mpc_apicaddr))
-		return;
-
-	mp_ioapics[nr_ioapics] = *m;
-	nr_ioapics++;
-}
-
-static void __init MP_intsrc_info (struct mpc_config_intsrc *m)
-{
-	mp_irqs [mp_irq_entries] = *m;
-	Dprintk("Int: type %d, pol %d, trig %d, bus %d,"
-		" IRQ %02x, APIC ID %x, APIC INT %02x\n",
-			m->mpc_irqtype, m->mpc_irqflag & 3,
-			(m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
-			m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
-	if (++mp_irq_entries >= MAX_IRQ_SOURCES)
-		panic("Max # of irq sources exceeded!!\n");
-}
-
-static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m)
-{
-	Dprintk("Lint: type %d, pol %d, trig %d, bus %d,"
-		" IRQ %02x, APIC ID %x, APIC LINT %02x\n",
-			m->mpc_irqtype, m->mpc_irqflag & 3,
-			(m->mpc_irqflag >> 2) &3, m->mpc_srcbusid,
-			m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
-}
-
-/*
- * Read/parse the MPC
- */
-
-static int __init smp_read_mpc(struct mp_config_table *mpc)
-{
-	char str[16];
-	int count=sizeof(*mpc);
-	unsigned char *mpt=((unsigned char *)mpc)+count;
-
-	if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) {
-		printk("MPTABLE: bad signature [%c%c%c%c]!\n",
-			mpc->mpc_signature[0],
-			mpc->mpc_signature[1],
-			mpc->mpc_signature[2],
-			mpc->mpc_signature[3]);
-		return 0;
-	}
-	if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) {
-		printk("MPTABLE: checksum error!\n");
-		return 0;
-	}
-	if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) {
-		printk(KERN_ERR "MPTABLE: bad table version (%d)!!\n",
-			mpc->mpc_spec);
-		return 0;
-	}
-	if (!mpc->mpc_lapic) {
-		printk(KERN_ERR "MPTABLE: null local APIC address!\n");
-		return 0;
-	}
-	memcpy(str,mpc->mpc_oem,8);
-	str[8] = 0;
-	printk(KERN_INFO "MPTABLE: OEM ID: %s ",str);
-
-	memcpy(str,mpc->mpc_productid,12);
-	str[12] = 0;
-	printk("MPTABLE: Product ID: %s ",str);
-
-	printk("MPTABLE: APIC at: 0x%X\n",mpc->mpc_lapic);
-
-	/* save the local APIC address, it might be non-default */
-	if (!acpi_lapic)
-		mp_lapic_addr = mpc->mpc_lapic;
-
-	/*
-	 *	Now process the configuration blocks.
-	 */
-	while (count < mpc->mpc_length) {
-		switch(*mpt) {
-			case MP_PROCESSOR:
-			{
-				struct mpc_config_processor *m=
-					(struct mpc_config_processor *)mpt;
-				if (!acpi_lapic)
-					MP_processor_info(m);
-				mpt += sizeof(*m);
-				count += sizeof(*m);
-				break;
-			}
-			case MP_BUS:
-			{
-				struct mpc_config_bus *m=
-					(struct mpc_config_bus *)mpt;
-				MP_bus_info(m);
-				mpt += sizeof(*m);
-				count += sizeof(*m);
-				break;
-			}
-			case MP_IOAPIC:
-			{
-				struct mpc_config_ioapic *m=
-					(struct mpc_config_ioapic *)mpt;
-				MP_ioapic_info(m);
-				mpt += sizeof(*m);
-				count += sizeof(*m);
-				break;
-			}
-			case MP_INTSRC:
-			{
-				struct mpc_config_intsrc *m=
-					(struct mpc_config_intsrc *)mpt;
-
-				MP_intsrc_info(m);
-				mpt += sizeof(*m);
-				count += sizeof(*m);
-				break;
-			}
-			case MP_LINTSRC:
-			{
-				struct mpc_config_lintsrc *m=
-					(struct mpc_config_lintsrc *)mpt;
-				MP_lintsrc_info(m);
-				mpt += sizeof(*m);
-				count += sizeof(*m);
-				break;
-			}
-		}
-	}
-	setup_apic_routing();
-	if (!num_processors)
-		printk(KERN_ERR "MPTABLE: no processors registered!\n");
-	return num_processors;
-}
-
-static int __init ELCR_trigger(unsigned int irq)
-{
-	unsigned int port;
-
-	port = 0x4d0 + (irq >> 3);
-	return (inb(port) >> (irq & 7)) & 1;
-}
-
-static void __init construct_default_ioirq_mptable(int mpc_default_type)
-{
-	struct mpc_config_intsrc intsrc;
-	int i;
-	int ELCR_fallback = 0;
-
-	intsrc.mpc_type = MP_INTSRC;
-	intsrc.mpc_irqflag = 0;			/* conforming */
-	intsrc.mpc_srcbus = 0;
-	intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid;
-
-	intsrc.mpc_irqtype = mp_INT;
-
-	/*
-	 *  If true, we have an ISA/PCI system with no IRQ entries
-	 *  in the MP table. To prevent the PCI interrupts from being set up
-	 *  incorrectly, we try to use the ELCR. The sanity check to see if
-	 *  there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
-	 *  never be level sensitive, so we simply see if the ELCR agrees.
-	 *  If it does, we assume it's valid.
-	 */
-	if (mpc_default_type == 5) {
-		printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n");
-
-		if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13))
-			printk(KERN_ERR "ELCR contains invalid data... not using ELCR\n");
-		else {
-			printk(KERN_INFO "Using ELCR to identify PCI interrupts\n");
-			ELCR_fallback = 1;
-		}
-	}
-
-	for (i = 0; i < 16; i++) {
-		switch (mpc_default_type) {
-		case 2:
-			if (i == 0 || i == 13)
-				continue;	/* IRQ0 & IRQ13 not connected */
-			/* fall through */
-		default:
-			if (i == 2)
-				continue;	/* IRQ2 is never connected */
-		}
-
-		if (ELCR_fallback) {
-			/*
-			 *  If the ELCR indicates a level-sensitive interrupt, we
-			 *  copy that information over to the MP table in the
-			 *  irqflag field (level sensitive, active high polarity).
-			 */
-			if (ELCR_trigger(i))
-				intsrc.mpc_irqflag = 13;
-			else
-				intsrc.mpc_irqflag = 0;
-		}
-
-		intsrc.mpc_srcbusirq = i;
-		intsrc.mpc_dstirq = i ? i : 2;		/* IRQ0 to INTIN2 */
-		MP_intsrc_info(&intsrc);
-	}
-
-	intsrc.mpc_irqtype = mp_ExtINT;
-	intsrc.mpc_srcbusirq = 0;
-	intsrc.mpc_dstirq = 0;				/* 8259A to INTIN0 */
-	MP_intsrc_info(&intsrc);
-}
-
-static inline void __init construct_default_ISA_mptable(int mpc_default_type)
-{
-	struct mpc_config_processor processor;
-	struct mpc_config_bus bus;
-	struct mpc_config_ioapic ioapic;
-	struct mpc_config_lintsrc lintsrc;
-	int linttypes[2] = { mp_ExtINT, mp_NMI };
-	int i;
-
-	/*
-	 * local APIC has default address
-	 */
-	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
-
-	/*
-	 * 2 CPUs, numbered 0 & 1.
-	 */
-	processor.mpc_type = MP_PROCESSOR;
-	processor.mpc_apicver = 0;
-	processor.mpc_cpuflag = CPU_ENABLED;
-	processor.mpc_cpufeature = 0;
-	processor.mpc_featureflag = 0;
-	processor.mpc_reserved[0] = 0;
-	processor.mpc_reserved[1] = 0;
-	for (i = 0; i < 2; i++) {
-		processor.mpc_apicid = i;
-		MP_processor_info(&processor);
-	}
-
-	bus.mpc_type = MP_BUS;
-	bus.mpc_busid = 0;
-	switch (mpc_default_type) {
-		default:
-			printk(KERN_ERR "???\nUnknown standard configuration %d\n",
-				mpc_default_type);
-			/* fall through */
-		case 1:
-		case 5:
-			memcpy(bus.mpc_bustype, "ISA   ", 6);
-			break;
-	}
-	MP_bus_info(&bus);
-	if (mpc_default_type > 4) {
-		bus.mpc_busid = 1;
-		memcpy(bus.mpc_bustype, "PCI   ", 6);
-		MP_bus_info(&bus);
-	}
-
-	ioapic.mpc_type = MP_IOAPIC;
-	ioapic.mpc_apicid = 2;
-	ioapic.mpc_apicver = 0;
-	ioapic.mpc_flags = MPC_APIC_USABLE;
-	ioapic.mpc_apicaddr = 0xFEC00000;
-	MP_ioapic_info(&ioapic);
-
-	/*
-	 * We set up most of the low 16 IO-APIC pins according to MPS rules.
-	 */
-	construct_default_ioirq_mptable(mpc_default_type);
-
-	lintsrc.mpc_type = MP_LINTSRC;
-	lintsrc.mpc_irqflag = 0;		/* conforming */
-	lintsrc.mpc_srcbusid = 0;
-	lintsrc.mpc_srcbusirq = 0;
-	lintsrc.mpc_destapic = MP_APIC_ALL;
-	for (i = 0; i < 2; i++) {
-		lintsrc.mpc_irqtype = linttypes[i];
-		lintsrc.mpc_destapiclint = i;
-		MP_lintsrc_info(&lintsrc);
-	}
-}
-
-static struct intel_mp_floating *mpf_found;
-
-/*
- * Scan the memory blocks for an SMP configuration block.
- */
-void __init get_smp_config (void)
-{
-	struct intel_mp_floating *mpf = mpf_found;
-
-	/*
- 	 * ACPI supports both logical (e.g. Hyper-Threading) and physical 
- 	 * processors, where MPS only supports physical.
- 	 */
- 	if (acpi_lapic && acpi_ioapic) {
- 		printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n");
- 		return;
-	}
- 	else if (acpi_lapic)
- 		printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n");
-
-	printk("Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
-
-	/*
-	 * Now see if we need to read further.
-	 */
-	if (mpf->mpf_feature1 != 0) {
-
-		printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1);
-		construct_default_ISA_mptable(mpf->mpf_feature1);
-
-	} else if (mpf->mpf_physptr) {
-
-		/*
-		 * Read the physical hardware table.  Anything here will
-		 * override the defaults.
-		 */
-		if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr))) {
-			smp_found_config = 0;
-			printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
-			printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
-			return;
-		}
-		/*
-		 * If there are no explicit MP IRQ entries, then we are
-		 * broken.  We set up most of the low 16 IO-APIC pins to
-		 * ISA defaults and hope it will work.
-		 */
-		if (!mp_irq_entries) {
-			struct mpc_config_bus bus;
-
-			printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");
-
-			bus.mpc_type = MP_BUS;
-			bus.mpc_busid = 0;
-			memcpy(bus.mpc_bustype, "ISA   ", 6);
-			MP_bus_info(&bus);
-
-			construct_default_ioirq_mptable(0);
-		}
-
-	} else
-		BUG();
-
-	printk(KERN_INFO "Processors: %d\n", num_processors);
-	/*
-	 * Only use the first configuration found.
-	 */
-}
-
-static int __init smp_scan_config (unsigned long base, unsigned long length)
-{
-	extern void __bad_mpf_size(void); 
-	unsigned int *bp = phys_to_virt(base);
-	struct intel_mp_floating *mpf;
-
-	Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length);
-	if (sizeof(*mpf) != 16)
-		__bad_mpf_size();
-
-	while (length > 0) {
-		mpf = (struct intel_mp_floating *)bp;
-		if ((*bp == SMP_MAGIC_IDENT) &&
-			(mpf->mpf_length == 1) &&
-			!mpf_checksum((unsigned char *)bp, 16) &&
-			((mpf->mpf_specification == 1)
-				|| (mpf->mpf_specification == 4)) ) {
-
-			smp_found_config = 1;
-			reserve_bootmem_generic(virt_to_phys(mpf), PAGE_SIZE);
-			if (mpf->mpf_physptr)
-				reserve_bootmem_generic(mpf->mpf_physptr, PAGE_SIZE);
-			mpf_found = mpf;
-			return 1;
-		}
-		bp += 4;
-		length -= 16;
-	}
-	return 0;
-}
-
-void __init find_smp_config(void)
-{
-	unsigned int address;
-
-	/*
-	 * FIXME: Linux assumes you have 640K of base ram..
-	 * this continues the error...
-	 *
-	 * 1) Scan the bottom 1K for a signature
-	 * 2) Scan the top 1K of base RAM
-	 * 3) Scan the 64K of bios
-	 */
-	if (smp_scan_config(0x0,0x400) ||
-		smp_scan_config(639*0x400,0x400) ||
-			smp_scan_config(0xF0000,0x10000))
-		return;
-	/*
-	 * If it is an SMP machine we should know now.
-	 *
-	 * there is a real-mode segmented pointer pointing to the
-	 * 4K EBDA area at 0x40E, calculate and scan it here.
-	 *
-	 * NOTE! There are Linux loaders that will corrupt the EBDA
-	 * area, and as such this kind of SMP config may be less
-	 * trustworthy, simply because the SMP table may have been
-	 * stomped on during early boot. These loaders are buggy and
-	 * should be fixed.
-	 */
-
-	address = *(unsigned short *)phys_to_virt(0x40E);
-	address <<= 4;
-	if (smp_scan_config(address, 0x1000))
-		return;
-
-	/* If we have come this far, we did not find an MP table  */
-	 printk(KERN_INFO "No mptable found.\n");
-}
-
-/* --------------------------------------------------------------------------
-                            ACPI-based MP Configuration
-   -------------------------------------------------------------------------- */
-
-#ifdef CONFIG_ACPI
-
-void __init mp_register_lapic_address(u64 address)
-{
-	mp_lapic_addr = (unsigned long) address;
-	set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
-	if (boot_cpu_id == -1U)
-		boot_cpu_id = GET_APIC_ID(apic_read(APIC_ID));
-}
-
-void __cpuinit mp_register_lapic (u8 id, u8 enabled)
-{
-	struct mpc_config_processor processor;
-	int			boot_cpu = 0;
-	
-	if (id == boot_cpu_id)
-		boot_cpu = 1;
-
-	processor.mpc_type = MP_PROCESSOR;
-	processor.mpc_apicid = id;
-	processor.mpc_apicver = 0;
-	processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0);
-	processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0);
-	processor.mpc_cpufeature = 0;
-	processor.mpc_featureflag = 0;
-	processor.mpc_reserved[0] = 0;
-	processor.mpc_reserved[1] = 0;
-
-	MP_processor_info(&processor);
-}
-
-#define MP_ISA_BUS		0
-#define MP_MAX_IOAPIC_PIN	127
-
-static struct mp_ioapic_routing {
-	int			apic_id;
-	int			gsi_start;
-	int			gsi_end;
-	u32			pin_programmed[4];
-} mp_ioapic_routing[MAX_IO_APICS];
-
-static int mp_find_ioapic(int gsi)
-{
-	int i = 0;
-
-	/* Find the IOAPIC that manages this GSI. */
-	for (i = 0; i < nr_ioapics; i++) {
-		if ((gsi >= mp_ioapic_routing[i].gsi_start)
-			&& (gsi <= mp_ioapic_routing[i].gsi_end))
-			return i;
-	}
-
-	printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
-	return -1;
-}
-
-static u8 uniq_ioapic_id(u8 id)
-{
-	int i;
-	DECLARE_BITMAP(used, 256);
-	bitmap_zero(used, 256);
-	for (i = 0; i < nr_ioapics; i++) {
-		struct mpc_config_ioapic *ia = &mp_ioapics[i];
-		__set_bit(ia->mpc_apicid, used);
-	}
-	if (!test_bit(id, used))
-		return id;
-	return find_first_zero_bit(used, 256);
-}
-
-void __init mp_register_ioapic(u8 id, u32 address, u32 gsi_base)
-{
-	int idx = 0;
-
-	if (bad_ioapic(address))
-		return;
-
-	idx = nr_ioapics;
-
-	mp_ioapics[idx].mpc_type = MP_IOAPIC;
-	mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
-	mp_ioapics[idx].mpc_apicaddr = address;
-
-	set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
-	mp_ioapics[idx].mpc_apicid = uniq_ioapic_id(id);
-	mp_ioapics[idx].mpc_apicver = 0;
-	
-	/* 
-	 * Build basic IRQ lookup table to facilitate gsi->io_apic lookups
-	 * and to prevent reprogramming of IOAPIC pins (PCI IRQs).
-	 */
-	mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid;
-	mp_ioapic_routing[idx].gsi_start = gsi_base;
-	mp_ioapic_routing[idx].gsi_end = gsi_base + 
-		io_apic_get_redir_entries(idx);
-
-	printk(KERN_INFO "IOAPIC[%d]: apic_id %d, address 0x%x, "
-		"GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid, 
-		mp_ioapics[idx].mpc_apicaddr,
-		mp_ioapic_routing[idx].gsi_start,
-		mp_ioapic_routing[idx].gsi_end);
-
-	nr_ioapics++;
-}
-
-void __init
-mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32	gsi)
-{
-	struct mpc_config_intsrc intsrc;
-	int			ioapic = -1;
-	int			pin = -1;
-
-	/* 
-	 * Convert 'gsi' to 'ioapic.pin'.
-	 */
-	ioapic = mp_find_ioapic(gsi);
-	if (ioapic < 0)
-		return;
-	pin = gsi - mp_ioapic_routing[ioapic].gsi_start;
-
-	/*
-	 * TBD: This check is for faulty timer entries, where the override
-	 *      erroneously sets the trigger to level, resulting in a HUGE 
-	 *      increase of timer interrupts!
-	 */
-	if ((bus_irq == 0) && (trigger == 3))
-		trigger = 1;
-
-	intsrc.mpc_type = MP_INTSRC;
-	intsrc.mpc_irqtype = mp_INT;
-	intsrc.mpc_irqflag = (trigger << 2) | polarity;
-	intsrc.mpc_srcbus = MP_ISA_BUS;
-	intsrc.mpc_srcbusirq = bus_irq;				       /* IRQ */
-	intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;	   /* APIC ID */
-	intsrc.mpc_dstirq = pin;				    /* INTIN# */
-
-	Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n", 
-		intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, 
-		(intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, 
-		intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);
-
-	mp_irqs[mp_irq_entries] = intsrc;
-	if (++mp_irq_entries == MAX_IRQ_SOURCES)
-		panic("Max # of irq sources exceeded!\n");
-}
-
-void __init mp_config_acpi_legacy_irqs(void)
-{
-	struct mpc_config_intsrc intsrc;
-	int i = 0;
-	int ioapic = -1;
-
-	/* 
-	 * Fabricate the legacy ISA bus (bus #31).
-	 */
-	set_bit(MP_ISA_BUS, mp_bus_not_pci);
-
-	/* 
-	 * Locate the IOAPIC that manages the ISA IRQs (0-15). 
-	 */
-	ioapic = mp_find_ioapic(0);
-	if (ioapic < 0)
-		return;
-
-	intsrc.mpc_type = MP_INTSRC;
-	intsrc.mpc_irqflag = 0;					/* Conforming */
-	intsrc.mpc_srcbus = MP_ISA_BUS;
-	intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;
-
-	/* 
-	 * Use the default configuration for the IRQs 0-15.  Unless
-	 * overridden by (MADT) interrupt source override entries.
-	 */
-	for (i = 0; i < 16; i++) {
-		int idx;
-
-		for (idx = 0; idx < mp_irq_entries; idx++) {
-			struct mpc_config_intsrc *irq = mp_irqs + idx;
-
-			/* Do we already have a mapping for this ISA IRQ? */
-			if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i)
-				break;
-
-			/* Do we already have a mapping for this IOAPIC pin */
-			if ((irq->mpc_dstapic == intsrc.mpc_dstapic) &&
-				(irq->mpc_dstirq == i))
-				break;
-		}
-
-		if (idx != mp_irq_entries) {
-			printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
-			continue;			/* IRQ already used */
-		}
-
-		intsrc.mpc_irqtype = mp_INT;
-		intsrc.mpc_srcbusirq = i;		   /* Identity mapped */
-		intsrc.mpc_dstirq = i;
-
-		Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
-			"%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, 
-			(intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, 
-			intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, 
-			intsrc.mpc_dstirq);
-
-		mp_irqs[mp_irq_entries] = intsrc;
-		if (++mp_irq_entries == MAX_IRQ_SOURCES)
-			panic("Max # of irq sources exceeded!\n");
-	}
-}
-
-int mp_register_gsi(u32 gsi, int triggering, int polarity)
-{
-	int ioapic = -1;
-	int ioapic_pin = 0;
-	int idx, bit = 0;
-
-	if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC)
-		return gsi;
-
-	/* Don't set up the ACPI SCI because it's already set up */
-	if (acpi_gbl_FADT.sci_interrupt == gsi)
-		return gsi;
-
-	ioapic = mp_find_ioapic(gsi);
-	if (ioapic < 0) {
-		printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
-		return gsi;
-	}
-
-	ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_start;
-
-	/* 
-	 * Avoid pin reprogramming.  PRTs typically include entries  
-	 * with redundant pin->gsi mappings (but unique PCI devices);
-	 * we only program the IOAPIC on the first.
-	 */
-	bit = ioapic_pin % 32;
-	idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
-	if (idx > 3) {
-		printk(KERN_ERR "Invalid reference to IOAPIC pin "
-			"%d-%d\n", mp_ioapic_routing[ioapic].apic_id, 
-			ioapic_pin);
-		return gsi;
-	}
-	if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
-		Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
-			mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
-		return gsi;
-	}
-
-	mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
-
-	io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
-		triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
-		polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
-	return gsi;
-}
-#endif /*CONFIG_ACPI*/
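Both deleted mp_register_gsi() implementations guard against reprogramming an IOAPIC pin with a small per-IOAPIC bitmap, since ACPI PRTs commonly repeat the same pin->gsi mapping for several PCI devices. A sketch of that bookkeeping, assuming at most 128 pins per IOAPIC (MP_MAX_IOAPIC_PIN is 127); the helper name is illustrative, not kernel API:

    /*
     * A 4 x 32-bit bitmap covers pins 0..127; a pin is routed only on
     * its first sighting, exactly as the deleted code's
     * pin_programmed[] logic did.
     */
    static unsigned int pin_programmed[4];	/* one bit per pin */

    static int pin_needs_programming(int pin)
    {
    	int idx = pin / 32;
    	int bit = pin % 32;

    	if (idx > 3)
    		return 0;			/* beyond pin 127: reject */
    	if (pin_programmed[idx] & (1u << bit))
    		return 0;			/* already routed, skip */
    	pin_programmed[idx] |= 1u << bit;
    	return 1;				/* first sighting: program */
    }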
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index af51ea8..4dfb405 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -65,8 +65,8 @@
 	return ret;
 }
 
-static ssize_t msr_read(struct file *file, char __user * buf,
-			size_t count, loff_t * ppos)
+static ssize_t msr_read(struct file *file, char __user *buf,
+			size_t count, loff_t *ppos)
 {
 	u32 __user *tmp = (u32 __user *) buf;
 	u32 data[2];
diff --git a/arch/x86/kernel/nmi_32.c b/arch/x86/kernel/nmi_32.c
index 6a0aa70..8421d0a 100644
--- a/arch/x86/kernel/nmi_32.c
+++ b/arch/x86/kernel/nmi_32.c
@@ -22,9 +22,11 @@
 #include <linux/cpumask.h>
 #include <linux/kernel_stat.h>
 #include <linux/kdebug.h>
+#include <linux/slab.h>
 
 #include <asm/smp.h>
 #include <asm/nmi.h>
+#include <asm/timer.h>
 
 #include "mach_traps.h"
 
@@ -67,7 +69,7 @@
 }
 #endif
 
-static int __init check_nmi_watchdog(void)
+int __init check_nmi_watchdog(void)
 {
 	unsigned int *prev_nmi_count;
 	int cpu;
@@ -80,7 +82,7 @@
 
 	prev_nmi_count = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
 	if (!prev_nmi_count)
-		return -1;
+		goto error;
 
 	printk(KERN_INFO "Testing NMI watchdog ... ");
 
@@ -117,7 +119,7 @@
 	if (!atomic_read(&nmi_active)) {
 		kfree(prev_nmi_count);
 		atomic_set(&nmi_active, -1);
-		return -1;
+		goto error;
 	}
 	printk("OK.\n");
 
@@ -128,9 +130,11 @@
 
 	kfree(prev_nmi_count);
 	return 0;
+error:
+	timer_ack = !cpu_has_tsc;
+
+	return -1;
 }
-/* This needs to happen later in boot so counters are working */
-late_initcall(check_nmi_watchdog);
 
 static int __init setup_nmi_watchdog(char *str)
 {
diff --git a/arch/x86/kernel/nmi_64.c b/arch/x86/kernel/nmi_64.c
index 9a4fde7..11f9130 100644
--- a/arch/x86/kernel/nmi_64.c
+++ b/arch/x86/kernel/nmi_64.c
@@ -26,6 +26,8 @@
 #include <asm/proto.h>
 #include <asm/mce.h>
 
+#include <mach_traps.h>
+
 int unknown_nmi_panic;
 int nmi_watchdog_enabled;
 int panic_on_unrecovered_nmi;
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 075962c..3733412 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -206,13 +206,6 @@
 	.flags = IORESOURCE_IO | IORESOURCE_BUSY,
 };
 
-static struct resource reserve_iomem = {
-	.start = 0,
-	.end = -1,
-	.name = "paravirt-iomem",
-	.flags = IORESOURCE_MEM | IORESOURCE_BUSY,
-};
-
 /*
  * Reserve the whole legacy IO space to prevent any legacy drivers
  * from wasting time probing for their hardware.  This is a fairly
@@ -222,16 +215,7 @@
  */
 int paravirt_disable_iospace(void)
 {
-	int ret;
-
-	ret = request_resource(&ioport_resource, &reserve_ioports);
-	if (ret == 0) {
-		ret = request_resource(&iomem_resource, &reserve_iomem);
-		if (ret)
-			release_resource(&reserve_ioports);
-	}
-
-	return ret;
+	return request_resource(&ioport_resource, &reserve_ioports);
 }
 
 static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode) = PARAVIRT_LAZY_NONE;
diff --git a/arch/x86/kernel/pci-dma_64.c b/arch/x86/kernel/pci-dma_64.c
index 375cb2b..ada5a06 100644
--- a/arch/x86/kernel/pci-dma_64.c
+++ b/arch/x86/kernel/pci-dma_64.c
@@ -232,32 +232,32 @@
 		return -EINVAL;
 
 	while (*p) {
-		if (!strncmp(p,"off",3))
+		if (!strncmp(p, "off", 3))
 			no_iommu = 1;
 		/* gart_parse_options has more force support */
-		if (!strncmp(p,"force",5))
+		if (!strncmp(p, "force", 5))
 			force_iommu = 1;
-		if (!strncmp(p,"noforce",7)) {
+		if (!strncmp(p, "noforce", 7)) {
 			iommu_merge = 0;
 			force_iommu = 0;
 		}
 
-		if (!strncmp(p, "biomerge",8)) {
+		if (!strncmp(p, "biomerge", 8)) {
 			iommu_bio_merge = 4096;
 			iommu_merge = 1;
 			force_iommu = 1;
 		}
-		if (!strncmp(p, "panic",5))
+		if (!strncmp(p, "panic", 5))
 			panic_on_overflow = 1;
-		if (!strncmp(p, "nopanic",7))
+		if (!strncmp(p, "nopanic", 7))
 			panic_on_overflow = 0;
-		if (!strncmp(p, "merge",5)) {
+		if (!strncmp(p, "merge", 5)) {
 			iommu_merge = 1;
 			force_iommu = 1;
 		}
-		if (!strncmp(p, "nomerge",7))
+		if (!strncmp(p, "nomerge", 7))
 			iommu_merge = 0;
-		if (!strncmp(p, "forcesac",8))
+		if (!strncmp(p, "forcesac", 8))
 			iommu_sac_force = 1;
 		if (!strncmp(p, "allowdac", 8))
 			forbid_dac = 0;
@@ -265,7 +265,7 @@
 			forbid_dac = -1;
 
 #ifdef CONFIG_SWIOTLB
-		if (!strncmp(p, "soft",4))
+		if (!strncmp(p, "soft", 4))
 			swiotlb = 1;
 #endif
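The hunk above is whitespace-only, but the pattern it touches recurs throughout this option parser: each known token is tested at the cursor with strncmp(), then the cursor hops past the next comma. A hypothetical, self-contained reduction (the helper and flag pointers are illustrative names, not kernel API):

    /*
     * Sketch of a comma-separated strncmp() option walk; only two of
     * the tokens from the hunk above are shown.
     */
    #include <string.h>

    static void parse_iommu_opts(const char *p, int *no_iommu, int *force_iommu)
    {
    	while (*p) {
    		if (!strncmp(p, "off", 3))
    			*no_iommu = 1;
    		if (!strncmp(p, "force", 5))
    			*force_iommu = 1;

    		p = strchr(p, ',');	/* advance past this token */
    		if (!p)
    			break;
    		p++;
    	}
    }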
 
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 43930e7..3903a8f 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -113,20 +113,13 @@
 
 		local_irq_disable();
 		if (!need_resched()) {
-			ktime_t t0, t1;
-			u64 t0n, t1n;
-
-			t0 = ktime_get();
-			t0n = ktime_to_ns(t0);
 			safe_halt();	/* enables interrupts racelessly */
 			local_irq_disable();
-			t1 = ktime_get();
-			t1n = ktime_to_ns(t1);
-			sched_clock_idle_wakeup_event(t1n - t0n);
 		}
 		local_irq_enable();
 		current_thread_info()->status |= TS_POLLING;
 	} else {
+		local_irq_enable();
 		/* loop is done by the caller */
 		cpu_relax();
 	}
@@ -142,6 +135,7 @@
  */
 static void poll_idle(void)
 {
+	local_irq_enable();
 	cpu_relax();
 }
 
@@ -248,8 +242,11 @@
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
 		smp_mb();
 		if (!need_resched())
-			__mwait(ax, cx);
-	}
+			__sti_mwait(ax, cx);
+		else
+			local_irq_enable();
+	} else
+		local_irq_enable();
 }
 
 /* Default MONITOR/MWAIT with no hints, used for default C1 state */
@@ -332,7 +329,7 @@
 			init_utsname()->version);
 
 	printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
-			0xffff & regs->cs, regs->ip, regs->flags,
+			(u16)regs->cs, regs->ip, regs->flags,
 			smp_processor_id());
 	print_symbol("EIP is at %s\n", regs->ip);
 
@@ -341,8 +338,7 @@
 	printk("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
 		regs->si, regs->di, regs->bp, sp);
 	printk(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
-	       regs->ds & 0xffff, regs->es & 0xffff,
-	       regs->fs & 0xffff, gs, ss);
+	       (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss);
 
 	if (!all)
 		return;
@@ -513,6 +509,21 @@
 	return err;
 }
 
+void
+start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
+{
+	__asm__("movl %0, %%gs" :: "r"(0));
+	regs->fs		= 0;
+	set_fs(USER_DS);
+	regs->ds		= __USER_DS;
+	regs->es		= __USER_DS;
+	regs->ss		= __USER_DS;
+	regs->cs		= __USER_CS;
+	regs->ip		= new_ip;
+	regs->sp		= new_sp;
+}
+EXPORT_SYMBOL_GPL(start_thread);
+
 #ifdef CONFIG_SECCOMP
 static void hard_disable_TSC(void)
 {
@@ -550,12 +561,12 @@
 		/* we clear debugctl to make sure DS
 		 * is not in use when we change it */
 		debugctl = 0;
-		wrmsrl(MSR_IA32_DEBUGCTLMSR, 0);
+		update_debugctlmsr(0);
 		wrmsr(MSR_IA32_DS_AREA, next->ds_area_msr, 0);
 	}
 
 	if (next->debugctlmsr != debugctl)
-		wrmsr(MSR_IA32_DEBUGCTLMSR, next->debugctlmsr, 0);
+		update_debugctlmsr(next->debugctlmsr);
 
 	if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
 		set_debugreg(next->debugreg0, 0);
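The show_regs() hunks above swap explicit 0xffff masks for (u16) casts; for these register values the two forms are identical. A quick stand-alone check:

    /*
     * Demonstration that the cleanup is value-preserving: truncating
     * an unsigned long to 16 bits and masking with 0xffff agree.
     */
    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
    	unsigned long x = 0xdeadbeefUL;

    	assert((uint16_t)x == (x & 0xffff));	/* both are 0xbeef */
    	return 0;
    }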
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 46c4c54..e75ccc8 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -107,16 +107,8 @@
 	smp_mb();
 	local_irq_disable();
 	if (!need_resched()) {
-		ktime_t t0, t1;
-		u64 t0n, t1n;
-
-		t0 = ktime_get();
-		t0n = ktime_to_ns(t0);
 		safe_halt();	/* enables interrupts racelessly */
 		local_irq_disable();
-		t1 = ktime_get();
-		t1n = ktime_to_ns(t1);
-		sched_clock_idle_wakeup_event(t1n - t0n);
 	}
 	local_irq_enable();
 	current_thread_info()->status |= TS_POLLING;
@@ -528,6 +520,21 @@
 	return err;
 }
 
+void
+start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
+{
+	asm volatile("movl %0, %%fs; movl %0, %%es; movl %0, %%ds" :: "r"(0));
+	load_gs_index(0);
+	regs->ip		= new_ip;
+	regs->sp		= new_sp;
+	write_pda(oldrsp, new_sp);
+	regs->cs		= __USER_CS;
+	regs->ss		= __USER_DS;
+	regs->flags		= 0x200;
+	set_fs(USER_DS);
+}
+EXPORT_SYMBOL_GPL(start_thread);
+
 /*
  * This special macro can be used to load a debugging register
  */
@@ -548,12 +555,12 @@
 		/* we clear debugctl to make sure DS
 		 * is not in use when we change it */
 		debugctl = 0;
-		wrmsrl(MSR_IA32_DEBUGCTLMSR, 0);
+		update_debugctlmsr(0);
 		wrmsrl(MSR_IA32_DS_AREA, next->ds_area_msr);
 	}
 
 	if (next->debugctlmsr != debugctl)
-		wrmsrl(MSR_IA32_DEBUGCTLMSR, next->debugctlmsr);
+		update_debugctlmsr(next->debugctlmsr);
 
 	if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
 		loaddebug(next, 0);
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index eb92ccb..559c1b0 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -1456,7 +1456,6 @@
 /* notification of system call entry/exit
  * - triggered by current->work.syscall_trace
  */
-__attribute__((regparm(3)))
 int do_syscall_trace(struct pt_regs *regs, int entryexit)
 {
 	int is_sysemu = test_thread_flag(TIF_SYSCALL_EMU);
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 484c4a8..9692202 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -1,5 +1,4 @@
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/reboot.h>
 #include <linux/init.h>
 #include <linux/pm.h>
@@ -412,12 +411,12 @@
 #ifdef CONFIG_X86_32
 	/* See if there has been given a command line override */
 	if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) &&
-		cpu_isset(reboot_cpu, cpu_online_map))
+		cpu_online(reboot_cpu))
 		reboot_cpu_id = reboot_cpu;
 #endif
 
 	/* Make certain the cpu I'm about to reboot on is online */
-	if (!cpu_isset(reboot_cpu_id, cpu_online_map))
+	if (!cpu_online(reboot_cpu_id))
 		reboot_cpu_id = smp_processor_id();
 
 	/* Make certain I only run on the appropriate processor */
diff --git a/arch/x86/kernel/relocate_kernel_32.S b/arch/x86/kernel/relocate_kernel_32.S
index f151d6f..c30fe25 100644
--- a/arch/x86/kernel/relocate_kernel_32.S
+++ b/arch/x86/kernel/relocate_kernel_32.S
@@ -9,18 +9,19 @@
 #include <linux/linkage.h>
 #include <asm/page.h>
 #include <asm/kexec.h>
+#include <asm/processor-flags.h>
+#include <asm/pgtable.h>
 
 /*
  * Must be relocatable PIC code callable as a C function
  */
 
 #define PTR(x) (x << 2)
-#define PAGE_ALIGNED (1 << PAGE_SHIFT)
-#define PAGE_ATTR 0x63 /* _PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY */
-#define PAE_PGD_ATTR 0x01 /* _PAGE_PRESENT */
+#define PAGE_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define PAE_PGD_ATTR (_PAGE_PRESENT)
 
 	.text
-	.align PAGE_ALIGNED
+	.align PAGE_SIZE
 	.globl relocate_kernel
 relocate_kernel:
 	movl	8(%esp), %ebp /* list of pages */
@@ -155,7 +156,7 @@
 	movl	%eax, %cr3
 
 	/* setup a new stack at the end of the physical control page */
-	lea	4096(%edi), %esp
+	lea	PAGE_SIZE(%edi), %esp
 
 	/* jump to identity mapped page */
 	movl    %edi, %eax
@@ -168,16 +169,16 @@
 	pushl   %edx
 
 	/* Set cr0 to a known state:
-	 * 31 0 == Paging disabled
-	 * 18 0 == Alignment check disabled
-	 * 16 0 == Write protect disabled
-	 * 3  0 == No task switch
-	 * 2  0 == Don't do FP software emulation.
-	 * 0  1 == Proctected mode enabled
+	 *  - Paging disabled
+	 *  - Alignment check disabled
+	 *  - Write protect disabled
+	 *  - No task switch
+	 *  - Don't do FP software emulation.
+	 *  - Protected mode enabled
 	 */
 	movl	%cr0, %eax
-	andl	$~((1<<31)|(1<<18)|(1<<16)|(1<<3)|(1<<2)), %eax
-	orl	$(1<<0), %eax
+	andl	$~(X86_CR0_PG | X86_CR0_AM | X86_CR0_WP | X86_CR0_TS | X86_CR0_EM), %eax
+	orl	$(X86_CR0_PE), %eax
 	movl	%eax, %cr0
 
 	/* clear cr4 if applicable */
@@ -186,8 +187,7 @@
 	/* Set cr4 to a known state:
 	 * Setting everything to zero seems safe.
 	 */
-	movl	%cr4, %eax
-	andl	$0, %eax
+	xorl	%eax, %eax
 	movl	%eax, %cr4
 
 	jmp 1f
diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
index 14e9587..f5afe66 100644
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -9,17 +9,18 @@
 #include <linux/linkage.h>
 #include <asm/page.h>
 #include <asm/kexec.h>
+#include <asm/processor-flags.h>
+#include <asm/pgtable.h>
 
 /*
  * Must be relocatable PIC code callable as a C function
  */
 
 #define PTR(x) (x << 3)
-#define PAGE_ALIGNED (1 << PAGE_SHIFT)
-#define PAGE_ATTR 0x63 /* _PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY */
+#define PAGE_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
 
 	.text
-	.align PAGE_ALIGNED
+	.align PAGE_SIZE
 	.code64
 	.globl relocate_kernel
 relocate_kernel:
@@ -160,7 +161,7 @@
 	movq	%r9, %cr3
 
 	/* setup a new stack at the end of the physical control page */
-	lea	4096(%r8), %rsp
+	lea	PAGE_SIZE(%r8), %rsp
 
 	/* jump to identity mapped page */
 	addq	$(identity_mapped - relocate_kernel), %r8
@@ -172,33 +173,22 @@
 	pushq   %rdx
 
 	/* Set cr0 to a known state:
-	 * 31 1 == Paging enabled
-	 * 18 0 == Alignment check disabled
-	 * 16 0 == Write protect disabled
-	 * 3  0 == No task switch
-	 * 2  0 == Don't do FP software emulation.
-	 * 0  1 == Proctected mode enabled
+	 *  - Paging enabled
+	 *  - Alignment check disabled
+	 *  - Write protect disabled
+	 *  - No task switch
+	 *  - Don't do FP software emulation.
+	 *  - Protected mode enabled
 	 */
 	movq	%cr0, %rax
-	andq	$~((1<<18)|(1<<16)|(1<<3)|(1<<2)), %rax
-	orl	$((1<<31)|(1<<0)), %eax
+	andq	$~(X86_CR0_AM | X86_CR0_WP | X86_CR0_TS | X86_CR0_EM), %rax
+	orl	$(X86_CR0_PG | X86_CR0_PE), %eax
 	movq	%rax, %cr0
 
 	/* Set cr4 to a known state:
-	 * 10 0 == xmm exceptions disabled
-	 * 9  0 == xmm registers instructions disabled
-	 * 8  0 == performance monitoring counter disabled
-	 * 7  0 == page global disabled
-	 * 6  0 == machine check exceptions disabled
-	 * 5  1 == physical address extension enabled
-	 * 4  0 == page size extensions	disabled
-	 * 3  0 == Debug extensions disabled
-	 * 2  0 == Time stamp disable (disabled)
-	 * 1  0 == Protected mode virtual interrupts disabled
-	 * 0  0 == VME disabled
+	 *  - physical address extension enabled
 	 */
-
-	movq	$((1<<5)), %rax
+	movq	$X86_CR4_PAE, %rax
 	movq	%rax, %cr4
 
 	jmp 1f
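
Both relocate_kernel hunks trade literal bit positions for the X86_CR0_*/
X86_CR4_* names now provided by <asm/processor-flags.h>. As a sanity check,
here is a minimal userspace sketch; the flag values are restated locally
(this is not the kernel header), and all it verifies is that the named masks
are bit-for-bit identical to the literals they replace:

#include <assert.h>
#include <stdio.h>

/* Local copies of the x86 control-register flag values; in the kernel
 * these come from <asm/processor-flags.h>. */
#define X86_CR0_PE  (1UL << 0)   /* Protection Enable */
#define X86_CR0_EM  (1UL << 2)   /* FPU Emulation */
#define X86_CR0_TS  (1UL << 3)   /* Task Switched */
#define X86_CR0_WP  (1UL << 16)  /* Write Protect */
#define X86_CR0_AM  (1UL << 18)  /* Alignment Mask */
#define X86_CR0_PG  (1UL << 31)  /* Paging */
#define X86_CR4_PAE (1UL << 5)   /* Physical Address Extension */

int main(void)
{
	/* the 32-bit clear mask and set bit */
	assert((X86_CR0_PG | X86_CR0_AM | X86_CR0_WP | X86_CR0_TS | X86_CR0_EM)
	       == ((1UL << 31) | (1UL << 18) | (1UL << 16) | (1UL << 3) | (1UL << 2)));
	assert(X86_CR0_PE == (1UL << 0));
	/* the 64-bit CR4 value */
	assert(X86_CR4_PAE == (1UL << 5));
	printf("named CR0/CR4 masks match the old magic numbers\n");
	return 0;
}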
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
index eb9b1a1..9615eee 100644
--- a/arch/x86/kernel/rtc.c
+++ b/arch/x86/kernel/rtc.c
@@ -9,7 +9,6 @@
 #include <asm/vsyscall.h>
 
 #ifdef CONFIG_X86_32
-# define CMOS_YEARS_OFFS 1900
 /*
  * This is a special lock that is owned by the CPU and holds the index
  * register we are working with.  It is required for NMI access to the
@@ -17,14 +16,11 @@
  */
 volatile unsigned long cmos_lock = 0;
 EXPORT_SYMBOL(cmos_lock);
-#else
-/*
- * x86-64 systems only exists since 2002.
- * This will work up to Dec 31, 2100
- */
-# define CMOS_YEARS_OFFS 2000
 #endif
 
+/* For two-digit years, assume the date is always after 2000 */
+#define CMOS_YEARS_OFFS 2000
+
 DEFINE_SPINLOCK(rtc_lock);
 EXPORT_SYMBOL(rtc_lock);
 
@@ -98,7 +94,7 @@
 
 unsigned long mach_get_cmos_time(void)
 {
-	unsigned int year, mon, day, hour, min, sec, century = 0;
+	unsigned int status, year, mon, day, hour, min, sec, century = 0;
 
 	/*
 	 * If UIP is clear, then we have >= 244 microseconds before
@@ -116,14 +112,16 @@
 	mon = CMOS_READ(RTC_MONTH);
 	year = CMOS_READ(RTC_YEAR);
 
-#if defined(CONFIG_ACPI) && defined(CONFIG_X86_64)
-	/* CHECKME: Is this really 64bit only ??? */
+#ifdef CONFIG_ACPI
 	if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
 	    acpi_gbl_FADT.century)
 		century = CMOS_READ(acpi_gbl_FADT.century);
 #endif
 
-	if (RTC_ALWAYS_BCD || !(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY)) {
+	status = CMOS_READ(RTC_CONTROL);
+	WARN_ON_ONCE(RTC_ALWAYS_BCD && (status & RTC_DM_BINARY));
+
+	if (RTC_ALWAYS_BCD || !(status & RTC_DM_BINARY)) {
 		BCD_TO_BIN(sec);
 		BCD_TO_BIN(min);
 		BCD_TO_BIN(hour);
@@ -136,11 +134,8 @@
 		BCD_TO_BIN(century);
 		year += century * 100;
 		printk(KERN_INFO "Extended CMOS year: %d\n", century * 100);
-	} else {
+	} else
 		year += CMOS_YEARS_OFFS;
-		if (year < 1970)
-			year += 100;
-	}
 
 	return mktime(year, mon, day, hour, min, sec);
 }
@@ -151,8 +146,8 @@
 	unsigned char val;
 
 	lock_cmos_prefix(addr);
-	outb_p(addr, RTC_PORT(0));
-	val = inb_p(RTC_PORT(1));
+	outb(addr, RTC_PORT(0));
+	val = inb(RTC_PORT(1));
 	lock_cmos_suffix(addr);
 	return val;
 }
@@ -161,8 +156,8 @@
 void rtc_cmos_write(unsigned char val, unsigned char addr)
 {
 	lock_cmos_prefix(addr);
-	outb_p(addr, RTC_PORT(0));
-	outb_p(val, RTC_PORT(1));
+	outb(addr, RTC_PORT(0));
+	outb(val, RTC_PORT(1));
 	lock_cmos_suffix(addr);
 }
 EXPORT_SYMBOL(rtc_cmos_write);
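
mach_get_cmos_time() above leaves the BCD-to-binary conversion to the
BCD_TO_BIN() macro from the mc146818 RTC headers. A self-contained sketch of
what that conversion does to raw RTC register values, with a local helper
standing in for the kernel macro:

#include <assert.h>

/* Packed BCD to binary: high nibble is tens, low nibble is ones.
 * A stand-in for the kernel's BCD_TO_BIN(). */
static unsigned int bcd_to_bin(unsigned int val)
{
	return (val & 0x0f) + (val >> 4) * 10;
}

int main(void)
{
	/* An RTC in BCD mode reports 59 seconds as 0x59. */
	assert(bcd_to_bin(0x59) == 59);
	/* A two-digit BCD year of 0x08 plus CMOS_YEARS_OFFS gives 2008. */
	assert(bcd_to_bin(0x08) + 2000 == 2008);
	return 0;
}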
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
new file mode 100644
index 0000000..ed157c9
--- /dev/null
+++ b/arch/x86/kernel/setup.c
@@ -0,0 +1,113 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/percpu.h>
+#include <asm/smp.h>
+#include <asm/percpu.h>
+#include <asm/sections.h>
+#include <asm/processor.h>
+#include <asm/setup.h>
+#include <asm/topology.h>
+#include <asm/mpspec.h>
+#include <asm/apicdef.h>
+
+unsigned int num_processors;
+unsigned disabled_cpus __cpuinitdata;
+/* Processor that is doing the boot up */
+unsigned int boot_cpu_physical_apicid = -1U;
+EXPORT_SYMBOL(boot_cpu_physical_apicid);
+
+DEFINE_PER_CPU(u16, x86_cpu_to_apicid) = BAD_APICID;
+EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid);
+
+/* Bitmask of physically existing CPUs */
+physid_mask_t phys_cpu_present_map;
+
+#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_SMP)
+/*
+ * Copy data used in early init routines from the initial arrays to the
+ * per cpu data areas.  These arrays then become expendable and the
+ * *_early_ptr's are zeroed indicating that the static arrays are gone.
+ */
+static void __init setup_per_cpu_maps(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		per_cpu(x86_cpu_to_apicid, cpu) = x86_cpu_to_apicid_init[cpu];
+		per_cpu(x86_bios_cpu_apicid, cpu) =
+						x86_bios_cpu_apicid_init[cpu];
+#ifdef CONFIG_NUMA
+		per_cpu(x86_cpu_to_node_map, cpu) =
+						x86_cpu_to_node_map_init[cpu];
+#endif
+	}
+
+	/* indicate the early static arrays will soon be gone */
+	x86_cpu_to_apicid_early_ptr = NULL;
+	x86_bios_cpu_apicid_early_ptr = NULL;
+#ifdef CONFIG_NUMA
+	x86_cpu_to_node_map_early_ptr = NULL;
+#endif
+}
+
+#ifdef CONFIG_X86_32
+/*
+ * Great future not-so-futuristic plan: make i386 and x86_64 do it
+ * the same way
+ */
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(__per_cpu_offset);
+#endif
+
+/*
+ * Great future plan:
+ * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
+ * Always point %gs to its beginning
+ */
+void __init setup_per_cpu_areas(void)
+{
+	int i;
+	unsigned long size;
+
+#ifdef CONFIG_HOTPLUG_CPU
+	prefill_possible_map();
+#endif
+
+	/* Copy section for each CPU (we discard the original) */
+	size = PERCPU_ENOUGH_ROOM;
+	printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n",
+			  size);
+
+	for_each_possible_cpu(i) {
+		char *ptr;
+#ifndef CONFIG_NEED_MULTIPLE_NODES
+		ptr = alloc_bootmem_pages(size);
+#else
+		int node = early_cpu_to_node(i);
+		if (!node_online(node) || !NODE_DATA(node)) {
+			ptr = alloc_bootmem_pages(size);
+			printk(KERN_INFO
+			       "cpu %d has no node or node-local memory\n", i);
+		} else
+			ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
+#endif
+		if (!ptr)
+			panic("Cannot allocate cpu data for CPU %d\n", i);
+#ifdef CONFIG_X86_64
+		cpu_pda(i)->data_offset = ptr - __per_cpu_start;
+#else
+		__per_cpu_offset[i] = ptr - __per_cpu_start;
+#endif
+		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
+	}
+
+	/* Setup percpu data maps */
+	setup_per_cpu_maps();
+}
+
+#endif
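
The unified setup_per_cpu_areas() copies the per-CPU data section once per
possible CPU and records each copy's distance from __per_cpu_start. Below is
a hedged userspace sketch of that offset bookkeeping only: malloc() stands
in for alloc_bootmem_pages(), and a plain char array stands in for the
__per_cpu_start..__per_cpu_end linker section (the pointer subtraction
between distinct objects is exactly the kernel's trick, even though strict C
frowns on it):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NR_CPUS 4

static char percpu_section[64];		/* stand-in for .data.percpu */
static long per_cpu_offset[NR_CPUS];

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		char *ptr = malloc(sizeof(percpu_section));

		if (!ptr) {
			fprintf(stderr, "cannot allocate cpu data for CPU %d\n", cpu);
			return 1;
		}
		/* the kernel stores ptr - __per_cpu_start */
		per_cpu_offset[cpu] = ptr - percpu_section;
		memcpy(ptr, percpu_section, sizeof(percpu_section));
	}

	/* per_cpu(var, cpu) then resolves to &var + per_cpu_offset[cpu]:
	 * here, a variable at offset 8 in the section, seen from CPU 2. */
	char *var = percpu_section + 8 + per_cpu_offset[2];
	*var = 42;
	printf("cpu 2 copy holds %d\n", *var);
	return 0;
}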
diff --git a/arch/x86/kernel/setup64.c b/arch/x86/kernel/setup64.c
index e24c456..9042fb0 100644
--- a/arch/x86/kernel/setup64.c
+++ b/arch/x86/kernel/setup64.c
@@ -11,6 +11,7 @@
 #include <linux/bootmem.h>
 #include <linux/bitops.h>
 #include <linux/module.h>
+#include <linux/kgdb.h>
 #include <asm/pda.h>
 #include <asm/pgtable.h>
 #include <asm/processor.h>
@@ -23,6 +24,7 @@
 #include <asm/proto.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
+#include <asm/genapic.h>
 
 #ifndef CONFIG_DEBUG_BOOT_PARAMS
 struct boot_params __initdata boot_params;
@@ -85,83 +87,6 @@
 }
 __setup("noexec32=", nonx32_setup);
 
-/*
- * Copy data used in early init routines from the initial arrays to the
- * per cpu data areas.  These arrays then become expendable and the
- * *_early_ptr's are zeroed indicating that the static arrays are gone.
- */
-static void __init setup_per_cpu_maps(void)
-{
-	int cpu;
-
-	for_each_possible_cpu(cpu) {
-#ifdef CONFIG_SMP
-		if (per_cpu_offset(cpu)) {
-#endif
-			per_cpu(x86_cpu_to_apicid, cpu) =
-						x86_cpu_to_apicid_init[cpu];
-			per_cpu(x86_bios_cpu_apicid, cpu) =
-						x86_bios_cpu_apicid_init[cpu];
-#ifdef CONFIG_NUMA
-			per_cpu(x86_cpu_to_node_map, cpu) =
-						x86_cpu_to_node_map_init[cpu];
-#endif
-#ifdef CONFIG_SMP
-		}
-		else
-			printk(KERN_NOTICE "per_cpu_offset zero for cpu %d\n",
-									cpu);
-#endif
-	}
-
-	/* indicate the early static arrays will soon be gone */
-	x86_cpu_to_apicid_early_ptr = NULL;
-	x86_bios_cpu_apicid_early_ptr = NULL;
-#ifdef CONFIG_NUMA
-	x86_cpu_to_node_map_early_ptr = NULL;
-#endif
-}
-
-/*
- * Great future plan:
- * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
- * Always point %gs to its beginning
- */
-void __init setup_per_cpu_areas(void)
-{ 
-	int i;
-	unsigned long size;
-
-#ifdef CONFIG_HOTPLUG_CPU
-	prefill_possible_map();
-#endif
-
-	/* Copy section for each CPU (we discard the original) */
-	size = PERCPU_ENOUGH_ROOM;
-
-	printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n", size);
-	for_each_cpu_mask (i, cpu_possible_map) {
-		char *ptr;
-#ifndef CONFIG_NEED_MULTIPLE_NODES
-		ptr = alloc_bootmem_pages(size);
-#else
-		int node = early_cpu_to_node(i);
-
-		if (!node_online(node) || !NODE_DATA(node))
-			ptr = alloc_bootmem_pages(size);
-		else
-			ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
-#endif
-		if (!ptr)
-			panic("Cannot allocate cpu data for CPU %d\n", i);
-		cpu_pda(i)->data_offset = ptr - __per_cpu_start;
-		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
-	}
-
-	/* setup percpu data maps early */
-	setup_per_cpu_maps();
-} 
-
 void pda_init(int cpu)
 { 
 	struct x8664_pda *pda = cpu_pda(cpu);
@@ -327,6 +252,17 @@
 	load_TR_desc();
 	load_LDT(&init_mm.context);
 
+#ifdef CONFIG_KGDB
+	/*
+	 * If the kgdb is connected no debug regs should be altered.  This
+	 * is only applicable when KGDB and a KGDB I/O module are built
+	 * into the kernel and you are using early debugging with
+	 * kgdbwait. KGDB will control the kernel HW breakpoint registers.
+	 */
+	if (kgdb_connected && arch_kgdb_ops.correct_hw_break)
+		arch_kgdb_ops.correct_hw_break();
+	else {
+#endif
 	/*
 	 * Clear all 6 debug registers:
 	 */
@@ -337,8 +273,15 @@
 	set_debugreg(0UL, 3);
 	set_debugreg(0UL, 6);
 	set_debugreg(0UL, 7);
+#ifdef CONFIG_KGDB
+	/* If the kgdb is connected no debug regs should be altered. */
+	}
+#endif
 
 	fpu_init(); 
 
 	raw_local_save_flags(kernel_eflags);
+
+	if (is_uv_system())
+		uv_cpu_init();
 }
diff --git a/arch/x86/kernel/setup_32.c b/arch/x86/kernel/setup_32.c
index 2b3e5d4..5b0bffb 100644
--- a/arch/x86/kernel/setup_32.c
+++ b/arch/x86/kernel/setup_32.c
@@ -62,8 +62,9 @@
 #include <asm/io.h>
 #include <asm/vmi.h>
 #include <setup_arch.h>
-#include <bios_ebda.h>
+#include <asm/bios_ebda.h>
 #include <asm/cacheflush.h>
+#include <asm/processor.h>
 
 /* This value is set up by the early boot code to point to the value
    immediately after the boot time page tables.  It contains a *physical*
@@ -154,6 +155,8 @@
 struct cpuinfo_x86 boot_cpu_data __read_mostly = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
 EXPORT_SYMBOL(boot_cpu_data);
 
+unsigned int def_to_bigsmp;
+
 #ifndef CONFIG_X86_PAE
 unsigned long mmu_cr4_features;
 #else
@@ -189,7 +192,7 @@
 extern void early_cpu_init(void);
 extern int root_mountflags;
 
-unsigned long saved_videomode;
+unsigned long saved_video_mode;
 
 #define RAMDISK_IMAGE_START_MASK	0x07FF
 #define RAMDISK_PROMPT_FLAG		0x8000
@@ -227,7 +230,7 @@
 }
 #endif
 
-int __initdata user_defined_memmap = 0;
+int __initdata user_defined_memmap;
 
 /*
  * "mem=nopentium" disables the 4MB page tables.
@@ -385,15 +388,56 @@
 	return max_low_pfn;
 }
 
+#define BIOS_EBDA_SEGMENT 0x40E
+#define BIOS_LOWMEM_KILOBYTES 0x413
+
 /*
- * workaround for Dell systems that neglect to reserve EBDA
+ * The BIOS places the EBDA/XBDA at the top of conventional
+ * memory, and usually decreases the reported amount of
+ * conventional memory (int 0x12) too. This also contains a
+ * workaround for Dell systems that neglect to reserve EBDA.
+ * The same workaround also avoids a problem with the AMD768MPX
+ * chipset: reserve a page before VGA to prevent PCI prefetch
+ * into it (errata #56). Usually the page is reserved anyways,
+ * unless you have no PS/2 mouse plugged in.
  */
 static void __init reserve_ebda_region(void)
 {
-	unsigned int addr;
-	addr = get_bios_ebda();
-	if (addr)
-		reserve_bootmem(addr, PAGE_SIZE, BOOTMEM_DEFAULT);
+	unsigned int lowmem, ebda_addr;
+
+	/*
+	 * To determine the position of the EBDA and the end of
+	 * conventional memory, we need to look at the BIOS data area. In
+	 * a paravirtual environment that area is absent. We'll just have
+	 * to assume that the paravirt case can handle memory setup
+	 * correctly, without our help.
+	 */
+	if (paravirt_enabled())
+		return;
+
+	/* end of low (conventional) memory */
+	lowmem = *(unsigned short *)__va(BIOS_LOWMEM_KILOBYTES);
+	lowmem <<= 10;
+
+	/* start of EBDA area */
+	ebda_addr = *(unsigned short *)__va(BIOS_EBDA_SEGMENT);
+	ebda_addr <<= 4;
+
+	/*
+	 * Fixup: bios puts an EBDA in the top 64K segment of conventional
+	 * memory, but does not adjust lowmem.
+	 */
+	if ((lowmem - ebda_addr) <= 0x10000)
+		lowmem = ebda_addr;
+
+	/*
+	 * Fixup: bios does not report an EBDA at all.
+	 * Some old Dells seem to need 4k anyhow (bugzilla 2990).
+	 */
+	if ((ebda_addr == 0) && (lowmem >= 0x9f000))
+		lowmem = 0x9f000;
+
+	/* Paranoia: should never happen, but... */
+	if ((lowmem == 0) || (lowmem >= 0x100000))
+		lowmem = 0x9f000;
+
+	/* reserve all memory between lowmem and the 1MB mark */
+	reserve_bootmem(lowmem, 0x100000 - lowmem, BOOTMEM_DEFAULT);
 }
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES
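
The shifts in reserve_ebda_region() turn two BIOS data area words into byte
addresses: the word at 0x413 counts conventional memory in kilobytes, and
the word at 0x40E is a real-mode segment. A small sketch of that arithmetic
with plausible sample values (639 KB of low memory and a 1 KB EBDA; the
numbers are illustrative, not read from hardware):

#include <assert.h>

int main(void)
{
	unsigned int lowmem   = 639;	/* word at 0x413, in KB */
	unsigned int ebda_seg = 0x9FC0;	/* word at 0x40E, a segment */

	unsigned int lowmem_bytes = lowmem << 10;	/* KB -> bytes */
	unsigned int ebda_addr = ebda_seg << 4;		/* segment -> linear */

	assert(lowmem_bytes == 0x9FC00);
	assert(ebda_addr == 0x9FC00);

	/* the reservation then covers everything up to the 1 MB mark */
	assert(0x100000 - lowmem_bytes == 0x60400);
	return 0;
}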
@@ -617,16 +661,9 @@
 	 */
 	reserve_bootmem(0, PAGE_SIZE, BOOTMEM_DEFAULT);
 
-	/* reserve EBDA region, it's a 4K region */
+	/* reserve EBDA region */
 	reserve_ebda_region();
 
-    /* could be an AMD 768MPX chipset. Reserve a page  before VGA to prevent
-       PCI prefetch into it (errata #56). Usually the page is reserved anyways,
-       unless you have no PS/2 mouse plugged in. */
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
-	    boot_cpu_data.x86 == 6)
-	     reserve_bootmem(0xa0000 - 4096, 4096, BOOTMEM_DEFAULT);
-
 #ifdef CONFIG_SMP
 	/*
 	 * But first pinch a few for the stack/trampoline stuff
@@ -687,6 +724,18 @@
 	return machine_specific_memory_setup();
 }
 
+#ifdef CONFIG_NUMA
+/*
+ * One day, when i386 and x86_64 are fully integrated, this will not
+ * live here.
+ */
+void *x86_cpu_to_node_map_early_ptr;
+int x86_cpu_to_node_map_init[NR_CPUS] = {
+	[0 ... NR_CPUS-1] = NUMA_NO_NODE
+};
+DEFINE_PER_CPU(int, x86_cpu_to_node_map) = NUMA_NO_NODE;
+#endif
+
 /*
  * Determine if we were loaded by an EFI loader.  If so, then we have also been
  * passed the efi memmap, systab, etc., so we should use these data structures
@@ -714,7 +763,7 @@
 	edid_info = boot_params.edid_info;
 	apm_info.bios = boot_params.apm_bios_info;
 	ist_info = boot_params.ist_info;
-	saved_videomode = boot_params.hdr.vid_mode;
+	saved_video_mode = boot_params.hdr.vid_mode;
 	if( boot_params.sys_desc_table.length != 0 ) {
 		set_mca_bus(boot_params.sys_desc_table.table[3] & 0x2);
 		machine_id = boot_params.sys_desc_table.table[0];
@@ -820,6 +869,18 @@
 
 	io_delay_init();
 
+#ifdef CONFIG_X86_SMP
+	/*
+	 * setup to use the early static init tables during kernel startup
+	 * X86_SMP will exclude sub-arches that don't deal well with it.
+	 */
+	x86_cpu_to_apicid_early_ptr = (void *)x86_cpu_to_apicid_init;
+	x86_bios_cpu_apicid_early_ptr = (void *)x86_bios_cpu_apicid_init;
+#ifdef CONFIG_NUMA
+	x86_cpu_to_node_map_early_ptr = (void *)x86_cpu_to_node_map_init;
+#endif
+#endif
+
 #ifdef CONFIG_X86_GENERICARCH
 	generic_apic_probe();
 #endif
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
index f4f7ecf..674ef35 100644
--- a/arch/x86/kernel/setup_64.c
+++ b/arch/x86/kernel/setup_64.c
@@ -58,7 +58,6 @@
 #include <asm/mmu_context.h>
 #include <asm/proto.h>
 #include <asm/setup.h>
-#include <asm/mach_apic.h>
 #include <asm/numa.h>
 #include <asm/sections.h>
 #include <asm/dmi.h>
@@ -66,7 +65,9 @@
 #include <asm/mce.h>
 #include <asm/ds.h>
 #include <asm/topology.h>
+#include <asm/trampoline.h>
 
+#include <mach_apic.h>
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
@@ -248,6 +249,7 @@
 				(unsigned long)(total_mem >> 20));
 		crashk_res.start = crash_base;
 		crashk_res.end   = crash_base + crash_size - 1;
+		insert_resource(&iomem_resource, &crashk_res);
 	}
 }
 #else
@@ -322,6 +324,11 @@
 
 	finish_e820_parsing();
 
+	/* after parse_early_param, so could debug it */
+	insert_resource(&iomem_resource, &code_resource);
+	insert_resource(&iomem_resource, &data_resource);
+	insert_resource(&iomem_resource, &bss_resource);
+
 	early_gart_iommu_check();
 
 	e820_register_active_regions(0, 0, -1UL);
@@ -341,10 +348,12 @@
 
 	check_efer();
 
-	init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
+	max_pfn_mapped = init_memory_mapping(0, (max_pfn_mapped << PAGE_SHIFT));
 	if (efi_enabled)
 		efi_init();
 
+	vsmp_init();
+
 	dmi_scan_machine();
 
 	io_delay_init();
@@ -450,7 +459,7 @@
 	/*
 	 * We trust e820 completely. No explicit ROM probing in memory.
 	 */
-	e820_reserve_resources(&code_resource, &data_resource, &bss_resource);
+	e820_reserve_resources();
 	e820_mark_nosave_regions();
 
 	/* request I/O space for devices used on all i[345]86 PCs */
@@ -552,9 +561,9 @@
 	bits = c->x86_coreid_bits;
 
 	/* Low order bits define the core id (index of core in socket) */
-	c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
-	/* Convert the APIC ID into the socket ID */
-	c->phys_proc_id = phys_pkg_id(bits);
+	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
+	/* Convert the initial APIC ID into the socket ID */
+	c->phys_proc_id = c->initial_apicid >> bits;
 
 #ifdef CONFIG_NUMA
 	node = c->phys_proc_id;
@@ -571,7 +580,7 @@
 		   If that doesn't result in a usable node fall back to the
 		   path for the previous case.  */
 
-		int ht_nodeid = apicid - (cpu_data(0).phys_proc_id << bits);
+		int ht_nodeid = c->initial_apicid;
 
 		if (ht_nodeid >= 0 &&
 		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
@@ -677,7 +686,7 @@
 
 	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
 	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
-	clear_bit(0*32+31, (unsigned long *)&c->x86_capability);
+	clear_cpu_cap(c, 0*32+31);
 
 	/* On C+ stepping K8 rep microcode works well for copy/memset */
 	level = cpuid_eax(1);
@@ -721,6 +730,19 @@
 
 	if (amd_apic_timer_broken())
 		disable_apic_timer = 1;
+
+	if (c == &boot_cpu_data && c->x86 >= 0xf && c->x86 <= 0x11) {
+		unsigned long long tseg;
+
+		/*
+		 * Split up direct mapping around the TSEG SMM area.
+		 * Don't do it for gbpages because there seems very little
+		 * benefit in doing so.
+		 */
+		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg) &&
+		(tseg >> PMD_SHIFT) < (max_pfn_mapped >> (PMD_SHIFT-PAGE_SHIFT)))
+			set_memory_4k((unsigned long)__va(tseg), 1);
+	}
 }
 
 void __cpuinit detect_ht(struct cpuinfo_x86 *c)
@@ -813,7 +835,7 @@
 {
 	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
 	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
-		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 }
 
 static void __cpuinit init_intel(struct cpuinfo_x86 *c)
@@ -856,9 +878,6 @@
 
 	if (c->x86 == 15)
 		c->x86_cache_alignment = c->x86_clflush_size * 2;
-	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
-	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
-		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 	if (c->x86 == 6)
 		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
 	set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
@@ -867,6 +886,32 @@
 	srat_detect_node();
 }
 
+static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c)
+{
+	if (c->x86 == 0x6 && c->x86_model >= 0xf)
+		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
+}
+
+static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
+{
+	/* Cache sizes */
+	unsigned n;
+
+	n = c->extended_cpuid_level;
+	if (n >= 0x80000008) {
+		unsigned eax = cpuid_eax(0x80000008);
+		c->x86_virt_bits = (eax >> 8) & 0xff;
+		c->x86_phys_bits = eax & 0xff;
+	}
+
+	if (c->x86 == 0x6 && c->x86_model >= 0xf) {
+		c->x86_cache_alignment = c->x86_clflush_size * 2;
+		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
+	}
+	set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
+}
+
 static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 {
 	char *v = c->x86_vendor_id;
@@ -875,6 +920,8 @@
 		c->x86_vendor = X86_VENDOR_AMD;
 	else if (!strcmp(v, "GenuineIntel"))
 		c->x86_vendor = X86_VENDOR_INTEL;
+	else if (!strcmp(v, "CentaurHauls"))
+		c->x86_vendor = X86_VENDOR_CENTAUR;
 	else
 		c->x86_vendor = X86_VENDOR_UNKNOWN;
 }
@@ -922,15 +969,16 @@
 			c->x86 += (tfms >> 20) & 0xff;
 		if (c->x86 >= 0x6)
 			c->x86_model += ((tfms >> 16) & 0xF) << 4;
-		if (c->x86_capability[0] & (1<<19))
+		if (test_cpu_cap(c, X86_FEATURE_CLFLSH))
 			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
 	} else {
 		/* Have CPUID level 0 only - unheard of */
 		c->x86 = 4;
 	}
 
+	c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xff;
 #ifdef CONFIG_SMP
-	c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
+	c->phys_proc_id = c->initial_apicid;
 #endif
 	/* AMD-defined flags: level 0x80000001 */
 	xlvl = cpuid_eax(0x80000000);
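
The decode above follows the CPUID leaf-1 EAX layout: stepping in bits 3:0,
model in 7:4, family in 11:8, extended model in 19:16, extended family in
27:20. A hedged sketch of the fold-in rules; the extended-family condition
(family == 0xf) comes from surrounding kernel context that this hunk only
partially shows:

#include <assert.h>

struct sig { unsigned int family, model, stepping; };

static struct sig decode_cpuid1(unsigned int tfms)
{
	struct sig s;

	s.stepping = tfms & 0xf;
	s.model    = (tfms >> 4) & 0xf;
	s.family   = (tfms >> 8) & 0xf;
	if (s.family == 0xf)			/* fold in extended family */
		s.family += (tfms >> 20) & 0xff;
	if (s.family >= 0x6)			/* fold in extended model */
		s.model += ((tfms >> 16) & 0xf) << 4;
	return s;
}

int main(void)
{
	/* Core 2 Duo (Merom): signature 0x06FB */
	struct sig a = decode_cpuid1(0x000006fb);
	assert(a.family == 0x6 && a.model == 0xf && a.stepping == 0xb);

	/* AMD Phenom: family 10h via the extended-family field */
	struct sig b = decode_cpuid1(0x00100f22);
	assert(b.family == 0x10 && b.model == 0x2 && b.stepping == 0x2);
	return 0;
}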
@@ -956,12 +1004,22 @@
 	if (c->extended_cpuid_level >= 0x80000007)
 		c->x86_power = cpuid_edx(0x80000007);
 
+
+	clear_cpu_cap(c, X86_FEATURE_PAT);
+
 	switch (c->x86_vendor) {
 	case X86_VENDOR_AMD:
 		early_init_amd(c);
+		if (c->x86 >= 0xf && c->x86 <= 0x11)
+			set_cpu_cap(c, X86_FEATURE_PAT);
 		break;
 	case X86_VENDOR_INTEL:
 		early_init_intel(c);
+		if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15))
+			set_cpu_cap(c, X86_FEATURE_PAT);
+		break;
+	case X86_VENDOR_CENTAUR:
+		early_init_centaur(c);
 		break;
 	}
 
@@ -999,6 +1057,10 @@
 		init_intel(c);
 		break;
 
+	case X86_VENDOR_CENTAUR:
+		init_centaur(c);
+		break;
+
 	case X86_VENDOR_UNKNOWN:
 	default:
 		display_cacheinfo(c);
@@ -1028,14 +1090,24 @@
 #endif
 	select_idle_routine(c);
 
-	if (c != &boot_cpu_data)
-		mtrr_ap_init();
 #ifdef CONFIG_NUMA
 	numa_add_cpu(smp_processor_id());
 #endif
 
 }
 
+void __cpuinit identify_boot_cpu(void)
+{
+	identify_cpu(&boot_cpu_data);
+}
+
+void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
+{
+	BUG_ON(c == &boot_cpu_data);
+	identify_cpu(c);
+	mtrr_ap_init();
+}
+
 static __init int setup_noclflush(char *arg)
 {
 	setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
@@ -1064,123 +1136,3 @@
 	return 1;
 }
 __setup("clearcpuid=", setup_disablecpuid);
-
-/*
- *	Get CPU information for use by the procfs.
- */
-
-static int show_cpuinfo(struct seq_file *m, void *v)
-{
-	struct cpuinfo_x86 *c = v;
-	int cpu = 0, i;
-
-#ifdef CONFIG_SMP
-	cpu = c->cpu_index;
-#endif
-
-	seq_printf(m, "processor\t: %u\n"
-		   "vendor_id\t: %s\n"
-		   "cpu family\t: %d\n"
-		   "model\t\t: %d\n"
-		   "model name\t: %s\n",
-		   (unsigned)cpu,
-		   c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
-		   c->x86,
-		   (int)c->x86_model,
-		   c->x86_model_id[0] ? c->x86_model_id : "unknown");
-
-	if (c->x86_mask || c->cpuid_level >= 0)
-		seq_printf(m, "stepping\t: %d\n", c->x86_mask);
-	else
-		seq_printf(m, "stepping\t: unknown\n");
-
-	if (cpu_has(c, X86_FEATURE_TSC)) {
-		unsigned int freq = cpufreq_quick_get((unsigned)cpu);
-
-		if (!freq)
-			freq = cpu_khz;
-		seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
-			   freq / 1000, (freq % 1000));
-	}
-
-	/* Cache size */
-	if (c->x86_cache_size >= 0)
-		seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
-
-#ifdef CONFIG_SMP
-	if (smp_num_siblings * c->x86_max_cores > 1) {
-		seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
-		seq_printf(m, "siblings\t: %d\n",
-			       cpus_weight(per_cpu(cpu_core_map, cpu)));
-		seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
-		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
-	}
-#endif
-
-	seq_printf(m,
-		   "fpu\t\t: yes\n"
-		   "fpu_exception\t: yes\n"
-		   "cpuid level\t: %d\n"
-		   "wp\t\t: yes\n"
-		   "flags\t\t:",
-		   c->cpuid_level);
-
-	for (i = 0; i < 32*NCAPINTS; i++)
-		if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
-			seq_printf(m, " %s", x86_cap_flags[i]);
-
-	seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
-		   c->loops_per_jiffy/(500000/HZ),
-		   (c->loops_per_jiffy/(5000/HZ)) % 100);
-
-	if (c->x86_tlbsize > 0)
-		seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
-	seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
-	seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
-
-	seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
-		   c->x86_phys_bits, c->x86_virt_bits);
-
-	seq_printf(m, "power management:");
-	for (i = 0; i < 32; i++) {
-		if (c->x86_power & (1 << i)) {
-			if (i < ARRAY_SIZE(x86_power_flags) &&
-			    x86_power_flags[i])
-				seq_printf(m, "%s%s",
-					   x86_power_flags[i][0]?" ":"",
-					   x86_power_flags[i]);
-			else
-				seq_printf(m, " [%d]", i);
-		}
-	}
-
-	seq_printf(m, "\n\n");
-
-	return 0;
-}
-
-static void *c_start(struct seq_file *m, loff_t *pos)
-{
-	if (*pos == 0)	/* just in case, cpu 0 is not the first */
-		*pos = first_cpu(cpu_online_map);
-	if ((*pos) < NR_CPUS && cpu_online(*pos))
-		return &cpu_data(*pos);
-	return NULL;
-}
-
-static void *c_next(struct seq_file *m, void *v, loff_t *pos)
-{
-	*pos = next_cpu(*pos, cpu_online_map);
-	return c_start(m, pos);
-}
-
-static void c_stop(struct seq_file *m, void *v)
-{
-}
-
-const struct seq_operations cpuinfo_op = {
-	.start = c_start,
-	.next =	c_next,
-	.stop =	c_stop,
-	.show =	show_cpuinfo,
-};
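
amd_detect_cmp() now splits initial_apicid directly: the low x86_coreid_bits
select the core within a socket, and the remaining high bits identify the
socket. A small sketch of the decomposition:

#include <assert.h>

/* Split an initial APIC ID into (socket, core) given how many low-order
 * bits encode the core index, as the patched amd_detect_cmp() does. */
static void apicid_split(unsigned int apicid, unsigned int bits,
			 unsigned int *socket, unsigned int *core)
{
	*core = apicid & ((1u << bits) - 1);
	*socket = apicid >> bits;
}

int main(void)
{
	unsigned int socket, core;

	/* dual-core part, 1 core bit: APIC ID 5 -> socket 2, core 1 */
	apicid_split(5, 1, &socket, &core);
	assert(socket == 2 && core == 1);

	/* quad-core part, 2 core bits: APIC ID 6 -> socket 1, core 2 */
	apicid_split(6, 2, &socket, &core);
	assert(socket == 1 && core == 2);
	return 0;
}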
diff --git a/arch/x86/kernel/sigframe_32.h b/arch/x86/kernel/sigframe.h
similarity index 66%
rename from arch/x86/kernel/sigframe_32.h
rename to arch/x86/kernel/sigframe.h
index 0b22217..72bbb51 100644
--- a/arch/x86/kernel/sigframe_32.h
+++ b/arch/x86/kernel/sigframe.h
@@ -1,5 +1,5 @@
-struct sigframe
-{
+#ifdef CONFIG_X86_32
+struct sigframe {
 	char __user *pretcode;
 	int sig;
 	struct sigcontext sc;
@@ -8,8 +8,7 @@
 	char retcode[8];
 };
 
-struct rt_sigframe
-{
+struct rt_sigframe {
 	char __user *pretcode;
 	int sig;
 	struct siginfo __user *pinfo;
@@ -19,3 +18,10 @@
 	struct _fpstate fpstate;
 	char retcode[8];
 };
+#else
+struct rt_sigframe {
+	char __user *pretcode;
+	struct ucontext uc;
+	struct siginfo info;
+};
+#endif
diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c
index 0157a6f..f1b1179 100644
--- a/arch/x86/kernel/signal_32.c
+++ b/arch/x86/kernel/signal_32.c
@@ -4,32 +4,44 @@
  *  1997-11-28  Modified for POSIX.1b signals by Richard Henderson
  *  2000-06-20  Pentium III FXSR, SSE support by Gareth Hughes
  */
+#include <linux/list.h>
 
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/errno.h>
-#include <linux/wait.h>
-#include <linux/unistd.h>
-#include <linux/stddef.h>
 #include <linux/personality.h>
-#include <linux/suspend.h>
-#include <linux/ptrace.h>
-#include <linux/elf.h>
 #include <linux/binfmts.h>
+#include <linux/suspend.h>
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/signal.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/elf.h>
+#include <linux/smp.h>
+#include <linux/mm.h>
+
 #include <asm/processor.h>
 #include <asm/ucontext.h>
 #include <asm/uaccess.h>
 #include <asm/i387.h>
 #include <asm/vdso.h>
-#include "sigframe_32.h"
 
-#define DEBUG_SIG 0
+#include "sigframe.h"
 
 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
 
+#define __FIX_EFLAGS	(X86_EFLAGS_AC | X86_EFLAGS_OF | \
+			 X86_EFLAGS_DF | X86_EFLAGS_TF | X86_EFLAGS_SF | \
+			 X86_EFLAGS_ZF | X86_EFLAGS_AF | X86_EFLAGS_PF | \
+			 X86_EFLAGS_CF)
+
+#ifdef CONFIG_X86_32
+# define FIX_EFLAGS	(__FIX_EFLAGS | X86_EFLAGS_RF)
+#else
+# define FIX_EFLAGS	__FIX_EFLAGS
+#endif
+
 /*
  * Atomically swap in the new signal mask, and wait for a signal.
  */
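
FIX_EFLAGS is the whitelist of EFLAGS bits that restore_sigcontext() lets a
signal frame modify; everything else keeps its live value through a masked
merge. A sketch with a deliberately tiny toy whitelist (the real mask is the
full __FIX_EFLAGS set defined above):

#include <assert.h>

#define X86_EFLAGS_CF 0x00000001	/* carry: user-restorable here */
#define X86_EFLAGS_TF 0x00000100	/* trap: user-restorable here */
#define X86_EFLAGS_IF 0x00000200	/* interrupt enable: kernel-owned */

#define FIX_EFLAGS (X86_EFLAGS_CF | X86_EFLAGS_TF)	/* toy whitelist */

int main(void)
{
	unsigned long flags = X86_EFLAGS_IF;	/* live flags at sigreturn */
	unsigned long tmpflags = X86_EFLAGS_CF;	/* flags from the frame */

	/* the merge from restore_sigcontext(): whitelisted bits come
	 * from the user frame, all other bits keep their value */
	flags = (flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);

	assert(flags & X86_EFLAGS_CF);	/* user-provided CF accepted */
	assert(flags & X86_EFLAGS_IF);	/* IF untouched: the frame cannot
					   flip kernel-owned bits */
	return 0;
}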
@@ -46,10 +58,11 @@
 	current->state = TASK_INTERRUPTIBLE;
 	schedule();
 	set_thread_flag(TIF_RESTORE_SIGMASK);
+
 	return -ERESTARTNOHAND;
 }
 
-asmlinkage int 
+asmlinkage int
 sys_sigaction(int sig, const struct old_sigaction __user *act,
 	      struct old_sigaction __user *oact)
 {
@@ -58,10 +71,12 @@
 
 	if (act) {
 		old_sigset_t mask;
+
 		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
 		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
 		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
 			return -EFAULT;
+
 		__get_user(new_ka.sa.sa_flags, &act->sa_flags);
 		__get_user(mask, &act->sa_mask);
 		siginitset(&new_ka.sa.sa_mask, mask);
@@ -74,6 +89,7 @@
 		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
 		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
 			return -EFAULT;
+
 		__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
 		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
 	}
@@ -81,10 +97,12 @@
 	return ret;
 }
 
-asmlinkage int
-sys_sigaltstack(unsigned long bx)
+asmlinkage int sys_sigaltstack(unsigned long bx)
 {
-	/* This is needed to make gcc realize it doesn't own the "struct pt_regs" */
+	/*
+	 * This is needed to make gcc realize it doesn't own the
+	 * "struct pt_regs"
+	 */
 	struct pt_regs *regs = (struct pt_regs *)&bx;
 	const stack_t __user *uss = (const stack_t __user *)bx;
 	stack_t __user *uoss = (stack_t __user *)regs->cx;
@@ -96,9 +114,9 @@
 /*
  * Do a signal return; undo the signal stack.
  */
-
 static int
-restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *peax)
+restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
+		   unsigned long *pax)
 {
 	unsigned int err = 0;
 
@@ -120,37 +138,29 @@
 #define GET_SEG(seg)							\
 	{ unsigned short tmp;						\
 	  err |= __get_user(tmp, &sc->seg);				\
-	  loadsegment(seg,tmp); }
-
-#define	FIX_EFLAGS	(X86_EFLAGS_AC | X86_EFLAGS_RF |		 \
-			 X86_EFLAGS_OF | X86_EFLAGS_DF |		 \
-			 X86_EFLAGS_TF | X86_EFLAGS_SF | X86_EFLAGS_ZF | \
-			 X86_EFLAGS_AF | X86_EFLAGS_PF | X86_EFLAGS_CF)
+	  loadsegment(seg, tmp); }
 
 	GET_SEG(gs);
 	COPY_SEG(fs);
 	COPY_SEG(es);
 	COPY_SEG(ds);
-	COPY(di);
-	COPY(si);
-	COPY(bp);
-	COPY(sp);
-	COPY(bx);
-	COPY(dx);
-	COPY(cx);
-	COPY(ip);
+	COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
+	COPY(dx); COPY(cx); COPY(ip);
 	COPY_SEG_STRICT(cs);
 	COPY_SEG_STRICT(ss);
-	
+
 	{
 		unsigned int tmpflags;
+
 		err |= __get_user(tmpflags, &sc->flags);
-		regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
+		regs->flags = (regs->flags & ~FIX_EFLAGS) |
+						(tmpflags & FIX_EFLAGS);
 		regs->orig_ax = -1;		/* disable syscall checks */
 	}
 
 	{
-		struct _fpstate __user * buf;
+		struct _fpstate __user *buf;
+
 		err |= __get_user(buf, &sc->fpstate);
 		if (buf) {
 			if (!access_ok(VERIFY_READ, buf, sizeof(*buf)))
@@ -158,6 +168,7 @@
 			err |= restore_i387(buf);
 		} else {
 			struct task_struct *me = current;
+
 			if (used_math()) {
 				clear_fpu(me);
 				clear_used_math();
@@ -165,24 +176,26 @@
 		}
 	}
 
-	err |= __get_user(*peax, &sc->ax);
+	err |= __get_user(*pax, &sc->ax);
 	return err;
 
 badframe:
 	return 1;
 }
 
-asmlinkage int sys_sigreturn(unsigned long __unused)
+asmlinkage unsigned long sys_sigreturn(unsigned long __unused)
 {
-	struct pt_regs *regs = (struct pt_regs *) &__unused;
-	struct sigframe __user *frame = (struct sigframe __user *)(regs->sp - 8);
+	struct sigframe __user *frame;
+	struct pt_regs *regs;
+	unsigned long ax;
 	sigset_t set;
-	int ax;
+
+	regs = (struct pt_regs *) &__unused;
+	frame = (struct sigframe __user *)(regs->sp - 8);
 
 	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
 		goto badframe;
-	if (__get_user(set.sig[0], &frame->sc.oldmask)
-	    || (_NSIG_WORDS > 1
+	if (__get_user(set.sig[0], &frame->sc.oldmask) || (_NSIG_WORDS > 1
 		&& __copy_from_user(&set.sig[1], &frame->extramask,
 				    sizeof(frame->extramask))))
 		goto badframe;
@@ -192,33 +205,35 @@
 	current->blocked = set;
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
-	
+
 	if (restore_sigcontext(regs, &frame->sc, &ax))
 		goto badframe;
 	return ax;
 
 badframe:
 	if (show_unhandled_signals && printk_ratelimit()) {
-		printk("%s%s[%d] bad frame in sigreturn frame:%p ip:%lx"
-		       " sp:%lx oeax:%lx",
+		printk(KERN_INFO "%s%s[%d] bad frame in sigreturn frame:"
+			"%p ip:%lx sp:%lx oeax:%lx",
 		    task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG,
 		    current->comm, task_pid_nr(current), frame, regs->ip,
 		    regs->sp, regs->orig_ax);
 		print_vma_addr(" in ", regs->ip);
-		printk("\n");
+		printk(KERN_CONT "\n");
 	}
 
 	force_sig(SIGSEGV, current);
+
 	return 0;
-}	
+}
 
 asmlinkage int sys_rt_sigreturn(unsigned long __unused)
 {
-	struct pt_regs *regs = (struct pt_regs *) &__unused;
-	struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(regs->sp - 4);
+	struct pt_regs *regs = (struct pt_regs *)&__unused;
+	struct rt_sigframe __user *frame;
+	unsigned long ax;
 	sigset_t set;
-	int ax;
 
+	frame = (struct rt_sigframe __user *)(regs->sp - sizeof(long));
 	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
 		goto badframe;
 	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
@@ -229,7 +244,7 @@
 	current->blocked = set;
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
-	
+
 	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
 		goto badframe;
 
@@ -241,12 +256,11 @@
 badframe:
 	force_sig(SIGSEGV, current);
 	return 0;
-}	
+}
 
 /*
  * Set up a signal frame.
  */
-
 static int
 setup_sigcontext(struct sigcontext __user *sc, struct _fpstate __user *fpstate,
 		 struct pt_regs *regs, unsigned long mask)
@@ -277,9 +291,9 @@
 
 	tmp = save_i387(fpstate);
 	if (tmp < 0)
-	  err = 1;
+		err = 1;
 	else
-	  err |= __put_user(tmp ? fpstate : NULL, &sc->fpstate);
+		err |= __put_user(tmp ? fpstate : NULL, &sc->fpstate);
 
 	/* non-iBCS2 extensions.. */
 	err |= __put_user(mask, &sc->oldmask);
@@ -292,7 +306,7 @@
  * Determine which stack to use..
  */
 static inline void __user *
-get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
+get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
 {
 	unsigned long sp;
 
@@ -310,32 +324,30 @@
 	if (ka->sa.sa_flags & SA_ONSTACK) {
 		if (sas_ss_flags(sp) == 0)
 			sp = current->sas_ss_sp + current->sas_ss_size;
-	}
-
-	/* This is the legacy signal stack switching. */
-	else if ((regs->ss & 0xffff) != __USER_DS &&
-		 !(ka->sa.sa_flags & SA_RESTORER) &&
-		 ka->sa.sa_restorer) {
-		sp = (unsigned long) ka->sa.sa_restorer;
+	} else {
+		/* This is the legacy signal stack switching. */
+		if ((regs->ss & 0xffff) != __USER_DS &&
+			!(ka->sa.sa_flags & SA_RESTORER) &&
+				ka->sa.sa_restorer)
+			sp = (unsigned long) ka->sa.sa_restorer;
 	}
 
 	sp -= frame_size;
-	/* Align the stack pointer according to the i386 ABI,
-	 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
+	/*
+	 * Align the stack pointer according to the i386 ABI,
+	 * i.e. so that on function entry ((sp + 4) & 15) == 0.
+	 */
 	sp = ((sp + 4) & -16ul) - 4;
+
 	return (void __user *) sp;
 }
 
-/* These symbols are defined with the addresses in the vsyscall page.
-   See vsyscall-sigreturn.S.  */
-extern void __user __kernel_sigreturn;
-extern void __user __kernel_rt_sigreturn;
-
-static int setup_frame(int sig, struct k_sigaction *ka,
-		       sigset_t *set, struct pt_regs * regs)
+static int
+setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
+	    struct pt_regs *regs)
 {
-	void __user *restorer;
 	struct sigframe __user *frame;
+	void __user *restorer;
 	int err = 0;
 	int usig;
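
The alignment step in get_sigframe() rounds the stack down so that, per the
i386 ABI, (sp + 4) is 16-byte aligned at handler entry. A sketch that checks
the invariant over a range of starting pointers:

#include <assert.h>

/* the arithmetic from get_sigframe() */
static unsigned long align_sigframe(unsigned long sp, unsigned long frame_size)
{
	sp -= frame_size;
	sp = ((sp + 4) & -16ul) - 4;
	return sp;
}

int main(void)
{
	for (unsigned long sp = 0xbffff000ul; sp < 0xbffff020ul; sp++)
		assert(((align_sigframe(sp, 200) + 4) & 15) == 0);
	return 0;
}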
 
@@ -365,7 +377,7 @@
 			goto give_sigsegv;
 	}
 
-	if (current->binfmt->hasvdso)
+	if (current->mm->context.vdso)
 		restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
 	else
 		restorer = &frame->retcode;
@@ -374,9 +386,9 @@
 
 	/* Set up to return from userspace.  */
 	err |= __put_user(restorer, &frame->pretcode);
-	 
+
 	/*
-	 * This is popl %eax ; movl $,%eax ; int $0x80
+	 * This is popl %eax ; movl $__NR_sigreturn, %eax ; int $0x80
 	 *
 	 * WE DO NOT USE IT ANY MORE! It's only left here for historical
 	 * reasons and because gdb uses it as a signature to notice
@@ -390,11 +402,11 @@
 		goto give_sigsegv;
 
 	/* Set up registers for signal handler */
-	regs->sp = (unsigned long) frame;
-	regs->ip = (unsigned long) ka->sa.sa_handler;
-	regs->ax = (unsigned long) sig;
-	regs->dx = (unsigned long) 0;
-	regs->cx = (unsigned long) 0;
+	regs->sp = (unsigned long)frame;
+	regs->ip = (unsigned long)ka->sa.sa_handler;
+	regs->ax = (unsigned long)sig;
+	regs->dx = 0;
+	regs->cx = 0;
 
 	regs->ds = __USER_DS;
 	regs->es = __USER_DS;
@@ -407,15 +419,10 @@
 	 * The tracer may want to single-step inside the
 	 * handler too.
 	 */
-	regs->flags &= ~(TF_MASK | X86_EFLAGS_DF);
+	regs->flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_DF);
 	if (test_thread_flag(TIF_SINGLESTEP))
 		ptrace_notify(SIGTRAP);
 
-#if DEBUG_SIG
-	printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
-		current->comm, current->pid, frame, regs->ip, frame->pretcode);
-#endif
-
 	return 0;
 
 give_sigsegv:
@@ -424,10 +431,10 @@
 }
 
 static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
-			   sigset_t *set, struct pt_regs * regs)
+			  sigset_t *set, struct pt_regs *regs)
 {
-	void __user *restorer;
 	struct rt_sigframe __user *frame;
+	void __user *restorer;
 	int err = 0;
 	int usig;
 
@@ -457,7 +464,7 @@
 			  &frame->uc.uc_stack.ss_flags);
 	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
 	err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpstate,
-			        regs, set->sig[0]);
+				regs, set->sig[0]);
 	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
 	if (err)
 		goto give_sigsegv;
@@ -467,9 +474,9 @@
 	if (ka->sa.sa_flags & SA_RESTORER)
 		restorer = ka->sa.sa_restorer;
 	err |= __put_user(restorer, &frame->pretcode);
-	 
+
 	/*
-	 * This is movl $,%ax ; int $0x80
+	 * This is movl $__NR_rt_sigreturn, %ax ; int $0x80
 	 *
 	 * WE DO NOT USE IT ANY MORE! It's only left here for historical
 	 * reasons and because gdb uses it as a signature to notice
@@ -483,11 +490,11 @@
 		goto give_sigsegv;
 
 	/* Set up registers for signal handler */
-	regs->sp = (unsigned long) frame;
-	regs->ip = (unsigned long) ka->sa.sa_handler;
-	regs->ax = (unsigned long) usig;
-	regs->dx = (unsigned long) &frame->info;
-	regs->cx = (unsigned long) &frame->uc;
+	regs->sp = (unsigned long)frame;
+	regs->ip = (unsigned long)ka->sa.sa_handler;
+	regs->ax = (unsigned long)usig;
+	regs->dx = (unsigned long)&frame->info;
+	regs->cx = (unsigned long)&frame->uc;
 
 	regs->ds = __USER_DS;
 	regs->es = __USER_DS;
@@ -500,15 +507,10 @@
 	 * The tracer may want to single-step inside the
 	 * handler too.
 	 */
-	regs->flags &= ~(TF_MASK | X86_EFLAGS_DF);
+	regs->flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_DF);
 	if (test_thread_flag(TIF_SINGLESTEP))
 		ptrace_notify(SIGTRAP);
 
-#if DEBUG_SIG
-	printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
-		current->comm, current->pid, frame, regs->ip, frame->pretcode);
-#endif
-
 	return 0;
 
 give_sigsegv:
@@ -517,33 +519,33 @@
 }
 
 /*
- * OK, we're invoking a handler
- */	
-
+ * OK, we're invoking a handler:
+ */
 static int
 handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
-	      sigset_t *oldset,	struct pt_regs * regs)
+	      sigset_t *oldset, struct pt_regs *regs)
 {
 	int ret;
 
 	/* Are we from a system call? */
-	if (regs->orig_ax >= 0) {
+	if ((long)regs->orig_ax >= 0) {
 		/* If so, check system call restarting.. */
 		switch (regs->ax) {
-		        case -ERESTART_RESTARTBLOCK:
-			case -ERESTARTNOHAND:
+		case -ERESTART_RESTARTBLOCK:
+		case -ERESTARTNOHAND:
+			regs->ax = -EINTR;
+			break;
+
+		case -ERESTARTSYS:
+			if (!(ka->sa.sa_flags & SA_RESTART)) {
 				regs->ax = -EINTR;
 				break;
-
-			case -ERESTARTSYS:
-				if (!(ka->sa.sa_flags & SA_RESTART)) {
-					regs->ax = -EINTR;
-					break;
-				}
-			/* fallthrough */
-			case -ERESTARTNOINTR:
-				regs->ax = regs->orig_ax;
-				regs->ip -= 2;
+			}
+		/* fallthrough */
+		case -ERESTARTNOINTR:
+			regs->ax = regs->orig_ax;
+			regs->ip -= 2;
+			break;
 		}
 	}
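
The reindented restart switch (and its twin in signal_64.c below) maps an
interrupted syscall's return code to either -EINTR or a replay, where replay
means restoring ax from orig_ax and backing ip up over the two-byte syscall
instruction. A hedged sketch of just the decision logic; the ERESTART*
values are restated locally since they live in kernel-internal errno
headers:

#include <assert.h>
#include <errno.h>

#define ERESTARTSYS		512
#define ERESTARTNOINTR		513
#define ERESTARTNOHAND		514
#define ERESTART_RESTARTBLOCK	516

/* Returns 1 if the syscall should be replayed (ax = orig_ax, ip -= 2),
 * otherwise stores the value the interrupted syscall will return. */
static int restart_decision(long ret, int sa_restart, long *new_ax)
{
	switch (ret) {
	case -ERESTART_RESTARTBLOCK:
	case -ERESTARTNOHAND:
		*new_ax = -EINTR;
		return 0;
	case -ERESTARTSYS:
		if (!sa_restart) {
			*new_ax = -EINTR;
			return 0;
		}
		/* fallthrough */
	case -ERESTARTNOINTR:
		return 1;
	default:
		return 0;
	}
}

int main(void)
{
	long ax = 0;

	assert(restart_decision(-ERESTARTNOHAND, 1, &ax) == 0 && ax == -EINTR);
	assert(restart_decision(-ERESTARTSYS, 0, &ax) == 0 && ax == -EINTR);
	assert(restart_decision(-ERESTARTSYS, 1, &ax) == 1);	/* SA_RESTART */
	assert(restart_decision(-ERESTARTNOINTR, 0, &ax) == 1);
	return 0;
}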
 
@@ -561,16 +563,17 @@
 	else
 		ret = setup_frame(sig, ka, oldset, regs);
 
-	if (ret == 0) {
-		spin_lock_irq(&current->sighand->siglock);
-		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
-		if (!(ka->sa.sa_flags & SA_NODEFER))
-			sigaddset(&current->blocked,sig);
-		recalc_sigpending();
-		spin_unlock_irq(&current->sighand->siglock);
-	}
+	if (ret)
+		return ret;
 
-	return ret;
+	spin_lock_irq(&current->sighand->siglock);
+	sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
+	if (!(ka->sa.sa_flags & SA_NODEFER))
+		sigaddset(&current->blocked, sig);
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+
+	return 0;
 }
 
 /*
@@ -580,18 +583,17 @@
  */
 static void do_signal(struct pt_regs *regs)
 {
+	struct k_sigaction ka;
 	siginfo_t info;
 	int signr;
-	struct k_sigaction ka;
 	sigset_t *oldset;
 
 	/*
-	 * We want the common case to go fast, which
-	 * is why we may in certain cases get here from
-	 * kernel mode. Just return without doing anything
- 	 * if so.  vm86 regs switched out by assembly code
- 	 * before reaching here, so testing against kernel
- 	 * CS suffices.
+	 * We want the common case to go fast, which is why we may in certain
+	 * cases get here from kernel mode. Just return without doing anything
+	 * if so.
+	 * X86_32: vm86 regs switched out by assembly code before reaching
+	 * here, so testing against kernel CS suffices.
 	 */
 	if (!user_mode(regs))
 		return;
@@ -603,29 +605,31 @@
 
 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 	if (signr > 0) {
-		/* Re-enable any watchpoints before delivering the
+		/*
+		 * Re-enable any watchpoints before delivering the
 		 * signal to user space. The processor register will
 		 * have been cleared if the watchpoint triggered
 		 * inside the kernel.
 		 */
-		if (unlikely(current->thread.debugreg7))
+		if (current->thread.debugreg7)
 			set_debugreg(current->thread.debugreg7, 7);
 
-		/* Whee!  Actually deliver the signal.  */
+		/* Whee! Actually deliver the signal.  */
 		if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
-			/* a signal was successfully delivered; the saved
+			/*
+			 * a signal was successfully delivered; the saved
 			 * sigmask will have been stored in the signal frame,
 			 * and will be restored by sigreturn, so we can simply
-			 * clear the TIF_RESTORE_SIGMASK flag */
+			 * clear the TIF_RESTORE_SIGMASK flag
+			 */
 			if (test_thread_flag(TIF_RESTORE_SIGMASK))
 				clear_thread_flag(TIF_RESTORE_SIGMASK);
 		}
-
 		return;
 	}
 
 	/* Did we come from a system call? */
-	if (regs->orig_ax >= 0) {
+	if ((long)regs->orig_ax >= 0) {
 		/* Restart the system call - no handlers present */
 		switch (regs->ax) {
 		case -ERESTARTNOHAND:
@@ -642,8 +646,10 @@
 		}
 	}
 
-	/* if there's no signal to deliver, we just put the saved sigmask
-	 * back */
+	/*
+	 * If there's no signal to deliver, we just put the saved sigmask
+	 * back.
+	 */
 	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
 		clear_thread_flag(TIF_RESTORE_SIGMASK);
 		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
@@ -654,13 +660,12 @@
  * notification of userspace execution resumption
  * - triggered by the TIF_WORK_MASK flags
  */
-__attribute__((regparm(3)))
-void do_notify_resume(struct pt_regs *regs, void *_unused,
-		      __u32 thread_info_flags)
+void
+do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
 {
 	/* Pending single-step? */
 	if (thread_info_flags & _TIF_SINGLESTEP) {
-		regs->flags |= TF_MASK;
+		regs->flags |= X86_EFLAGS_TF;
 		clear_thread_flag(TIF_SINGLESTEP);
 	}
 
@@ -670,6 +675,6 @@
 
 	if (thread_info_flags & _TIF_HRTICK_RESCHED)
 		hrtick_resched();
-	
+
 	clear_thread_flag(TIF_IRET);
 }
diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c
index 1c83e51..827179c 100644
--- a/arch/x86/kernel/signal_64.c
+++ b/arch/x86/kernel/signal_64.c
@@ -19,17 +19,28 @@
 #include <linux/stddef.h>
 #include <linux/personality.h>
 #include <linux/compiler.h>
+#include <asm/processor.h>
 #include <asm/ucontext.h>
 #include <asm/uaccess.h>
 #include <asm/i387.h>
 #include <asm/proto.h>
 #include <asm/ia32_unistd.h>
 #include <asm/mce.h>
-
-/* #define DEBUG_SIG 1 */
+#include "sigframe.h"
 
 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
 
+#define __FIX_EFLAGS	(X86_EFLAGS_AC | X86_EFLAGS_OF | \
+			 X86_EFLAGS_DF | X86_EFLAGS_TF | X86_EFLAGS_SF | \
+			 X86_EFLAGS_ZF | X86_EFLAGS_AF | X86_EFLAGS_PF | \
+			 X86_EFLAGS_CF)
+
+#ifdef CONFIG_X86_32
+# define FIX_EFLAGS	(__FIX_EFLAGS | X86_EFLAGS_RF)
+#else
+# define FIX_EFLAGS	__FIX_EFLAGS
+#endif
+
 int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
                sigset_t *set, struct pt_regs * regs); 
 int ia32_setup_frame(int sig, struct k_sigaction *ka,
@@ -46,16 +57,9 @@
 /*
  * Do a signal return; undo the signal stack.
  */
-
-struct rt_sigframe
-{
-	char __user *pretcode;
-	struct ucontext uc;
-	struct siginfo info;
-};
-
 static int
-restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, unsigned long *prax)
+restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
+		   unsigned long *pax)
 {
 	unsigned int err = 0;
 
@@ -87,7 +91,7 @@
 	{
 		unsigned int tmpflags;
 		err |= __get_user(tmpflags, &sc->flags);
-		regs->flags = (regs->flags & ~0x40DD5) | (tmpflags & 0x40DD5);
+		regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
 		regs->orig_ax = -1;		/* disable syscall checks */
 	}
 
@@ -108,7 +112,7 @@
 		}
 	}
 
-	err |= __get_user(*prax, &sc->ax);
+	err |= __get_user(*pax, &sc->ax);
 	return err;
 
 badframe:
@@ -121,13 +125,11 @@
 	sigset_t set;
 	unsigned long ax;
 
-	frame = (struct rt_sigframe __user *)(regs->sp - 8);
-	if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) {
+	frame = (struct rt_sigframe __user *)(regs->sp - sizeof(long));
+	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
 		goto badframe;
-	} 
-	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) { 
+	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
 		goto badframe;
-	} 
 
 	sigdelsetmask(&set, ~_BLOCKABLE);
 	spin_lock_irq(&current->sighand->siglock);
@@ -138,10 +140,6 @@
 	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
 		goto badframe;
 
-#ifdef DEBUG_SIG
-	printk("%d sigreturn ip:%lx sp:%lx frame:%p ax:%lx\n",current->pid,regs->ip,regs->sp,frame,ax);
-#endif
-
 	if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT)
 		goto badframe;
 
@@ -270,10 +268,6 @@
 	if (err)
 		goto give_sigsegv;
 
-#ifdef DEBUG_SIG
-	printk("%d old ip %lx old sp %lx old ax %lx\n", current->pid,regs->ip,regs->sp,regs->ax);
-#endif
-
 	/* Set up registers for signal handler */
 	regs->di = sig;
 	/* In case the signal handler was declared without prototypes */ 
@@ -298,10 +292,6 @@
 	regs->flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_DF);
 	if (test_thread_flag(TIF_SINGLESTEP))
 		ptrace_notify(SIGTRAP);
-#ifdef DEBUG_SIG
-	printk("SIG deliver (%s:%d): sp=%p pc=%lx ra=%p\n",
-		current->comm, current->pid, frame, regs->ip, frame->pretcode);
-#endif
 
 	return 0;
 
@@ -345,35 +335,29 @@
 
 static int
 handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
-		sigset_t *oldset, struct pt_regs *regs)
+	      sigset_t *oldset, struct pt_regs *regs)
 {
 	int ret;
 
-#ifdef DEBUG_SIG
-	printk("handle_signal pid:%d sig:%lu ip:%lx sp:%lx regs=%p\n",
-		current->pid, sig,
-		regs->ip, regs->sp, regs);
-#endif
-
 	/* Are we from a system call? */
 	if (current_syscall(regs) >= 0) {
 		/* If so, check system call restarting.. */
 		switch (current_syscall_ret(regs)) {
-		        case -ERESTART_RESTARTBLOCK:
-			case -ERESTARTNOHAND:
+		case -ERESTART_RESTARTBLOCK:
+		case -ERESTARTNOHAND:
+			regs->ax = -EINTR;
+			break;
+
+		case -ERESTARTSYS:
+			if (!(ka->sa.sa_flags & SA_RESTART)) {
 				regs->ax = -EINTR;
 				break;
-
-			case -ERESTARTSYS:
-				if (!(ka->sa.sa_flags & SA_RESTART)) {
-					regs->ax = -EINTR;
-					break;
-				}
-				/* fallthrough */
-			case -ERESTARTNOINTR:
-				regs->ax = regs->orig_ax;
-				regs->ip -= 2;
-				break;
+			}
+		/* fallthrough */
+		case -ERESTARTNOINTR:
+			regs->ax = regs->orig_ax;
+			regs->ip -= 2;
+			break;
 		}
 	}
 
@@ -420,10 +404,11 @@
 	sigset_t *oldset;
 
 	/*
-	 * We want the common case to go fast, which
-	 * is why we may in certain cases get here from
-	 * kernel mode. Just return without doing anything
+	 * We want the common case to go fast, which is why we may in certain
+	 * cases get here from kernel mode. Just return without doing anything
 	 * if so.
+	 * X86_32: vm86 regs switched out by assembly code before reaching
+	 * here, so testing against kernel CS suffices.
 	 */
 	if (!user_mode(regs))
 		return;
@@ -473,22 +458,19 @@
 		}
 	}
 
-	/* if there's no signal to deliver, we just put the saved sigmask
-	   back. */
+	/*
+	 * If there's no signal to deliver, we just put the saved sigmask
+	 * back.
+	 */
 	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
 		clear_thread_flag(TIF_RESTORE_SIGMASK);
 		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
 	}
 }
 
-void
-do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
+void do_notify_resume(struct pt_regs *regs, void *unused,
+		      __u32 thread_info_flags)
 {
-#ifdef DEBUG_SIG
-	printk("do_notify_resume flags:%x ip:%lx sp:%lx caller:%p pending:%x\n",
-	       thread_info_flags, regs->ip, regs->sp, __builtin_return_address(0),signal_pending(current));
-#endif
-	       
 	/* Pending single-step? */
 	if (thread_info_flags & _TIF_SINGLESTEP) {
 		regs->flags |= X86_EFLAGS_TF;
@@ -502,7 +484,7 @@
 #endif /* CONFIG_X86_MCE */
 
 	/* deal with pending signal delivery */
-	if (thread_info_flags & (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK))
+	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
 		do_signal(regs);
 
 	if (thread_info_flags & _TIF_HRTICK_RESCHED)
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
new file mode 100644
index 0000000..8f75893
--- /dev/null
+++ b/arch/x86/kernel/smp.c
@@ -0,0 +1,343 @@
+/*
+ *	Intel SMP support routines.
+ *
+ *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
+ *	(c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
+ *      (c) 2002,2003 Andi Kleen, SuSE Labs.
+ *
+ *	i386 and x86_64 integration by Glauber Costa <gcosta@redhat.com>
+ *
+ *	This code is released under the GNU General Public License version 2 or
+ *	later.
+ */
+
+#include <linux/init.h>
+
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/kernel_stat.h>
+#include <linux/mc146818rtc.h>
+#include <linux/cache.h>
+#include <linux/interrupt.h>
+#include <linux/cpu.h>
+
+#include <asm/mtrr.h>
+#include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
+#include <asm/proto.h>
+#include <mach_ipi.h>
+#include <mach_apic.h>
+
+/*
+ *	Some notes on x86 processor bugs affecting SMP operation:
+ *
+ *	Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
+ *	The Linux implications for SMP are handled as follows:
+ *
+ *	Pentium III / [Xeon]
+ *		None of the E1AP-E3AP errata are visible to the user.
+ *
+ *	E1AP.	see PII A1AP
+ *	E2AP.	see PII A2AP
+ *	E3AP.	see PII A3AP
+ *
+ *	Pentium II / [Xeon]
+ *		None of the A1AP-A3AP errata are visible to the user.
+ *
+ *	A1AP.	see PPro 1AP
+ *	A2AP.	see PPro 2AP
+ *	A3AP.	see PPro 7AP
+ *
+ *	Pentium Pro
+ *		None of 1AP-9AP errata are visible to the normal user,
+ *	except occasional delivery of 'spurious interrupt' as trap #15.
+ *	This is very rare and a non-problem.
+ *
+ *	1AP.	Linux maps APIC as non-cacheable
+ *	2AP.	worked around in hardware
+ *	3AP.	fixed in C0 and above steppings microcode update.
+ *		Linux does not use excessive STARTUP_IPIs.
+ *	4AP.	worked around in hardware
+ *	5AP.	symmetric IO mode (normal Linux operation) not affected.
+ *		'noapic' mode has vector 0xf filled out properly.
+ *	6AP.	'noapic' mode might be affected - fixed in later steppings
+ *	7AP.	We do not assume writes to the LVT deasserting IRQs
+ *	8AP.	We do not enable low power mode (deep sleep) during MP bootup
+ *	9AP.	We do not use mixed mode
+ *
+ *	Pentium
+ *		There is a marginal case where REP MOVS on 100MHz SMP
+ *	machines with B stepping processors can fail. XXX should provide
+ *	an L1cache=Writethrough or L1cache=off option.
+ *
+ *		B stepping CPUs may hang. There are hardware workarounds
+ *	for this. We warn about it in case your board doesn't have the
+ *	workarounds. Basically that's so I can tell anyone with a B stepping
+ *	CPU and SMP problems "tough".
+ *
+ *	Specific items [From Pentium Processor Specification Update]
+ *
+ *	1AP.	Linux doesn't use remote read
+ *	2AP.	Linux doesn't trust APIC errors
+ *	3AP.	We work around this
+ *	4AP.	Linux never generated 3 interrupts of the same priority
+ *		to cause a lost local interrupt.
+ *	5AP.	Remote read is never used
+ *	6AP.	not affected - worked around in hardware
+ *	7AP.	not affected - worked around in hardware
+ *	8AP.	worked around in hardware - we get explicit CS errors if not
+ *	9AP.	only 'noapic' mode affected. Might generate spurious
+ *		interrupts, we log only the first one and count the
+ *		rest silently.
+ *	10AP.	not affected - worked around in hardware
+ *	11AP.	Linux reads the APIC between writes to avoid this, as per
+ *		the documentation. Make sure you preserve this as it affects
+ *		the C stepping chips too.
+ *	12AP.	not affected - worked around in hardware
+ *	13AP.	not affected - worked around in hardware
+ *	14AP.	we always deassert INIT during bootup
+ *	15AP.	not affected - worked around in hardware
+ *	16AP.	not affected - worked around in hardware
+ *	17AP.	not affected - worked around in hardware
+ *	18AP.	not affected - worked around in hardware
+ *	19AP.	not affected - worked around in BIOS
+ *
+ *	If this sounds worrying believe me these bugs are either ___RARE___,
+ *	or are signal timing bugs worked around in hardware and there's
+ *	about nothing of note with C stepping upwards.
+ */
+
+/*
+ * This function sends a 'reschedule' IPI to another CPU.
+ * It goes straight through and wastes no time serializing
+ * anything. Worst case is that we lose a reschedule ...
+ */
+static void native_smp_send_reschedule(int cpu)
+{
+	if (unlikely(cpu_is_offline(cpu))) {
+		WARN_ON(1);
+		return;
+	}
+	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
+}
+
+/*
+ * Structure and data for smp_call_function(). This is designed to minimise
+ * static memory requirements. It also looks cleaner.
+ */
+static DEFINE_SPINLOCK(call_lock);
+
+struct call_data_struct {
+	void (*func) (void *info);
+	void *info;
+	atomic_t started;
+	atomic_t finished;
+	int wait;
+};
+
+void lock_ipi_call_lock(void)
+{
+	spin_lock_irq(&call_lock);
+}
+
+void unlock_ipi_call_lock(void)
+{
+	spin_unlock_irq(&call_lock);
+}
+
+static struct call_data_struct *call_data;
+
+static void __smp_call_function(void (*func) (void *info), void *info,
+				int nonatomic, int wait)
+{
+	struct call_data_struct data;
+	int cpus = num_online_cpus() - 1;
+
+	if (!cpus)
+		return;
+
+	data.func = func;
+	data.info = info;
+	atomic_set(&data.started, 0);
+	data.wait = wait;
+	if (wait)
+		atomic_set(&data.finished, 0);
+
+	call_data = &data;
+	mb();
+
+	/* Send a message to all other CPUs and wait for them to respond */
+	send_IPI_allbutself(CALL_FUNCTION_VECTOR);
+
+	/* Wait for response */
+	while (atomic_read(&data.started) != cpus)
+		cpu_relax();
+
+	if (wait)
+		while (atomic_read(&data.finished) != cpus)
+			cpu_relax();
+}
+
+
+/**
+ * smp_call_function_mask(): Run a function on a set of other CPUs.
+ * @mask: The set of cpus to run on.  Must not include the current cpu.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+static int
+native_smp_call_function_mask(cpumask_t mask,
+			      void (*func)(void *), void *info,
+			      int wait)
+{
+	struct call_data_struct data;
+	cpumask_t allbutself;
+	int cpus;
+
+	/* Can deadlock when called with interrupts disabled */
+	WARN_ON(irqs_disabled());
+
+	/* Holding any lock stops cpus from going down. */
+	spin_lock(&call_lock);
+
+	allbutself = cpu_online_map;
+	cpu_clear(smp_processor_id(), allbutself);
+
+	cpus_and(mask, mask, allbutself);
+	cpus = cpus_weight(mask);
+
+	if (!cpus) {
+		spin_unlock(&call_lock);
+		return 0;
+	}
+
+	data.func = func;
+	data.info = info;
+	atomic_set(&data.started, 0);
+	data.wait = wait;
+	if (wait)
+		atomic_set(&data.finished, 0);
+
+	call_data = &data;
+	wmb();
+
+	/* Send a message to other CPUs */
+	if (cpus_equal(mask, allbutself))
+		send_IPI_allbutself(CALL_FUNCTION_VECTOR);
+	else
+		send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
+
+	/* Wait for response */
+	while (atomic_read(&data.started) != cpus)
+		cpu_relax();
+
+	if (wait)
+		while (atomic_read(&data.finished) != cpus)
+			cpu_relax();
+	spin_unlock(&call_lock);
+
+	return 0;
+}
+
+static void stop_this_cpu(void *dummy)
+{
+	local_irq_disable();
+	/*
+	 * Remove this CPU:
+	 */
+	cpu_clear(smp_processor_id(), cpu_online_map);
+	disable_local_APIC();
+	if (hlt_works(smp_processor_id()))
+		for (;;)
+			halt();
+	for (;;);
+}
+
+/*
+ * This function calls the 'stop' function on all other CPUs in the system.
+ */
+static void native_smp_send_stop(void)
+{
+	int nolock;
+	unsigned long flags;
+
+	if (reboot_force)
+		return;
+
+	/* Don't deadlock on the call lock in panic */
+	nolock = !spin_trylock(&call_lock);
+	local_irq_save(flags);
+	__smp_call_function(stop_this_cpu, NULL, 0, 0);
+	if (!nolock)
+		spin_unlock(&call_lock);
+	disable_local_APIC();
+	local_irq_restore(flags);
+}
+
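The nolock dance in native_smp_send_stop() above is easy to misread: in a panic the call lock may already be held (possibly by this very CPU), so the code try-locks, sends the stop IPIs regardless, and unlocks only if the trylock actually succeeded. A minimal userspace sketch of the same idiom, with pthread_mutex_trylock standing in for spin_trylock (names illustrative, not kernel API):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t call_lock = PTHREAD_MUTEX_INITIALIZER;

    /*
     * Models native_smp_send_stop(): take the lock if we can, do the
     * work either way, and release only what we actually acquired.
     */
    static void send_stop(void)
    {
    	int nolock = (pthread_mutex_trylock(&call_lock) != 0);

    	puts("telling other cpus to stop");	/* __smp_call_function(...) */
    	if (!nolock)
    		pthread_mutex_unlock(&call_lock);
    }

    int main(void)
    {
    	send_stop();
    	return 0;
    }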
+/*
+ * Reschedule call back. Nothing to do,
+ * all the work is done automatically when
+ * we return from the interrupt.
+ */
+void smp_reschedule_interrupt(struct pt_regs *regs)
+{
+	ack_APIC_irq();
+#ifdef CONFIG_X86_32
+	__get_cpu_var(irq_stat).irq_resched_count++;
+#else
+	add_pda(irq_resched_count, 1);
+#endif
+}
+
+void smp_call_function_interrupt(struct pt_regs *regs)
+{
+	void (*func) (void *info) = call_data->func;
+	void *info = call_data->info;
+	int wait = call_data->wait;
+
+	ack_APIC_irq();
+	/*
+	 * Notify initiating CPU that I've grabbed the data and am
+	 * about to execute the function
+	 */
+	mb();
+	atomic_inc(&call_data->started);
+	/*
+	 * At this point the info structure may be out of scope unless wait==1
+	 */
+	irq_enter();
+	(*func)(info);
+#ifdef CONFIG_X86_32
+	__get_cpu_var(irq_stat).irq_call_count++;
+#else
+	add_pda(irq_call_count, 1);
+#endif
+	irq_exit();
+
+	if (wait) {
+		mb();
+		atomic_inc(&call_data->finished);
+	}
+}
+
+struct smp_ops smp_ops = {
+	.smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
+	.smp_prepare_cpus = native_smp_prepare_cpus,
+	.cpu_up = native_cpu_up,
+	.smp_cpus_done = native_smp_cpus_done,
+
+	.smp_send_stop = native_smp_send_stop,
+	.smp_send_reschedule = native_smp_send_reschedule,
+	.smp_call_function_mask = native_smp_call_function_mask,
+};
+EXPORT_SYMBOL_GPL(smp_ops);
+
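For readers new to this file, the call-function path above is a two-counter handshake: the initiator publishes call_data, sends the IPI, and spins until started (and, if waiting, finished) reach the target count, while each smp_call_function_interrupt() handler bumps started before running the function and finished after. A minimal userspace model of that handshake, assuming C11 atomics and pthreads stand in for atomic_t and IPIs (all names illustrative, not kernel API):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define NCPUS 4

    struct call_data {
    	void (*func)(void *);
    	void *info;
    	atomic_int started;
    	atomic_int finished;
    	int wait;
    };

    static struct call_data *call_data;

    static void say_hello(void *info)
    {
    	printf("cpu running: %s\n", (char *)info);
    }

    /* Models smp_call_function_interrupt() on a target CPU. */
    static void *ipi_handler(void *arg)
    {
    	struct call_data *d = call_data;
    	void (*func)(void *) = d->func;
    	void *info = d->info;
    	int wait = d->wait;

    	/* "I've grabbed the data"; past this point the initiator's
    	 * on-stack data may go out of scope unless wait == 1. */
    	atomic_fetch_add(&d->started, 1);
    	func(info);
    	if (wait)
    		atomic_fetch_add(&d->finished, 1);
    	return NULL;
    }

    int main(void)
    {
    	struct call_data data = {
    		.func = say_hello, .info = "hello", .wait = 1,
    	};
    	pthread_t t[NCPUS - 1];
    	int i;

    	atomic_init(&data.started, 0);
    	atomic_init(&data.finished, 0);
    	call_data = &data;	/* the kernel's wmb() orders this publish */

    	for (i = 0; i < NCPUS - 1; i++)	/* models send_IPI_allbutself() */
    		pthread_create(&t[i], NULL, ipi_handler, NULL);

    	while (atomic_load(&data.started) != NCPUS - 1)
    		;		/* cpu_relax() loop */
    	while (atomic_load(&data.finished) != NCPUS - 1)
    		;
    	for (i = 0; i < NCPUS - 1; i++)
    		pthread_join(t[i], NULL);
    	return 0;
    }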
diff --git a/arch/x86/kernel/smp_32.c b/arch/x86/kernel/smp_32.c
deleted file mode 100644
index dc0cde9..0000000
--- a/arch/x86/kernel/smp_32.c
+++ /dev/null
@@ -1,712 +0,0 @@
-/*
- *	Intel SMP support routines.
- *
- *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
- *	(c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
- *
- *	This code is released under the GNU General Public License version 2 or
- *	later.
- */
-
-#include <linux/init.h>
-
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/spinlock.h>
-#include <linux/kernel_stat.h>
-#include <linux/mc146818rtc.h>
-#include <linux/cache.h>
-#include <linux/interrupt.h>
-#include <linux/cpu.h>
-#include <linux/module.h>
-
-#include <asm/mtrr.h>
-#include <asm/tlbflush.h>
-#include <asm/mmu_context.h>
-#include <mach_apic.h>
-
-/*
- *	Some notes on x86 processor bugs affecting SMP operation:
- *
- *	Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
- *	The Linux implications for SMP are handled as follows:
- *
- *	Pentium III / [Xeon]
- *		None of the E1AP-E3AP errata are visible to the user.
- *
- *	E1AP.	see PII A1AP
- *	E2AP.	see PII A2AP
- *	E3AP.	see PII A3AP
- *
- *	Pentium II / [Xeon]
- *		None of the A1AP-A3AP errata are visible to the user.
- *
- *	A1AP.	see PPro 1AP
- *	A2AP.	see PPro 2AP
- *	A3AP.	see PPro 7AP
- *
- *	Pentium Pro
- *		None of 1AP-9AP errata are visible to the normal user,
- *	except occasional delivery of 'spurious interrupt' as trap #15.
- *	This is very rare and a non-problem.
- *
- *	1AP.	Linux maps APIC as non-cacheable
- *	2AP.	worked around in hardware
- *	3AP.	fixed in C0 and above steppings microcode update.
- *		Linux does not use excessive STARTUP_IPIs.
- *	4AP.	worked around in hardware
- *	5AP.	symmetric IO mode (normal Linux operation) not affected.
- *		'noapic' mode has vector 0xf filled out properly.
- *	6AP.	'noapic' mode might be affected - fixed in later steppings
- *	7AP.	We do not assume writes to the LVT deassering IRQs
- *	8AP.	We do not enable low power mode (deep sleep) during MP bootup
- *	9AP.	We do not use mixed mode
- *
- *	Pentium
- *		There is a marginal case where REP MOVS on 100MHz SMP
- *	machines with B stepping processors can fail. XXX should provide
- *	an L1cache=Writethrough or L1cache=off option.
- *
- *		B stepping CPUs may hang. There are hardware work arounds
- *	for this. We warn about it in case your board doesn't have the work
- *	arounds. Basically that's so I can tell anyone with a B stepping
- *	CPU and SMP problems "tough".
- *
- *	Specific items [From Pentium Processor Specification Update]
- *
- *	1AP.	Linux doesn't use remote read
- *	2AP.	Linux doesn't trust APIC errors
- *	3AP.	We work around this
- *	4AP.	Linux never generated 3 interrupts of the same priority
- *		to cause a lost local interrupt.
- *	5AP.	Remote read is never used
- *	6AP.	not affected - worked around in hardware
- *	7AP.	not affected - worked around in hardware
- *	8AP.	worked around in hardware - we get explicit CS errors if not
- *	9AP.	only 'noapic' mode affected. Might generate spurious
- *		interrupts, we log only the first one and count the
- *		rest silently.
- *	10AP.	not affected - worked around in hardware
- *	11AP.	Linux reads the APIC between writes to avoid this, as per
- *		the documentation. Make sure you preserve this as it affects
- *		the C stepping chips too.
- *	12AP.	not affected - worked around in hardware
- *	13AP.	not affected - worked around in hardware
- *	14AP.	we always deassert INIT during bootup
- *	15AP.	not affected - worked around in hardware
- *	16AP.	not affected - worked around in hardware
- *	17AP.	not affected - worked around in hardware
- *	18AP.	not affected - worked around in hardware
- *	19AP.	not affected - worked around in BIOS
- *
- *	If this sounds worrying believe me these bugs are either ___RARE___,
- *	or are signal timing bugs worked around in hardware and there's
- *	about nothing of note with C stepping upwards.
- */
-
-DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0, };
-
-/*
- * the following functions deal with sending IPIs between CPUs.
- *
- * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
- */
-
-static inline int __prepare_ICR (unsigned int shortcut, int vector)
-{
-	unsigned int icr = shortcut | APIC_DEST_LOGICAL;
-
-	switch (vector) {
-	default:
-		icr |= APIC_DM_FIXED | vector;
-		break;
-	case NMI_VECTOR:
-		icr |= APIC_DM_NMI;
-		break;
-	}
-	return icr;
-}
-
-static inline int __prepare_ICR2 (unsigned int mask)
-{
-	return SET_APIC_DEST_FIELD(mask);
-}
-
-void __send_IPI_shortcut(unsigned int shortcut, int vector)
-{
-	/*
-	 * Subtle. In the case of the 'never do double writes' workaround
-	 * we have to lock out interrupts to be safe.  As we don't care
-	 * of the value read we use an atomic rmw access to avoid costly
-	 * cli/sti.  Otherwise we use an even cheaper single atomic write
-	 * to the APIC.
-	 */
-	unsigned int cfg;
-
-	/*
-	 * Wait for idle.
-	 */
-	apic_wait_icr_idle();
-
-	/*
-	 * No need to touch the target chip field
-	 */
-	cfg = __prepare_ICR(shortcut, vector);
-
-	/*
-	 * Send the IPI. The write to APIC_ICR fires this off.
-	 */
-	apic_write_around(APIC_ICR, cfg);
-}
-
-void send_IPI_self(int vector)
-{
-	__send_IPI_shortcut(APIC_DEST_SELF, vector);
-}
-
-/*
- * This is used to send an IPI with no shorthand notation (the destination is
- * specified in bits 56 to 63 of the ICR).
- */
-static inline void __send_IPI_dest_field(unsigned long mask, int vector)
-{
-	unsigned long cfg;
-
-	/*
-	 * Wait for idle.
-	 */
-	if (unlikely(vector == NMI_VECTOR))
-		safe_apic_wait_icr_idle();
-	else
-		apic_wait_icr_idle();
-		
-	/*
-	 * prepare target chip field
-	 */
-	cfg = __prepare_ICR2(mask);
-	apic_write_around(APIC_ICR2, cfg);
-		
-	/*
-	 * program the ICR 
-	 */
-	cfg = __prepare_ICR(0, vector);
-			
-	/*
-	 * Send the IPI. The write to APIC_ICR fires this off.
-	 */
-	apic_write_around(APIC_ICR, cfg);
-}
-
-/*
- * This is only used on smaller machines.
- */
-void send_IPI_mask_bitmask(cpumask_t cpumask, int vector)
-{
-	unsigned long mask = cpus_addr(cpumask)[0];
-	unsigned long flags;
-
-	local_irq_save(flags);
-	WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]);
-	__send_IPI_dest_field(mask, vector);
-	local_irq_restore(flags);
-}
-
-void send_IPI_mask_sequence(cpumask_t mask, int vector)
-{
-	unsigned long flags;
-	unsigned int query_cpu;
-
-	/*
-	 * Hack. The clustered APIC addressing mode doesn't allow us to send 
-	 * to an arbitrary mask, so I do a unicasts to each CPU instead. This 
-	 * should be modified to do 1 message per cluster ID - mbligh
-	 */ 
-
-	local_irq_save(flags);
-	for_each_possible_cpu(query_cpu) {
-		if (cpu_isset(query_cpu, mask)) {
-			__send_IPI_dest_field(cpu_to_logical_apicid(query_cpu),
-					      vector);
-		}
-	}
-	local_irq_restore(flags);
-}
-
-#include <mach_ipi.h> /* must come after the send_IPI functions above for inlining */
-
-/*
- *	Smarter SMP flushing macros. 
- *		c/o Linus Torvalds.
- *
- *	These mean you can really definitely utterly forget about
- *	writing to user space from interrupts. (Its not allowed anyway).
- *
- *	Optimizations Manfred Spraul <manfred@colorfullife.com>
- */
-
-static cpumask_t flush_cpumask;
-static struct mm_struct * flush_mm;
-static unsigned long flush_va;
-static DEFINE_SPINLOCK(tlbstate_lock);
-
-/*
- * We cannot call mmdrop() because we are in interrupt context,
- * instead update mm->cpu_vm_mask.
- *
- * We need to reload %cr3 since the page tables may be going
- * away from under us..
- */
-void leave_mm(int cpu)
-{
-	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
-		BUG();
-	cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
-	load_cr3(swapper_pg_dir);
-}
-EXPORT_SYMBOL_GPL(leave_mm);
-
-/*
- *
- * The flush IPI assumes that a thread switch happens in this order:
- * [cpu0: the cpu that switches]
- * 1) switch_mm() either 1a) or 1b)
- * 1a) thread switch to a different mm
- * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
- * 	Stop ipi delivery for the old mm. This is not synchronized with
- * 	the other cpus, but smp_invalidate_interrupt ignore flush ipis
- * 	for the wrong mm, and in the worst case we perform a superfluous
- * 	tlb flush.
- * 1a2) set cpu_tlbstate to TLBSTATE_OK
- * 	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
- *	was in lazy tlb mode.
- * 1a3) update cpu_tlbstate[].active_mm
- * 	Now cpu0 accepts tlb flushes for the new mm.
- * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
- * 	Now the other cpus will send tlb flush ipis.
- * 1a4) change cr3.
- * 1b) thread switch without mm change
- *	cpu_tlbstate[].active_mm is correct, cpu0 already handles
- *	flush ipis.
- * 1b1) set cpu_tlbstate to TLBSTATE_OK
- * 1b2) test_and_set the cpu bit in cpu_vm_mask.
- * 	Atomically set the bit [other cpus will start sending flush ipis],
- * 	and test the bit.
- * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
- * 2) switch %%esp, ie current
- *
- * The interrupt must handle 2 special cases:
- * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
- * - the cpu performs speculative tlb reads, i.e. even if the cpu only
- *   runs in kernel space, the cpu could load tlb entries for user space
- *   pages.
- *
- * The good news is that cpu_tlbstate is local to each cpu, no
- * write/read ordering problems.
- */
-
-/*
- * TLB flush IPI:
- *
- * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
- * 2) Leave the mm if we are in the lazy tlb mode.
- */
-
-void smp_invalidate_interrupt(struct pt_regs *regs)
-{
-	unsigned long cpu;
-
-	cpu = get_cpu();
-
-	if (!cpu_isset(cpu, flush_cpumask))
-		goto out;
-		/* 
-		 * This was a BUG() but until someone can quote me the
-		 * line from the intel manual that guarantees an IPI to
-		 * multiple CPUs is retried _only_ on the erroring CPUs
-		 * its staying as a return
-		 *
-		 * BUG();
-		 */
-		 
-	if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
-		if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
-			if (flush_va == TLB_FLUSH_ALL)
-				local_flush_tlb();
-			else
-				__flush_tlb_one(flush_va);
-		} else
-			leave_mm(cpu);
-	}
-	ack_APIC_irq();
-	smp_mb__before_clear_bit();
-	cpu_clear(cpu, flush_cpumask);
-	smp_mb__after_clear_bit();
-out:
-	put_cpu_no_resched();
-	__get_cpu_var(irq_stat).irq_tlb_count++;
-}
-
-void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
-			     unsigned long va)
-{
-	cpumask_t cpumask = *cpumaskp;
-
-	/*
-	 * A couple of (to be removed) sanity checks:
-	 *
-	 * - current CPU must not be in mask
-	 * - mask must exist :)
-	 */
-	BUG_ON(cpus_empty(cpumask));
-	BUG_ON(cpu_isset(smp_processor_id(), cpumask));
-	BUG_ON(!mm);
-
-#ifdef CONFIG_HOTPLUG_CPU
-	/* If a CPU which we ran on has gone down, OK. */
-	cpus_and(cpumask, cpumask, cpu_online_map);
-	if (unlikely(cpus_empty(cpumask)))
-		return;
-#endif
-
-	/*
-	 * i'm not happy about this global shared spinlock in the
-	 * MM hot path, but we'll see how contended it is.
-	 * AK: x86-64 has a faster method that could be ported.
-	 */
-	spin_lock(&tlbstate_lock);
-	
-	flush_mm = mm;
-	flush_va = va;
-	cpus_or(flush_cpumask, cpumask, flush_cpumask);
-	/*
-	 * We have to send the IPI only to
-	 * CPUs affected.
-	 */
-	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);
-
-	while (!cpus_empty(flush_cpumask))
-		/* nothing. lockup detection does not belong here */
-		cpu_relax();
-
-	flush_mm = NULL;
-	flush_va = 0;
-	spin_unlock(&tlbstate_lock);
-}
-	
-void flush_tlb_current_task(void)
-{
-	struct mm_struct *mm = current->mm;
-	cpumask_t cpu_mask;
-
-	preempt_disable();
-	cpu_mask = mm->cpu_vm_mask;
-	cpu_clear(smp_processor_id(), cpu_mask);
-
-	local_flush_tlb();
-	if (!cpus_empty(cpu_mask))
-		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
-	preempt_enable();
-}
-
-void flush_tlb_mm (struct mm_struct * mm)
-{
-	cpumask_t cpu_mask;
-
-	preempt_disable();
-	cpu_mask = mm->cpu_vm_mask;
-	cpu_clear(smp_processor_id(), cpu_mask);
-
-	if (current->active_mm == mm) {
-		if (current->mm)
-			local_flush_tlb();
-		else
-			leave_mm(smp_processor_id());
-	}
-	if (!cpus_empty(cpu_mask))
-		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
-
-	preempt_enable();
-}
-
-void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
-{
-	struct mm_struct *mm = vma->vm_mm;
-	cpumask_t cpu_mask;
-
-	preempt_disable();
-	cpu_mask = mm->cpu_vm_mask;
-	cpu_clear(smp_processor_id(), cpu_mask);
-
-	if (current->active_mm == mm) {
-		if(current->mm)
-			__flush_tlb_one(va);
-		 else
-		 	leave_mm(smp_processor_id());
-	}
-
-	if (!cpus_empty(cpu_mask))
-		flush_tlb_others(cpu_mask, mm, va);
-
-	preempt_enable();
-}
-EXPORT_SYMBOL(flush_tlb_page);
-
-static void do_flush_tlb_all(void* info)
-{
-	unsigned long cpu = smp_processor_id();
-
-	__flush_tlb_all();
-	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
-		leave_mm(cpu);
-}
-
-void flush_tlb_all(void)
-{
-	on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
-}
-
-/*
- * this function sends a 'reschedule' IPI to another CPU.
- * it goes straight through and wastes no time serializing
- * anything. Worst case is that we lose a reschedule ...
- */
-static void native_smp_send_reschedule(int cpu)
-{
-	WARN_ON(cpu_is_offline(cpu));
-	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
-}
-
-/*
- * Structure and data for smp_call_function(). This is designed to minimise
- * static memory requirements. It also looks cleaner.
- */
-static DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
-	void (*func) (void *info);
-	void *info;
-	atomic_t started;
-	atomic_t finished;
-	int wait;
-};
-
-void lock_ipi_call_lock(void)
-{
-	spin_lock_irq(&call_lock);
-}
-
-void unlock_ipi_call_lock(void)
-{
-	spin_unlock_irq(&call_lock);
-}
-
-static struct call_data_struct *call_data;
-
-static void __smp_call_function(void (*func) (void *info), void *info,
-				int nonatomic, int wait)
-{
-	struct call_data_struct data;
-	int cpus = num_online_cpus() - 1;
-
-	if (!cpus)
-		return;
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	call_data = &data;
-	mb();
-	
-	/* Send a message to all other CPUs and wait for them to respond */
-	send_IPI_allbutself(CALL_FUNCTION_VECTOR);
-
-	/* Wait for response */
-	while (atomic_read(&data.started) != cpus)
-		cpu_relax();
-
-	if (wait)
-		while (atomic_read(&data.finished) != cpus)
-			cpu_relax();
-}
-
-
-/**
- * smp_call_function_mask(): Run a function on a set of other CPUs.
- * @mask: The set of cpus to run on.  Must not include the current cpu.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
-  * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-static int
-native_smp_call_function_mask(cpumask_t mask,
-			      void (*func)(void *), void *info,
-			      int wait)
-{
-	struct call_data_struct data;
-	cpumask_t allbutself;
-	int cpus;
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	/* Holding any lock stops cpus from going down. */
-	spin_lock(&call_lock);
-
-	allbutself = cpu_online_map;
-	cpu_clear(smp_processor_id(), allbutself);
-
-	cpus_and(mask, mask, allbutself);
-	cpus = cpus_weight(mask);
-
-	if (!cpus) {
-		spin_unlock(&call_lock);
-		return 0;
-	}
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	call_data = &data;
-	mb();
-
-	/* Send a message to other CPUs */
-	if (cpus_equal(mask, allbutself))
-		send_IPI_allbutself(CALL_FUNCTION_VECTOR);
-	else
-		send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
-
-	/* Wait for response */
-	while (atomic_read(&data.started) != cpus)
-		cpu_relax();
-
-	if (wait)
-		while (atomic_read(&data.finished) != cpus)
-			cpu_relax();
-	spin_unlock(&call_lock);
-
-	return 0;
-}
-
-static void stop_this_cpu (void * dummy)
-{
-	local_irq_disable();
-	/*
-	 * Remove this CPU:
-	 */
-	cpu_clear(smp_processor_id(), cpu_online_map);
-	disable_local_APIC();
-	if (cpu_data(smp_processor_id()).hlt_works_ok)
-		for(;;) halt();
-	for (;;);
-}
-
-/*
- * this function calls the 'stop' function on all other CPUs in the system.
- */
-
-static void native_smp_send_stop(void)
-{
-	/* Don't deadlock on the call lock in panic */
-	int nolock = !spin_trylock(&call_lock);
-	unsigned long flags;
-
-	local_irq_save(flags);
-	__smp_call_function(stop_this_cpu, NULL, 0, 0);
-	if (!nolock)
-		spin_unlock(&call_lock);
-	disable_local_APIC();
-	local_irq_restore(flags);
-}
-
-/*
- * Reschedule call back. Nothing to do,
- * all the work is done automatically when
- * we return from the interrupt.
- */
-void smp_reschedule_interrupt(struct pt_regs *regs)
-{
-	ack_APIC_irq();
-	__get_cpu_var(irq_stat).irq_resched_count++;
-}
-
-void smp_call_function_interrupt(struct pt_regs *regs)
-{
-	void (*func) (void *info) = call_data->func;
-	void *info = call_data->info;
-	int wait = call_data->wait;
-
-	ack_APIC_irq();
-	/*
-	 * Notify initiating CPU that I've grabbed the data and am
-	 * about to execute the function
-	 */
-	mb();
-	atomic_inc(&call_data->started);
-	/*
-	 * At this point the info structure may be out of scope unless wait==1
-	 */
-	irq_enter();
-	(*func)(info);
-	__get_cpu_var(irq_stat).irq_call_count++;
-	irq_exit();
-
-	if (wait) {
-		mb();
-		atomic_inc(&call_data->finished);
-	}
-}
-
-static int convert_apicid_to_cpu(int apic_id)
-{
-	int i;
-
-	for_each_possible_cpu(i) {
-		if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
-			return i;
-	}
-	return -1;
-}
-
-int safe_smp_processor_id(void)
-{
-	int apicid, cpuid;
-
-	if (!boot_cpu_has(X86_FEATURE_APIC))
-		return 0;
-
-	apicid = hard_smp_processor_id();
-	if (apicid == BAD_APICID)
-		return 0;
-
-	cpuid = convert_apicid_to_cpu(apicid);
-
-	return cpuid >= 0 ? cpuid : 0;
-}
-
-struct smp_ops smp_ops = {
-	.smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
-	.smp_prepare_cpus = native_smp_prepare_cpus,
-	.cpu_up = native_cpu_up,
-	.smp_cpus_done = native_smp_cpus_done,
-
-	.smp_send_stop = native_smp_send_stop,
-	.smp_send_reschedule = native_smp_send_reschedule,
-	.smp_call_function_mask = native_smp_call_function_mask,
-};
-EXPORT_SYMBOL_GPL(smp_ops);
diff --git a/arch/x86/kernel/smp_64.c b/arch/x86/kernel/smp_64.c
deleted file mode 100644
index 2fd74b0..0000000
--- a/arch/x86/kernel/smp_64.c
+++ /dev/null
@@ -1,530 +0,0 @@
-/*
- *	Intel SMP support routines.
- *
- *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
- *	(c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
- *      (c) 2002,2003 Andi Kleen, SuSE Labs.
- *
- *	This code is released under the GNU General Public License version 2 or
- *	later.
- */
-
-#include <linux/init.h>
-
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/spinlock.h>
-#include <linux/smp.h>
-#include <linux/kernel_stat.h>
-#include <linux/mc146818rtc.h>
-#include <linux/interrupt.h>
-
-#include <asm/mtrr.h>
-#include <asm/pgalloc.h>
-#include <asm/tlbflush.h>
-#include <asm/mach_apic.h>
-#include <asm/mmu_context.h>
-#include <asm/proto.h>
-#include <asm/apicdef.h>
-#include <asm/idle.h>
-
-/*
- *	Smarter SMP flushing macros.
- *		c/o Linus Torvalds.
- *
- *	These mean you can really definitely utterly forget about
- *	writing to user space from interrupts. (Its not allowed anyway).
- *
- *	Optimizations Manfred Spraul <manfred@colorfullife.com>
- *
- *	More scalable flush, from Andi Kleen
- *
- *	To avoid global state use 8 different call vectors.
- *	Each CPU uses a specific vector to trigger flushes on other
- *	CPUs. Depending on the received vector the target CPUs look into
- *	the right per cpu variable for the flush data.
- *
- *	With more than 8 CPUs they are hashed to the 8 available
- *	vectors. The limited global vector space forces us to this right now.
- *	In future when interrupts are split into per CPU domains this could be
- *	fixed, at the cost of triggering multiple IPIs in some cases.
- */
-
-union smp_flush_state {
-	struct {
-		cpumask_t flush_cpumask;
-		struct mm_struct *flush_mm;
-		unsigned long flush_va;
-		spinlock_t tlbstate_lock;
-	};
-	char pad[SMP_CACHE_BYTES];
-} ____cacheline_aligned;
-
-/* State is put into the per CPU data section, but padded
-   to a full cache line because other CPUs can access it and we don't
-   want false sharing in the per cpu data segment. */
-static DEFINE_PER_CPU(union smp_flush_state, flush_state);
-
-/*
- * We cannot call mmdrop() because we are in interrupt context,
- * instead update mm->cpu_vm_mask.
- */
-void leave_mm(int cpu)
-{
-	if (read_pda(mmu_state) == TLBSTATE_OK)
-		BUG();
-	cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
-	load_cr3(swapper_pg_dir);
-}
-EXPORT_SYMBOL_GPL(leave_mm);
-
-/*
- *
- * The flush IPI assumes that a thread switch happens in this order:
- * [cpu0: the cpu that switches]
- * 1) switch_mm() either 1a) or 1b)
- * 1a) thread switch to a different mm
- * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
- *	Stop ipi delivery for the old mm. This is not synchronized with
- *	the other cpus, but smp_invalidate_interrupt ignore flush ipis
- *	for the wrong mm, and in the worst case we perform a superfluous
- *	tlb flush.
- * 1a2) set cpu mmu_state to TLBSTATE_OK
- *	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
- *	was in lazy tlb mode.
- * 1a3) update cpu active_mm
- *	Now cpu0 accepts tlb flushes for the new mm.
- * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
- *	Now the other cpus will send tlb flush ipis.
- * 1a4) change cr3.
- * 1b) thread switch without mm change
- *	cpu active_mm is correct, cpu0 already handles
- *	flush ipis.
- * 1b1) set cpu mmu_state to TLBSTATE_OK
- * 1b2) test_and_set the cpu bit in cpu_vm_mask.
- *	Atomically set the bit [other cpus will start sending flush ipis],
- *	and test the bit.
- * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
- * 2) switch %%esp, ie current
- *
- * The interrupt must handle 2 special cases:
- * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
- * - the cpu performs speculative tlb reads, i.e. even if the cpu only
- *   runs in kernel space, the cpu could load tlb entries for user space
- *   pages.
- *
- * The good news is that cpu mmu_state is local to each cpu, no
- * write/read ordering problems.
- */
-
-/*
- * TLB flush IPI:
- *
- * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
- * 2) Leave the mm if we are in the lazy tlb mode.
- *
- * Interrupts are disabled.
- */
-
-asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
-{
-	int cpu;
-	int sender;
-	union smp_flush_state *f;
-
-	cpu = smp_processor_id();
-	/*
-	 * orig_rax contains the negated interrupt vector.
-	 * Use that to determine where the sender put the data.
-	 */
-	sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
-	f = &per_cpu(flush_state, sender);
-
-	if (!cpu_isset(cpu, f->flush_cpumask))
-		goto out;
-		/*
-		 * This was a BUG() but until someone can quote me the
-		 * line from the intel manual that guarantees an IPI to
-		 * multiple CPUs is retried _only_ on the erroring CPUs
-		 * its staying as a return
-		 *
-		 * BUG();
-		 */
-
-	if (f->flush_mm == read_pda(active_mm)) {
-		if (read_pda(mmu_state) == TLBSTATE_OK) {
-			if (f->flush_va == TLB_FLUSH_ALL)
-				local_flush_tlb();
-			else
-				__flush_tlb_one(f->flush_va);
-		} else
-			leave_mm(cpu);
-	}
-out:
-	ack_APIC_irq();
-	cpu_clear(cpu, f->flush_cpumask);
-	add_pda(irq_tlb_count, 1);
-}
-
-void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
-			     unsigned long va)
-{
-	int sender;
-	union smp_flush_state *f;
-	cpumask_t cpumask = *cpumaskp;
-
-	/* Caller has disabled preemption */
-	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
-	f = &per_cpu(flush_state, sender);
-
-	/*
-	 * Could avoid this lock when
-	 * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
-	 * probably not worth checking this for a cache-hot lock.
-	 */
-	spin_lock(&f->tlbstate_lock);
-
-	f->flush_mm = mm;
-	f->flush_va = va;
-	cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);
-
-	/*
-	 * We have to send the IPI only to
-	 * CPUs affected.
-	 */
-	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);
-
-	while (!cpus_empty(f->flush_cpumask))
-		cpu_relax();
-
-	f->flush_mm = NULL;
-	f->flush_va = 0;
-	spin_unlock(&f->tlbstate_lock);
-}
-
-int __cpuinit init_smp_flush(void)
-{
-	int i;
-
-	for_each_cpu_mask(i, cpu_possible_map) {
-		spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);
-	}
-	return 0;
-}
-core_initcall(init_smp_flush);
-
-void flush_tlb_current_task(void)
-{
-	struct mm_struct *mm = current->mm;
-	cpumask_t cpu_mask;
-
-	preempt_disable();
-	cpu_mask = mm->cpu_vm_mask;
-	cpu_clear(smp_processor_id(), cpu_mask);
-
-	local_flush_tlb();
-	if (!cpus_empty(cpu_mask))
-		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
-	preempt_enable();
-}
-
-void flush_tlb_mm (struct mm_struct * mm)
-{
-	cpumask_t cpu_mask;
-
-	preempt_disable();
-	cpu_mask = mm->cpu_vm_mask;
-	cpu_clear(smp_processor_id(), cpu_mask);
-
-	if (current->active_mm == mm) {
-		if (current->mm)
-			local_flush_tlb();
-		else
-			leave_mm(smp_processor_id());
-	}
-	if (!cpus_empty(cpu_mask))
-		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
-
-	preempt_enable();
-}
-
-void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
-{
-	struct mm_struct *mm = vma->vm_mm;
-	cpumask_t cpu_mask;
-
-	preempt_disable();
-	cpu_mask = mm->cpu_vm_mask;
-	cpu_clear(smp_processor_id(), cpu_mask);
-
-	if (current->active_mm == mm) {
-		if(current->mm)
-			__flush_tlb_one(va);
-		else
-			leave_mm(smp_processor_id());
-	}
-
-	if (!cpus_empty(cpu_mask))
-		flush_tlb_others(cpu_mask, mm, va);
-
-	preempt_enable();
-}
-
-static void do_flush_tlb_all(void* info)
-{
-	unsigned long cpu = smp_processor_id();
-
-	__flush_tlb_all();
-	if (read_pda(mmu_state) == TLBSTATE_LAZY)
-		leave_mm(cpu);
-}
-
-void flush_tlb_all(void)
-{
-	on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
-}
-
-/*
- * this function sends a 'reschedule' IPI to another CPU.
- * it goes straight through and wastes no time serializing
- * anything. Worst case is that we lose a reschedule ...
- */
-
-void smp_send_reschedule(int cpu)
-{
-	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
-}
-
-/*
- * Structure and data for smp_call_function(). This is designed to minimise
- * static memory requirements. It also looks cleaner.
- */
-static DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
-	void (*func) (void *info);
-	void *info;
-	atomic_t started;
-	atomic_t finished;
-	int wait;
-};
-
-static struct call_data_struct * call_data;
-
-void lock_ipi_call_lock(void)
-{
-	spin_lock_irq(&call_lock);
-}
-
-void unlock_ipi_call_lock(void)
-{
-	spin_unlock_irq(&call_lock);
-}
-
-/*
- * this function sends a 'generic call function' IPI to all other CPU
- * of the system defined in the mask.
- */
-static int __smp_call_function_mask(cpumask_t mask,
-				    void (*func)(void *), void *info,
-				    int wait)
-{
-	struct call_data_struct data;
-	cpumask_t allbutself;
-	int cpus;
-
-	allbutself = cpu_online_map;
-	cpu_clear(smp_processor_id(), allbutself);
-
-	cpus_and(mask, mask, allbutself);
-	cpus = cpus_weight(mask);
-
-	if (!cpus)
-		return 0;
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	call_data = &data;
-	wmb();
-
-	/* Send a message to other CPUs */
-	if (cpus_equal(mask, allbutself))
-		send_IPI_allbutself(CALL_FUNCTION_VECTOR);
-	else
-		send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
-
-	/* Wait for response */
-	while (atomic_read(&data.started) != cpus)
-		cpu_relax();
-
-	if (!wait)
-		return 0;
-
-	while (atomic_read(&data.finished) != cpus)
-		cpu_relax();
-
-	return 0;
-}
-/**
- * smp_call_function_mask(): Run a function on a set of other CPUs.
- * @mask: The set of cpus to run on.  Must not include the current cpu.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int smp_call_function_mask(cpumask_t mask,
-			   void (*func)(void *), void *info,
-			   int wait)
-{
-	int ret;
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	spin_lock(&call_lock);
-	ret = __smp_call_function_mask(mask, func, info, wait);
-	spin_unlock(&call_lock);
-	return ret;
-}
-EXPORT_SYMBOL(smp_call_function_mask);
-
-/*
- * smp_call_function_single - Run a function on a specific CPU
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Currently unused.
- * @wait: If true, wait until function has completed on other CPUs.
- *
- * Retrurns 0 on success, else a negative status code.
- *
- * Does not return until the remote CPU is nearly ready to execute <func>
- * or is or has executed.
- */
-
-int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
-			      int nonatomic, int wait)
-{
-	/* prevent preemption and reschedule on another processor */
-	int ret, me = get_cpu();
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	if (cpu == me) {
-		local_irq_disable();
-		func(info);
-		local_irq_enable();
-		put_cpu();
-		return 0;
-	}
-
-	ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
-
-	put_cpu();
-	return ret;
-}
-EXPORT_SYMBOL(smp_call_function_single);
-
-/*
- * smp_call_function - run a function on all other CPUs.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: currently unused.
- * @wait: If true, wait (atomically) until function has completed on other
- *        CPUs.
- *
- * Returns 0 on success, else a negative status code. Does not return until
- * remote CPUs are nearly ready to execute func or are or have executed.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- * Actually there are a few legal cases, like panic.
- */
-int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
-			int wait)
-{
-	return smp_call_function_mask(cpu_online_map, func, info, wait);
-}
-EXPORT_SYMBOL(smp_call_function);
-
-static void stop_this_cpu(void *dummy)
-{
-	local_irq_disable();
-	/*
-	 * Remove this CPU:
-	 */
-	cpu_clear(smp_processor_id(), cpu_online_map);
-	disable_local_APIC();
-	for (;;)
-		halt();
-}
-
-void smp_send_stop(void)
-{
-	int nolock;
-	unsigned long flags;
-
-	if (reboot_force)
-		return;
-
-	/* Don't deadlock on the call lock in panic */
-	nolock = !spin_trylock(&call_lock);
-	local_irq_save(flags);
-	__smp_call_function_mask(cpu_online_map, stop_this_cpu, NULL, 0);
-	if (!nolock)
-		spin_unlock(&call_lock);
-	disable_local_APIC();
-	local_irq_restore(flags);
-}
-
-/*
- * Reschedule call back. Nothing to do,
- * all the work is done automatically when
- * we return from the interrupt.
- */
-asmlinkage void smp_reschedule_interrupt(void)
-{
-	ack_APIC_irq();
-	add_pda(irq_resched_count, 1);
-}
-
-asmlinkage void smp_call_function_interrupt(void)
-{
-	void (*func) (void *info) = call_data->func;
-	void *info = call_data->info;
-	int wait = call_data->wait;
-
-	ack_APIC_irq();
-	/*
-	 * Notify initiating CPU that I've grabbed the data and am
-	 * about to execute the function
-	 */
-	mb();
-	atomic_inc(&call_data->started);
-	/*
-	 * At this point the info structure may be out of scope unless wait==1
-	 */
-	exit_idle();
-	irq_enter();
-	(*func)(info);
-	add_pda(irq_call_count, 1);
-	irq_exit();
-	if (wait) {
-		mb();
-		atomic_inc(&call_data->finished);
-	}
-}
-
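One design note worth keeping from the deleted smp_64.c: rather than a single global tlbstate_lock, it hashed each flush initiator onto one of eight IPI vectors (sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS) with per-sender flush state, so initiators serialize only when they collide on the same vector. A toy illustration of the hashing, assuming 16 CPUs and the 8 vectors of the removed code:

    #include <stdio.h>

    #define NUM_INVALIDATE_TLB_VECTORS 8

    int main(void)
    {
    	int cpu;

    	/* e.g. CPUs 3 and 11 share slot 3 and contend only with
    	 * each other, not with every flusher in the system. */
    	for (cpu = 0; cpu < 16; cpu++)
    		printf("cpu %2d -> flush vector slot %d\n",
    		       cpu, cpu % NUM_INVALIDATE_TLB_VECTORS);
    	return 0;
    }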
diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot.c
similarity index 62%
rename from arch/x86/kernel/smpboot_32.c
rename to arch/x86/kernel/smpboot.c
index 579b9b7..e6abe8a 100644
--- a/arch/x86/kernel/smpboot_32.c
+++ b/arch/x86/kernel/smpboot.c
@@ -3,6 +3,7 @@
  *
  *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
  *	(c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
+ *	Copyright 2001 Andi Kleen, SuSE Labs.
  *
  *	Much of the core SMP work is based on previous work by Thomas Radke, to
  *	whom a great many thanks are extended.
@@ -29,53 +30,90 @@
  *		Ingo Molnar	:	various cleanups and rewrites
  *		Tigran Aivazian	:	fixed "0.00 in /proc/uptime on SMP" bug.
  *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs
+ *	Andi Kleen		:	Changed for SMP boot into long mode.
  *		Martin J. Bligh	: 	Added support for multi-quad systems
  *		Dave Jones	:	Report invalid combinations of Athlon CPUs.
-*		Rusty Russell	:	Hacked into shape for new "hotplug" boot process. */
+ *		Rusty Russell	:	Hacked into shape for new "hotplug" boot process.
+ *	Andi Kleen		:	Converted to new state machine.
+ *	Ashok Raj		: 	CPU hotplug support
+ *	Glauber Costa		:	i386 and x86_64 integration
+ */
 
-#include <linux/module.h>
 #include <linux/init.h>
-#include <linux/kernel.h>
-
-#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/module.h>
 #include <linux/sched.h>
-#include <linux/kernel_stat.h>
-#include <linux/bootmem.h>
-#include <linux/notifier.h>
-#include <linux/cpu.h>
 #include <linux/percpu.h>
+#include <linux/bootmem.h>
+#include <linux/err.h>
 #include <linux/nmi.h>
 
-#include <linux/delay.h>
-#include <linux/mc146818rtc.h>
-#include <asm/tlbflush.h>
+#include <asm/acpi.h>
 #include <asm/desc.h>
-#include <asm/arch_hooks.h>
 #include <asm/nmi.h>
+#include <asm/irq.h>
+#include <asm/smp.h>
+#include <asm/trampoline.h>
+#include <asm/cpu.h>
+#include <asm/numa.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+#include <asm/mtrr.h>
+#include <asm/vmi.h>
+#include <linux/mc146818rtc.h>
 
 #include <mach_apic.h>
 #include <mach_wakecpu.h>
 #include <smpboot_hooks.h>
-#include <asm/vmi.h>
-#include <asm/mtrr.h>
 
-/* Set if we find a B stepping CPU */
-static int __cpuinitdata smp_b_stepping;
+/*
+ * FIXME: For x86_64, those are defined in other files. But moving them here,
+ * would make the setup areas dependent on smp, which is a loss. When we
+ * integrate apic between arches, we can probably do a better job, but
+ * right now, they'll stay here -- glommer
+ */
+
+/* which logical CPU number maps to which CPU (physical APIC ID) */
+u16 x86_cpu_to_apicid_init[NR_CPUS] __initdata =
+			{ [0 ... NR_CPUS-1] = BAD_APICID };
+void *x86_cpu_to_apicid_early_ptr;
+
+u16 x86_bios_cpu_apicid_init[NR_CPUS] __initdata
+				= { [0 ... NR_CPUS-1] = BAD_APICID };
+void *x86_bios_cpu_apicid_early_ptr;
+
+#ifdef CONFIG_X86_32
+u8 apicid_2_node[MAX_APICID];
+#endif
+
+/* State of each CPU */
+DEFINE_PER_CPU(int, cpu_state) = { 0 };
+
+/*
+ * Store all idle threads: these can be reused instead of creating
+ * a new thread. Also avoids complicated thread destroy functionality
+ * for idle threads.
+ */
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
+ * removed after init for !CONFIG_HOTPLUG_CPU.
+ */
+static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
+#define get_idle_for_cpu(x)      (per_cpu(idle_thread_array, x))
+#define set_idle_for_cpu(x, p)   (per_cpu(idle_thread_array, x) = (p))
+#else
struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata;
+#define get_idle_for_cpu(x)      (idle_thread_array[(x)])
+#define set_idle_for_cpu(x, p)   (idle_thread_array[(x)] = (p))
+#endif
 
 /* Number of siblings per CPU package */
 int smp_num_siblings = 1;
 EXPORT_SYMBOL(smp_num_siblings);
 
 /* Last level cache ID of each logical CPU */
-DEFINE_PER_CPU(u8, cpu_llc_id) = BAD_APICID;
-
-/* representing HT siblings of each logical CPU */
-DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
-EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
-
-/* representing HT and core siblings of each logical CPU */
-DEFINE_PER_CPU(cpumask_t, cpu_core_map);
-EXPORT_PER_CPU_SYMBOL(cpu_core_map);
+DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;
 
 /* bitmap of online cpus */
 cpumask_t cpu_online_map __read_mostly;
@@ -85,126 +123,94 @@
 cpumask_t cpu_callout_map;
 cpumask_t cpu_possible_map;
 EXPORT_SYMBOL(cpu_possible_map);
-static cpumask_t smp_commenced_mask;
+
+/* representing HT siblings of each logical CPU */
+DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
+EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
+
+/* representing HT and core siblings of each logical CPU */
+DEFINE_PER_CPU(cpumask_t, cpu_core_map);
+EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 
 /* Per CPU bogomips and other parameters */
 DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
 EXPORT_PER_CPU_SYMBOL(cpu_info);
 
-/* which logical CPU number maps to which CPU (physical APIC ID) */
-u8 x86_cpu_to_apicid_init[NR_CPUS] __initdata =
-			{ [0 ... NR_CPUS-1] = BAD_APICID };
-void *x86_cpu_to_apicid_early_ptr;
-DEFINE_PER_CPU(u8, x86_cpu_to_apicid) = BAD_APICID;
-EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid);
-
-u8 apicid_2_node[MAX_APICID];
-
-/*
- * Trampoline 80x86 program as an array.
- */
-
-extern const unsigned char trampoline_data [];
-extern const unsigned char trampoline_end  [];
-static unsigned char *trampoline_base;
-
-static void map_cpu_to_logical_apicid(void);
-
-/* State of each CPU. */
-DEFINE_PER_CPU(int, cpu_state) = { 0 };
-
-/*
- * Currently trivial. Write the real->protected mode
- * bootstrap into the page concerned. The caller
- * has made sure it's suitably aligned.
- */
-
-static unsigned long __cpuinit setup_trampoline(void)
-{
-	memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data);
-	return virt_to_phys(trampoline_base);
-}
-
-/*
- * We are called very early to get the low memory for the
- * SMP bootup trampoline page.
- */
-void __init smp_alloc_memory(void)
-{
-	trampoline_base = alloc_bootmem_low_pages(PAGE_SIZE);
-	/*
-	 * Has to be in very low memory so we can execute
-	 * real-mode AP code.
-	 */
-	if (__pa(trampoline_base) >= 0x9F000)
-		BUG();
-}
-
-/*
- * The bootstrap kernel entry code has set these up. Save them for
- * a given CPU
- */
-
-void __cpuinit smp_store_cpu_info(int id)
-{
-	struct cpuinfo_x86 *c = &cpu_data(id);
-
-	*c = boot_cpu_data;
-	c->cpu_index = id;
-	if (id!=0)
-		identify_secondary_cpu(c);
-	/*
-	 * Mask B, Pentium, but not Pentium MMX
-	 */
-	if (c->x86_vendor == X86_VENDOR_INTEL &&
-	    c->x86 == 5 &&
-	    c->x86_mask >= 1 && c->x86_mask <= 4 &&
-	    c->x86_model <= 3)
-		/*
-		 * Remember we have B step Pentia with bugs
-		 */
-		smp_b_stepping = 1;
-
-	/*
-	 * Certain Athlons might work (for various values of 'work') in SMP
-	 * but they are not certified as MP capable.
-	 */
-	if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {
-
-		if (num_possible_cpus() == 1)
-			goto valid_k7;
-
-		/* Athlon 660/661 is valid. */	
-		if ((c->x86_model==6) && ((c->x86_mask==0) || (c->x86_mask==1)))
-			goto valid_k7;
-
-		/* Duron 670 is valid */
-		if ((c->x86_model==7) && (c->x86_mask==0))
-			goto valid_k7;
-
-		/*
-		 * Athlon 662, Duron 671, and Athlon >model 7 have capability bit.
-		 * It's worth noting that the A5 stepping (662) of some Athlon XP's
-		 * have the MP bit set.
-		 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for more.
-		 */
-		if (((c->x86_model==6) && (c->x86_mask>=2)) ||
-		    ((c->x86_model==7) && (c->x86_mask>=1)) ||
-		     (c->x86_model> 7))
-			if (cpu_has_mp)
-				goto valid_k7;
-
-		/* If we get here, it's not a certified SMP capable AMD system. */
-		add_taint(TAINT_UNSAFE_SMP);
-	}
-
-valid_k7:
-	;
-}
-
 static atomic_t init_deasserted;
 
-static void __cpuinit smp_callin(void)
+static int boot_cpu_logical_apicid;
+
+/* representing cpus for which sibling maps can be computed */
+static cpumask_t cpu_sibling_setup_map;
+
+/* Set if we find a B stepping CPU */
+int __cpuinitdata smp_b_stepping;
+
+#if defined(CONFIG_NUMA) && defined(CONFIG_X86_32)
+
+/* which logical CPUs are on which nodes */
+cpumask_t node_to_cpumask_map[MAX_NUMNODES] __read_mostly =
+				{ [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE };
+EXPORT_SYMBOL(node_to_cpumask_map);
+/* which node each logical CPU is on */
+int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
+EXPORT_SYMBOL(cpu_to_node_map);
+
+/* set up a mapping between cpu and node. */
+static void map_cpu_to_node(int cpu, int node)
+{
+	printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node);
+	cpu_set(cpu, node_to_cpumask_map[node]);
+	cpu_to_node_map[cpu] = node;
+}
+
+/* undo a mapping between cpu and node. */
+static void unmap_cpu_to_node(int cpu)
+{
+	int node;
+
+	printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu);
+	for (node = 0; node < MAX_NUMNODES; node++)
+		cpu_clear(cpu, node_to_cpumask_map[node]);
+	cpu_to_node_map[cpu] = 0;
+}
+#else /* !(CONFIG_NUMA && CONFIG_X86_32) */
+#define map_cpu_to_node(cpu, node)	({})
+#define unmap_cpu_to_node(cpu)	({})
+#endif
+
+#ifdef CONFIG_X86_32
+u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly =
+					{ [0 ... NR_CPUS-1] = BAD_APICID };
+
+void map_cpu_to_logical_apicid(void)
+{
+	int cpu = smp_processor_id();
+	int apicid = logical_smp_processor_id();
+	int node = apicid_to_node(apicid);
+
+	if (!node_online(node))
+		node = first_online_node;
+
+	cpu_2_logical_apicid[cpu] = apicid;
+	map_cpu_to_node(cpu, node);
+}
+
+void unmap_cpu_to_logical_apicid(int cpu)
+{
+	cpu_2_logical_apicid[cpu] = BAD_APICID;
+	unmap_cpu_to_node(cpu);
+}
+#else
+#define unmap_cpu_to_logical_apicid(cpu) do {} while (0)
+#define map_cpu_to_logical_apicid()  do {} while (0)
+#endif
+
+/*
+ * Report back to the Boot Processor.
+ * Running on AP.
+ */
+void __cpuinit smp_callin(void)
 {
 	int cpuid, phys_id;
 	unsigned long timeout;
@@ -220,12 +226,11 @@
 	/*
 	 * (This works even if the APIC is not enabled.)
 	 */
-	phys_id = GET_APIC_ID(apic_read(APIC_ID));
+	phys_id = GET_APIC_ID(read_apic_id());
 	cpuid = smp_processor_id();
 	if (cpu_isset(cpuid, cpu_callin_map)) {
-		printk("huh, phys CPU#%d, CPU#%d already present??\n",
+		panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__,
 					phys_id, cpuid);
-		BUG();
 	}
 	Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);
 
@@ -247,13 +252,12 @@
 		 */
 		if (cpu_isset(cpuid, cpu_callout_map))
 			break;
-		rep_nop();
+		cpu_relax();
 	}
 
 	if (!time_before(jiffies, timeout)) {
-		printk("BUG: CPU%d started up but did not get a callout!\n",
-			cpuid);
-		BUG();
+		panic("%s: CPU%d started up but did not get a callout!\n",
+		      __func__, cpuid);
 	}
 
 	/*
@@ -266,13 +270,19 @@
 	Dprintk("CALLIN, before setup_local_APIC().\n");
 	smp_callin_clear_local_apic();
 	setup_local_APIC();
+	end_local_APIC_setup();
 	map_cpu_to_logical_apicid();
 
 	/*
 	 * Get our bogomips.
+	 *
+	 * Need to enable IRQs because it can take longer and then
+	 * the NMI watchdog might kill us.
 	 */
+	local_irq_enable();
 	calibrate_delay();
-	Dprintk("Stack at about %p\n",&cpuid);
+	local_irq_disable();
+	Dprintk("Stack at about %p\n", &cpuid);
 
 	/*
 	 * Save our processor parameters
@@ -285,24 +295,181 @@
 	cpu_set(cpuid, cpu_callin_map);
 }
 
-static int cpucount;
-
-/* maps the cpu to the sched domain representing multi-core */
-cpumask_t cpu_coregroup_map(int cpu)
+/*
+ * Activate a secondary processor.
+ */
+void __cpuinit start_secondary(void *unused)
 {
-	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	/*
-	 * For perf, we return last level cache shared map.
-	 * And for power savings, we return cpu_core_map
+	 * Don't put *anything* before cpu_init(); SMP booting is so
+	 * fragile that we want to limit the things done here to the
+	 * most necessary things.
 	 */
-	if (sched_mc_power_savings || sched_smt_power_savings)
-		return per_cpu(cpu_core_map, cpu);
-	else
-		return c->llc_shared_map;
+#ifdef CONFIG_VMI
+	vmi_bringup();
+#endif
+	cpu_init();
+	preempt_disable();
+	smp_callin();
+
+	/* otherwise gcc will move up smp_processor_id before the cpu_init */
+	barrier();
+	/*
+	 * Check TSC synchronization with the BP:
+	 */
+	check_tsc_sync_target();
+
+	if (nmi_watchdog == NMI_IO_APIC) {
+		disable_8259A_irq(0);
+		enable_NMI_through_LVT0();
+		enable_8259A_irq(0);
+	}
+
+	/* This must be done before setting cpu_online_map */
+	set_cpu_sibling_map(raw_smp_processor_id());
+	wmb();
+
+	/*
+	 * We need to hold call_lock, so there is no inconsistency
+	 * between the time smp_call_function() determines number of
+	 * IPI recipients, and the time when the determination is made
+	 * for which cpus receive the IPI. Holding this lock keeps this
+	 * cpu from being included in an in-progress smp_call_function().
+	 */
+	lock_ipi_call_lock();
+#ifdef CONFIG_X86_64
+	spin_lock(&vector_lock);
+
+	/* Setup the per cpu irq handling data structures */
+	__setup_vector_irq(smp_processor_id());
+	/*
+	 * Allow the master to continue.
+	 */
+	spin_unlock(&vector_lock);
+#endif
+	cpu_set(smp_processor_id(), cpu_online_map);
+	unlock_ipi_call_lock();
+	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
+
+	setup_secondary_clock();
+
+	wmb();
+	cpu_idle();
 }
 
-/* representing cpus for which sibling maps can be computed */
-static cpumask_t cpu_sibling_setup_map;
+#ifdef CONFIG_X86_32
+/*
+ * Everything has been set up for the secondary
+ * CPUs - they just need to reload everything
+ * from the task structure.
+ * This function must not return.
+ */
+void __devinit initialize_secondary(void)
+{
+	/*
+	 * We don't actually need to load the full TSS,
+	 * basically just the stack pointer and the ip.
+	 */
+
+	asm volatile(
+		"movl %0,%%esp\n\t"
+		"jmp *%1"
+		:
+		:"m" (current->thread.sp), "m" (current->thread.ip));
+}
+#endif
+
+static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_X86_32
+	/*
+	 * Mask B, Pentium, but not Pentium MMX
+	 */
+	if (c->x86_vendor == X86_VENDOR_INTEL &&
+	    c->x86 == 5 &&
+	    c->x86_mask >= 1 && c->x86_mask <= 4 &&
+	    c->x86_model <= 3)
+		/*
+		 * Remember we have B step Pentia with bugs
+		 */
+		smp_b_stepping = 1;
+
+	/*
+	 * Certain Athlons might work (for various values of 'work') in SMP
+	 * but they are not certified as MP capable.
+	 */
+	if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {
+
+		if (num_possible_cpus() == 1)
+			goto valid_k7;
+
+		/* Athlon 660/661 is valid. */
+		if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
+		    (c->x86_mask == 1)))
+			goto valid_k7;
+
+		/* Duron 670 is valid */
+		if ((c->x86_model == 7) && (c->x86_mask == 0))
+			goto valid_k7;
+
+		/*
+		 * Athlon 662, Duron 671, and Athlon >model 7 have capability
+		 * bit. It's worth noting that the A5 stepping (662) of some
+		 * Athlon XP's have the MP bit set.
+		 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
+		 * more.
+		 */
+		if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
+		    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
+		     (c->x86_model > 7))
+			if (cpu_has_mp)
+				goto valid_k7;
+
+		/* If we get here, not a certified SMP capable AMD system. */
+		add_taint(TAINT_UNSAFE_SMP);
+	}
+
+valid_k7:
+	;
+#endif
+}
+
+void __cpuinit smp_checks(void)
+{
+	if (smp_b_stepping)
+		printk(KERN_WARNING "WARNING: SMP operation may be unreliable"
+				    "with B stepping processors.\n");
+
+	/*
+	 * Don't taint if we are running SMP kernel on a single non-MP
+	 * approved Athlon
+	 */
+	if (tainted & TAINT_UNSAFE_SMP) {
+		if (num_online_cpus())
+			printk(KERN_INFO "WARNING: This combination of AMD"
+				"processors is not suitable for SMP.\n");
+		else
+			tainted &= ~TAINT_UNSAFE_SMP;
+	}
+}
+
+/*
+ * The bootstrap kernel entry code has set these up. Save them for
+ * a given CPU.
+ */
+
+void __cpuinit smp_store_cpu_info(int id)
+{
+	struct cpuinfo_x86 *c = &cpu_data(id);
+
+	*c = boot_cpu_data;
+	c->cpu_index = id;
+	if (id != 0)
+		identify_secondary_cpu(c);
+	smp_apply_quirks(c);
+}
+
 
 void __cpuinit set_cpu_sibling_map(int cpu)
 {
@@ -366,165 +533,76 @@
 	}
 }
 
-/*
- * Activate a secondary processor.
- */
-static void __cpuinit start_secondary(void *unused)
+/* maps the cpu to the sched domain representing multi-core */
+cpumask_t cpu_coregroup_map(int cpu)
 {
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	/*
-	 * Don't put *anything* before cpu_init(), SMP booting is too
-	 * fragile that we want to limit the things done here to the
-	 * most necessary things.
+	 * For perf, we return last level cache shared map.
+	 * And for power savings, we return cpu_core_map
 	 */
-#ifdef CONFIG_VMI
-	vmi_bringup();
+	if (sched_mc_power_savings || sched_smt_power_savings)
+		return per_cpu(cpu_core_map, cpu);
+	else
+		return c->llc_shared_map;
+}
+
+#ifdef CONFIG_X86_32
+/*
+ * We are called very early to get the low memory for the
+ * SMP bootup trampoline page.
+ */
+void __init smp_alloc_memory(void)
+{
+	trampoline_base = alloc_bootmem_low_pages(PAGE_SIZE);
+	/*
+	 * Has to be in very low memory so we can execute
+	 * real-mode AP code.
+	 */
+	if (__pa(trampoline_base) >= 0x9F000)
+		BUG();
+}
 #endif
-	cpu_init();
-	preempt_disable();
-	smp_callin();
-	while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
-		rep_nop();
+
+void impress_friends(void)
+{
+	int cpu;
+	unsigned long bogosum = 0;
 	/*
-	 * Check TSC synchronization with the BP:
+	 * Allow the user to impress friends.
 	 */
-	check_tsc_sync_target();
+	Dprintk("Before bogomips.\n");
+	for_each_possible_cpu(cpu)
+		if (cpu_isset(cpu, cpu_callout_map))
+			bogosum += cpu_data(cpu).loops_per_jiffy;
+	printk(KERN_INFO
+		"Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
+		num_online_cpus(),
+		bogosum/(500000/HZ),
+		(bogosum/(5000/HZ))%100);
 
-	setup_secondary_clock();
-	if (nmi_watchdog == NMI_IO_APIC) {
-		disable_8259A_irq(0);
-		enable_NMI_through_LVT0();
-		enable_8259A_irq(0);
-	}
-	/*
-	 * low-memory mappings have been cleared, flush them from
-	 * the local TLBs too.
-	 */
-	local_flush_tlb();
-
-	/* This must be done before setting cpu_online_map */
-	set_cpu_sibling_map(raw_smp_processor_id());
-	wmb();
-
-	/*
-	 * We need to hold call_lock, so there is no inconsistency
-	 * between the time smp_call_function() determines number of
-	 * IPI recipients, and the time when the determination is made
-	 * for which cpus receive the IPI. Holding this
-	 * lock helps us to not include this cpu in a currently in progress
-	 * smp_call_function().
-	 */
-	lock_ipi_call_lock();
-	cpu_set(smp_processor_id(), cpu_online_map);
-	unlock_ipi_call_lock();
-	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
-
-	/* We can take interrupts now: we're officially "up". */
-	local_irq_enable();
-
-	wmb();
-	cpu_idle();
-}
-
-/*
- * Everything has been set up for the secondary
- * CPUs - they just need to reload everything
- * from the task structure
- * This function must not return.
- */
-void __devinit initialize_secondary(void)
-{
-	/*
-	 * We don't actually need to load the full TSS,
-	 * basically just the stack pointer and the ip.
-	 */
-
-	asm volatile(
-		"movl %0,%%esp\n\t"
-		"jmp *%1"
-		:
-		:"m" (current->thread.sp),"m" (current->thread.ip));
-}
-
-/* Static state in head.S used to set up a CPU */
-extern struct {
-	void * sp;
-	unsigned short ss;
-} stack_start;
-
-#ifdef CONFIG_NUMA
-
-/* which logical CPUs are on which nodes */
-cpumask_t node_to_cpumask_map[MAX_NUMNODES] __read_mostly =
-				{ [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE };
-EXPORT_SYMBOL(node_to_cpumask_map);
-/* which node each logical CPU is on */
-int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
-EXPORT_SYMBOL(cpu_to_node_map);
-
-/* set up a mapping between cpu and node. */
-static inline void map_cpu_to_node(int cpu, int node)
-{
-	printk("Mapping cpu %d to node %d\n", cpu, node);
-	cpu_set(cpu, node_to_cpumask_map[node]);
-	cpu_to_node_map[cpu] = node;
-}
-
-/* undo a mapping between cpu and node. */
-static inline void unmap_cpu_to_node(int cpu)
-{
-	int node;
-
-	printk("Unmapping cpu %d from all nodes\n", cpu);
-	for (node = 0; node < MAX_NUMNODES; node ++)
-		cpu_clear(cpu, node_to_cpumask_map[node]);
-	cpu_to_node_map[cpu] = 0;
-}
-#else /* !CONFIG_NUMA */
-
-#define map_cpu_to_node(cpu, node)	({})
-#define unmap_cpu_to_node(cpu)	({})
-
-#endif /* CONFIG_NUMA */
-
-u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
-
-static void map_cpu_to_logical_apicid(void)
-{
-	int cpu = smp_processor_id();
-	int apicid = logical_smp_processor_id();
-	int node = apicid_to_node(apicid);
-
-	if (!node_online(node))
-		node = first_online_node;
-
-	cpu_2_logical_apicid[cpu] = apicid;
-	map_cpu_to_node(cpu, node);
-}
-
-static void unmap_cpu_to_logical_apicid(int cpu)
-{
-	cpu_2_logical_apicid[cpu] = BAD_APICID;
-	unmap_cpu_to_node(cpu);
+	Dprintk("Before bogocount - setting activated=1.\n");
 }
 
 static inline void __inquire_remote_apic(int apicid)
 {
-	int i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
+	unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
 	char *names[] = { "ID", "VERSION", "SPIV" };
 	int timeout;
-	unsigned long status;
+	u32 status;
 
-	printk("Inquiring remote APIC #%d...\n", apicid);
+	printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid);
 
 	for (i = 0; i < ARRAY_SIZE(regs); i++) {
-		printk("... APIC #%d %s: ", apicid, names[i]);
+		printk(KERN_INFO "... APIC #%d %s: ", apicid, names[i]);
 
 		/*
 		 * Wait for idle.
 		 */
 		status = safe_apic_wait_icr_idle();
 		if (status)
-			printk("a previous APIC delivery may have failed\n");
+			printk(KERN_CONT
+			       "a previous APIC delivery may have failed\n");
 
 		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
 		apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]);
@@ -538,16 +616,16 @@
 		switch (status) {
 		case APIC_ICR_RR_VALID:
 			status = apic_read(APIC_RRR);
-			printk("%lx\n", status);
+			printk(KERN_CONT "%08x\n", status);
 			break;
 		default:
-			printk("failed\n");
+			printk(KERN_CONT "failed\n");
 		}
 	}
 }
 
 #ifdef WAKE_SECONDARY_VIA_NMI
-/* 
+/*
  * Poke the other CPU in the eye via NMI to wake it up. Remember that the normal
  * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
  * won't ... remember to clear down the APIC, etc later.
@@ -584,9 +662,9 @@
 	Dprintk("NMI sent.\n");
 
 	if (send_status)
-		printk("APIC never delivered???\n");
+		printk(KERN_ERR "APIC never delivered???\n");
 	if (accept_status)
-		printk("APIC delivery error (%lx).\n", accept_status);
+		printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);
 
 	return (send_status | accept_status);
 }
@@ -637,6 +715,7 @@
 	Dprintk("Waiting for send to finish...\n");
 	send_status = safe_apic_wait_icr_idle();
 
+	mb();
 	atomic_set(&init_deasserted, 1);
 
 	/*
@@ -655,7 +734,11 @@
 	 * target processor state.
 	 */
 	startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
-		         (unsigned long) stack_start.sp);
+#ifdef CONFIG_X86_64
+			 (unsigned long)init_rsp);
+#else
+			 (unsigned long)stack_start.sp);
+#endif
 
 	/*
 	 * Run STARTUP IPI loop.
@@ -665,7 +748,7 @@
 	maxlvt = lapic_get_maxlvt();
 
 	for (j = 1; j <= num_starts; j++) {
-		Dprintk("Sending STARTUP #%d.\n",j);
+		Dprintk("Sending STARTUP #%d.\n", j);
 		apic_read_around(APIC_SPIV);
 		apic_write(APIC_ESR, 0);
 		apic_read(APIC_ESR);
@@ -711,49 +794,29 @@
 	Dprintk("After Startup.\n");
 
 	if (send_status)
-		printk("APIC never delivered???\n");
+		printk(KERN_ERR "APIC never delivered???\n");
 	if (accept_status)
-		printk("APIC delivery error (%lx).\n", accept_status);
+		printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);
 
 	return (send_status | accept_status);
 }
 #endif	/* WAKE_SECONDARY_VIA_INIT */
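
Both wakeup paths build on the classic INIT/STARTUP handshake: INIT is asserted and deasserted, then up to two STARTUP IPIs are sent whose 8-bit vector carries the page number of the trampoline (start_ip >> 12). This is why the startup address must be page-aligned and sit below 1 MB: a trampoline copied to physical 0x9f000, for instance, travels as vector 0x9f and the AP begins real-mode execution at that address.
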
 
-extern cpumask_t cpu_initialized;
-static inline int alloc_cpu_id(void)
-{
-	cpumask_t	tmp_map;
-	int cpu;
-	cpus_complement(tmp_map, cpu_present_map);
-	cpu = first_cpu(tmp_map);
-	if (cpu >= NR_CPUS)
-		return -ENODEV;
-	return cpu;
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-static struct task_struct * __cpuinitdata cpu_idle_tasks[NR_CPUS];
-static inline struct task_struct * __cpuinit alloc_idle_task(int cpu)
-{
+struct create_idle {
+	struct work_struct work;
 	struct task_struct *idle;
+	struct completion done;
+	int cpu;
+};
 
-	if ((idle = cpu_idle_tasks[cpu]) != NULL) {
-		/* initialize thread_struct.  we really want to avoid
-		 * destroying the idle thread
-		 */
-		idle->thread.sp = (unsigned long)task_pt_regs(idle);
-		init_idle(idle, cpu);
-		return idle;
-	}
-	idle = fork_idle(cpu);
+static void __cpuinit do_fork_idle(struct work_struct *work)
+{
+	struct create_idle *c_idle =
+		container_of(work, struct create_idle, work);
 
-	if (!IS_ERR(idle))
-		cpu_idle_tasks[cpu] = idle;
-	return idle;
+	c_idle->idle = fork_idle(c_idle->cpu);
+	complete(&c_idle->done);
 }
-#else
-#define alloc_idle_task(cpu) fork_idle(cpu)
-#endif
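
The create_idle machinery above is the stock pattern for running work in keventd context and waiting for it synchronously: bundle a work_struct and a completion into one on-stack object, queue the work, and block on the completion. A minimal, self-contained sketch of the same pattern (the deferred_req/deferred_fn names are illustrative, not from this patch):

    #include <linux/kernel.h>
    #include <linux/workqueue.h>
    #include <linux/completion.h>

    struct deferred_req {
    	struct work_struct work;
    	struct completion done;
    	int arg;
    	int result;
    };

    static void deferred_fn(struct work_struct *work)
    {
    	struct deferred_req *req =
    		container_of(work, struct deferred_req, work);

    	req->result = req->arg * 2;	/* stand-in for the real work */
    	complete(&req->done);		/* wake the submitter */
    }

    static int run_deferred(int arg)
    {
    	struct deferred_req req = {
    		.arg = arg,
    		.done = COMPLETION_INITIALIZER_ONSTACK(req.done),
    	};

    	INIT_WORK(&req.work, deferred_fn);
    	schedule_work(&req.work);
    	/* req lives on our stack, so we must wait before returning */
    	wait_for_completion(&req.done);
    	return req.result;
    }
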
 
 static int __cpuinit do_boot_cpu(int apicid, int cpu)
 /*
@@ -762,45 +825,92 @@
  * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu.
  */
 {
-	struct task_struct *idle;
-	unsigned long boot_error;
+	unsigned long boot_error = 0;
 	int timeout;
-	unsigned long start_eip;
+	unsigned long start_ip;
 	unsigned short nmi_high = 0, nmi_low = 0;
+	struct create_idle c_idle = {
+		.cpu = cpu,
+		.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
+	};
+	INIT_WORK(&c_idle.work, do_fork_idle);
+#ifdef CONFIG_X86_64
+	/* allocate memory for gdts of secondary cpus. Hotplug is considered */
+	if (!cpu_gdt_descr[cpu].address &&
+		!(cpu_gdt_descr[cpu].address = get_zeroed_page(GFP_KERNEL))) {
+		printk(KERN_ERR "Failed to allocate GDT for CPU %d\n", cpu);
+		return -1;
+	}
 
-	/*
-	 * Save current MTRR state in case it was changed since early boot
-	 * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
-	 */
-	mtrr_save_state();
+	/* Allocate node local memory for AP pdas */
+	if (cpu_pda(cpu) == &boot_cpu_pda[cpu]) {
+		struct x8664_pda *newpda, *pda;
+		int node = cpu_to_node(cpu);
+		pda = cpu_pda(cpu);
+		newpda = kmalloc_node(sizeof(struct x8664_pda), GFP_ATOMIC,
+				      node);
+		if (newpda) {
+			memcpy(newpda, pda, sizeof(struct x8664_pda));
+			cpu_pda(cpu) = newpda;
+		} else
+			printk(KERN_ERR
+		"Could not allocate node local PDA for CPU %d on node %d\n",
+				cpu, node);
+	}
+#endif
+
+	alternatives_smp_switch(1);
+
+	c_idle.idle = get_idle_for_cpu(cpu);
 
 	/*
 	 * We can't use kernel_thread since we must avoid to
 	 * reschedule the child.
 	 */
-	idle = alloc_idle_task(cpu);
-	if (IS_ERR(idle))
-		panic("failed fork for CPU %d", cpu);
+	if (c_idle.idle) {
+		c_idle.idle->thread.sp = (unsigned long) (((struct pt_regs *)
+			(THREAD_SIZE +  task_stack_page(c_idle.idle))) - 1);
+		init_idle(c_idle.idle, cpu);
+		goto do_rest;
+	}
 
+	if (!keventd_up() || current_is_keventd())
+		c_idle.work.func(&c_idle.work);
+	else {
+		schedule_work(&c_idle.work);
+		wait_for_completion(&c_idle.done);
+	}
+
+	if (IS_ERR(c_idle.idle)) {
+		printk("failed fork for CPU %d\n", cpu);
+		return PTR_ERR(c_idle.idle);
+	}
+
+	set_idle_for_cpu(cpu, c_idle.idle);
+do_rest:
+#ifdef CONFIG_X86_32
+	per_cpu(current_task, cpu) = c_idle.idle;
 	init_gdt(cpu);
- 	per_cpu(current_task, cpu) = idle;
 	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
+	c_idle.idle->thread.ip = (unsigned long) start_secondary;
+	/* The stack for startup_32 can be the same as for start_secondary onwards */
+	stack_start.sp = (void *) c_idle.idle->thread.sp;
+	irq_ctx_init(cpu);
+#else
+	cpu_pda(cpu)->pcurrent = c_idle.idle;
+	init_rsp = c_idle.idle->thread.sp;
+	load_sp0(&per_cpu(init_tss, cpu), &c_idle.idle->thread);
+	initial_code = (unsigned long)start_secondary;
+	clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
+#endif
 
-	idle->thread.ip = (unsigned long) start_secondary;
-	/* start_eip had better be page-aligned! */
-	start_eip = setup_trampoline();
-
-	++cpucount;
-	alternatives_smp_switch(1);
+	/* start_ip had better be page-aligned! */
+	start_ip = setup_trampoline();
 
 	/* So we see what's up   */
-	printk("Booting processor %d/%d ip %lx\n", cpu, apicid, start_eip);
-	/* Stack for startup_32 can be just as for start_secondary onwards */
-	stack_start.sp = (void *) idle->thread.sp;
+	printk(KERN_INFO "Booting processor %d/%d ip %lx\n",
+			  cpu, apicid, start_ip);
 
-	irq_ctx_init(cpu);
-
-	per_cpu(x86_cpu_to_apicid, cpu) = apicid;
 	/*
 	 * This grunge runs the startup process for
 	 * the targeted processor.
@@ -812,12 +922,17 @@
 
 	store_NMI_vector(&nmi_high, &nmi_low);
 
-	smpboot_setup_warm_reset_vector(start_eip);
+	smpboot_setup_warm_reset_vector(start_ip);
+	/*
+	 * Be paranoid about clearing APIC errors.
+	 */
+	apic_write(APIC_ESR, 0);
+	apic_read(APIC_ESR);
 
 	/*
 	 * Starting actual IPI sequence...
 	 */
-	boot_error = wakeup_secondary_cpu(apicid, start_eip);
+	boot_error = wakeup_secondary_cpu(apicid, start_ip);
 
 	if (!boot_error) {
 		/*
@@ -839,18 +954,18 @@
 		if (cpu_isset(cpu, cpu_callin_map)) {
 			/* number CPUs logically, starting from 1 (BSP is 0) */
 			Dprintk("OK.\n");
-			printk("CPU%d: ", cpu);
+			printk(KERN_INFO "CPU%d: ", cpu);
 			print_cpu_info(&cpu_data(cpu));
 			Dprintk("CPU has booted.\n");
 		} else {
-			boot_error= 1;
+			boot_error = 1;
 			if (*((volatile unsigned char *)trampoline_base)
 					== 0xA5)
 				/* trampoline started but...? */
-				printk("Stuck ??\n");
+				printk(KERN_ERR "Stuck ??\n");
 			else
 				/* trampoline code not run */
-				printk("Not responding.\n");
+				printk(KERN_ERR "Not responding.\n");
 			inquire_remote_apic(apicid);
 		}
 	}
@@ -858,408 +973,71 @@
 	if (boot_error) {
 		/* Try to put things back the way they were before ... */
 		unmap_cpu_to_logical_apicid(cpu);
-		cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
+#ifdef CONFIG_X86_64
+		clear_node_cpumask(cpu); /* was set by numa_add_cpu */
+#endif
+		cpu_clear(cpu, cpu_callout_map); /* was set by do_boot_cpu() */
 		cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
-		cpucount--;
-	} else {
-		per_cpu(x86_cpu_to_apicid, cpu) = apicid;
-		cpu_set(cpu, cpu_present_map);
+		cpu_clear(cpu, cpu_possible_map);
+		cpu_clear(cpu, cpu_present_map);
+		per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
 	}
 
 	/* mark "stuck" area as not stuck */
 	*((volatile unsigned long *)trampoline_base) = 0;
 
-	return boot_error;
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-void cpu_exit_clear(void)
-{
-	int cpu = raw_smp_processor_id();
-
-	idle_task_exit();
-
-	cpucount --;
-	cpu_uninit();
-	irq_ctx_exit(cpu);
-
-	cpu_clear(cpu, cpu_callout_map);
-	cpu_clear(cpu, cpu_callin_map);
-
-	cpu_clear(cpu, smp_commenced_mask);
-	unmap_cpu_to_logical_apicid(cpu);
-}
-
-struct warm_boot_cpu_info {
-	struct completion *complete;
-	struct work_struct task;
-	int apicid;
-	int cpu;
-};
-
-static void __cpuinit do_warm_boot_cpu(struct work_struct *work)
-{
-	struct warm_boot_cpu_info *info =
-		container_of(work, struct warm_boot_cpu_info, task);
-	do_boot_cpu(info->apicid, info->cpu);
-	complete(info->complete);
-}
-
-static int __cpuinit __smp_prepare_cpu(int cpu)
-{
-	DECLARE_COMPLETION_ONSTACK(done);
-	struct warm_boot_cpu_info info;
-	int	apicid, ret;
-
-	apicid = per_cpu(x86_cpu_to_apicid, cpu);
-	if (apicid == BAD_APICID) {
-		ret = -ENODEV;
-		goto exit;
-	}
-
-	info.complete = &done;
-	info.apicid = apicid;
-	info.cpu = cpu;
-	INIT_WORK(&info.task, do_warm_boot_cpu);
-
-	/* init low mem mapping */
-	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
-			min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS));
-	flush_tlb_all();
-	schedule_work(&info.task);
-	wait_for_completion(&done);
-
-	zap_low_mappings();
-	ret = 0;
-exit:
-	return ret;
-}
-#endif
-
-/*
- * Cycle through the processors sending APIC IPIs to boot each.
- */
-
-static int boot_cpu_logical_apicid;
-/* Where the IO area was mapped on multiquad, always 0 otherwise */
-void *xquad_portio;
-#ifdef CONFIG_X86_NUMAQ
-EXPORT_SYMBOL(xquad_portio);
-#endif
-
-static void __init smp_boot_cpus(unsigned int max_cpus)
-{
-	int apicid, cpu, bit, kicked;
-	unsigned long bogosum = 0;
-
-	/*
-	 * Setup boot CPU information
-	 */
-	smp_store_cpu_info(0); /* Final full version of the data */
-	printk("CPU%d: ", 0);
-	print_cpu_info(&cpu_data(0));
-
-	boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
-	boot_cpu_logical_apicid = logical_smp_processor_id();
-	per_cpu(x86_cpu_to_apicid, 0) = boot_cpu_physical_apicid;
-
-	current_thread_info()->cpu = 0;
-
-	set_cpu_sibling_map(0);
-
-	/*
-	 * If we couldn't find an SMP configuration at boot time,
-	 * get out of here now!
-	 */
-	if (!smp_found_config && !acpi_lapic) {
-		printk(KERN_NOTICE "SMP motherboard not detected.\n");
-		smpboot_clear_io_apic_irqs();
-		phys_cpu_present_map = physid_mask_of_physid(0);
-		if (APIC_init_uniprocessor())
-			printk(KERN_NOTICE "Local APIC not detected."
-					   " Using dummy APIC emulation.\n");
-		map_cpu_to_logical_apicid();
-		cpu_set(0, per_cpu(cpu_sibling_map, 0));
-		cpu_set(0, per_cpu(cpu_core_map, 0));
-		return;
-	}
-
-	/*
-	 * Should not be necessary because the MP table should list the boot
-	 * CPU too, but we do it for the sake of robustness anyway.
-	 * Makes no sense to do this check in clustered apic mode, so skip it
-	 */
-	if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
-		printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
-				boot_cpu_physical_apicid);
-		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
-	}
-
-	/*
-	 * If we couldn't find a local APIC, then get out of here now!
-	 */
-	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) && !cpu_has_apic) {
-		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
-			boot_cpu_physical_apicid);
-		printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
-		smpboot_clear_io_apic_irqs();
-		phys_cpu_present_map = physid_mask_of_physid(0);
-		map_cpu_to_logical_apicid();
-		cpu_set(0, per_cpu(cpu_sibling_map, 0));
-		cpu_set(0, per_cpu(cpu_core_map, 0));
-		return;
-	}
-
-	verify_local_APIC();
-
-	/*
-	 * If SMP should be disabled, then really disable it!
-	 */
-	if (!max_cpus) {
-		smp_found_config = 0;
-		printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
-
-		if (nmi_watchdog == NMI_LOCAL_APIC) {
-			printk(KERN_INFO "activating minimal APIC for NMI watchdog use.\n");
-			connect_bsp_APIC();
-			setup_local_APIC();
-		}
-		smpboot_clear_io_apic_irqs();
-		phys_cpu_present_map = physid_mask_of_physid(0);
-		map_cpu_to_logical_apicid();
-		cpu_set(0, per_cpu(cpu_sibling_map, 0));
-		cpu_set(0, per_cpu(cpu_core_map, 0));
-		return;
-	}
-
-	connect_bsp_APIC();
-	setup_local_APIC();
-	map_cpu_to_logical_apicid();
-
-
-	setup_portio_remap();
-
-	/*
-	 * Scan the CPU present map and fire up the other CPUs via do_boot_cpu
-	 *
-	 * In clustered apic mode, phys_cpu_present_map is a constructed thus:
-	 * bits 0-3 are quad0, 4-7 are quad1, etc. A perverse twist on the 
-	 * clustered apic ID.
-	 */
-	Dprintk("CPU present map: %lx\n", physids_coerce(phys_cpu_present_map));
-
-	kicked = 1;
-	for (bit = 0; kicked < NR_CPUS && bit < MAX_APICS; bit++) {
-		apicid = cpu_present_to_apicid(bit);
-		/*
-		 * Don't even attempt to start the boot CPU!
-		 */
-		if ((apicid == boot_cpu_apicid) || (apicid == BAD_APICID))
-			continue;
-
-		if (!check_apicid_present(bit))
-			continue;
-		if (max_cpus <= cpucount+1)
-			continue;
-
-		if (((cpu = alloc_cpu_id()) <= 0) || do_boot_cpu(apicid, cpu))
-			printk("CPU #%d not responding - cannot use it.\n",
-								apicid);
-		else
-			++kicked;
-	}
-
 	/*
 	 * Cleanup possible dangling ends...
 	 */
 	smpboot_restore_warm_reset_vector();
 
-	/*
-	 * Allow the user to impress friends.
-	 */
-	Dprintk("Before bogomips.\n");
-	for_each_possible_cpu(cpu)
-		if (cpu_isset(cpu, cpu_callout_map))
-			bogosum += cpu_data(cpu).loops_per_jiffy;
-	printk(KERN_INFO
-		"Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
-		cpucount+1,
-		bogosum/(500000/HZ),
-		(bogosum/(5000/HZ))%100);
-	
-	Dprintk("Before bogocount - setting activated=1.\n");
-
-	if (smp_b_stepping)
-		printk(KERN_WARNING "WARNING: SMP operation may be unreliable with B stepping processors.\n");
-
-	/*
-	 * Don't taint if we are running SMP kernel on a single non-MP
-	 * approved Athlon
-	 */
-	if (tainted & TAINT_UNSAFE_SMP) {
-		if (cpucount)
-			printk (KERN_INFO "WARNING: This combination of AMD processors is not suitable for SMP.\n");
-		else
-			tainted &= ~TAINT_UNSAFE_SMP;
-	}
-
-	Dprintk("Boot done.\n");
-
-	/*
-	 * construct cpu_sibling_map, so that we can tell sibling CPUs
-	 * efficiently.
-	 */
-	for_each_possible_cpu(cpu) {
-		cpus_clear(per_cpu(cpu_sibling_map, cpu));
-		cpus_clear(per_cpu(cpu_core_map, cpu));
-	}
-
-	cpu_set(0, per_cpu(cpu_sibling_map, 0));
-	cpu_set(0, per_cpu(cpu_core_map, 0));
-
-	smpboot_setup_io_apic();
-
-	setup_boot_clock();
+	return boot_error;
 }
 
-/* These are wrappers to interface to the new boot process.  Someone
-   who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */
-void __init native_smp_prepare_cpus(unsigned int max_cpus)
-{
-	smp_commenced_mask = cpumask_of_cpu(0);
-	cpu_callin_map = cpumask_of_cpu(0);
-	mb();
-	smp_boot_cpus(max_cpus);
-}
-
-void __init native_smp_prepare_boot_cpu(void)
-{
-	unsigned int cpu = smp_processor_id();
-
-	init_gdt(cpu);
-	switch_to_new_gdt();
-
-	cpu_set(cpu, cpu_online_map);
-	cpu_set(cpu, cpu_callout_map);
-	cpu_set(cpu, cpu_present_map);
-	cpu_set(cpu, cpu_possible_map);
-	__get_cpu_var(cpu_state) = CPU_ONLINE;
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-void remove_siblinginfo(int cpu)
-{
-	int sibling;
-	struct cpuinfo_x86 *c = &cpu_data(cpu);
-
-	for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
-		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
-		/*
-		 * last thread sibling in this cpu core going down
-		 */
-		if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
-			cpu_data(sibling).booted_cores--;
-	}
-			
-	for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
-		cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
-	cpus_clear(per_cpu(cpu_sibling_map, cpu));
-	cpus_clear(per_cpu(cpu_core_map, cpu));
-	c->phys_proc_id = 0;
-	c->cpu_core_id = 0;
-	cpu_clear(cpu, cpu_sibling_setup_map);
-}
-
-int __cpu_disable(void)
-{
-	cpumask_t map = cpu_online_map;
-	int cpu = smp_processor_id();
-
-	/*
-	 * Perhaps use cpufreq to drop frequency, but that could go
-	 * into generic code.
- 	 *
-	 * We won't take down the boot processor on i386 due to some
-	 * interrupts only being able to be serviced by the BSP.
-	 * Especially so if we're not using an IOAPIC	-zwane
-	 */
-	if (cpu == 0)
-		return -EBUSY;
-	if (nmi_watchdog == NMI_LOCAL_APIC)
-		stop_apic_nmi_watchdog(NULL);
-	clear_local_APIC();
-	/* Allow any queued timer interrupts to get serviced */
-	local_irq_enable();
-	mdelay(1);
-	local_irq_disable();
-
-	remove_siblinginfo(cpu);
-
-	cpu_clear(cpu, map);
-	fixup_irqs(map);
-	/* It's now safe to remove this processor from the online map */
-	cpu_clear(cpu, cpu_online_map);
-	return 0;
-}
-
-void __cpu_die(unsigned int cpu)
-{
-	/* We don't do anything here: idle task is faking death itself. */
-	unsigned int i;
-
-	for (i = 0; i < 10; i++) {
-		/* They ack this in play_dead by setting CPU_DEAD */
-		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
-			printk ("CPU %d is now offline\n", cpu);
-			if (1 == num_online_cpus())
-				alternatives_smp_switch(0);
-			return;
-		}
-		msleep(100);
-	}
- 	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
-}
-#else /* ... !CONFIG_HOTPLUG_CPU */
-int __cpu_disable(void)
-{
-	return -ENOSYS;
-}
-
-void __cpu_die(unsigned int cpu)
-{
-	/* We said "no" in __cpu_disable */
-	BUG();
-}
-#endif /* CONFIG_HOTPLUG_CPU */
-
 int __cpuinit native_cpu_up(unsigned int cpu)
 {
+	int apicid = cpu_present_to_apicid(cpu);
 	unsigned long flags;
-#ifdef CONFIG_HOTPLUG_CPU
-	int ret = 0;
+	int err;
 
-	/*
-	 * We do warm boot only on cpus that had booted earlier
-	 * Otherwise cold boot is all handled from smp_boot_cpus().
-	 * cpu_callin_map is set during the AP kickstart process. It's reset
-	 * when a cpu is taken offline from cpu_exit_clear().
-	 */
-	if (!cpu_isset(cpu, cpu_callin_map))
-		ret = __smp_prepare_cpu(cpu);
+	WARN_ON(irqs_disabled());
 
-	if (ret)
-		return -EIO;
-#endif
+	Dprintk("++++++++++++++++++++=_---CPU UP  %u\n", cpu);
 
-	/* In case one didn't come up */
-	if (!cpu_isset(cpu, cpu_callin_map)) {
-		printk(KERN_DEBUG "skipping cpu%d, didn't come online\n", cpu);
-		return -EIO;
+	if (apicid == BAD_APICID || apicid == boot_cpu_physical_apicid ||
+	    !physid_isset(apicid, phys_cpu_present_map)) {
+		printk(KERN_ERR "%s: bad cpu %d\n", __func__, cpu);
+		return -EINVAL;
 	}
 
+	/*
+	 * Already booted CPU?
+	 */
+	if (cpu_isset(cpu, cpu_callin_map)) {
+		Dprintk("do_boot_cpu %d Already started\n", cpu);
+		return -ENOSYS;
+	}
+
+	/*
+	 * Save current MTRR state in case it was changed since early boot
+	 * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
+	 */
+	mtrr_save_state();
+
 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
-	/* Unleash the CPU! */
-	cpu_set(cpu, smp_commenced_mask);
+
+#ifdef CONFIG_X86_32
+	/* init low mem mapping */
+	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
+			min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS));
+	flush_tlb_all();
+#endif
+
+	err = do_boot_cpu(apicid, cpu);
+	if (err < 0) {
+		Dprintk("do_boot_cpu failed %d\n", err);
+		return err;
+	}
 
 	/*
 	 * Check TSC synchronization with the AP (keep irqs disabled
@@ -1277,34 +1055,375 @@
 	return 0;
 }
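
On 32-bit, the clone_pgd_range()/flush_tlb_all() step above re-creates the low identity mapping each time a CPU is brought up: the AP enters through the real-mode trampoline and briefly executes at low physical addresses while enabling paging, so the kernel PGD entries must temporarily alias the user address range. native_smp_cpus_done() removes that mapping again via zap_low_mappings() once boot is complete.
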
 
+/*
+ * Fall back to non SMP mode after errors.
+ *
+ * RED-PEN audit/test this more. I bet there is more state messed up here.
+ */
+static __init void disable_smp(void)
+{
+	cpu_present_map = cpumask_of_cpu(0);
+	cpu_possible_map = cpumask_of_cpu(0);
+#ifdef CONFIG_X86_32
+	smpboot_clear_io_apic_irqs();
+#endif
+	if (smp_found_config)
+		phys_cpu_present_map =
+				physid_mask_of_physid(boot_cpu_physical_apicid);
+	else
+		phys_cpu_present_map = physid_mask_of_physid(0);
+	map_cpu_to_logical_apicid();
+	cpu_set(0, per_cpu(cpu_sibling_map, 0));
+	cpu_set(0, per_cpu(cpu_core_map, 0));
+}
+
+/*
+ * Various sanity checks.
+ */
+static int __init smp_sanity_check(unsigned max_cpus)
+{
+	preempt_disable();
+	if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
+		printk(KERN_WARNING "weird, boot CPU (#%d) not listed"
+				    "by the BIOS.\n", hard_smp_processor_id());
+		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
+	}
+
+	/*
+	 * If we couldn't find an SMP configuration at boot time,
+	 * get out of here now!
+	 */
+	if (!smp_found_config && !acpi_lapic) {
+		preempt_enable();
+		printk(KERN_NOTICE "SMP motherboard not detected.\n");
+		disable_smp();
+		if (APIC_init_uniprocessor())
+			printk(KERN_NOTICE "Local APIC not detected."
+					   " Using dummy APIC emulation.\n");
+		return -1;
+	}
+
+	/*
+	 * Should not be necessary because the MP table should list the boot
+	 * CPU too, but we do it for the sake of robustness anyway.
+	 */
+	if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
+		printk(KERN_NOTICE
+			"weird, boot CPU (#%d) not listed by the BIOS.\n",
+			boot_cpu_physical_apicid);
+		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
+	}
+	preempt_enable();
+
+	/*
+	 * If we couldn't find a local APIC, then get out of here now!
+	 */
+	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) &&
+	    !cpu_has_apic) {
+		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
+			boot_cpu_physical_apicid);
+		printk(KERN_ERR "... forcing use of dummy APIC emulation."
+				"(tell your hw vendor)\n");
+		smpboot_clear_io_apic();
+		return -1;
+	}
+
+	verify_local_APIC();
+
+	/*
+	 * If SMP should be disabled, then really disable it!
+	 */
+	if (!max_cpus) {
+		printk(KERN_INFO "SMP mode deactivated,"
+				 "forcing use of dummy APIC emulation.\n");
+		smpboot_clear_io_apic();
+#ifdef CONFIG_X86_32
+		if (nmi_watchdog == NMI_LOCAL_APIC) {
+			printk(KERN_INFO "activating minimal APIC for"
+					 "NMI watchdog use.\n");
+			connect_bsp_APIC();
+			setup_local_APIC();
+			end_local_APIC_setup();
+		}
+#endif
+		return -1;
+	}
+
+	return 0;
+}
+
+static void __init smp_cpu_index_default(void)
+{
+	int i;
+	struct cpuinfo_x86 *c;
+
+	for_each_cpu_mask(i, cpu_possible_map) {
+		c = &cpu_data(i);
+		/* mark all to hotplug */
+		c->cpu_index = NR_CPUS;
+	}
+}
+
+/*
+ * Prepare for SMP bootup.  The MP table or ACPI has been read
+ * earlier.  Just do some sanity checking here and enable APIC mode.
+ */
+void __init native_smp_prepare_cpus(unsigned int max_cpus)
+{
+	nmi_watchdog_default();
+	smp_cpu_index_default();
+	current_cpu_data = boot_cpu_data;
+	cpu_callin_map = cpumask_of_cpu(0);
+	mb();
+	/*
+	 * Setup boot CPU information
+	 */
+	smp_store_cpu_info(0); /* Final full version of the data */
+	boot_cpu_logical_apicid = logical_smp_processor_id();
+	current_thread_info()->cpu = 0;  /* needed? */
+	set_cpu_sibling_map(0);
+
+	if (smp_sanity_check(max_cpus) < 0) {
+		printk(KERN_INFO "SMP disabled\n");
+		disable_smp();
+		return;
+	}
+
+	preempt_disable();
+	if (GET_APIC_ID(read_apic_id()) != boot_cpu_physical_apicid) {
+		panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
+		     GET_APIC_ID(read_apic_id()), boot_cpu_physical_apicid);
+		/* Or can we switch back to PIC here? */
+	}
+	preempt_enable();
+
+#ifdef CONFIG_X86_32
+	connect_bsp_APIC();
+#endif
+	/*
+	 * Switch from PIC to APIC mode.
+	 */
+	setup_local_APIC();
+
+#ifdef CONFIG_X86_64
+	/*
+	 * Enable IO APIC before setting up error vector
+	 */
+	if (!skip_ioapic_setup && nr_ioapics)
+		enable_IO_APIC();
+#endif
+	end_local_APIC_setup();
+
+	map_cpu_to_logical_apicid();
+
+	setup_portio_remap();
+
+	smpboot_setup_io_apic();
+	/*
+	 * Set up local APIC timer on boot CPU.
+	 */
+
+	printk(KERN_INFO "CPU%d: ", 0);
+	print_cpu_info(&cpu_data(0));
+	setup_boot_clock();
+}
+/*
+ * Early setup to make printk work.
+ */
+void __init native_smp_prepare_boot_cpu(void)
+{
+	int me = smp_processor_id();
+#ifdef CONFIG_X86_32
+	init_gdt(me);
+	switch_to_new_gdt();
+#endif
+	/* already set me in cpu_online_map in boot_cpu_init() */
+	cpu_set(me, cpu_callout_map);
+	per_cpu(cpu_state, me) = CPU_ONLINE;
+}
+
 void __init native_smp_cpus_done(unsigned int max_cpus)
 {
+	Dprintk("Boot done.\n");
+
+	impress_friends();
+	smp_checks();
 #ifdef CONFIG_X86_IO_APIC
 	setup_ioapic_dest();
 #endif
+	check_nmi_watchdog();
+#ifdef CONFIG_X86_32
 	zap_low_mappings();
+#endif
 }
 
-void __init smp_intr_init(void)
+#ifdef CONFIG_HOTPLUG_CPU
+
+#  ifdef CONFIG_X86_32
+void cpu_exit_clear(void)
 {
-	/*
-	 * IRQ0 must be given a fixed assignment and initialized,
-	 * because it's used before the IO-APIC is set up.
-	 */
-	set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]);
+	int cpu = raw_smp_processor_id();
 
-	/*
-	 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
-	 * IPI, driven by wakeup.
-	 */
-	set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
+	idle_task_exit();
 
-	/* IPI for invalidation */
-	set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);
+	cpu_uninit();
+	irq_ctx_exit(cpu);
 
-	/* IPI for generic function call */
-	set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
+	cpu_clear(cpu, cpu_callout_map);
+	cpu_clear(cpu, cpu_callin_map);
+
+	unmap_cpu_to_logical_apicid(cpu);
 }
+#  endif /* CONFIG_X86_32 */
+
+void remove_siblinginfo(int cpu)
+{
+	int sibling;
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+	for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
+		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
+		/*
+		 * last thread sibling in this cpu core going down
+		 */
+		if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
+			cpu_data(sibling).booted_cores--;
+	}
+
+	for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
+		cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
+	cpus_clear(per_cpu(cpu_sibling_map, cpu));
+	cpus_clear(per_cpu(cpu_core_map, cpu));
+	c->phys_proc_id = 0;
+	c->cpu_core_id = 0;
+	cpu_clear(cpu, cpu_sibling_setup_map);
+}
+
+int additional_cpus __initdata = -1;
+
+static __init int setup_additional_cpus(char *s)
+{
+	return s && get_option(&s, &additional_cpus) ? 0 : -EINVAL;
+}
+early_param("additional_cpus", setup_additional_cpus);
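
setup_additional_cpus() shows the usual shape of an early_param() handler: it runs during early command-line parsing, before normal __setup() handlers, and uses get_option() to pull one integer out of the string. A hedged, self-contained sketch of the same idiom (the demo_cpus name is illustrative, not part of this patch):

    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/errno.h>

    static int demo_cpus __initdata = -1;	/* -1 means "no override given" */

    static int __init setup_demo_cpus(char *s)
    {
    	/* get_option() parses one decimal integer, returning nonzero on success */
    	return s && get_option(&s, &demo_cpus) ? 0 : -EINVAL;
    }
    early_param("demo_cpus", setup_demo_cpus);
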
+
+/*
+ * cpu_possible_map should be static: it cannot change as cpus
+ * are onlined or offlined, because per-cpu data structures
+ * are allocated by some modules at init time and don't expect
+ * to be resized on cpu arrival/departure.
+ * cpu_present_map, on the other hand, can change dynamically.
+ * If cpu_hotplug is not compiled in, we resort to the current
+ * behaviour, which is cpu_possible == cpu_present.
+ * - Ashok Raj
+ *
+ * Three ways to find out the number of additional hotplug CPUs:
+ * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
+ * - The user can override it with additional_cpus=NUM
+ * - Otherwise don't reserve additional CPUs.
+ * We do this because additional CPUs waste a lot of memory.
+ * -AK
+ */
+__init void prefill_possible_map(void)
+{
+	int i;
+	int possible;
+
+	if (additional_cpus == -1) {
+		if (disabled_cpus > 0)
+			additional_cpus = disabled_cpus;
+		else
+			additional_cpus = 0;
+	}
+	possible = num_processors + additional_cpus;
+	if (possible > NR_CPUS)
+		possible = NR_CPUS;
+
+	printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
+		possible, max_t(int, possible - num_processors, 0));
+
+	for (i = 0; i < possible; i++)
+		cpu_set(i, cpu_possible_map);
+}
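
As a worked example of the sizing logic: with num_processors = 4 enumerated at boot, disabled_cpus = 2 in the ACPI/MP tables, and no additional_cpus= override, additional_cpus becomes 2 and possible = 4 + 2 = 6 (clamped to NR_CPUS), so the log reads "SMP: Allowing 6 CPUs, 2 hotplug CPUs".
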
+
+static void __ref remove_cpu_from_maps(int cpu)
+{
+	cpu_clear(cpu, cpu_online_map);
+#ifdef CONFIG_X86_64
+	cpu_clear(cpu, cpu_callout_map);
+	cpu_clear(cpu, cpu_callin_map);
+	/* was set by cpu_init() */
+	clear_bit(cpu, (unsigned long *)&cpu_initialized);
+	clear_node_cpumask(cpu);
+#endif
+}
+
+int __cpu_disable(void)
+{
+	int cpu = smp_processor_id();
+
+	/*
+	 * Perhaps use cpufreq to drop frequency, but that could go
+	 * into generic code.
+	 *
+	 * We won't take down the boot processor on i386 due to some
+	 * interrupts only being able to be serviced by the BSP.
+	 * Especially so if we're not using an IOAPIC	-zwane
+	 */
+	if (cpu == 0)
+		return -EBUSY;
+
+	if (nmi_watchdog == NMI_LOCAL_APIC)
+		stop_apic_nmi_watchdog(NULL);
+	clear_local_APIC();
+
+	/*
+	 * HACK:
+	 * Allow any queued timer interrupts to get serviced
+	 * This is only a temporary solution until we cleanup
+	 * fixup_irqs as we do for IA64.
+	 */
+	local_irq_enable();
+	mdelay(1);
+
+	local_irq_disable();
+	remove_siblinginfo(cpu);
+
+	/* It's now safe to remove this processor from the online map */
+	remove_cpu_from_maps(cpu);
+	fixup_irqs(cpu_online_map);
+	return 0;
+}
+
+void __cpu_die(unsigned int cpu)
+{
+	/* We don't do anything here: idle task is faking death itself. */
+	unsigned int i;
+
+	for (i = 0; i < 10; i++) {
+		/* They ack this in play_dead by setting CPU_DEAD */
+		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
+			printk(KERN_INFO "CPU %d is now offline\n", cpu);
+			if (1 == num_online_cpus())
+				alternatives_smp_switch(0);
+			return;
+		}
+		msleep(100);
+	}
+	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
+}
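
The loop in __cpu_die() gives the dying CPU a bounded grace period: ten polls of cpu_state with msleep(100) between them, i.e. roughly one second to reach CPU_DEAD before the failure message is printed.
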
+#else /* ... !CONFIG_HOTPLUG_CPU */
+int __cpu_disable(void)
+{
+	return -ENOSYS;
+}
+
+void __cpu_die(unsigned int cpu)
+{
+	/* We said "no" in __cpu_disable */
+	BUG();
+}
+#endif
 
 /*
  * If the BIOS enumerates physical processors before logical,
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
deleted file mode 100644
index 0880f2c..0000000
--- a/arch/x86/kernel/smpboot_64.c
+++ /dev/null
@@ -1,1108 +0,0 @@
-/*
- *	x86 SMP booting functions
- *
- *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
- *	(c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
- *	Copyright 2001 Andi Kleen, SuSE Labs.
- *
- *	Much of the core SMP work is based on previous work by Thomas Radke, to
- *	whom a great many thanks are extended.
- *
- *	Thanks to Intel for making available several different Pentium,
- *	Pentium Pro and Pentium-II/Xeon MP machines.
- *	Original development of Linux SMP code supported by Caldera.
- *
- *	This code is released under the GNU General Public License version 2
- *
- *	Fixes
- *		Felix Koop	:	NR_CPUS used properly
- *		Jose Renau	:	Handle single CPU case.
- *		Alan Cox	:	By repeated request 8) - Total BogoMIP report.
- *		Greg Wright	:	Fix for kernel stacks panic.
- *		Erich Boleyn	:	MP v1.4 and additional changes.
- *	Matthias Sattler	:	Changes for 2.1 kernel map.
- *	Michel Lespinasse	:	Changes for 2.1 kernel map.
- *	Michael Chastain	:	Change trampoline.S to gnu as.
- *		Alan Cox	:	Dumb bug: 'B' step PPro's are fine
- *		Ingo Molnar	:	Added APIC timers, based on code
- *					from Jose Renau
- *		Ingo Molnar	:	various cleanups and rewrites
- *		Tigran Aivazian	:	fixed "0.00 in /proc/uptime on SMP" bug.
- *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs
- *	Andi Kleen		:	Changed for SMP boot into long mode.
- *		Rusty Russell	:	Hacked into shape for new "hotplug" boot process.
- *      Andi Kleen              :       Converted to new state machine.
- *					Various cleanups.
- *					Probably mostly hotplug CPU ready now.
- *	Ashok Raj			: CPU hotplug support
- */
-
-
-#include <linux/init.h>
-
-#include <linux/mm.h>
-#include <linux/kernel_stat.h>
-#include <linux/bootmem.h>
-#include <linux/thread_info.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/mc146818rtc.h>
-#include <linux/smp.h>
-#include <linux/kdebug.h>
-
-#include <asm/mtrr.h>
-#include <asm/pgalloc.h>
-#include <asm/desc.h>
-#include <asm/tlbflush.h>
-#include <asm/proto.h>
-#include <asm/nmi.h>
-#include <asm/irq.h>
-#include <asm/hw_irq.h>
-#include <asm/numa.h>
-
-/* Number of siblings per CPU package */
-int smp_num_siblings = 1;
-EXPORT_SYMBOL(smp_num_siblings);
-
-/* Last level cache ID of each logical CPU */
-DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;
-
-/* Bitmask of currently online CPUs */
-cpumask_t cpu_online_map __read_mostly;
-
-EXPORT_SYMBOL(cpu_online_map);
-
-/*
- * Private maps to synchronize booting between AP and BP.
- * Probably not needed anymore, but it makes for easier debugging. -AK
- */
-cpumask_t cpu_callin_map;
-cpumask_t cpu_callout_map;
-cpumask_t cpu_possible_map;
-EXPORT_SYMBOL(cpu_possible_map);
-
-/* Per CPU bogomips and other parameters */
-DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
-EXPORT_PER_CPU_SYMBOL(cpu_info);
-
-/* Set when the idlers are all forked */
-int smp_threads_ready;
-
-/* representing HT siblings of each logical CPU */
-DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
-EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
-
-/* representing HT and core siblings of each logical CPU */
-DEFINE_PER_CPU(cpumask_t, cpu_core_map);
-EXPORT_PER_CPU_SYMBOL(cpu_core_map);
-
-/*
- * Trampoline 80x86 program as an array.
- */
-
-extern const unsigned char trampoline_data[];
-extern const unsigned char trampoline_end[];
-
-/* State of each CPU */
-DEFINE_PER_CPU(int, cpu_state) = { 0 };
-
-/*
- * Store all idle threads, this can be reused instead of creating
- * a new thread. Also avoids complicated thread destroy functionality
- * for idle threads.
- */
-#ifdef CONFIG_HOTPLUG_CPU
-/*
- * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
- * removed after init for !CONFIG_HOTPLUG_CPU.
- */
-static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
-#define get_idle_for_cpu(x)     (per_cpu(idle_thread_array, x))
-#define set_idle_for_cpu(x,p)   (per_cpu(idle_thread_array, x) = (p))
-#else
-struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
-#define get_idle_for_cpu(x)     (idle_thread_array[(x)])
-#define set_idle_for_cpu(x,p)   (idle_thread_array[(x)] = (p))
-#endif
-
-
-/*
- * Currently trivial. Write the real->protected mode
- * bootstrap into the page concerned. The caller
- * has made sure it's suitably aligned.
- */
-
-static unsigned long __cpuinit setup_trampoline(void)
-{
-	void *tramp = __va(SMP_TRAMPOLINE_BASE); 
-	memcpy(tramp, trampoline_data, trampoline_end - trampoline_data);
-	return virt_to_phys(tramp);
-}
-
-/*
- * The bootstrap kernel entry code has set these up. Save them for
- * a given CPU
- */
-
-static void __cpuinit smp_store_cpu_info(int id)
-{
-	struct cpuinfo_x86 *c = &cpu_data(id);
-
-	*c = boot_cpu_data;
-	c->cpu_index = id;
-	identify_cpu(c);
-	print_cpu_info(c);
-}
-
-static atomic_t init_deasserted __cpuinitdata;
-
-/*
- * Report back to the Boot Processor.
- * Running on AP.
- */
-void __cpuinit smp_callin(void)
-{
-	int cpuid, phys_id;
-	unsigned long timeout;
-
-	/*
-	 * If waken up by an INIT in an 82489DX configuration
-	 * we may get here before an INIT-deassert IPI reaches
-	 * our local APIC.  We have to wait for the IPI or we'll
-	 * lock up on an APIC access.
-	 */
-	while (!atomic_read(&init_deasserted))
-		cpu_relax();
-
-	/*
-	 * (This works even if the APIC is not enabled.)
-	 */
-	phys_id = GET_APIC_ID(apic_read(APIC_ID));
-	cpuid = smp_processor_id();
-	if (cpu_isset(cpuid, cpu_callin_map)) {
-		panic("smp_callin: phys CPU#%d, CPU#%d already present??\n",
-					phys_id, cpuid);
-	}
-	Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);
-
-	/*
-	 * STARTUP IPIs are fragile beasts as they might sometimes
-	 * trigger some glue motherboard logic. Complete APIC bus
-	 * silence for 1 second, this overestimates the time the
-	 * boot CPU is spending to send the up to 2 STARTUP IPIs
-	 * by a factor of two. This should be enough.
-	 */
-
-	/*
-	 * Waiting 2s total for startup (udelay is not yet working)
-	 */
-	timeout = jiffies + 2*HZ;
-	while (time_before(jiffies, timeout)) {
-		/*
-		 * Has the boot CPU finished its STARTUP sequence?
-		 */
-		if (cpu_isset(cpuid, cpu_callout_map))
-			break;
-		cpu_relax();
-	}
-
-	if (!time_before(jiffies, timeout)) {
-		panic("smp_callin: CPU%d started up but did not get a callout!\n",
-			cpuid);
-	}
-
-	/*
-	 * the boot CPU has finished the init stage and is spinning
-	 * on callin_map until we finish. We are free to set up this
-	 * CPU, first the APIC. (this is probably redundant on most
-	 * boards)
-	 */
-
-	Dprintk("CALLIN, before setup_local_APIC().\n");
-	setup_local_APIC();
-	end_local_APIC_setup();
-
-	/*
-	 * Get our bogomips.
- 	 *
- 	 * Need to enable IRQs because it can take longer and then
-	 * the NMI watchdog might kill us.
-	 */
-	local_irq_enable();
-	calibrate_delay();
-	local_irq_disable();
-	Dprintk("Stack at about %p\n",&cpuid);
-
-	/*
-	 * Save our processor parameters
-	 */
- 	smp_store_cpu_info(cpuid);
-
-	/*
-	 * Allow the master to continue.
-	 */
-	cpu_set(cpuid, cpu_callin_map);
-}
-
-/* maps the cpu to the sched domain representing multi-core */
-cpumask_t cpu_coregroup_map(int cpu)
-{
-	struct cpuinfo_x86 *c = &cpu_data(cpu);
-	/*
-	 * For perf, we return last level cache shared map.
-	 * And for power savings, we return cpu_core_map
-	 */
-	if (sched_mc_power_savings || sched_smt_power_savings)
-		return per_cpu(cpu_core_map, cpu);
-	else
-		return c->llc_shared_map;
-}
-
-/* representing cpus for which sibling maps can be computed */
-static cpumask_t cpu_sibling_setup_map;
-
-static inline void set_cpu_sibling_map(int cpu)
-{
-	int i;
-	struct cpuinfo_x86 *c = &cpu_data(cpu);
-
-	cpu_set(cpu, cpu_sibling_setup_map);
-
-	if (smp_num_siblings > 1) {
-		for_each_cpu_mask(i, cpu_sibling_setup_map) {
-			if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
-			    c->cpu_core_id == cpu_data(i).cpu_core_id) {
-				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
-				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
-				cpu_set(i, per_cpu(cpu_core_map, cpu));
-				cpu_set(cpu, per_cpu(cpu_core_map, i));
-				cpu_set(i, c->llc_shared_map);
-				cpu_set(cpu, cpu_data(i).llc_shared_map);
-			}
-		}
-	} else {
-		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
-	}
-
-	cpu_set(cpu, c->llc_shared_map);
-
-	if (current_cpu_data.x86_max_cores == 1) {
-		per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
-		c->booted_cores = 1;
-		return;
-	}
-
-	for_each_cpu_mask(i, cpu_sibling_setup_map) {
-		if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
-		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
-			cpu_set(i, c->llc_shared_map);
-			cpu_set(cpu, cpu_data(i).llc_shared_map);
-		}
-		if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
-			cpu_set(i, per_cpu(cpu_core_map, cpu));
-			cpu_set(cpu, per_cpu(cpu_core_map, i));
-			/*
-			 *  Does this new cpu bringup a new core?
-			 */
-			if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
-				/*
-				 * for each core in package, increment
-				 * the booted_cores for this new cpu
-				 */
-				if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
-					c->booted_cores++;
-				/*
-				 * increment the core count for all
-				 * the other cpus in this package
-				 */
-				if (i != cpu)
-					cpu_data(i).booted_cores++;
-			} else if (i != cpu && !c->booted_cores)
-				c->booted_cores = cpu_data(i).booted_cores;
-		}
-	}
-}
-
-/*
- * Setup code on secondary processor (after coming out of the trampoline)
- */
-void __cpuinit start_secondary(void)
-{
-	/*
-	 * Don't put anything before smp_callin(); SMP
-	 * booting is so fragile that we want to limit the
-	 * things done here to the most necessary things.
-	 */
-	cpu_init();
-	preempt_disable();
-	smp_callin();
-
-	/* otherwise gcc will move up the smp_processor_id before the cpu_init */
-	barrier();
-
-	/*
-  	 * Check TSC sync first:
- 	 */
-	check_tsc_sync_target();
-
-	if (nmi_watchdog == NMI_IO_APIC) {
-		disable_8259A_irq(0);
-		enable_NMI_through_LVT0();
-		enable_8259A_irq(0);
-	}
-
-	/*
-	 * The sibling maps must be set before turning the online map on for
-	 * this cpu
-	 */
-	set_cpu_sibling_map(smp_processor_id());
-
-	/*
-	 * We need to hold call_lock, so there is no inconsistency
-	 * between the time smp_call_function() determines number of
-	 * IPI recipients, and the time when the determination is made
-	 * for which cpus receive the IPI in genapic_flat.c. Holding this
-	 * lock helps us to not include this cpu in a currently in progress
-	 * smp_call_function().
-	 */
-	lock_ipi_call_lock();
-	spin_lock(&vector_lock);
-
-	/* Setup the per cpu irq handling data structures */
-	__setup_vector_irq(smp_processor_id());
-	/*
-	 * Allow the master to continue.
-	 */
-	cpu_set(smp_processor_id(), cpu_online_map);
-	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
-	spin_unlock(&vector_lock);
-
-	unlock_ipi_call_lock();
-
-	setup_secondary_clock();
-
-	cpu_idle();
-}
-
-extern volatile unsigned long init_rsp;
-extern void (*initial_code)(void);
-
-#ifdef APIC_DEBUG
-static void inquire_remote_apic(int apicid)
-{
-	unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
-	char *names[] = { "ID", "VERSION", "SPIV" };
-	int timeout;
-	u32 status;
-
-	printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid);
-
-	for (i = 0; i < ARRAY_SIZE(regs); i++) {
-		printk(KERN_INFO "... APIC #%d %s: ", apicid, names[i]);
-
-		/*
-		 * Wait for idle.
-		 */
-		status = safe_apic_wait_icr_idle();
-		if (status)
-			printk(KERN_CONT
-			       "a previous APIC delivery may have failed\n");
-
-		apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
-		apic_write(APIC_ICR, APIC_DM_REMRD | regs[i]);
-
-		timeout = 0;
-		do {
-			udelay(100);
-			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
-		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);
-
-		switch (status) {
-		case APIC_ICR_RR_VALID:
-			status = apic_read(APIC_RRR);
-			printk(KERN_CONT "%08x\n", status);
-			break;
-		default:
-			printk(KERN_CONT "failed\n");
-		}
-	}
-}
-#endif
-
-/*
- * Kick the secondary to wake up.
- */
-static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int start_rip)
-{
-	unsigned long send_status, accept_status = 0;
-	int maxlvt, num_starts, j;
-
-	Dprintk("Asserting INIT.\n");
-
-	/*
-	 * Turn INIT on target chip
-	 */
-	apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
-
-	/*
-	 * Send IPI
-	 */
-	apic_write(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
-				| APIC_DM_INIT);
-
-	Dprintk("Waiting for send to finish...\n");
-	send_status = safe_apic_wait_icr_idle();
-
-	mdelay(10);
-
-	Dprintk("Deasserting INIT.\n");
-
-	/* Target chip */
-	apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
-
-	/* Send IPI */
-	apic_write(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);
-
-	Dprintk("Waiting for send to finish...\n");
-	send_status = safe_apic_wait_icr_idle();
-
-	mb();
-	atomic_set(&init_deasserted, 1);
-
-	num_starts = 2;
-
-	/*
-	 * Run STARTUP IPI loop.
-	 */
-	Dprintk("#startup loops: %d.\n", num_starts);
-
-	maxlvt = lapic_get_maxlvt();
-
-	for (j = 1; j <= num_starts; j++) {
-		Dprintk("Sending STARTUP #%d.\n",j);
-		apic_write(APIC_ESR, 0);
-		apic_read(APIC_ESR);
-		Dprintk("After apic_write.\n");
-
-		/*
-		 * STARTUP IPI
-		 */
-
-		/* Target chip */
-		apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
-
-		/* Boot on the stack */
-		/* Kick the second */
-		apic_write(APIC_ICR, APIC_DM_STARTUP | (start_rip >> 12));
-
-		/*
-		 * Give the other CPU some time to accept the IPI.
-		 */
-		udelay(300);
-
-		Dprintk("Startup point 1.\n");
-
-		Dprintk("Waiting for send to finish...\n");
-		send_status = safe_apic_wait_icr_idle();
-
-		/*
-		 * Give the other CPU some time to accept the IPI.
-		 */
-		udelay(200);
-		/*
-		 * Due to the Pentium erratum 3AP.
-		 */
-		if (maxlvt > 3) {
-			apic_write(APIC_ESR, 0);
-		}
-		accept_status = (apic_read(APIC_ESR) & 0xEF);
-		if (send_status || accept_status)
-			break;
-	}
-	Dprintk("After Startup.\n");
-
-	if (send_status)
-		printk(KERN_ERR "APIC never delivered???\n");
-	if (accept_status)
-		printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);
-
-	return (send_status | accept_status);
-}
-
-struct create_idle {
-	struct work_struct work;
-	struct task_struct *idle;
-	struct completion done;
-	int cpu;
-};
-
-static void __cpuinit do_fork_idle(struct work_struct *work)
-{
-	struct create_idle *c_idle =
-		container_of(work, struct create_idle, work);
-
-	c_idle->idle = fork_idle(c_idle->cpu);
-	complete(&c_idle->done);
-}
-
-/*
- * Boot one CPU.
- */
-static int __cpuinit do_boot_cpu(int cpu, int apicid)
-{
-	unsigned long boot_error;
-	int timeout;
-	unsigned long start_rip;
-	struct create_idle c_idle = {
-		.cpu = cpu,
-		.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
-	};
-	INIT_WORK(&c_idle.work, do_fork_idle);
-
-	/* allocate memory for gdts of secondary cpus. Hotplug is considered */
-	if (!cpu_gdt_descr[cpu].address &&
-		!(cpu_gdt_descr[cpu].address = get_zeroed_page(GFP_KERNEL))) {
-		printk(KERN_ERR "Failed to allocate GDT for CPU %d\n", cpu);
-		return -1;
-	}
-
-	/* Allocate node local memory for AP pdas */
-	if (cpu_pda(cpu) == &boot_cpu_pda[cpu]) {
-		struct x8664_pda *newpda, *pda;
-		int node = cpu_to_node(cpu);
-		pda = cpu_pda(cpu);
-		newpda = kmalloc_node(sizeof (struct x8664_pda), GFP_ATOMIC,
-				      node);
-		if (newpda) {
-			memcpy(newpda, pda, sizeof (struct x8664_pda));
-			cpu_pda(cpu) = newpda;
-		} else
-			printk(KERN_ERR
-		"Could not allocate node local PDA for CPU %d on node %d\n",
-				cpu, node);
-	}
-
-	alternatives_smp_switch(1);
-
-	c_idle.idle = get_idle_for_cpu(cpu);
-
-	if (c_idle.idle) {
-		c_idle.idle->thread.sp = (unsigned long) (((struct pt_regs *)
-			(THREAD_SIZE +  task_stack_page(c_idle.idle))) - 1);
-		init_idle(c_idle.idle, cpu);
-		goto do_rest;
-	}
-
-	/*
-	 * During cold boot process, keventd thread is not spun up yet.
-	 * When we do cpu hot-add, we create idle threads on the fly, we should
-	 * not acquire any attributes from the calling context. Hence the clean
-	 * way to create kernel_threads() is to do that from keventd().
-	 * We do the current_is_keventd() due to the fact that ACPI notifier
-	 * was also queuing to keventd() and when the caller is already running
-	 * in context of keventd(), we would end up with locking up the keventd
-	 * thread.
-	 */
-	if (!keventd_up() || current_is_keventd())
-		c_idle.work.func(&c_idle.work);
-	else {
-		schedule_work(&c_idle.work);
-		wait_for_completion(&c_idle.done);
-	}
-
-	if (IS_ERR(c_idle.idle)) {
-		printk("failed fork for CPU %d\n", cpu);
-		return PTR_ERR(c_idle.idle);
-	}
-
-	set_idle_for_cpu(cpu, c_idle.idle);
-
-do_rest:
-
-	cpu_pda(cpu)->pcurrent = c_idle.idle;
-
-	start_rip = setup_trampoline();
-
-	init_rsp = c_idle.idle->thread.sp;
-	load_sp0(&per_cpu(init_tss, cpu), &c_idle.idle->thread);
-	initial_code = start_secondary;
-	clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
-
-	printk(KERN_INFO "Booting processor %d/%d APIC 0x%x\n", cpu,
-		cpus_weight(cpu_present_map),
-		apicid);
-
-	/*
-	 * This grunge runs the startup process for
-	 * the targeted processor.
-	 */
-
-	atomic_set(&init_deasserted, 0);
-
-	Dprintk("Setting warm reset code and vector.\n");
-
-	CMOS_WRITE(0xa, 0xf);
-	local_flush_tlb();
-	Dprintk("1.\n");
-	*((volatile unsigned short *) phys_to_virt(0x469)) = start_rip >> 4;
-	Dprintk("2.\n");
-	*((volatile unsigned short *) phys_to_virt(0x467)) = start_rip & 0xf;
-	Dprintk("3.\n");
-
-	/*
-	 * Be paranoid about clearing APIC errors.
-	 */
-	apic_write(APIC_ESR, 0);
-	apic_read(APIC_ESR);
-
-	/*
-	 * Status is now clean
-	 */
-	boot_error = 0;
-
-	/*
-	 * Starting actual IPI sequence...
-	 */
-	boot_error = wakeup_secondary_via_INIT(apicid, start_rip);
-
-	if (!boot_error) {
-		/*
-		 * allow APs to start initializing.
-		 */
-		Dprintk("Before Callout %d.\n", cpu);
-		cpu_set(cpu, cpu_callout_map);
-		Dprintk("After Callout %d.\n", cpu);
-
-		/*
-		 * Wait 5s total for a response
-		 */
-		for (timeout = 0; timeout < 50000; timeout++) {
-			if (cpu_isset(cpu, cpu_callin_map))
-				break;	/* It has booted */
-			udelay(100);
-		}
-
-		if (cpu_isset(cpu, cpu_callin_map)) {
-			/* number CPUs logically, starting from 1 (BSP is 0) */
-			Dprintk("CPU has booted.\n");
-		} else {
-			boot_error = 1;
-			if (*((volatile unsigned char *)phys_to_virt(SMP_TRAMPOLINE_BASE))
-					== 0xA5)
-				/* trampoline started but...? */
-				printk("Stuck ??\n");
-			else
-				/* trampoline code not run */
-				printk("Not responding.\n");
-#ifdef APIC_DEBUG
-			inquire_remote_apic(apicid);
-#endif
-		}
-	}
-	if (boot_error) {
-		cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
-		clear_bit(cpu, (unsigned long *)&cpu_initialized); /* was set by cpu_init() */
-		clear_node_cpumask(cpu); /* was set by numa_add_cpu */
-		cpu_clear(cpu, cpu_present_map);
-		cpu_clear(cpu, cpu_possible_map);
-		per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
-		return -EIO;
-	}
-
-	return 0;
-}
-
-cycles_t cacheflush_time;
-unsigned long cache_decay_ticks;
-
-/*
- * Cleanup possible dangling ends...
- */
-static __cpuinit void smp_cleanup_boot(void)
-{
-	/*
-	 * Paranoid:  Set warm reset code and vector here back
-	 * to default values.
-	 */
-	CMOS_WRITE(0, 0xf);
-
-	/*
-	 * Reset trampoline flag
-	 */
-	*((volatile int *) phys_to_virt(0x467)) = 0;
-}
-
-/*
- * Fall back to non SMP mode after errors.
- *
- * RED-PEN audit/test this more. I bet there is more state messed up here.
- */
-static __init void disable_smp(void)
-{
-	cpu_present_map = cpumask_of_cpu(0);
-	cpu_possible_map = cpumask_of_cpu(0);
-	if (smp_found_config)
-		phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id);
-	else
-		phys_cpu_present_map = physid_mask_of_physid(0);
-	cpu_set(0, per_cpu(cpu_sibling_map, 0));
-	cpu_set(0, per_cpu(cpu_core_map, 0));
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-
-int additional_cpus __initdata = -1;
-
-/*
- * cpu_possible_map should be static, it cannot change as cpu's
- * are onlined, or offlined. The reason is per-cpu data-structures
- * are allocated by some modules at init time, and don't expect to
- * do this dynamically on cpu arrival/departure.
- * cpu_present_map on the other hand can change dynamically.
- * In case when cpu_hotplug is not compiled, then we resort to current
- * behaviour, which is cpu_possible == cpu_present.
- * - Ashok Raj
- *
- * Three ways to find out the number of additional hotplug CPUs:
- * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
- * - The user can overwrite it with additional_cpus=NUM
- * - Otherwise don't reserve additional CPUs.
- * We do this because additional CPUs waste a lot of memory.
- * -AK
- */
-__init void prefill_possible_map(void)
-{
-	int i;
-	int possible;
-
- 	if (additional_cpus == -1) {
- 		if (disabled_cpus > 0)
- 			additional_cpus = disabled_cpus;
- 		else
-			additional_cpus = 0;
- 	}
-	possible = num_processors + additional_cpus;
-	if (possible > NR_CPUS) 
-		possible = NR_CPUS;
-
-	printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
-		possible,
-	        max_t(int, possible - num_processors, 0));
-
-	for (i = 0; i < possible; i++)
-		cpu_set(i, cpu_possible_map);
-}
-#endif
-
-/*
- * Various sanity checks.
- */
-static int __init smp_sanity_check(unsigned max_cpus)
-{
-	if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
-		printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
-		       hard_smp_processor_id());
-		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
-	}
-
-	/*
-	 * If we couldn't find an SMP configuration at boot time,
-	 * get out of here now!
-	 */
-	if (!smp_found_config) {
-		printk(KERN_NOTICE "SMP motherboard not detected.\n");
-		disable_smp();
-		if (APIC_init_uniprocessor())
-			printk(KERN_NOTICE "Local APIC not detected."
-					   " Using dummy APIC emulation.\n");
-		return -1;
-	}
-
-	/*
-	 * Should not be necessary because the MP table should list the boot
-	 * CPU too, but we do it for the sake of robustness anyway.
-	 */
-	if (!physid_isset(boot_cpu_id, phys_cpu_present_map)) {
-		printk(KERN_NOTICE "weird, boot CPU (#%d) not listed by the BIOS.\n",
-								 boot_cpu_id);
-		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
-	}
-
-	/*
-	 * If we couldn't find a local APIC, then get out of here now!
-	 */
-	if (!cpu_has_apic) {
-		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
-			boot_cpu_id);
-		printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
-		nr_ioapics = 0;
-		return -1;
-	}
-
-	/*
-	 * If SMP should be disabled, then really disable it!
-	 */
-	if (!max_cpus) {
-		printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
-		nr_ioapics = 0;
-		return -1;
-	}
-
-	return 0;
-}
-
-static void __init smp_cpu_index_default(void)
-{
-	int i;
-	struct cpuinfo_x86 *c;
-
-	for_each_cpu_mask(i, cpu_possible_map) {
-		c = &cpu_data(i);
-		/* mark all to hotplug */
-		c->cpu_index = NR_CPUS;
-	}
-}
-
-/*
- * Prepare for SMP bootup.  The MP table or ACPI has been read
- * earlier.  Just do some sanity checking here and enable APIC mode.
- */
-void __init smp_prepare_cpus(unsigned int max_cpus)
-{
-	nmi_watchdog_default();
-	smp_cpu_index_default();
-	current_cpu_data = boot_cpu_data;
-	current_thread_info()->cpu = 0;  /* needed? */
-	set_cpu_sibling_map(0);
-
-	if (smp_sanity_check(max_cpus) < 0) {
-		printk(KERN_INFO "SMP disabled\n");
-		disable_smp();
-		return;
-	}
-
-
-	/*
-	 * Switch from PIC to APIC mode.
-	 */
-	setup_local_APIC();
-
-	/*
-	 * Enable IO APIC before setting up error vector
-	 */
-	if (!skip_ioapic_setup && nr_ioapics)
-		enable_IO_APIC();
-	end_local_APIC_setup();
-
-	if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_id) {
-		panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
-		      GET_APIC_ID(apic_read(APIC_ID)), boot_cpu_id);
-		/* Or can we switch back to PIC here? */
-	}
-
-	/*
-	 * Now start the IO-APICs
-	 */
-	if (!skip_ioapic_setup && nr_ioapics)
-		setup_IO_APIC();
-	else
-		nr_ioapics = 0;
-
-	/*
-	 * Set up local APIC timer on boot CPU.
-	 */
-
-	setup_boot_clock();
-}
-
-/*
- * Early setup to make printk work.
- */
-void __init smp_prepare_boot_cpu(void)
-{
-	int me = smp_processor_id();
-	/* already set me in cpu_online_map in boot_cpu_init() */
-	cpu_set(me, cpu_callout_map);
-	per_cpu(cpu_state, me) = CPU_ONLINE;
-}
-
-/*
- * Entry point to boot a CPU.
- */
-int __cpuinit __cpu_up(unsigned int cpu)
-{
-	int apicid = cpu_present_to_apicid(cpu);
-	unsigned long flags;
-	int err;
-
-	WARN_ON(irqs_disabled());
-
-	Dprintk("++++++++++++++++++++=_---CPU UP  %u\n", cpu);
-
-	if (apicid == BAD_APICID || apicid == boot_cpu_id ||
-	    !physid_isset(apicid, phys_cpu_present_map)) {
-		printk("__cpu_up: bad cpu %d\n", cpu);
-		return -EINVAL;
-	}
-
-	/*
-	 * Already booted CPU?
-	 */
- 	if (cpu_isset(cpu, cpu_callin_map)) {
-		Dprintk("do_boot_cpu %d Already started\n", cpu);
- 		return -ENOSYS;
-	}
-
-	/*
-	 * Save current MTRR state in case it was changed since early boot
-	 * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
-	 */
-	mtrr_save_state();
-
-	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
-	/* Boot it! */
-	err = do_boot_cpu(cpu, apicid);
-	if (err < 0) {
-		Dprintk("do_boot_cpu failed %d\n", err);
-		return err;
-	}
-
-	/* Unleash the CPU! */
-	Dprintk("waiting for cpu %d\n", cpu);
-
-	/*
-  	 * Make sure to check TSC sync:
- 	 */
-	local_irq_save(flags);
-	check_tsc_sync_source(cpu);
-	local_irq_restore(flags);
-
-	while (!cpu_isset(cpu, cpu_online_map))
-		cpu_relax();
-	err = 0;
-
-	return err;
-}
-
-/*
- * Finish the SMP boot.
- */
-void __init smp_cpus_done(unsigned int max_cpus)
-{
-	smp_cleanup_boot();
-	setup_ioapic_dest();
-	check_nmi_watchdog();
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-
-static void remove_siblinginfo(int cpu)
-{
-	int sibling;
-	struct cpuinfo_x86 *c = &cpu_data(cpu);
-
-	for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
-		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
-		/*
-		 * last thread sibling in this cpu core going down
-		 */
-		if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
-			cpu_data(sibling).booted_cores--;
-	}
-			
-	for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
-		cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
-	cpus_clear(per_cpu(cpu_sibling_map, cpu));
-	cpus_clear(per_cpu(cpu_core_map, cpu));
-	c->phys_proc_id = 0;
-	c->cpu_core_id = 0;
-	cpu_clear(cpu, cpu_sibling_setup_map);
-}
-
-static void __ref remove_cpu_from_maps(void)
-{
-	int cpu = smp_processor_id();
-
-	cpu_clear(cpu, cpu_callout_map);
-	cpu_clear(cpu, cpu_callin_map);
-	clear_bit(cpu, (unsigned long *)&cpu_initialized); /* was set by cpu_init() */
-	clear_node_cpumask(cpu);
-}
-
-int __cpu_disable(void)
-{
-	int cpu = smp_processor_id();
-
-	/*
-	 * Perhaps use cpufreq to drop frequency, but that could go
-	 * into generic code.
- 	 *
-	 * We won't take down the boot processor on i386 due to some
-	 * interrupts only being able to be serviced by the BSP.
-	 * Especially so if we're not using an IOAPIC	-zwane
-	 */
-	if (cpu == 0)
-		return -EBUSY;
-
-	if (nmi_watchdog == NMI_LOCAL_APIC)
-		stop_apic_nmi_watchdog(NULL);
-	clear_local_APIC();
-
-	/*
-	 * HACK:
-	 * Allow any queued timer interrupts to get serviced
-	 * This is only a temporary solution until we cleanup
-	 * fixup_irqs as we do for IA64.
-	 */
-	local_irq_enable();
-	mdelay(1);
-
-	local_irq_disable();
-	remove_siblinginfo(cpu);
-
-	spin_lock(&vector_lock);
-	/* It's now safe to remove this processor from the online map */
-	cpu_clear(cpu, cpu_online_map);
-	spin_unlock(&vector_lock);
-	remove_cpu_from_maps();
-	fixup_irqs(cpu_online_map);
-	return 0;
-}
-
-void __cpu_die(unsigned int cpu)
-{
-	/* We don't do anything here: idle task is faking death itself. */
-	unsigned int i;
-
-	for (i = 0; i < 10; i++) {
-		/* They ack this in play_dead by setting CPU_DEAD */
-		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
-			printk ("CPU %d is now offline\n", cpu);
-			if (1 == num_online_cpus())
-				alternatives_smp_switch(0);
-			return;
-		}
-		msleep(100);
-	}
- 	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
-}
-
-static __init int setup_additional_cpus(char *s)
-{
-	return s && get_option(&s, &additional_cpus) ? 0 : -EINVAL;
-}
-early_param("additional_cpus", setup_additional_cpus);
-
-#else /* ... !CONFIG_HOTPLUG_CPU */
-
-int __cpu_disable(void)
-{
-	return -ENOSYS;
-}
-
-void __cpu_die(unsigned int cpu)
-{
-	/* We said "no" in __cpu_disable */
-	BUG();
-}
-#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/x86/kernel/smpcommon.c b/arch/x86/kernel/smpcommon.c
new file mode 100644
index 0000000..3449064
--- /dev/null
+++ b/arch/x86/kernel/smpcommon.c
@@ -0,0 +1,83 @@
+/*
+ * SMP stuff which is common to all sub-architectures.
+ */
+#include <linux/module.h>
+#include <asm/smp.h>
+
+#ifdef CONFIG_X86_32
+DEFINE_PER_CPU(unsigned long, this_cpu_off);
+EXPORT_PER_CPU_SYMBOL(this_cpu_off);
+
+/* Initialize the CPU's GDT.  This is either the boot CPU doing itself
+   (still using the master per-cpu area), or a CPU doing it for a
+   secondary which will soon come up. */
+__cpuinit void init_gdt(int cpu)
+{
+	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
+
+	pack_descriptor(&gdt[GDT_ENTRY_PERCPU],
+			__per_cpu_offset[cpu], 0xFFFFF,
+			0x2 | DESCTYPE_S, 0x8);
+
+	gdt[GDT_ENTRY_PERCPU].s = 1;
+
+	per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
+	per_cpu(cpu_number, cpu) = cpu;
+}
+#endif
+
+/**
+ * smp_call_function(): Run a function on all other CPUs.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @nonatomic: Unused.
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
+		      int wait)
+{
+	return smp_call_function_mask(cpu_online_map, func, info, wait);
+}
+EXPORT_SYMBOL(smp_call_function);
+
+/**
+ * smp_call_function_single - Run a function on a specific CPU
+ * @cpu: The target CPU.  Cannot be the calling CPU.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @nonatomic: Unused.
+ * @wait: If true, wait until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func.
+ */
+int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+			     int nonatomic, int wait)
+{
+	/* prevent preemption and reschedule on another processor */
+	int ret;
+	int me = get_cpu();
+	if (cpu == me) {
+		local_irq_disable();
+		func(info);
+		local_irq_enable();
+		put_cpu();
+		return 0;
+	}
+
+	ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
+
+	put_cpu();
+	return ret;
+}
+EXPORT_SYMBOL(smp_call_function_single);
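
A minimal usage sketch for the helper above (demo_count, demo_bump and demo_poke are invented names, not part of the patch); with wait=1 the call blocks until the function has finished on the target CPU, and the nonatomic argument is unused:

#include <linux/smp.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, demo_count);

/* Runs on the target CPU, in interrupt context with IRQs disabled */
static void demo_bump(void *info)
{
	__get_cpu_var(demo_count)++;
}

static void demo_poke(int cpu)
{
	/* nonatomic=0 (unused), wait=1: block until demo_bump returns */
	smp_call_function_single(cpu, demo_bump, NULL, 0, 1);
}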
diff --git a/arch/x86/kernel/smpcommon_32.c b/arch/x86/kernel/smpcommon_32.c
index 8bc38af..8b13789 100644
--- a/arch/x86/kernel/smpcommon_32.c
+++ b/arch/x86/kernel/smpcommon_32.c
@@ -1,82 +1 @@
-/*
- * SMP stuff which is common to all sub-architectures.
- */
-#include <linux/module.h>
-#include <asm/smp.h>
 
-DEFINE_PER_CPU(unsigned long, this_cpu_off);
-EXPORT_PER_CPU_SYMBOL(this_cpu_off);
-
-/* Initialize the CPU's GDT.  This is either the boot CPU doing itself
-   (still using the master per-cpu area), or a CPU doing it for a
-   secondary which will soon come up. */
-__cpuinit void init_gdt(int cpu)
-{
-	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
-
-	pack_descriptor(&gdt[GDT_ENTRY_PERCPU],
-			__per_cpu_offset[cpu], 0xFFFFF,
-			0x2 | DESCTYPE_S, 0x8);
-
-	gdt[GDT_ENTRY_PERCPU].s = 1;
-
-	per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
-	per_cpu(cpu_number, cpu) = cpu;
-}
-
-
-/**
- * smp_call_function(): Run a function on all other CPUs.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Unused.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
-		      int wait)
-{
-	return smp_call_function_mask(cpu_online_map, func, info, wait);
-}
-EXPORT_SYMBOL(smp_call_function);
-
-/**
- * smp_call_function_single - Run a function on a specific CPU
- * @cpu: The target CPU.  Cannot be the calling CPU.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Unused.
- * @wait: If true, wait until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- */
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-			     int nonatomic, int wait)
-{
-	/* prevent preemption and reschedule on another processor */
-	int ret;
-	int me = get_cpu();
-	if (cpu == me) {
-		local_irq_disable();
-		func(info);
-		local_irq_enable();
-		put_cpu();
-		return 0;
-	}
-
-	ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
-
-	put_cpu();
-	return ret;
-}
-EXPORT_SYMBOL(smp_call_function_single);
diff --git a/arch/x86/kernel/srat_32.c b/arch/x86/kernel/srat_32.c
index b72e613..70e4a37 100644
--- a/arch/x86/kernel/srat_32.c
+++ b/arch/x86/kernel/srat_32.c
@@ -277,14 +277,14 @@
 	rsdp_address = acpi_os_get_root_pointer();
 	if (!rsdp_address) {
 		printk("%s: System description tables not found\n",
-		       __FUNCTION__);
+		       __func__);
 		goto out_err;
 	}
 
-	printk("%s: assigning address to rsdp\n", __FUNCTION__);
+	printk("%s: assigning address to rsdp\n", __func__);
 	rsdp = (struct acpi_table_rsdp *)(u32)rsdp_address;
 	if (!rsdp) {
-		printk("%s: Didn't find ACPI root!\n", __FUNCTION__);
+		printk("%s: Didn't find ACPI root!\n", __func__);
 		goto out_err;
 	}
 
@@ -292,7 +292,7 @@
 		rsdp->oem_id);
 
 	if (strncmp(rsdp->signature, ACPI_SIG_RSDP,strlen(ACPI_SIG_RSDP))) {
-		printk(KERN_WARNING "%s: RSDP table signature incorrect\n", __FUNCTION__);
+		printk(KERN_WARNING "%s: RSDP table signature incorrect\n", __func__);
 		goto out_err;
 	}
 
@@ -302,7 +302,7 @@
 	if (!rsdt) {
 		printk(KERN_WARNING
 		       "%s: ACPI: Invalid root system description tables (RSDT)\n",
-		       __FUNCTION__);
+		       __func__);
 		goto out_err;
 	}
 
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
index 071ff47..92c20fe 100644
--- a/arch/x86/kernel/step.c
+++ b/arch/x86/kernel/step.c
@@ -148,7 +148,7 @@
 	if (child != current)
 		return;
 
-	wrmsrl(MSR_IA32_DEBUGCTLMSR, val);
+	update_debugctlmsr(val);
 }
 
 /*
diff --git a/arch/x86/kernel/summit_32.c b/arch/x86/kernel/summit_32.c
index 72f4634..6878a9c 100644
--- a/arch/x86/kernel/summit_32.c
+++ b/arch/x86/kernel/summit_32.c
@@ -35,43 +35,47 @@
 static struct scal_detail   *scal_devs[MAX_NUMNODES] __initdata;
 static struct rio_detail    *rio_devs[MAX_NUMNODES*4] __initdata;
 
+static int mp_bus_id_to_node[MAX_MP_BUSSES] __initdata;
+
 static int __init setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus)
 {
 	int twister = 0, node = 0;
 	int i, bus, num_buses;
 
-	for(i = 0; i < rio_table_hdr->num_rio_dev; i++){
-		if (rio_devs[i]->node_id == rio_devs[wpeg_num]->owner_id){
+	for (i = 0; i < rio_table_hdr->num_rio_dev; i++) {
+		if (rio_devs[i]->node_id == rio_devs[wpeg_num]->owner_id) {
 			twister = rio_devs[i]->owner_id;
 			break;
 		}
 	}
-	if (i == rio_table_hdr->num_rio_dev){
-		printk(KERN_ERR "%s: Couldn't find owner Cyclone for Winnipeg!\n", __FUNCTION__);
+	if (i == rio_table_hdr->num_rio_dev) {
+		printk(KERN_ERR "%s: Couldn't find owner Cyclone for Winnipeg!\n", __func__);
 		return last_bus;
 	}
 
-	for(i = 0; i < rio_table_hdr->num_scal_dev; i++){
-		if (scal_devs[i]->node_id == twister){
+	for (i = 0; i < rio_table_hdr->num_scal_dev; i++) {
+		if (scal_devs[i]->node_id == twister) {
 			node = scal_devs[i]->node_id;
 			break;
 		}
 	}
-	if (i == rio_table_hdr->num_scal_dev){
-		printk(KERN_ERR "%s: Couldn't find owner Twister for Cyclone!\n", __FUNCTION__);
+	if (i == rio_table_hdr->num_scal_dev) {
+		printk(KERN_ERR "%s: Couldn't find owner Twister for Cyclone!\n", __func__);
 		return last_bus;
 	}
 
-	switch (rio_devs[wpeg_num]->type){
+	switch (rio_devs[wpeg_num]->type) {
 	case CompatWPEG:
-		/* The Compatibility Winnipeg controls the 2 legacy buses,
+		/*
+		 * The Compatibility Winnipeg controls the 2 legacy buses,
 		 * the 66MHz PCI bus [2 slots] and the 2 "extra" buses in case
 		 * a PCI-PCI bridge card is used in either slot: total 5 buses.
 		 */
 		num_buses = 5;
 		break;
 	case AltWPEG:
-		/* The Alternate Winnipeg controls the 2 133MHz buses [1 slot
+		/*
+		 * The Alternate Winnipeg controls the 2 133MHz buses [1 slot
 		 * each], their 2 "extra" buses, the 100MHz bus [2 slots] and
 		 * the "extra" buses for each of those slots: total 7 buses.
 		 */
@@ -79,17 +83,18 @@
 		break;
 	case LookOutAWPEG:
 	case LookOutBWPEG:
-		/* A Lookout Winnipeg controls 3 100MHz buses [2 slots each]
+		/*
+		 * A Lookout Winnipeg controls 3 100MHz buses [2 slots each]
 		 * & the "extra" buses for each of those slots: total 9 buses.
 		 */
 		num_buses = 9;
 		break;
 	default:
-		printk(KERN_INFO "%s: Unsupported Winnipeg type!\n", __FUNCTION__);
+		printk(KERN_INFO "%s: Unsupported Winnipeg type!\n", __func__);
 		return last_bus;
 	}
 
-	for(bus = last_bus; bus < last_bus + num_buses; bus++)
+	for (bus = last_bus; bus < last_bus + num_buses; bus++)
 		mp_bus_id_to_node[bus] = node;
 	return bus;
 }
@@ -99,14 +104,14 @@
 	unsigned long ptr;
 	int i, scal_detail_size, rio_detail_size;
 
-	if (rio_table_hdr->num_scal_dev > MAX_NUMNODES){
-		printk(KERN_WARNING "%s: MAX_NUMNODES too low!  Defined as %d, but system has %d nodes.\n", __FUNCTION__, MAX_NUMNODES, rio_table_hdr->num_scal_dev);
+	if (rio_table_hdr->num_scal_dev > MAX_NUMNODES) {
+		printk(KERN_WARNING "%s: MAX_NUMNODES too low!  Defined as %d, but system has %d nodes.\n", __func__, MAX_NUMNODES, rio_table_hdr->num_scal_dev);
 		return 0;
 	}
 
-	switch (rio_table_hdr->version){
+	switch (rio_table_hdr->version) {
 	default:
-		printk(KERN_WARNING "%s: Invalid Rio Grande Table Version: %d\n", __FUNCTION__, rio_table_hdr->version);
+		printk(KERN_WARNING "%s: Invalid Rio Grande Table Version: %d\n", __func__, rio_table_hdr->version);
 		return 0;
 	case 2:
 		scal_detail_size = 11;
@@ -119,10 +124,10 @@
 	}
 
 	ptr = (unsigned long)rio_table_hdr + 3;
-	for(i = 0; i < rio_table_hdr->num_scal_dev; i++, ptr += scal_detail_size)
+	for (i = 0; i < rio_table_hdr->num_scal_dev; i++, ptr += scal_detail_size)
 		scal_devs[i] = (struct scal_detail *)ptr;
 
-	for(i = 0; i < rio_table_hdr->num_rio_dev; i++, ptr += rio_detail_size)
+	for (i = 0; i < rio_table_hdr->num_rio_dev; i++, ptr += rio_detail_size)
 		rio_devs[i] = (struct rio_detail *)ptr;
 
 	return 1;
@@ -140,9 +145,9 @@
 
 	rio_table_hdr = NULL;
 	offset = 0x180;
-	while (offset){
+	while (offset) {
 		/* The block id is stored in the 2nd word */
-		if (*((unsigned short *)(ptr + offset + 2)) == 0x4752){
+		if (*((unsigned short *)(ptr + offset + 2)) == 0x4752) {
 			/* set the pointer past the offset & block id */
 			rio_table_hdr = (struct rio_table_hdr *)(ptr + offset + 4);
 			break;
@@ -150,8 +155,8 @@
 		/* The next offset is stored in the 1st word.  0 means no more */
 		offset = *((unsigned short *)(ptr + offset));
 	}
-	if (!rio_table_hdr){
-		printk(KERN_ERR "%s: Unable to locate Rio Grande Table in EBDA - bailing!\n", __FUNCTION__);
+	if (!rio_table_hdr) {
+		printk(KERN_ERR "%s: Unable to locate Rio Grande Table in EBDA - bailing!\n", __func__);
 		return;
 	}
 
@@ -161,8 +166,8 @@
 	/* The first Winnipeg we're looking for has an index of 0 */
 	next_wpeg = 0;
 	do {
-		for(i = 0; i < rio_table_hdr->num_rio_dev; i++){
-			if (is_WPEG(rio_devs[i]) && rio_devs[i]->WP_index == next_wpeg){
+		for (i = 0; i < rio_table_hdr->num_rio_dev; i++) {
+			if (is_WPEG(rio_devs[i]) && rio_devs[i]->WP_index == next_wpeg) {
 				/* It's the Winnipeg we're looking for! */
 				next_bus = setup_pci_node_map_for_wpeg(i, next_bus);
 				next_wpeg++;
diff --git a/arch/x86/kernel/syscall_64.c b/arch/x86/kernel/syscall_64.c
index 9d498c2..170d43c 100644
--- a/arch/x86/kernel/syscall_64.c
+++ b/arch/x86/kernel/syscall_64.c
@@ -1,4 +1,4 @@
-/* System call table for x86-64. */ 
+/* System call table for x86-64. */
 
 #include <linux/linkage.h>
 #include <linux/sys.h>
@@ -7,20 +7,23 @@
 
 #define __NO_STUBS
 
-#define __SYSCALL(nr, sym) extern asmlinkage void sym(void) ; 
+#define __SYSCALL(nr, sym) extern asmlinkage void sym(void);
 #undef _ASM_X86_64_UNISTD_H_
 #include <asm/unistd_64.h>
 
 #undef __SYSCALL
-#define __SYSCALL(nr, sym) [ nr ] = sym, 
+#define __SYSCALL(nr, sym) [nr] = sym,
 #undef _ASM_X86_64_UNISTD_H_
 
-typedef void (*sys_call_ptr_t)(void); 
+typedef void (*sys_call_ptr_t)(void);
 
 extern void sys_ni_syscall(void);
 
 const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = {
-	/* Smells like a like a compiler bug -- it doesn't work when the & below is removed. */ 
+	/*
+	 * Smells like a compiler bug -- it doesn't work
+	 * when the & below is removed.
+	 */
 	[0 ... __NR_syscall_max] = &sys_ni_syscall,
 #include <asm/unistd_64.h>
 };
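
The same include-the-list-twice trick in miniature, with invented names (DEMO_CALL, DEMO_MAX, demo_calls.h, demo_table, demo_ni_call): the first inclusion declares every handler, the second fills the table via designated initializers, and any slot the list never mentions keeps the default set by the [0 ... N] range initializer:

#define DEMO_CALL(nr, sym) extern void sym(void);
#include "demo_calls.h"			/* declares each handler */
#undef DEMO_CALL

#define DEMO_CALL(nr, sym) [nr] = sym,
extern void demo_ni_call(void);

void (*demo_table[DEMO_MAX + 1])(void) = {
	[0 ... DEMO_MAX] = demo_ni_call,	/* default: not implemented */
#include "demo_calls.h"			/* overrides by index */
};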
diff --git a/arch/x86/kernel/test_nx.c b/arch/x86/kernel/test_nx.c
index 10b8a6f..787a5e4 100644
--- a/arch/x86/kernel/test_nx.c
+++ b/arch/x86/kernel/test_nx.c
@@ -11,6 +11,8 @@
  */
 #include <linux/module.h>
 #include <linux/sort.h>
+#include <linux/slab.h>
+
 #include <asm/uaccess.h>
 #include <asm/asm.h>
 
diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c
new file mode 100644
index 0000000..9bb2363
--- /dev/null
+++ b/arch/x86/kernel/tlb_32.c
@@ -0,0 +1,243 @@
+#include <linux/spinlock.h>
+#include <linux/cpu.h>
+#include <linux/interrupt.h>
+
+#include <asm/tlbflush.h>
+
+DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate)
+			____cacheline_aligned = { &init_mm, 0, };
+
+/* must come after the send_IPI functions above for inlining */
+#include <mach_ipi.h>
+
+/*
+ *	Smarter SMP flushing macros.
+ *		c/o Linus Torvalds.
+ *
+ *	These mean you can really definitely utterly forget about
+ *	writing to user space from interrupts. (It's not allowed anyway).
+ *
+ *	Optimizations Manfred Spraul <manfred@colorfullife.com>
+ */
+
+static cpumask_t flush_cpumask;
+static struct mm_struct *flush_mm;
+static unsigned long flush_va;
+static DEFINE_SPINLOCK(tlbstate_lock);
+
+/*
+ * We cannot call mmdrop() because we are in interrupt context,
+ * instead update mm->cpu_vm_mask.
+ *
+ * We need to reload %cr3 since the page tables may be going
+ * away from under us..
+ */
+void leave_mm(int cpu)
+{
+	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
+		BUG();
+	cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
+	load_cr3(swapper_pg_dir);
+}
+EXPORT_SYMBOL_GPL(leave_mm);
+
+/*
+ *
+ * The flush IPI assumes that a thread switch happens in this order:
+ * [cpu0: the cpu that switches]
+ * 1) switch_mm() either 1a) or 1b)
+ * 1a) thread switch to a different mm
+ * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
+ * 	Stop ipi delivery for the old mm. This is not synchronized with
+ * 	the other cpus, but smp_invalidate_interrupt ignores flush ipis
+ * 	for the wrong mm, and in the worst case we perform a superfluous
+ * 	tlb flush.
+ * 1a2) set cpu_tlbstate to TLBSTATE_OK
+ * 	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
+ *	was in lazy tlb mode.
+ * 1a3) update cpu_tlbstate[].active_mm
+ * 	Now cpu0 accepts tlb flushes for the new mm.
+ * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
+ * 	Now the other cpus will send tlb flush ipis.
+ * 1a4) change cr3.
+ * 1b) thread switch without mm change
+ *	cpu_tlbstate[].active_mm is correct, cpu0 already handles
+ *	flush ipis.
+ * 1b1) set cpu_tlbstate to TLBSTATE_OK
+ * 1b2) test_and_set the cpu bit in cpu_vm_mask.
+ * 	Atomically set the bit [other cpus will start sending flush ipis],
+ * 	and test the bit.
+ * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
+ * 2) switch %%esp, ie current
+ *
+ * The interrupt must handle 2 special cases:
+ * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
+ * - the cpu performs speculative tlb reads, i.e. even if the cpu only
+ *   runs in kernel space, the cpu could load tlb entries for user space
+ *   pages.
+ *
+ * The good news is that cpu_tlbstate is local to each cpu, no
+ * write/read ordering problems.
+ */
+
+/*
+ * TLB flush IPI:
+ *
+ * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
+ * 2) Leave the mm if we are in the lazy tlb mode.
+ */
+
+void smp_invalidate_interrupt(struct pt_regs *regs)
+{
+	unsigned long cpu;
+
+	cpu = get_cpu();
+
+	if (!cpu_isset(cpu, flush_cpumask))
+		goto out;
+		/*
+		 * This was a BUG() but until someone can quote me the
+		 * line from the intel manual that guarantees an IPI to
+		 * multiple CPUs is retried _only_ on the erroring CPUs
+		 * it's staying as a return
+		 *
+		 * BUG();
+		 */
+
+	if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
+		if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
+			if (flush_va == TLB_FLUSH_ALL)
+				local_flush_tlb();
+			else
+				__flush_tlb_one(flush_va);
+		} else
+			leave_mm(cpu);
+	}
+	ack_APIC_irq();
+	smp_mb__before_clear_bit();
+	cpu_clear(cpu, flush_cpumask);
+	smp_mb__after_clear_bit();
+out:
+	put_cpu_no_resched();
+	__get_cpu_var(irq_stat).irq_tlb_count++;
+}
+
+void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
+			     unsigned long va)
+{
+	cpumask_t cpumask = *cpumaskp;
+
+	/*
+	 * A couple of (to be removed) sanity checks:
+	 *
+	 * - current CPU must not be in mask
+	 * - mask must exist :)
+	 */
+	BUG_ON(cpus_empty(cpumask));
+	BUG_ON(cpu_isset(smp_processor_id(), cpumask));
+	BUG_ON(!mm);
+
+#ifdef CONFIG_HOTPLUG_CPU
+	/* If a CPU which we ran on has gone down, OK. */
+	cpus_and(cpumask, cpumask, cpu_online_map);
+	if (unlikely(cpus_empty(cpumask)))
+		return;
+#endif
+
+	/*
+	 * I'm not happy about this global shared spinlock in the
+	 * MM hot path, but we'll see how contended it is.
+	 * AK: x86-64 has a faster method that could be ported.
+	 */
+	spin_lock(&tlbstate_lock);
+
+	flush_mm = mm;
+	flush_va = va;
+	cpus_or(flush_cpumask, cpumask, flush_cpumask);
+	/*
+	 * We have to send the IPI only to
+	 * CPUs affected.
+	 */
+	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);
+
+	while (!cpus_empty(flush_cpumask))
+		/* nothing. lockup detection does not belong here */
+		cpu_relax();
+
+	flush_mm = NULL;
+	flush_va = 0;
+	spin_unlock(&tlbstate_lock);
+}
+
+void flush_tlb_current_task(void)
+{
+	struct mm_struct *mm = current->mm;
+	cpumask_t cpu_mask;
+
+	preempt_disable();
+	cpu_mask = mm->cpu_vm_mask;
+	cpu_clear(smp_processor_id(), cpu_mask);
+
+	local_flush_tlb();
+	if (!cpus_empty(cpu_mask))
+		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
+	preempt_enable();
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+	cpumask_t cpu_mask;
+
+	preempt_disable();
+	cpu_mask = mm->cpu_vm_mask;
+	cpu_clear(smp_processor_id(), cpu_mask);
+
+	if (current->active_mm == mm) {
+		if (current->mm)
+			local_flush_tlb();
+		else
+			leave_mm(smp_processor_id());
+	}
+	if (!cpus_empty(cpu_mask))
+		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
+
+	preempt_enable();
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	cpumask_t cpu_mask;
+
+	preempt_disable();
+	cpu_mask = mm->cpu_vm_mask;
+	cpu_clear(smp_processor_id(), cpu_mask);
+
+	if (current->active_mm == mm) {
+		if (current->mm)
+			__flush_tlb_one(va);
+		 else
+			leave_mm(smp_processor_id());
+	}
+
+	if (!cpus_empty(cpu_mask))
+		flush_tlb_others(cpu_mask, mm, va);
+
+	preempt_enable();
+}
+EXPORT_SYMBOL(flush_tlb_page);
+
+static void do_flush_tlb_all(void *info)
+{
+	unsigned long cpu = smp_processor_id();
+
+	__flush_tlb_all();
+	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
+		leave_mm(cpu);
+}
+
+void flush_tlb_all(void)
+{
+	on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
+}
+
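As a usage illustration for the primitives above (a sketch, not part of the patch): after rewriting a single user PTE, a caller invalidates that one translation locally and, via the IPI path, on every other CPU in mm->cpu_vm_mask. The vma/addr/ptep arguments are assumed to come from the caller's fault path:

#include <linux/mm.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

static void demo_update_pte(struct vm_area_struct *vma, unsigned long addr,
			    pte_t *ptep, pte_t newpte)
{
	set_pte_at(vma->vm_mm, addr, ptep, newpte);
	flush_tlb_page(vma, addr);	/* local + remote shootdown */
}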
diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c
new file mode 100644
index 0000000..1558e51
--- /dev/null
+++ b/arch/x86/kernel/tlb_64.c
@@ -0,0 +1,274 @@
+#include <linux/init.h>
+
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/kernel_stat.h>
+#include <linux/mc146818rtc.h>
+#include <linux/interrupt.h>
+
+#include <asm/mtrr.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
+#include <asm/proto.h>
+#include <asm/apicdef.h>
+#include <asm/idle.h>
+
+#include <mach_ipi.h>
+/*
+ *	Smarter SMP flushing macros.
+ *		c/o Linus Torvalds.
+ *
+ *	These mean you can really definitely utterly forget about
+ *	writing to user space from interrupts. (It's not allowed anyway).
+ *
+ *	Optimizations Manfred Spraul <manfred@colorfullife.com>
+ *
+ *	More scalable flush, from Andi Kleen
+ *
+ *	To avoid global state use 8 different call vectors.
+ *	Each CPU uses a specific vector to trigger flushes on other
+ *	CPUs. Depending on the received vector the target CPUs look into
+ *	the right per cpu variable for the flush data.
+ *
+ *	With more than 8 CPUs they are hashed to the 8 available
+ *	vectors. The limited global vector space forces us to this right now.
+ *	In future when interrupts are split into per CPU domains this could be
+ *	fixed, at the cost of triggering multiple IPIs in some cases.
+ */
+
+union smp_flush_state {
+	struct {
+		cpumask_t flush_cpumask;
+		struct mm_struct *flush_mm;
+		unsigned long flush_va;
+		spinlock_t tlbstate_lock;
+	};
+	char pad[SMP_CACHE_BYTES];
+} ____cacheline_aligned;
+
+/*
+ * State is put into the per CPU data section, but padded
+ * to a full cache line because other CPUs can access it and we don't
+ * want false sharing in the per cpu data segment.
+ */
+static DEFINE_PER_CPU(union smp_flush_state, flush_state);
+
+/*
+ * We cannot call mmdrop() because we are in interrupt context,
+ * instead update mm->cpu_vm_mask.
+ */
+void leave_mm(int cpu)
+{
+	if (read_pda(mmu_state) == TLBSTATE_OK)
+		BUG();
+	cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
+	load_cr3(swapper_pg_dir);
+}
+EXPORT_SYMBOL_GPL(leave_mm);
+
+/*
+ *
+ * The flush IPI assumes that a thread switch happens in this order:
+ * [cpu0: the cpu that switches]
+ * 1) switch_mm() either 1a) or 1b)
+ * 1a) thread switch to a different mm
+ * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
+ *	Stop ipi delivery for the old mm. This is not synchronized with
+ *	the other cpus, but smp_invalidate_interrupt ignores flush ipis
+ *	for the wrong mm, and in the worst case we perform a superfluous
+ *	tlb flush.
+ * 1a2) set cpu mmu_state to TLBSTATE_OK
+ *	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
+ *	was in lazy tlb mode.
+ * 1a3) update cpu active_mm
+ *	Now cpu0 accepts tlb flushes for the new mm.
+ * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
+ *	Now the other cpus will send tlb flush ipis.
+ * 1a4) change cr3.
+ * 1b) thread switch without mm change
+ *	cpu active_mm is correct, cpu0 already handles
+ *	flush ipis.
+ * 1b1) set cpu mmu_state to TLBSTATE_OK
+ * 1b2) test_and_set the cpu bit in cpu_vm_mask.
+ *	Atomically set the bit [other cpus will start sending flush ipis],
+ *	and test the bit.
+ * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
+ * 2) switch %%esp, ie current
+ *
+ * The interrupt must handle 2 special cases:
+ * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
+ * - the cpu performs speculative tlb reads, i.e. even if the cpu only
+ *   runs in kernel space, the cpu could load tlb entries for user space
+ *   pages.
+ *
+ * The good news is that cpu mmu_state is local to each cpu, no
+ * write/read ordering problems.
+ */
+
+/*
+ * TLB flush IPI:
+ *
+ * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
+ * 2) Leave the mm if we are in the lazy tlb mode.
+ *
+ * Interrupts are disabled.
+ */
+
+asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
+{
+	int cpu;
+	int sender;
+	union smp_flush_state *f;
+
+	cpu = smp_processor_id();
+	/*
+	 * orig_rax contains the negated interrupt vector.
+	 * Use that to determine where the sender put the data.
+	 */
+	sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
+	f = &per_cpu(flush_state, sender);
+
+	if (!cpu_isset(cpu, f->flush_cpumask))
+		goto out;
+		/*
+		 * This was a BUG() but until someone can quote me the
+		 * line from the intel manual that guarantees an IPI to
+		 * multiple CPUs is retried _only_ on the erroring CPUs
+		 * it's staying as a return
+		 *
+		 * BUG();
+		 */
+
+	if (f->flush_mm == read_pda(active_mm)) {
+		if (read_pda(mmu_state) == TLBSTATE_OK) {
+			if (f->flush_va == TLB_FLUSH_ALL)
+				local_flush_tlb();
+			else
+				__flush_tlb_one(f->flush_va);
+		} else
+			leave_mm(cpu);
+	}
+out:
+	ack_APIC_irq();
+	cpu_clear(cpu, f->flush_cpumask);
+	add_pda(irq_tlb_count, 1);
+}
+
+void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
+			     unsigned long va)
+{
+	int sender;
+	union smp_flush_state *f;
+	cpumask_t cpumask = *cpumaskp;
+
+	/* Caller has disabled preemption */
+	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
+	f = &per_cpu(flush_state, sender);
+
+	/*
+	 * Could avoid this lock when
+	 * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
+	 * probably not worth checking this for a cache-hot lock.
+	 */
+	spin_lock(&f->tlbstate_lock);
+
+	f->flush_mm = mm;
+	f->flush_va = va;
+	cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);
+
+	/*
+	 * We have to send the IPI only to
+	 * CPUs affected.
+	 */
+	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);
+
+	while (!cpus_empty(f->flush_cpumask))
+		cpu_relax();
+
+	f->flush_mm = NULL;
+	f->flush_va = 0;
+	spin_unlock(&f->tlbstate_lock);
+}
+
+int __cpuinit init_smp_flush(void)
+{
+	int i;
+
+	for_each_cpu_mask(i, cpu_possible_map) {
+		spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);
+	}
+	return 0;
+}
+core_initcall(init_smp_flush);
+
+void flush_tlb_current_task(void)
+{
+	struct mm_struct *mm = current->mm;
+	cpumask_t cpu_mask;
+
+	preempt_disable();
+	cpu_mask = mm->cpu_vm_mask;
+	cpu_clear(smp_processor_id(), cpu_mask);
+
+	local_flush_tlb();
+	if (!cpus_empty(cpu_mask))
+		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
+	preempt_enable();
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+	cpumask_t cpu_mask;
+
+	preempt_disable();
+	cpu_mask = mm->cpu_vm_mask;
+	cpu_clear(smp_processor_id(), cpu_mask);
+
+	if (current->active_mm == mm) {
+		if (current->mm)
+			local_flush_tlb();
+		else
+			leave_mm(smp_processor_id());
+	}
+	if (!cpus_empty(cpu_mask))
+		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
+
+	preempt_enable();
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	cpumask_t cpu_mask;
+
+	preempt_disable();
+	cpu_mask = mm->cpu_vm_mask;
+	cpu_clear(smp_processor_id(), cpu_mask);
+
+	if (current->active_mm == mm) {
+		if (current->mm)
+			__flush_tlb_one(va);
+		else
+			leave_mm(smp_processor_id());
+	}
+
+	if (!cpus_empty(cpu_mask))
+		flush_tlb_others(cpu_mask, mm, va);
+
+	preempt_enable();
+}
+
+static void do_flush_tlb_all(void *info)
+{
+	unsigned long cpu = smp_processor_id();
+
+	__flush_tlb_all();
+	if (read_pda(mmu_state) == TLBSTATE_LAZY)
+		leave_mm(cpu);
+}
+
+void flush_tlb_all(void)
+{
+	on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
+}
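
A worked example of the sender hashing described in the header comment, using constants this file's headers already provide (NUM_INVALIDATE_TLB_VECTORS is 8 in this era): CPU 11 owns slot 11 % 8 = 3, raises vector INVALIDATE_TLB_VECTOR_START + 3, and the receiver recovers slot 3 from the negated vector in orig_ax. A sketch of both directions (demo_* names invented):

/* sender side: pick the per-sender flush_state slot */
static int demo_sender_slot(int cpu)
{
	return cpu % NUM_INVALIDATE_TLB_VECTORS;	/* CPUs 3, 11, 19 share slot 3 */
}

/* receiver side: orig_ax holds the negated vector number on entry */
static int demo_slot_from_regs(struct pt_regs *regs)
{
	return ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
}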
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c
new file mode 100644
index 0000000..abbf199
--- /dev/null
+++ b/arch/x86/kernel/trampoline.c
@@ -0,0 +1,18 @@
+#include <linux/io.h>
+
+#include <asm/trampoline.h>
+
+/* ready for x86_64, no harm for x86, since it will be overwritten after alloc */
+unsigned char *trampoline_base = __va(TRAMPOLINE_BASE);
+
+/*
+ * Currently trivial. Write the real->protected mode
+ * bootstrap into the page concerned. The caller
+ * has made sure it's suitably aligned.
+ */
+unsigned long setup_trampoline(void)
+{
+	memcpy(trampoline_base, trampoline_data,
+	       trampoline_end - trampoline_data);
+	return virt_to_phys(trampoline_base);
+}
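
A hedged sketch of how bring-up code consumes this helper; the page-number encoding is the standard x86 startup-IPI convention, not something this file defines, and demo_sipi_vector is an invented name:

static unsigned long demo_sipi_vector(void)
{
	/* physical, page-aligned, below 1MB */
	unsigned long start_eip = setup_trampoline();

	/* a startup IPI carries the trampoline's 4K page number */
	return start_eip >> PAGE_SHIFT;
}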
diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
index 4aedd0b..894293c 100644
--- a/arch/x86/kernel/trampoline_64.S
+++ b/arch/x86/kernel/trampoline_64.S
@@ -30,12 +30,7 @@
 #include <asm/msr.h>
 #include <asm/segment.h>
 
-/* We can free up trampoline after bootup if cpu hotplug is not supported. */
-#ifndef CONFIG_HOTPLUG_CPU
-.section .init.data, "aw", @progbits
-#else
 .section .rodata, "a", @progbits
-#endif
 
 .code16
 
diff --git a/arch/x86/kernel/traps_32.c b/arch/x86/kernel/traps_32.c
index b22c01e..65791ca 100644
--- a/arch/x86/kernel/traps_32.c
+++ b/arch/x86/kernel/traps_32.c
@@ -9,26 +9,28 @@
  * 'Traps.c' handles hardware traps and faults after we have saved some
  * state in 'asm.s'.
  */
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/spinlock.h>
 #include <linux/interrupt.h>
-#include <linux/highmem.h>
 #include <linux/kallsyms.h>
-#include <linux/ptrace.h>
-#include <linux/utsname.h>
+#include <linux/spinlock.h>
+#include <linux/highmem.h>
 #include <linux/kprobes.h>
-#include <linux/kexec.h>
-#include <linux/unwind.h>
 #include <linux/uaccess.h>
-#include <linux/nmi.h>
+#include <linux/utsname.h>
+#include <linux/kdebug.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ptrace.h>
+#include <linux/string.h>
+#include <linux/unwind.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/kexec.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/init.h>
 #include <linux/bug.h>
+#include <linux/nmi.h>
+#include <linux/mm.h>
 
 #ifdef CONFIG_EISA
 #include <linux/ioport.h>
@@ -43,21 +45,18 @@
 #include <linux/edac.h>
 #endif
 
+#include <asm/arch_hooks.h>
+#include <asm/stacktrace.h>
 #include <asm/processor.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/atomic.h>
 #include <asm/debugreg.h>
+#include <asm/atomic.h>
+#include <asm/system.h>
+#include <asm/unwind.h>
 #include <asm/desc.h>
 #include <asm/i387.h>
 #include <asm/nmi.h>
-#include <asm/unwind.h>
 #include <asm/smp.h>
-#include <asm/arch_hooks.h>
-#include <linux/kdebug.h>
-#include <asm/stacktrace.h>
-
-#include <linux/module.h>
+#include <asm/io.h>
 
 #include "mach_traps.h"
 
@@ -69,7 +68,7 @@
 asmlinkage int system_call(void);
 
 /* Do we ignore FPU interrupts ? */
-char ignore_fpu_irq = 0;
+char ignore_fpu_irq;
 
 /*
  * The IDT has to be page-aligned to simplify the Pentium
@@ -105,12 +104,13 @@
 void printk_address(unsigned long address, int reliable)
 {
 #ifdef CONFIG_KALLSYMS
-	unsigned long offset = 0, symsize;
+	char namebuf[KSYM_NAME_LEN];
+	unsigned long offset = 0;
+	unsigned long symsize;
 	const char *symname;
-	char *modname;
-	char *delim = ":";
-	char namebuf[128];
 	char reliab[4] = "";
+	char *delim = ":";
+	char *modname;
 
 	symname = kallsyms_lookup(address, &symsize, &offset,
 					&modname, namebuf);
@@ -138,13 +138,14 @@
 
 /* The form of the top of the frame on the stack */
 struct stack_frame {
-	struct stack_frame *next_frame;
-	unsigned long return_address;
+	struct stack_frame	*next_frame;
+	unsigned long		return_address;
 };
 
-static inline unsigned long print_context_stack(struct thread_info *tinfo,
-				unsigned long *stack, unsigned long bp,
-				const struct stacktrace_ops *ops, void *data)
+static inline unsigned long
+print_context_stack(struct thread_info *tinfo,
+		    unsigned long *stack, unsigned long bp,
+		    const struct stacktrace_ops *ops, void *data)
 {
 	struct stack_frame *frame = (struct stack_frame *)bp;
 
@@ -166,7 +167,7 @@
 	return bp;
 }
 
-#define MSG(msg) ops->warning(data, msg)
+#define MSG(msg)		ops->warning(data, msg)
 
 void dump_trace(struct task_struct *task, struct pt_regs *regs,
 		unsigned long *stack, unsigned long bp,
@@ -177,6 +178,7 @@
 
 	if (!stack) {
 		unsigned long dummy;
+
 		stack = &dummy;
 		if (task != current)
 			stack = (unsigned long *)task->thread.sp;
@@ -186,7 +188,7 @@
 	if (!bp) {
 		if (task == current) {
 			/* Grab bp right from our regs */
-			asm ("movl %%ebp, %0" : "=r" (bp) : );
+			asm("movl %%ebp, %0" : "=r" (bp) :);
 		} else {
 			/* bp is the last reg pushed by switch_to */
 			bp = *(unsigned long *) task->thread.sp;
@@ -196,15 +198,18 @@
 
 	while (1) {
 		struct thread_info *context;
+
 		context = (struct thread_info *)
 			((unsigned long)stack & (~(THREAD_SIZE - 1)));
 		bp = print_context_stack(context, stack, bp, ops, data);
-		/* Should be after the line below, but somewhere
-		   in early boot context comes out corrupted and we
-		   can't reference it -AK */
+		/*
+		 * Should be after the line below, but somewhere
+		 * in early boot context comes out corrupted and we
+		 * can't reference it:
+		 */
 		if (ops->stack(data, "IRQ") < 0)
 			break;
-		stack = (unsigned long*)context->previous_esp;
+		stack = (unsigned long *)context->previous_esp;
 		if (!stack)
 			break;
 		touch_nmi_watchdog();
@@ -243,15 +248,15 @@
 }
 
 static const struct stacktrace_ops print_trace_ops = {
-	.warning = print_trace_warning,
-	.warning_symbol = print_trace_warning_symbol,
-	.stack = print_trace_stack,
-	.address = print_trace_address,
+	.warning		= print_trace_warning,
+	.warning_symbol		= print_trace_warning_symbol,
+	.stack			= print_trace_stack,
+	.address		= print_trace_address,
 };
 
 static void
 show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
-		unsigned long *stack, unsigned long bp, char *log_lvl)
+		   unsigned long *stack, unsigned long bp, char *log_lvl)
 {
 	dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl);
 	printk("%s =======================\n", log_lvl);
@@ -263,21 +268,22 @@
 	show_trace_log_lvl(task, regs, stack, bp, "");
 }
 
-static void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
-		       unsigned long *sp, unsigned long bp, char *log_lvl)
+static void
+show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
+		   unsigned long *sp, unsigned long bp, char *log_lvl)
 {
 	unsigned long *stack;
 	int i;
 
 	if (sp == NULL) {
 		if (task)
-			sp = (unsigned long*)task->thread.sp;
+			sp = (unsigned long *)task->thread.sp;
 		else
 			sp = (unsigned long *)&sp;
 	}
 
 	stack = sp;
-	for(i = 0; i < kstack_depth_to_print; i++) {
+	for (i = 0; i < kstack_depth_to_print; i++) {
 		if (kstack_end(stack))
 			break;
 		if (i && ((i % 8) == 0))
@@ -285,6 +291,7 @@
 		printk("%08lx ", *stack++);
 	}
 	printk("\n%sCall Trace:\n", log_lvl);
+
 	show_trace_log_lvl(task, regs, sp, bp, log_lvl);
 }
 
@@ -299,8 +306,8 @@
  */
 void dump_stack(void)
 {
-	unsigned long stack;
 	unsigned long bp = 0;
+	unsigned long stack;
 
 #ifdef CONFIG_FRAME_POINTER
 	if (!bp)
@@ -312,6 +319,7 @@
 		init_utsname()->release,
 		(int)strcspn(init_utsname()->version, " "),
 		init_utsname()->version);
+
 	show_trace(current, NULL, &stack, bp);
 }
 
@@ -323,6 +331,7 @@
 
 	print_modules();
 	__show_registers(regs, 0);
+
 	printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)",
 		TASK_COMM_LEN, current->comm, task_pid_nr(current),
 		current_thread_info(), current, task_thread_info(current));
@@ -331,10 +340,10 @@
 	 * time of the fault..
 	 */
 	if (!user_mode_vm(regs)) {
-		u8 *ip;
 		unsigned int code_prologue = code_bytes * 43 / 64;
 		unsigned int code_len = code_bytes;
 		unsigned char c;
+		u8 *ip;
 
 		printk("\n" KERN_EMERG "Stack: ");
 		show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
@@ -361,7 +370,7 @@
 		}
 	}
 	printk("\n");
-}	
+}
 
 int is_valid_bugaddr(unsigned long ip)
 {
@@ -377,10 +386,10 @@
 
 static int die_counter;
 
-int __kprobes __die(const char * str, struct pt_regs * regs, long err)
+int __kprobes __die(const char *str, struct pt_regs *regs, long err)
 {
-	unsigned long sp;
 	unsigned short ss;
+	unsigned long sp;
 
 	printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
 #ifdef CONFIG_PREEMPT
@@ -395,8 +404,8 @@
 	printk("\n");
 
 	if (notify_die(DIE_OOPS, str, regs, err,
-				current->thread.trap_no, SIGSEGV) !=
-			NOTIFY_STOP) {
+			current->thread.trap_no, SIGSEGV) != NOTIFY_STOP) {
+
 		show_registers(regs);
 		/* Executive summary in case the oops scrolled away */
 		sp = (unsigned long) (&regs->sp);
@@ -408,17 +417,18 @@
 		printk(KERN_EMERG "EIP: [<%08lx>] ", regs->ip);
 		print_symbol("%s", regs->ip);
 		printk(" SS:ESP %04x:%08lx\n", ss, sp);
+
 		return 0;
-	} else {
-		return 1;
 	}
+
+	return 1;
 }
 
 /*
- * This is gone through when something in the kernel has done something bad and
- * is about to be terminated.
+ * This is gone through when something in the kernel has done something bad
+ * and is about to be terminated:
  */
-void die(const char * str, struct pt_regs * regs, long err)
+void die(const char *str, struct pt_regs *regs, long err)
 {
 	static struct {
 		raw_spinlock_t lock;
@@ -440,8 +450,9 @@
 		die.lock_owner = smp_processor_id();
 		die.lock_owner_depth = 0;
 		bust_spinlocks(1);
-	} else
+	} else {
 		raw_local_irq_save(flags);
+	}
 
 	if (++die.lock_owner_depth < 3) {
 		report_bug(regs->ip, regs);
@@ -474,19 +485,20 @@
 	do_exit(SIGSEGV);
 }
 
-static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
+static inline void
+die_if_kernel(const char *str, struct pt_regs *regs, long err)
 {
 	if (!user_mode_vm(regs))
 		die(str, regs, err);
 }
 
-static void __kprobes do_trap(int trapnr, int signr, char *str, int vm86,
-			      struct pt_regs * regs, long error_code,
-			      siginfo_t *info)
+static void __kprobes
+do_trap(int trapnr, int signr, char *str, int vm86, struct pt_regs *regs,
+	long error_code, siginfo_t *info)
 {
 	struct task_struct *tsk = current;
 
-	if (regs->flags & VM_MASK) {
+	if (regs->flags & X86_VM_MASK) {
 		if (vm86)
 			goto vm86_trap;
 		goto trap_signal;
@@ -495,111 +507,112 @@
 	if (!user_mode(regs))
 		goto kernel_trap;
 
-	trap_signal: {
-		/*
-		 * We want error_code and trap_no set for userspace faults and
-		 * kernelspace faults which result in die(), but not
-		 * kernelspace faults which are fixed up.  die() gives the
-		 * process no chance to handle the signal and notice the
-		 * kernel fault information, so that won't result in polluting
-		 * the information about previously queued, but not yet
-		 * delivered, faults.  See also do_general_protection below.
-		 */
+trap_signal:
+	/*
+	 * We want error_code and trap_no set for userspace faults and
+	 * kernelspace faults which result in die(), but not
+	 * kernelspace faults which are fixed up.  die() gives the
+	 * process no chance to handle the signal and notice the
+	 * kernel fault information, so that won't result in polluting
+	 * the information about previously queued, but not yet
+	 * delivered, faults.  See also do_general_protection below.
+	 */
+	tsk->thread.error_code = error_code;
+	tsk->thread.trap_no = trapnr;
+
+	if (info)
+		force_sig_info(signr, info, tsk);
+	else
+		force_sig(signr, tsk);
+	return;
+
+kernel_trap:
+	if (!fixup_exception(regs)) {
 		tsk->thread.error_code = error_code;
 		tsk->thread.trap_no = trapnr;
-
-		if (info)
-			force_sig_info(signr, info, tsk);
-		else
-			force_sig(signr, tsk);
-		return;
+		die(str, regs, error_code);
 	}
+	return;
 
-	kernel_trap: {
-		if (!fixup_exception(regs)) {
-			tsk->thread.error_code = error_code;
-			tsk->thread.trap_no = trapnr;
-			die(str, regs, error_code);
-		}
-		return;
-	}
-
-	vm86_trap: {
-		int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr);
-		if (ret) goto trap_signal;
-		return;
-	}
+vm86_trap:
+	if (handle_vm86_trap((struct kernel_vm86_regs *) regs,
+						error_code, trapnr))
+		goto trap_signal;
+	return;
 }
 
-#define DO_ERROR(trapnr, signr, str, name) \
-void do_##name(struct pt_regs * regs, long error_code) \
-{ \
-	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
-						== NOTIFY_STOP) \
-		return; \
-	do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
+#define DO_ERROR(trapnr, signr, str, name)				\
+void do_##name(struct pt_regs *regs, long error_code)			\
+{									\
+	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)	\
+						== NOTIFY_STOP)		\
+		return;							\
+	do_trap(trapnr, signr, str, 0, regs, error_code, NULL);		\
 }
 
-#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr, irq) \
-void do_##name(struct pt_regs * regs, long error_code) \
-{ \
-	siginfo_t info; \
-	if (irq) \
-		local_irq_enable(); \
-	info.si_signo = signr; \
-	info.si_errno = 0; \
-	info.si_code = sicode; \
-	info.si_addr = (void __user *)siaddr; \
-	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
-						== NOTIFY_STOP) \
-		return; \
-	do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
+#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr, irq)	\
+void do_##name(struct pt_regs *regs, long error_code)			\
+{									\
+	siginfo_t info;							\
+	if (irq)							\
+		local_irq_enable();					\
+	info.si_signo = signr;						\
+	info.si_errno = 0;						\
+	info.si_code = sicode;						\
+	info.si_addr = (void __user *)siaddr;				\
+	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)	\
+						== NOTIFY_STOP)		\
+		return;							\
+	do_trap(trapnr, signr, str, 0, regs, error_code, &info);	\
 }
 
-#define DO_VM86_ERROR(trapnr, signr, str, name) \
-void do_##name(struct pt_regs * regs, long error_code) \
-{ \
-	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
-						== NOTIFY_STOP) \
-		return; \
-	do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
+#define DO_VM86_ERROR(trapnr, signr, str, name)				\
+void do_##name(struct pt_regs *regs, long error_code)			\
+{									\
+	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)	\
+						== NOTIFY_STOP)		\
+		return;							\
+	do_trap(trapnr, signr, str, 1, regs, error_code, NULL);		\
 }
 
-#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
-void do_##name(struct pt_regs * regs, long error_code) \
-{ \
-	siginfo_t info; \
-	info.si_signo = signr; \
-	info.si_errno = 0; \
-	info.si_code = sicode; \
-	info.si_addr = (void __user *)siaddr; \
-	trace_hardirqs_fixup(); \
-	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
-						== NOTIFY_STOP) \
-		return; \
-	do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
+#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr)	\
+void do_##name(struct pt_regs *regs, long error_code)			\
+{									\
+	siginfo_t info;							\
+	info.si_signo = signr;						\
+	info.si_errno = 0;						\
+	info.si_code = sicode;						\
+	info.si_addr = (void __user *)siaddr;				\
+	trace_hardirqs_fixup();						\
+	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)	\
+						== NOTIFY_STOP)		\
+		return;							\
+	do_trap(trapnr, signr, str, 1, regs, error_code, &info);	\
 }
 
-DO_VM86_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->ip)
+DO_VM86_ERROR_INFO(0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->ip)
 #ifndef CONFIG_KPROBES
-DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
+DO_VM86_ERROR(3, SIGTRAP, "int3", int3)
 #endif
-DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
-DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
-DO_ERROR_INFO( 6, SIGILL,  "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip, 0)
-DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
+DO_VM86_ERROR(4, SIGSEGV, "overflow", overflow)
+DO_VM86_ERROR(5, SIGSEGV, "bounds", bounds)
+DO_ERROR_INFO(6, SIGILL,  "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip, 0)
+DO_ERROR(9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
 DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
 DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
 DO_ERROR(12, SIGBUS,  "stack segment", stack_segment)
 DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0, 0)
 DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0, 1)
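
For readability, this is roughly what one generator line expands to -- a hand-expansion of DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS), not additional code in the patch:

void do_invalid_TSS(struct pt_regs *regs, long error_code)
{
	if (notify_die(DIE_TRAP, "invalid TSS", regs, error_code, 10, SIGSEGV)
						== NOTIFY_STOP)
		return;
	do_trap(10, SIGSEGV, "invalid TSS", 0, regs, error_code, NULL);
}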
 
-void __kprobes do_general_protection(struct pt_regs * regs,
-					      long error_code)
+void __kprobes do_general_protection(struct pt_regs *regs, long error_code)
 {
-	int cpu = get_cpu();
-	struct tss_struct *tss = &per_cpu(init_tss, cpu);
-	struct thread_struct *thread = &current->thread;
+	struct thread_struct *thread;
+	struct tss_struct *tss;
+	int cpu;
+
+	cpu = get_cpu();
+	tss = &per_cpu(init_tss, cpu);
+	thread = &current->thread;
 
 	/*
 	 * Perform the lazy TSS's I/O bitmap copy. If the TSS has an
@@ -616,19 +629,21 @@
 		 * If the previously set map was extending to higher ports
 		 * than the current one, pad extra space with 0xff (no access).
 		 */
-		if (thread->io_bitmap_max < tss->io_bitmap_max)
+		if (thread->io_bitmap_max < tss->io_bitmap_max) {
 			memset((char *) tss->io_bitmap +
 				thread->io_bitmap_max, 0xff,
 				tss->io_bitmap_max - thread->io_bitmap_max);
+		}
 		tss->io_bitmap_max = thread->io_bitmap_max;
 		tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
 		tss->io_bitmap_owner = thread;
 		put_cpu();
+
 		return;
 	}
 	put_cpu();
 
-	if (regs->flags & VM_MASK)
+	if (regs->flags & X86_VM_MASK)
 		goto gp_in_vm86;
 
 	if (!user_mode(regs))
@@ -636,6 +651,7 @@
 
 	current->thread.error_code = error_code;
 	current->thread.trap_no = 13;
+
 	if (show_unhandled_signals && unhandled_signal(current, SIGSEGV) &&
 	    printk_ratelimit()) {
 		printk(KERN_INFO
@@ -666,21 +682,24 @@
 }
 
 static __kprobes void
-mem_parity_error(unsigned char reason, struct pt_regs * regs)
+mem_parity_error(unsigned char reason, struct pt_regs *regs)
 {
-	printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x on "
-		"CPU %d.\n", reason, smp_processor_id());
-	printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n");
+	printk(KERN_EMERG
+		"Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
+			reason, smp_processor_id());
+
+	printk(KERN_EMERG
+		"You have some hardware problem, likely on the PCI bus.\n");
 
 #if defined(CONFIG_EDAC)
-	if(edac_handler_set()) {
+	if (edac_handler_set()) {
 		edac_atomic_assert_error();
 		return;
 	}
 #endif
 
 	if (panic_on_unrecovered_nmi)
-                panic("NMI: Not continuing");
+		panic("NMI: Not continuing");
 
 	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
 
@@ -689,7 +708,7 @@
 }
 
 static __kprobes void
-io_check_error(unsigned char reason, struct pt_regs * regs)
+io_check_error(unsigned char reason, struct pt_regs *regs)
 {
 	unsigned long i;
 
@@ -699,28 +718,37 @@
 	/* Re-enable the IOCK line, wait for a few seconds */
 	reason = (reason & 0xf) | 8;
 	outb(reason, 0x61);
+
 	i = 2000;
-	while (--i) udelay(1000);
+	while (--i)
+		udelay(1000);
+
 	reason &= ~8;
 	outb(reason, 0x61);
 }
 
 static __kprobes void
-unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
+unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
 {
+	if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
+		return;
 #ifdef CONFIG_MCA
-	/* Might actually be able to figure out what the guilty party
-	* is. */
-	if( MCA_bus ) {
+	/*
+	 * Might actually be able to figure out what the guilty party
+	 * is:
+	 */
+	if (MCA_bus) {
 		mca_handle_nmi();
 		return;
 	}
 #endif
-	printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x on "
-		"CPU %d.\n", reason, smp_processor_id());
+	printk(KERN_EMERG
+		"Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
+			reason, smp_processor_id());
+
 	printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");
 	if (panic_on_unrecovered_nmi)
-                panic("NMI: Not continuing");
+		panic("NMI: Not continuing");
 
 	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
 }
@@ -729,14 +757,13 @@
 
 void __kprobes die_nmi(struct pt_regs *regs, const char *msg)
 {
-	if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 2, SIGINT) ==
-	    NOTIFY_STOP)
+	if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 2, SIGINT) == NOTIFY_STOP)
 		return;
 
 	spin_lock(&nmi_print_lock);
 	/*
 	* We are in trouble anyway, lets at least try
-	* to get a message out.
+	* to get a message out:
 	*/
 	bust_spinlocks(1);
 	printk(KERN_EMERG "%s", msg);
@@ -747,9 +774,10 @@
 	spin_unlock(&nmi_print_lock);
 	bust_spinlocks(0);
 
-	/* If we are in kernel we are probably nested up pretty bad
-	 * and might aswell get out now while we still can.
-	*/
+	/*
+	 * If we are in kernel we are probably nested up pretty bad
+	 * and might as well get out now while we still can:
+	 */
 	if (!user_mode_vm(regs)) {
 		current->thread.trap_no = 2;
 		crash_kexec(regs);
@@ -758,14 +786,14 @@
 	do_exit(SIGSEGV);
 }
 
-static __kprobes void default_do_nmi(struct pt_regs * regs)
+static __kprobes void default_do_nmi(struct pt_regs *regs)
 {
 	unsigned char reason = 0;
 
-	/* Only the BSP gets external NMIs from the system.  */
+	/* Only the BSP gets external NMIs from the system: */
 	if (!smp_processor_id())
 		reason = get_nmi_reason();
- 
+
 	if (!(reason & 0xc0)) {
 		if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
 							== NOTIFY_STOP)
@@ -778,8 +806,10 @@
 		if (nmi_watchdog_tick(regs, reason))
 			return;
 		if (!do_nmi_callback(regs, smp_processor_id()))
-#endif
 			unknown_nmi_error(reason, regs);
+#else
+		unknown_nmi_error(reason, regs);
+#endif
 
 		return;
 	}
@@ -791,14 +821,14 @@
 		io_check_error(reason, regs);
 	/*
 	 * Reassert NMI in case it became active meanwhile
-	 * as it's edge-triggered.
+	 * as it's edge-triggered:
 	 */
 	reassert_nmi();
 }
 
 static int ignore_nmis;
 
-__kprobes void do_nmi(struct pt_regs * regs, long error_code)
+__kprobes void do_nmi(struct pt_regs *regs, long error_code)
 {
 	int cpu;
 
@@ -834,9 +864,12 @@
 	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
 			== NOTIFY_STOP)
 		return;
-	/* This is an interrupt gate, because kprobes wants interrupts
-	disabled.  Normal trap handlers don't. */
+	/*
+	 * This is an interrupt gate, because kprobes wants interrupts
+	 * disabled. Normal trap handlers don't.
+	 */
 	restore_interrupts(regs);
+
 	do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL);
 }
 #endif
@@ -851,7 +884,7 @@
  * from user space. Such code must not hold kernel locks (since it
  * can equally take a page fault), therefore it is safe to call
  * force_sig_info even though that claims and releases locks.
- * 
+ *
  * Code in ./signal.c ensures that the debug control register
  * is restored before we deliver any signal, and therefore that
  * user code runs with the correct debug control register even though
@@ -863,10 +896,10 @@
  * find every occurrence of the TF bit that could be saved away even
  * by user code)
  */
-void __kprobes do_debug(struct pt_regs * regs, long error_code)
+void __kprobes do_debug(struct pt_regs *regs, long error_code)
 {
-	unsigned int condition;
 	struct task_struct *tsk = current;
+	unsigned int condition;
 
 	trace_hardirqs_fixup();
 
@@ -891,7 +924,7 @@
 			goto clear_dr7;
 	}
 
-	if (regs->flags & VM_MASK)
+	if (regs->flags & X86_VM_MASK)
 		goto debug_vm86;
 
 	/* Save debug status register where ptrace can see it */
@@ -914,7 +947,8 @@
 	/* Ok, finally something we can handle */
 	send_sigtrap(tsk, regs, error_code);
 
-	/* Disable additional traps. They'll be re-enabled when
+	/*
+	 * Disable additional traps. They'll be re-enabled when
 	 * the signal is delivered.
 	 */
 clear_dr7:
@@ -927,7 +961,7 @@
 
 clear_TF_reenable:
 	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
-	regs->flags &= ~TF_MASK;
+	regs->flags &= ~X86_EFLAGS_TF;
 	return;
 }
 
@@ -938,9 +972,10 @@
  */
 void math_error(void __user *ip)
 {
-	struct task_struct * task;
+	struct task_struct *task;
+	unsigned short cwd;
+	unsigned short swd;
 	siginfo_t info;
-	unsigned short cwd, swd;
 
 	/*
 	 * Save the info for the exception handler and clear the error.
@@ -966,36 +1001,36 @@
 	cwd = get_fpu_cwd(task);
 	swd = get_fpu_swd(task);
 	switch (swd & ~cwd & 0x3f) {
-		case 0x000: /* No unmasked exception */
-			return;
-		default:    /* Multiple exceptions */
-			break;
-		case 0x001: /* Invalid Op */
-			/*
-			 * swd & 0x240 == 0x040: Stack Underflow
-			 * swd & 0x240 == 0x240: Stack Overflow
-			 * User must clear the SF bit (0x40) if set
-			 */
-			info.si_code = FPE_FLTINV;
-			break;
-		case 0x002: /* Denormalize */
-		case 0x010: /* Underflow */
-			info.si_code = FPE_FLTUND;
-			break;
-		case 0x004: /* Zero Divide */
-			info.si_code = FPE_FLTDIV;
-			break;
-		case 0x008: /* Overflow */
-			info.si_code = FPE_FLTOVF;
-			break;
-		case 0x020: /* Precision */
-			info.si_code = FPE_FLTRES;
-			break;
+	case 0x000: /* No unmasked exception */
+		return;
+	default:    /* Multiple exceptions */
+		break;
+	case 0x001: /* Invalid Op */
+		/*
+		 * swd & 0x240 == 0x040: Stack Underflow
+		 * swd & 0x240 == 0x240: Stack Overflow
+		 * User must clear the SF bit (0x40) if set
+		 */
+		info.si_code = FPE_FLTINV;
+		break;
+	case 0x002: /* Denormalize */
+	case 0x010: /* Underflow */
+		info.si_code = FPE_FLTUND;
+		break;
+	case 0x004: /* Zero Divide */
+		info.si_code = FPE_FLTDIV;
+		break;
+	case 0x008: /* Overflow */
+		info.si_code = FPE_FLTOVF;
+		break;
+	case 0x020: /* Precision */
+		info.si_code = FPE_FLTRES;
+		break;
 	}
 	force_sig_info(SIGFPE, &info, task);
 }
 
-void do_coprocessor_error(struct pt_regs * regs, long error_code)
+void do_coprocessor_error(struct pt_regs *regs, long error_code)
 {
 	ignore_fpu_irq = 1;
 	math_error((void __user *)regs->ip);
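
To make the masking in math_error() concrete (values invented for illustration): with cwd = 0x037e only the invalid-op exception is unmasked, so for swd = 0x0241 the expression swd & ~cwd & 0x3f yields 0x001 and the handler reports FPE_FLTINV; here swd & 0x240 == 0x240, i.e. the stack-overflow flavour of invalid-op. As a sketch:

/* a status-word bit only matters if its mask bit in cwd is clear */
static int demo_unmasked_fpu_exceptions(unsigned short cwd, unsigned short swd)
{
	return swd & ~cwd & 0x3f;	/* 0x037e, 0x0241 -> 0x001 */
}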
@@ -1003,9 +1038,9 @@
 
 static void simd_math_error(void __user *ip)
 {
-	struct task_struct * task;
-	siginfo_t info;
+	struct task_struct *task;
 	unsigned short mxcsr;
+	siginfo_t info;
 
 	/*
 	 * Save the info for the exception handler and clear the error.
@@ -1026,82 +1061,80 @@
 	 */
 	mxcsr = get_fpu_mxcsr(task);
 	switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
-		case 0x000:
-		default:
-			break;
-		case 0x001: /* Invalid Op */
-			info.si_code = FPE_FLTINV;
-			break;
-		case 0x002: /* Denormalize */
-		case 0x010: /* Underflow */
-			info.si_code = FPE_FLTUND;
-			break;
-		case 0x004: /* Zero Divide */
-			info.si_code = FPE_FLTDIV;
-			break;
-		case 0x008: /* Overflow */
-			info.si_code = FPE_FLTOVF;
-			break;
-		case 0x020: /* Precision */
-			info.si_code = FPE_FLTRES;
-			break;
+	case 0x000:
+	default:
+		break;
+	case 0x001: /* Invalid Op */
+		info.si_code = FPE_FLTINV;
+		break;
+	case 0x002: /* Denormalize */
+	case 0x010: /* Underflow */
+		info.si_code = FPE_FLTUND;
+		break;
+	case 0x004: /* Zero Divide */
+		info.si_code = FPE_FLTDIV;
+		break;
+	case 0x008: /* Overflow */
+		info.si_code = FPE_FLTOVF;
+		break;
+	case 0x020: /* Precision */
+		info.si_code = FPE_FLTRES;
+		break;
 	}
 	force_sig_info(SIGFPE, &info, task);
 }
 
-void do_simd_coprocessor_error(struct pt_regs * regs,
-					  long error_code)
+void do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
 {
 	if (cpu_has_xmm) {
 		/* Handle SIMD FPU exceptions on PIII+ processors. */
 		ignore_fpu_irq = 1;
 		simd_math_error((void __user *)regs->ip);
-	} else {
-		/*
-		 * Handle strange cache flush from user space exception
-		 * in all other cases.  This is undocumented behaviour.
-		 */
-		if (regs->flags & VM_MASK) {
-			handle_vm86_fault((struct kernel_vm86_regs *)regs,
-					  error_code);
-			return;
-		}
-		current->thread.trap_no = 19;
-		current->thread.error_code = error_code;
-		die_if_kernel("cache flush denied", regs, error_code);
-		force_sig(SIGSEGV, current);
+		return;
 	}
+	/*
+	 * Handle strange cache flush from user space exception
+	 * in all other cases.  This is undocumented behaviour.
+	 */
+	if (regs->flags & X86_VM_MASK) {
+		handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
+		return;
+	}
+	current->thread.trap_no = 19;
+	current->thread.error_code = error_code;
+	die_if_kernel("cache flush denied", regs, error_code);
+	force_sig(SIGSEGV, current);
 }
 
-void do_spurious_interrupt_bug(struct pt_regs * regs,
-					  long error_code)
+void do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
 {
 #if 0
 	/* No need to warn about this any longer. */
-	printk("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
+	printk(KERN_INFO "Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
 #endif
 }
 
-unsigned long patch_espfix_desc(unsigned long uesp,
-					  unsigned long kesp)
+unsigned long patch_espfix_desc(unsigned long uesp, unsigned long kesp)
 {
 	struct desc_struct *gdt = __get_cpu_var(gdt_page).gdt;
 	unsigned long base = (kesp - uesp) & -THREAD_SIZE;
 	unsigned long new_kesp = kesp - base;
 	unsigned long lim_pages = (new_kesp | (THREAD_SIZE - 1)) >> PAGE_SHIFT;
 	__u64 desc = *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS];
+
 	/* Set up base for espfix segment */
- 	desc &= 0x00f0ff0000000000ULL;
- 	desc |=	((((__u64)base) << 16) & 0x000000ffffff0000ULL) |
+	desc &= 0x00f0ff0000000000ULL;
+	desc |=	((((__u64)base) << 16) & 0x000000ffffff0000ULL) |
 		((((__u64)base) << 32) & 0xff00000000000000ULL) |
 		((((__u64)lim_pages) << 32) & 0x000f000000000000ULL) |
 		(lim_pages & 0xffff);
 	*(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS] = desc;
+
 	return new_kesp;
 }
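
The descriptor arithmetic in patch_espfix_desc() scatters a 32-bit base and a page-granular limit across the split fields of an x86 segment descriptor. A stand-alone sketch of the same packing, assuming only the mask layout shown above (the input values are hypothetical):

	#include <stdio.h>

	/* Mirror of the mask arithmetic in patch_espfix_desc(): keep the
	 * type/flag bits, then drop base and limit into their split fields. */
	static unsigned long long pack_espfix_desc(unsigned long long desc,
						   unsigned long base,
						   unsigned long lim_pages)
	{
		desc &= 0x00f0ff0000000000ULL;
		desc |= ((((unsigned long long)base) << 16) & 0x000000ffffff0000ULL) |
			((((unsigned long long)base) << 32) & 0xff00000000000000ULL) |
			((((unsigned long long)lim_pages) << 32) & 0x000f000000000000ULL) |
			(lim_pages & 0xffff);
		return desc;
	}

	int main(void)
	{
		/* Hypothetical base/limit, just to show where the bits land. */
		printf("%#llx\n", pack_espfix_desc(0x00c0920000000000ULL,
						   0x12345678, 0x7));
		return 0;
	}
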
 
 /*
- *  'math_state_restore()' saves the current math information in the
+ * 'math_state_restore()' saves the current math information in the
  * old math state array, and gets the new ones from the current task
  *
  * Careful.. There are problems with IBM-designed IRQ13 behaviour.
@@ -1115,7 +1148,7 @@
 	struct thread_info *thread = current_thread_info();
 	struct task_struct *tsk = thread->task;
 
-	clts();		/* Allow maths ops (or we recurse) */
+	clts();				/* Allow maths ops (or we recurse) */
 	if (!tsk_used_math(tsk))
 		init_fpu(tsk);
 	restore_fpu(tsk);
@@ -1128,53 +1161,52 @@
 
 asmlinkage void math_emulate(long arg)
 {
-	printk(KERN_EMERG "math-emulation not enabled and no coprocessor found.\n");
-	printk(KERN_EMERG "killing %s.\n",current->comm);
-	force_sig(SIGFPE,current);
+	printk(KERN_EMERG
+		"math-emulation not enabled and no coprocessor found.\n");
+	printk(KERN_EMERG "killing %s.\n", current->comm);
+	force_sig(SIGFPE, current);
 	schedule();
 }
 
 #endif /* CONFIG_MATH_EMULATION */
 
-
 void __init trap_init(void)
 {
 	int i;
 
 #ifdef CONFIG_EISA
 	void __iomem *p = early_ioremap(0x0FFFD9, 4);
-	if (readl(p) == 'E'+('I'<<8)+('S'<<16)+('A'<<24)) {
+
+	if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
 		EISA_bus = 1;
-	}
 	early_iounmap(p, 4);
 #endif
 
 #ifdef CONFIG_X86_LOCAL_APIC
 	init_apic_mappings();
 #endif
-
-	set_trap_gate(0,&divide_error);
-	set_intr_gate(1,&debug);
-	set_intr_gate(2,&nmi);
+	set_trap_gate(0,  &divide_error);
+	set_intr_gate(1,  &debug);
+	set_intr_gate(2,  &nmi);
 	set_system_intr_gate(3, &int3); /* int3/4 can be called from all */
-	set_system_gate(4,&overflow);
-	set_trap_gate(5,&bounds);
-	set_trap_gate(6,&invalid_op);
-	set_trap_gate(7,&device_not_available);
-	set_task_gate(8,GDT_ENTRY_DOUBLEFAULT_TSS);
-	set_trap_gate(9,&coprocessor_segment_overrun);
-	set_trap_gate(10,&invalid_TSS);
-	set_trap_gate(11,&segment_not_present);
-	set_trap_gate(12,&stack_segment);
-	set_trap_gate(13,&general_protection);
-	set_intr_gate(14,&page_fault);
-	set_trap_gate(15,&spurious_interrupt_bug);
-	set_trap_gate(16,&coprocessor_error);
-	set_trap_gate(17,&alignment_check);
+	set_system_gate(4, &overflow);
+	set_trap_gate(5,  &bounds);
+	set_trap_gate(6,  &invalid_op);
+	set_trap_gate(7,  &device_not_available);
+	set_task_gate(8,  GDT_ENTRY_DOUBLEFAULT_TSS);
+	set_trap_gate(9,  &coprocessor_segment_overrun);
+	set_trap_gate(10, &invalid_TSS);
+	set_trap_gate(11, &segment_not_present);
+	set_trap_gate(12, &stack_segment);
+	set_trap_gate(13, &general_protection);
+	set_intr_gate(14, &page_fault);
+	set_trap_gate(15, &spurious_interrupt_bug);
+	set_trap_gate(16, &coprocessor_error);
+	set_trap_gate(17, &alignment_check);
 #ifdef CONFIG_X86_MCE
-	set_trap_gate(18,&machine_check);
+	set_trap_gate(18, &machine_check);
 #endif
-	set_trap_gate(19,&simd_coprocessor_error);
+	set_trap_gate(19, &simd_coprocessor_error);
 
 	/*
 	 * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
@@ -1187,21 +1219,22 @@
 		printk("done.\n");
 	}
 	if (cpu_has_xmm) {
-		printk(KERN_INFO "Enabling unmasked SIMD FPU exception "
-				"support... ");
+		printk(KERN_INFO
+			"Enabling unmasked SIMD FPU exception support... ");
 		set_in_cr4(X86_CR4_OSXMMEXCPT);
 		printk("done.\n");
 	}
 
-	set_system_gate(SYSCALL_VECTOR,&system_call);
+	set_system_gate(SYSCALL_VECTOR, &system_call);
 
-	/* Reserve all the builtin and the syscall vector. */
+	/* Reserve all the builtin and the syscall vector: */
 	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
 		set_bit(i, used_vectors);
+
 	set_bit(SYSCALL_VECTOR, used_vectors);
 
 	/*
-	 * Should be a barrier for any external CPU state.
+	 * Should be a barrier for any external CPU state:
 	 */
 	cpu_init();
 
@@ -1211,6 +1244,7 @@
 static int __init kstack_setup(char *s)
 {
 	kstack_depth_to_print = simple_strtoul(s, NULL, 0);
+
 	return 1;
 }
 __setup("kstack=", kstack_setup);
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c
index 0454666..79aa6fc 100644
--- a/arch/x86/kernel/traps_64.c
+++ b/arch/x86/kernel/traps_64.c
@@ -33,6 +33,8 @@
 #include <linux/kdebug.h>
 #include <linux/utsname.h>
 
+#include <mach_traps.h>
+
 #if defined(CONFIG_EDAC)
 #include <linux/edac.h>
 #endif
@@ -600,8 +602,13 @@
 
 void __kprobes die_nmi(char *str, struct pt_regs *regs, int do_panic)
 {
-	unsigned long flags = oops_begin();
+	unsigned long flags;
 
+	if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) ==
+	    NOTIFY_STOP)
+		return;
+
+	flags = oops_begin();
 	/*
 	 * We are in trouble anyway, lets at least try
 	 * to get a message out.
@@ -806,6 +813,8 @@
 static __kprobes void
 unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
 {
+	if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) ==
+	    NOTIFY_STOP)

+		return;
 	printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
 		reason);
 	printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");
diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c
index c2241e0..3d7e6e9 100644
--- a/arch/x86/kernel/tsc_32.c
+++ b/arch/x86/kernel/tsc_32.c
@@ -84,8 +84,8 @@
 
 static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
 {
-	unsigned long flags, prev_scale, *scale;
 	unsigned long long tsc_now, ns_now;
+	unsigned long flags, *scale;
 
 	local_irq_save(flags);
 	sched_clock_idle_sleep_event();
@@ -95,7 +95,6 @@
 	rdtscll(tsc_now);
 	ns_now = __cycles_2_ns(tsc_now);
 
-	prev_scale = *scale;
 	if (cpu_khz)
 		*scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz;
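
For reference, the scale written here is a fixed-point multiplier: ns = cycles * scale >> CYC2NS_SCALE_FACTOR. A worked user-space example, assuming CYC2NS_SCALE_FACTOR is 10 as in contemporary trees (the CPU frequency is hypothetical):

	#include <stdio.h>

	#define NSEC_PER_MSEC		1000000ULL
	#define CYC2NS_SCALE_FACTOR	10	/* assumed; check the tree's value */

	int main(void)
	{
		unsigned long cpu_khz = 2000000;	/* hypothetical 2 GHz CPU */
		unsigned long long scale, cycles = 1000;

		/* Same fixed-point setup as set_cyc2ns_scale() above. */
		scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR) / cpu_khz;

		/* 1000 cycles at 2 GHz: prints "500 ns". */
		printf("%llu ns\n", (cycles * scale) >> CYC2NS_SCALE_FACTOR);
		return 0;
	}
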
 
@@ -392,13 +391,15 @@
 	int cpu;
 
 	if (!cpu_has_tsc)
-		goto out_no_tsc;
+		return;
 
 	cpu_khz = calculate_cpu_khz();
 	tsc_khz = cpu_khz;
 
-	if (!cpu_khz)
-		goto out_no_tsc;
+	if (!cpu_khz) {
+		mark_tsc_unstable("could not calculate TSC khz");
+		return;
+	}
 
 	printk("Detected %lu.%03lu MHz processor.\n",
 				(unsigned long)cpu_khz / 1000,
@@ -431,9 +432,4 @@
 		tsc_enabled = 1;
 
 	clocksource_register(&clocksource_tsc);
-
-	return;
-
-out_no_tsc:
-	setup_clear_cpu_cap(X86_FEATURE_TSC);
 }
diff --git a/arch/x86/kernel/tsc_64.c b/arch/x86/kernel/tsc_64.c
index d3bebaa..ceeba01 100644
--- a/arch/x86/kernel/tsc_64.c
+++ b/arch/x86/kernel/tsc_64.c
@@ -44,8 +44,8 @@
 
 static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
 {
-	unsigned long flags, prev_scale, *scale;
 	unsigned long long tsc_now, ns_now;
+	unsigned long flags, *scale;
 
 	local_irq_save(flags);
 	sched_clock_idle_sleep_event();
@@ -55,7 +55,6 @@
 	rdtscll(tsc_now);
 	ns_now = __cycles_2_ns(tsc_now);
 
-	prev_scale = *scale;
 	if (cpu_khz)
 		*scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz;
 
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 738c210..38f566f 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -64,7 +64,7 @@
 
 
 #define KVM86	((struct kernel_vm86_struct *)regs)
-#define VMPI 	KVM86->vm86plus
+#define VMPI	KVM86->vm86plus
 
 
 /*
@@ -81,7 +81,7 @@
 #define VFLAGS	(*(unsigned short *)&(current->thread.v86flags))
 #define VEFLAGS	(current->thread.v86flags)
 
-#define set_flags(X,new,mask) \
+#define set_flags(X, new, mask) \
 ((X) = ((X) & ~(mask)) | ((new) & (mask)))
 
 #define SAFE_MASK	(0xDD5)
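
set_flags() merges only the bits selected by mask: X keeps its bits outside mask and takes new's bits inside it. A quick worked example (all values hypothetical):

	#include <stdio.h>

	#define set_flags(X, new, mask) \
	((X) = ((X) & ~(mask)) | ((new) & (mask)))

	int main(void)
	{
		unsigned long x = 0xF0;

		set_flags(x, 0x0F, 0x0C);	/* only bits 2-3 come from 0x0F */
		printf("%#lx\n", x);		/* prints 0xfc */
		return 0;
	}
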
@@ -93,8 +93,10 @@
 {
 	int ret = 0;
 
-	/* kernel_vm86_regs is missing gs, so copy everything up to
-	   (but not including) orig_eax, and then rest including orig_eax. */
+	/*
+	 * kernel_vm86_regs is missing gs, so copy everything up to
+	 * (but not including) orig_eax, and then the rest including orig_eax.
+	 */
 	ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.orig_ax));
 	ret += copy_to_user(&user->orig_eax, &regs->pt.orig_ax,
 			    sizeof(struct kernel_vm86_regs) -
@@ -120,7 +122,7 @@
 	return ret;
 }
 
-struct pt_regs * save_v86_state(struct kernel_vm86_regs * regs)
+struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
 {
 	struct tss_struct *tss;
 	struct pt_regs *ret;
@@ -137,9 +139,9 @@
 		printk("no vm86_info: BAD\n");
 		do_exit(SIGSEGV);
 	}
-	set_flags(regs->pt.flags, VEFLAGS, VIF_MASK | current->thread.v86mask);
-	tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs,regs);
-	tmp += put_user(current->thread.screen_bitmap,&current->thread.vm86_info->screen_bitmap);
+	set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | current->thread.v86mask);
+	tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs, regs);
+	tmp += put_user(current->thread.screen_bitmap, &current->thread.vm86_info->screen_bitmap);
 	if (tmp) {
 		printk("vm86: could not access userspace vm86_info\n");
 		do_exit(SIGSEGV);
@@ -237,20 +239,21 @@
 
 	tsk = current;
 	switch (regs.bx) {
-		case VM86_REQUEST_IRQ:
-		case VM86_FREE_IRQ:
-		case VM86_GET_IRQ_BITS:
-		case VM86_GET_AND_RESET_IRQ:
-			ret = do_vm86_irq_handling(regs.bx, (int)regs.cx);
-			goto out;
-		case VM86_PLUS_INSTALL_CHECK:
-			/* NOTE: on old vm86 stuff this will return the error
-			   from access_ok(), because the subfunction is
-			   interpreted as (invalid) address to vm86_struct.
-			   So the installation check works.
-			 */
-			ret = 0;
-			goto out;
+	case VM86_REQUEST_IRQ:
+	case VM86_FREE_IRQ:
+	case VM86_GET_IRQ_BITS:
+	case VM86_GET_AND_RESET_IRQ:
+		ret = do_vm86_irq_handling(regs.bx, (int)regs.cx);
+		goto out;
+	case VM86_PLUS_INSTALL_CHECK:
+		/*
+		 * NOTE: on old vm86 stuff this will return the error
+		 * from access_ok(), because the subfunction is
+		 * interpreted as an (invalid) address to vm86_struct.
+		 * So the installation check works.
+		 */
+		ret = 0;
+		goto out;
 	}
 
 	/* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
@@ -296,21 +299,21 @@
 	VEFLAGS = info->regs.pt.flags;
 	info->regs.pt.flags &= SAFE_MASK;
 	info->regs.pt.flags |= info->regs32->flags & ~SAFE_MASK;
-	info->regs.pt.flags |= VM_MASK;
+	info->regs.pt.flags |= X86_VM_MASK;
 
 	switch (info->cpu_type) {
-		case CPU_286:
-			tsk->thread.v86mask = 0;
-			break;
-		case CPU_386:
-			tsk->thread.v86mask = NT_MASK | IOPL_MASK;
-			break;
-		case CPU_486:
-			tsk->thread.v86mask = AC_MASK | NT_MASK | IOPL_MASK;
-			break;
-		default:
-			tsk->thread.v86mask = ID_MASK | AC_MASK | NT_MASK | IOPL_MASK;
-			break;
+	case CPU_286:
+		tsk->thread.v86mask = 0;
+		break;
+	case CPU_386:
+		tsk->thread.v86mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL;
+		break;
+	case CPU_486:
+		tsk->thread.v86mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
+		break;
+	default:
+		tsk->thread.v86mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
+		break;
 	}
 
 /*
@@ -346,9 +349,9 @@
 	/* we never return here */
 }
 
-static inline void return_to_32bit(struct kernel_vm86_regs * regs16, int retval)
+static inline void return_to_32bit(struct kernel_vm86_regs *regs16, int retval)
 {
-	struct pt_regs * regs32;
+	struct pt_regs *regs32;
 
 	regs32 = save_v86_state(regs16);
 	regs32->ax = retval;
@@ -358,29 +361,30 @@
 		: : "r" (regs32), "r" (current_thread_info()));
 }
 
-static inline void set_IF(struct kernel_vm86_regs * regs)
+static inline void set_IF(struct kernel_vm86_regs *regs)
 {
-	VEFLAGS |= VIF_MASK;
-	if (VEFLAGS & VIP_MASK)
+	VEFLAGS |= X86_EFLAGS_VIF;
+	if (VEFLAGS & X86_EFLAGS_VIP)
 		return_to_32bit(regs, VM86_STI);
 }
 
-static inline void clear_IF(struct kernel_vm86_regs * regs)
+static inline void clear_IF(struct kernel_vm86_regs *regs)
 {
-	VEFLAGS &= ~VIF_MASK;
+	VEFLAGS &= ~X86_EFLAGS_VIF;
 }
 
-static inline void clear_TF(struct kernel_vm86_regs * regs)
+static inline void clear_TF(struct kernel_vm86_regs *regs)
 {
-	regs->pt.flags &= ~TF_MASK;
+	regs->pt.flags &= ~X86_EFLAGS_TF;
 }
 
-static inline void clear_AC(struct kernel_vm86_regs * regs)
+static inline void clear_AC(struct kernel_vm86_regs *regs)
 {
-	regs->pt.flags &= ~AC_MASK;
+	regs->pt.flags &= ~X86_EFLAGS_AC;
 }
 
-/* It is correct to call set_IF(regs) from the set_vflags_*
+/*
+ * It is correct to call set_IF(regs) from the set_vflags_*
  * functions. However someone forgot to call clear_IF(regs)
  * in the opposite case.
  * After the command sequence CLI PUSHF STI POPF you should
@@ -391,41 +395,41 @@
  * [KD]
  */
 
-static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs * regs)
+static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs)
 {
 	set_flags(VEFLAGS, flags, current->thread.v86mask);
 	set_flags(regs->pt.flags, flags, SAFE_MASK);
-	if (flags & IF_MASK)
+	if (flags & X86_EFLAGS_IF)
 		set_IF(regs);
 	else
 		clear_IF(regs);
 }
 
-static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs * regs)
+static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs)
 {
 	set_flags(VFLAGS, flags, current->thread.v86mask);
 	set_flags(regs->pt.flags, flags, SAFE_MASK);
-	if (flags & IF_MASK)
+	if (flags & X86_EFLAGS_IF)
 		set_IF(regs);
 	else
 		clear_IF(regs);
 }
 
-static inline unsigned long get_vflags(struct kernel_vm86_regs * regs)
+static inline unsigned long get_vflags(struct kernel_vm86_regs *regs)
 {
 	unsigned long flags = regs->pt.flags & RETURN_MASK;
 
-	if (VEFLAGS & VIF_MASK)
-		flags |= IF_MASK;
-	flags |= IOPL_MASK;
+	if (VEFLAGS & X86_EFLAGS_VIF)
+		flags |= X86_EFLAGS_IF;
+	flags |= X86_EFLAGS_IOPL;
 	return flags | (VEFLAGS & current->thread.v86mask);
 }
 
-static inline int is_revectored(int nr, struct revectored_struct * bitmap)
+static inline int is_revectored(int nr, struct revectored_struct *bitmap)
 {
 	__asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0"
 		:"=r" (nr)
-		:"m" (*bitmap),"r" (nr));
+		:"m" (*bitmap), "r" (nr));
 	return nr;
 }
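
is_revectored() is a bit test written in asm: btl loads the selected bit into the carry flag and sbbl %0,%0 broadcasts it, yielding all-ones when the bit is set and 0 otherwise. A plain-C equivalent, for illustration only:

	#include <stdio.h>

	/* C rendering of the btl/sbbl pair: -1 when bit nr is set, else 0. */
	static int is_revectored_c(int nr, const unsigned long *bitmap)
	{
		unsigned long word = bitmap[nr / (8 * sizeof(long))];

		return (word >> (nr % (8 * sizeof(long)))) & 1 ? -1 : 0;
	}

	int main(void)
	{
		unsigned long bitmap[8] = { 1UL << 5 };	/* hypothetical: int 0x05 set */

		printf("%d %d\n", is_revectored_c(5, bitmap),
				  is_revectored_c(6, bitmap));
		return 0;
	}
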
 
@@ -437,7 +441,7 @@
 		ptr--; \
 		if (put_user(__val, base + ptr) < 0) \
 			goto err_label; \
-	} while(0)
+	} while (0)
 
 #define pushw(base, ptr, val, err_label) \
 	do { \
@@ -448,7 +452,7 @@
 		ptr--; \
 		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
 			goto err_label; \
-	} while(0)
+	} while (0)
 
 #define pushl(base, ptr, val, err_label) \
 	do { \
@@ -465,7 +469,7 @@
 		ptr--; \
 		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
 			goto err_label; \
-	} while(0)
+	} while (0)
 
 #define popb(base, ptr, err_label) \
 	({ \
@@ -512,7 +516,7 @@
  * in userspace is always better than an Oops anyway.) [KD]
  */
 static void do_int(struct kernel_vm86_regs *regs, int i,
-    unsigned char __user * ssp, unsigned short sp)
+    unsigned char __user *ssp, unsigned short sp)
 {
 	unsigned long __user *intr_ptr;
 	unsigned long segoffs;
@@ -521,7 +525,7 @@
 		goto cannot_handle;
 	if (is_revectored(i, &KVM86->int_revectored))
 		goto cannot_handle;
-	if (i==0x21 && is_revectored(AH(regs),&KVM86->int21_revectored))
+	if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
 		goto cannot_handle;
 	intr_ptr = (unsigned long __user *) (i << 2);
 	if (get_user(segoffs, intr_ptr))
@@ -543,30 +547,23 @@
 	return_to_32bit(regs, VM86_INTx + (i << 8));
 }
 
-int handle_vm86_trap(struct kernel_vm86_regs * regs, long error_code, int trapno)
+int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
 {
 	if (VMPI.is_vm86pus) {
-		if ( (trapno==3) || (trapno==1) )
+		if ((trapno == 3) || (trapno == 1))
 			return_to_32bit(regs, VM86_TRAP + (trapno << 8));
 		do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
 		return 0;
 	}
-	if (trapno !=1)
+	if (trapno != 1)
 		return 1; /* we let this be handled by the calling routine */
-	if (current->ptrace & PT_PTRACED) {
-		unsigned long flags;
-		spin_lock_irqsave(&current->sighand->siglock, flags);
-		sigdelset(&current->blocked, SIGTRAP);
-		recalc_sigpending();
-		spin_unlock_irqrestore(&current->sighand->siglock, flags);
-	}
-	send_sig(SIGTRAP, current, 1);
 	current->thread.trap_no = trapno;
 	current->thread.error_code = error_code;
+	force_sig(SIGTRAP, current);
 	return 0;
 }
 
-void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
+void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
 {
 	unsigned char opcode;
 	unsigned char __user *csp;
@@ -576,11 +573,11 @@
 
 #define CHECK_IF_IN_TRAP \
 	if (VMPI.vm86dbg_active && VMPI.vm86dbg_TFpendig) \
-		newflags |= TF_MASK
+		newflags |= X86_EFLAGS_TF
 #define VM86_FAULT_RETURN do { \
-	if (VMPI.force_return_for_pic  && (VEFLAGS & (IF_MASK | VIF_MASK))) \
+	if (VMPI.force_return_for_pic && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) \
 		return_to_32bit(regs, VM86_PICRETURN); \
-	if (orig_flags & TF_MASK) \
+	if (orig_flags & X86_EFLAGS_TF) \
 		handle_vm86_trap(regs, 0, 1); \
 	return; } while (0)
 
@@ -595,17 +592,17 @@
 	pref_done = 0;
 	do {
 		switch (opcode = popb(csp, ip, simulate_sigsegv)) {
-			case 0x66:      /* 32-bit data */     data32=1; break;
-			case 0x67:      /* 32-bit address */  break;
-			case 0x2e:      /* CS */              break;
-			case 0x3e:      /* DS */              break;
-			case 0x26:      /* ES */              break;
-			case 0x36:      /* SS */              break;
-			case 0x65:      /* GS */              break;
-			case 0x64:      /* FS */              break;
-			case 0xf2:      /* repnz */       break;
-			case 0xf3:      /* rep */             break;
-			default: pref_done = 1;
+		case 0x66:      /* 32-bit data */     data32 = 1; break;
+		case 0x67:      /* 32-bit address */  break;
+		case 0x2e:      /* CS */              break;
+		case 0x3e:      /* DS */              break;
+		case 0x26:      /* ES */              break;
+		case 0x36:      /* SS */              break;
+		case 0x65:      /* GS */              break;
+		case 0x64:      /* FS */              break;
+		case 0xf2:      /* repnz */       break;
+		case 0xf3:      /* rep */             break;
+		default: pref_done = 1;
 		}
 	} while (!pref_done);
 
@@ -628,7 +625,7 @@
 		{
 		unsigned long newflags;
 		if (data32) {
-			newflags=popl(ssp, sp, simulate_sigsegv);
+			newflags = popl(ssp, sp, simulate_sigsegv);
 			SP(regs) += 4;
 		} else {
 			newflags = popw(ssp, sp, simulate_sigsegv);
@@ -636,20 +633,20 @@
 		}
 		IP(regs) = ip;
 		CHECK_IF_IN_TRAP;
-		if (data32) {
+		if (data32)
 			set_vflags_long(newflags, regs);
-		} else {
+		else
 			set_vflags_short(newflags, regs);
-		}
+
 		VM86_FAULT_RETURN;
 		}
 
 	/* int xx */
 	case 0xcd: {
-		int intno=popb(csp, ip, simulate_sigsegv);
+		int intno = popb(csp, ip, simulate_sigsegv);
 		IP(regs) = ip;
 		if (VMPI.vm86dbg_active) {
-			if ( (1 << (intno &7)) & VMPI.vm86dbg_intxxtab[intno >> 3] )
+			if ((1 << (intno & 7)) & VMPI.vm86dbg_intxxtab[intno >> 3])
 				return_to_32bit(regs, VM86_INTx + (intno << 8));
 		}
 		do_int(regs, intno, ssp, sp);
@@ -663,9 +660,9 @@
 		unsigned long newcs;
 		unsigned long newflags;
 		if (data32) {
-			newip=popl(ssp, sp, simulate_sigsegv);
-			newcs=popl(ssp, sp, simulate_sigsegv);
-			newflags=popl(ssp, sp, simulate_sigsegv);
+			newip = popl(ssp, sp, simulate_sigsegv);
+			newcs = popl(ssp, sp, simulate_sigsegv);
+			newflags = popl(ssp, sp, simulate_sigsegv);
 			SP(regs) += 12;
 		} else {
 			newip = popw(ssp, sp, simulate_sigsegv);
@@ -734,18 +731,18 @@
 static DEFINE_SPINLOCK(irqbits_lock);
 static int irqbits;
 
-#define ALLOWED_SIGS ( 1 /* 0 = don't send a signal */ \
+#define ALLOWED_SIGS (1 /* 0 = don't send a signal */ \
 	| (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO)  | (1 << SIGURG) \
-	| (1 << SIGUNUSED) )
-	
+	| (1 << SIGUNUSED))
+
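
ALLOWED_SIGS is a plain permission bitmask: a signal number is acceptable iff its bit is set, with bit 0 standing for "no signal". A hedged user-space sketch of that test, leaving out SIGUNUSED since modern libc headers may not define it:

	#include <signal.h>
	#include <stdio.h>

	/* Illustrative subset of the mask above. */
	#define ALLOWED (1 /* bit 0: "don't send a signal" */ \
		| (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO) | (1 << SIGURG))

	int main(void)
	{
		int sig = SIGUSR1;

		printf("signal %d: %s\n", sig,
		       ((1 << sig) & ALLOWED) ? "allowed" : "rejected");
		return 0;
	}
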
 static irqreturn_t irq_handler(int intno, void *dev_id)
 {
 	int irq_bit;
 	unsigned long flags;
 
-	spin_lock_irqsave(&irqbits_lock, flags);	
+	spin_lock_irqsave(&irqbits_lock, flags);
 	irq_bit = 1 << intno;
-	if ((irqbits & irq_bit) || ! vm86_irqs[intno].tsk)
+	if ((irqbits & irq_bit) || !vm86_irqs[intno].tsk)
 		goto out;
 	irqbits |= irq_bit;
 	if (vm86_irqs[intno].sig)
@@ -759,7 +756,7 @@
 	return IRQ_HANDLED;
 
 out:
-	spin_unlock_irqrestore(&irqbits_lock, flags);	
+	spin_unlock_irqrestore(&irqbits_lock, flags);
 	return IRQ_NONE;
 }
 
@@ -770,9 +767,9 @@
 	free_irq(irqnumber, NULL);
 	vm86_irqs[irqnumber].tsk = NULL;
 
-	spin_lock_irqsave(&irqbits_lock, flags);	
+	spin_lock_irqsave(&irqbits_lock, flags);
 	irqbits &= ~(1 << irqnumber);
-	spin_unlock_irqrestore(&irqbits_lock, flags);	
+	spin_unlock_irqrestore(&irqbits_lock, flags);
 }
 
 void release_vm86_irqs(struct task_struct *task)
@@ -788,10 +785,10 @@
 	int bit;
 	unsigned long flags;
 	int ret = 0;
-	
+
 	if (invalid_vm86_irq(irqnumber)) return 0;
 	if (vm86_irqs[irqnumber].tsk != current) return 0;
-	spin_lock_irqsave(&irqbits_lock, flags);	
+	spin_lock_irqsave(&irqbits_lock, flags);
 	bit = irqbits & (1 << irqnumber);
 	irqbits &= ~bit;
 	if (bit) {
@@ -799,7 +796,7 @@
 		ret = 1;
 	}
 
-	spin_unlock_irqrestore(&irqbits_lock, flags);	
+	spin_unlock_irqrestore(&irqbits_lock, flags);
 	return ret;
 }
 
diff --git a/arch/x86/kernel/vmlinux_32.lds.S b/arch/x86/kernel/vmlinux_32.lds.S
index 2ffa965..ce5ed08 100644
--- a/arch/x86/kernel/vmlinux_32.lds.S
+++ b/arch/x86/kernel/vmlinux_32.lds.S
@@ -149,6 +149,11 @@
 	*(.con_initcall.init)
   	__con_initcall_end = .;
   }
+  .x86cpuvendor.init : AT(ADDR(.x86cpuvendor.init) - LOAD_OFFSET) {
+	__x86cpuvendor_start = .;
+	*(.x86cpuvendor.init)
+	__x86cpuvendor_end = .;
+  }
   SECURITY_INIT
   . = ALIGN(4);
   .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
diff --git a/arch/x86/kernel/vmlinux_64.lds.S b/arch/x86/kernel/vmlinux_64.lds.S
index fab1322..b7ab3c3 100644
--- a/arch/x86/kernel/vmlinux_64.lds.S
+++ b/arch/x86/kernel/vmlinux_64.lds.S
@@ -177,6 +177,11 @@
 	*(.con_initcall.init)
   }
   __con_initcall_end = .;
+  __x86cpuvendor_start = .;
+  .x86cpuvendor.init : AT(ADDR(.x86cpuvendor.init) - LOAD_OFFSET) {
+	*(.x86cpuvendor.init)
+  }
+  __x86cpuvendor_end = .;
   SECURITY_INIT
 
   . = ALIGN(8);
@@ -247,3 +252,9 @@
 
   DWARF_DEBUG
 }
+
+/*
+ * Build-time check on the image size:
+ */
+ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
+	"kernel image bigger than KERNEL_IMAGE_SIZE")
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
index d971210..caf2a26 100644
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -8,6 +8,8 @@
  *
  * Ravikiran Thirumalai <kiran@scalemp.com>,
  * Shai Fultheim <shai@scalemp.com>
+ * Paravirt ops integration: Glauber de Oliveira Costa <gcosta@redhat.com>,
+ *			     Ravikiran Thirumalai <kiran@scalemp.com>
  */
 
 #include <linux/init.h>
@@ -15,38 +17,137 @@
 #include <linux/pci_regs.h>
 #include <asm/pci-direct.h>
 #include <asm/io.h>
+#include <asm/paravirt.h>
 
-static int __init vsmp_init(void)
+#if defined CONFIG_PCI && defined CONFIG_PARAVIRT
+/*
+ * Interrupt control on vSMPowered systems:
+ * ~AC is a shadow of IF.  If IF is 'on' AC should be 'off'
+ * and vice versa.
+ */
+
+static unsigned long vsmp_save_fl(void)
+{
+	unsigned long flags = native_save_fl();
+
+	if (!(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC))
+		flags &= ~X86_EFLAGS_IF;
+	return flags;
+}
+
+static void vsmp_restore_fl(unsigned long flags)
+{
+	if (flags & X86_EFLAGS_IF)
+		flags &= ~X86_EFLAGS_AC;
+	else
+		flags |= X86_EFLAGS_AC;
+	native_restore_fl(flags);
+}
+
+static void vsmp_irq_disable(void)
+{
+	unsigned long flags = native_save_fl();
+
+	native_restore_fl((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
+}
+
+static void vsmp_irq_enable(void)
+{
+	unsigned long flags = native_save_fl();
+
+	native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
+}
+
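
The IF/AC shadowing described above is the whole trick behind the vSMP interrupt fastpath. A minimal user-space sketch of the save_fl transformation (the EFLAGS constants match the x86 layout; everything else is hypothetical):

	#include <stdio.h>

	#define X86_EFLAGS_IF	0x00000200	/* interrupt enable flag */
	#define X86_EFLAGS_AC	0x00040000	/* alignment check flag */

	/* Mirror of vsmp_save_fl(): AC set means interrupts are logically
	 * off, so mask IF out of the reported flags. */
	static unsigned long sketch_save_fl(unsigned long flags)
	{
		if (!(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC))
			flags &= ~X86_EFLAGS_IF;
		return flags;
	}

	int main(void)
	{
		/* Hardware IF set, but AC set too: logically disabled. */
		printf("%#lx\n", sketch_save_fl(X86_EFLAGS_IF | X86_EFLAGS_AC));
		return 0;
	}
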
+static unsigned __init vsmp_patch(u8 type, u16 clobbers, void *ibuf,
+				  unsigned long addr, unsigned len)
+{
+	switch (type) {
+	case PARAVIRT_PATCH(pv_irq_ops.irq_enable):
+	case PARAVIRT_PATCH(pv_irq_ops.irq_disable):
+	case PARAVIRT_PATCH(pv_irq_ops.save_fl):
+	case PARAVIRT_PATCH(pv_irq_ops.restore_fl):
+		return paravirt_patch_default(type, clobbers, ibuf, addr, len);
+	default:
+		return native_patch(type, clobbers, ibuf, addr, len);
+	}
+}
+
+static void __init set_vsmp_pv_ops(void)
 {
 	void *address;
-	unsigned int cap, ctl;
-
-	if (!early_pci_allowed())
-		return 0;
-
-	/* Check if we are running on a ScaleMP vSMP box */
-	if ((read_pci_config_16(0, 0x1f, 0, PCI_VENDOR_ID) !=
-	     PCI_VENDOR_ID_SCALEMP) ||
-	    (read_pci_config_16(0, 0x1f, 0, PCI_DEVICE_ID) !=
-	     PCI_DEVICE_ID_SCALEMP_VSMP_CTL))
-		return 0;
+	unsigned int cap, ctl, cfg;
 
 	/* set vSMP magic bits to indicate vSMP capable kernel */
-	address = ioremap(read_pci_config(0, 0x1f, 0, PCI_BASE_ADDRESS_0), 8);
+	cfg = read_pci_config(0, 0x1f, 0, PCI_BASE_ADDRESS_0);
+	address = early_ioremap(cfg, 8);
 	cap = readl(address);
 	ctl = readl(address + 4);
 	printk(KERN_INFO "vSMP CTL: capabilities:0x%08x  control:0x%08x\n",
 	       cap, ctl);
 	if (cap & ctl & (1 << 4)) {
-		/* Turn on vSMP IRQ fastpath handling (see system.h) */
+		/* Set up irq ops and turn on vSMP IRQ fastpath handling */
+		pv_irq_ops.irq_disable = vsmp_irq_disable;
+		pv_irq_ops.irq_enable = vsmp_irq_enable;
+		pv_irq_ops.save_fl = vsmp_save_fl;
+		pv_irq_ops.restore_fl = vsmp_restore_fl;
+		pv_init_ops.patch = vsmp_patch;
+
 		ctl &= ~(1 << 4);
 		writel(ctl, address + 4);
 		ctl = readl(address + 4);
 		printk(KERN_INFO "vSMP CTL: control set to:0x%08x\n", ctl);
 	}
 
-	iounmap(address);
-	return 0;
+	early_iounmap(address, 8);
+}
+#else
+static void __init set_vsmp_pv_ops(void)
+{
+}
+#endif
+
+#ifdef CONFIG_PCI
+static int is_vsmp = -1;
+
+static void __init detect_vsmp_box(void)
+{
+	is_vsmp = 0;
+
+	if (!early_pci_allowed())
+		return;
+
+	/* Check if we are running on a ScaleMP vSMPowered box */
+	if (read_pci_config(0, 0x1f, 0, PCI_VENDOR_ID) ==
+	     (PCI_VENDOR_ID_SCALEMP | (PCI_DEVICE_ID_SCALEMP_VSMP_CTL << 16)))
+		is_vsmp = 1;
 }
 
-core_initcall(vsmp_init);
+int is_vsmp_box(void)
+{
+	if (is_vsmp != -1)
+		return is_vsmp;
+
+	WARN_ON_ONCE(1);
+	return 0;
+}
+#else
+static void __init detect_vsmp_box(void)
+{
+}
+int is_vsmp_box(void)
+{
+	return 0;
+}
+#endif
+
+void __init vsmp_init(void)
+{
+	detect_vsmp_box();
+	if (!is_vsmp_box())
+		return;
+
+	set_vsmp_pv_ops();
+}
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
index a66e9c1..58882f9 100644
--- a/arch/x86/kernel/x8664_ksyms_64.c
+++ b/arch/x86/kernel/x8664_ksyms_64.c
@@ -4,7 +4,6 @@
 #include <linux/module.h>
 #include <linux/smp.h>
 
-#include <asm/semaphore.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -12,11 +11,6 @@
 
 EXPORT_SYMBOL(kernel_thread);
 
-EXPORT_SYMBOL(__down_failed);
-EXPORT_SYMBOL(__down_failed_interruptible);
-EXPORT_SYMBOL(__down_failed_trylock);
-EXPORT_SYMBOL(__up_wakeup);
-
 EXPORT_SYMBOL(__get_user_1);
 EXPORT_SYMBOL(__get_user_2);
 EXPORT_SYMBOL(__get_user_4);
@@ -35,15 +29,17 @@
 EXPORT_SYMBOL(copy_page);
 EXPORT_SYMBOL(clear_page);
 
-/* Export string functions. We normally rely on gcc builtin for most of these,
-   but gcc sometimes decides not to inline them. */    
+/*
+ * Export string functions. We normally rely on gcc builtin for most of these,
+ * but gcc sometimes decides not to inline them.
+ */
 #undef memcpy
 #undef memset
 #undef memmove
 
-extern void * memset(void *,int,__kernel_size_t);
-extern void * memcpy(void *,const void *,__kernel_size_t);
-extern void * __memcpy(void *,const void *,__kernel_size_t);
+extern void *memset(void *, int, __kernel_size_t);
+extern void *memcpy(void *, const void *, __kernel_size_t);
+extern void *__memcpy(void *, const void *, __kernel_size_t);
 
 EXPORT_SYMBOL(memset);
 EXPORT_SYMBOL(memcpy);
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 3335b45..af65b2d 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -661,7 +661,7 @@
 	if (delta < LG_CLOCK_MIN_DELTA) {
 		if (printk_ratelimit())
 			printk(KERN_DEBUG "%s: small delta %lu ns\n",
-			       __FUNCTION__, delta);
+			       __func__, delta);
 		return -ETIME;
 	}
 
diff --git a/arch/x86/lib/memcpy_32.c b/arch/x86/lib/memcpy_32.c
index 37756b6..5415a9d 100644
--- a/arch/x86/lib/memcpy_32.c
+++ b/arch/x86/lib/memcpy_32.c
@@ -25,7 +25,7 @@
 	int d0, d1, d2;
 
 	if (dest < src) {
-		memcpy(dest,src,n);
+		memcpy(dest, src, n);
 	} else {
 		__asm__ __volatile__(
 			"std\n\t"
diff --git a/arch/x86/lib/memmove_64.c b/arch/x86/lib/memmove_64.c
index 80175e4..0a33909 100644
--- a/arch/x86/lib/memmove_64.c
+++ b/arch/x86/lib/memmove_64.c
@@ -6,10 +6,10 @@
 #include <linux/module.h>
 
 #undef memmove
-void *memmove(void * dest,const void *src,size_t count)
+void *memmove(void *dest, const void *src, size_t count)
 {
-	if (dest < src) { 
-		return memcpy(dest,src,count);
+	if (dest < src) {
+		return memcpy(dest, src, count);
 	} else {
 		char *p = dest + count;
 		const char *s = src + count;
@@ -17,5 +17,5 @@
 			*--p = *--s;
 	}
 	return dest;
-} 
+}
 EXPORT_SYMBOL(memmove);
diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
index cc9b4a4..c9f2d9b 100644
--- a/arch/x86/lib/mmx_32.c
+++ b/arch/x86/lib/mmx_32.c
@@ -1,32 +1,30 @@
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/sched.h>
-#include <linux/hardirq.h>
-#include <linux/module.h>
-
-#include <asm/asm.h>
-#include <asm/i387.h>
-
-
 /*
  *	MMX 3DNow! library helper functions
  *
  *	To do:
- *	We can use MMX just for prefetch in IRQ's. This may be a win. 
+ *	We can use MMX just for prefetch in IRQ's. This may be a win.
  *		(reported so on K6-III)
  *	We should use a better code neutral filler for the short jump
  *		leal ebx. [ebx] is apparently best for K6-2, but Cyrix ??
  *	We also want to clobber the filler register so we don't get any
- *		register forwarding stalls on the filler. 
+ *		register forwarding stalls on the filler.
  *
  *	Add *user handling. Checksums are not a win with MMX on any CPU
  *	tested so far for any MMX solution figured.
  *
- *	22/09/2000 - Arjan van de Ven 
- *		Improved for non-egineering-sample Athlons 
+ *	22/09/2000 - Arjan van de Ven
+ *		Improved for non-engineering-sample Athlons
  *
  */
- 
+#include <linux/hardirq.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+
+#include <asm/i387.h>
+#include <asm/asm.h>
+
 void *_mmx_memcpy(void *to, const void *from, size_t len)
 {
 	void *p;
@@ -51,12 +49,10 @@
 		"3: movw $0x1AEB, 1b\n"	/* jmp on 26 bytes */
 		"   jmp 2b\n"
 		".previous\n"
-		_ASM_EXTABLE(1b,3b)
-		: : "r" (from) );
-		
-	
-	for(; i>5; i--)
-	{
+			_ASM_EXTABLE(1b, 3b)
+			: : "r" (from));
+
+	for ( ; i > 5; i--) {
 		__asm__ __volatile__ (
 		"1:  prefetch 320(%0)\n"
 		"2:  movq (%0), %%mm0\n"
@@ -79,14 +75,14 @@
 		"3: movw $0x05EB, 1b\n"	/* jmp on 5 bytes */
 		"   jmp 2b\n"
 		".previous\n"
-		_ASM_EXTABLE(1b,3b)
-		: : "r" (from), "r" (to) : "memory");
-		from+=64;
-		to+=64;
+			_ASM_EXTABLE(1b, 3b)
+			: : "r" (from), "r" (to) : "memory");
+
+		from += 64;
+		to += 64;
 	}
 
-	for(; i>0; i--)
-	{
+	for ( ; i > 0; i--) {
 		__asm__ __volatile__ (
 		"  movq (%0), %%mm0\n"
 		"  movq 8(%0), %%mm1\n"
@@ -104,17 +100,20 @@
 		"  movq %%mm1, 40(%1)\n"
 		"  movq %%mm2, 48(%1)\n"
 		"  movq %%mm3, 56(%1)\n"
-		: : "r" (from), "r" (to) : "memory");
-		from+=64;
-		to+=64;
+			: : "r" (from), "r" (to) : "memory");
+
+		from += 64;
+		to += 64;
 	}
 	/*
-	 *	Now do the tail of the block
+	 * Now do the tail of the block:
 	 */
-	__memcpy(to, from, len&63);
+	__memcpy(to, from, len & 63);
 	kernel_fpu_end();
+
 	return p;
 }
+EXPORT_SYMBOL(_mmx_memcpy);
 
 #ifdef CONFIG_MK7
 
@@ -128,13 +127,12 @@
 	int i;
 
 	kernel_fpu_begin();
-	
+
 	__asm__ __volatile__ (
 		"  pxor %%mm0, %%mm0\n" : :
 	);
 
-	for(i=0;i<4096/64;i++)
-	{
+	for (i = 0; i < 4096/64; i++) {
 		__asm__ __volatile__ (
 		"  movntq %%mm0, (%0)\n"
 		"  movntq %%mm0, 8(%0)\n"
@@ -145,14 +143,15 @@
 		"  movntq %%mm0, 48(%0)\n"
 		"  movntq %%mm0, 56(%0)\n"
 		: : "r" (page) : "memory");
-		page+=64;
+		page += 64;
 	}
-	/* since movntq is weakly-ordered, a "sfence" is needed to become
-	 * ordered again.
+
+	/*
+	 * Since movntq is weakly-ordered, a "sfence" is needed to become
+	 * ordered again:
 	 */
-	__asm__ __volatile__ (
-		"  sfence \n" : :
-	);
+	__asm__ __volatile__("sfence\n"::);
+
 	kernel_fpu_end();
 }
 
@@ -162,10 +161,11 @@
 
 	kernel_fpu_begin();
 
-	/* maybe the prefetch stuff can go before the expensive fnsave...
+	/*
+	 * maybe the prefetch stuff can go before the expensive fnsave...
 	 * but that is for later. -AV
 	 */
-	__asm__ __volatile__ (
+	__asm__ __volatile__(
 		"1: prefetch (%0)\n"
 		"   prefetch 64(%0)\n"
 		"   prefetch 128(%0)\n"
@@ -176,11 +176,9 @@
 		"3: movw $0x1AEB, 1b\n"	/* jmp on 26 bytes */
 		"   jmp 2b\n"
 		".previous\n"
-		_ASM_EXTABLE(1b,3b)
-		: : "r" (from) );
+			_ASM_EXTABLE(1b, 3b) : : "r" (from));
 
-	for(i=0; i<(4096-320)/64; i++)
-	{
+	for (i = 0; i < (4096-320)/64; i++) {
 		__asm__ __volatile__ (
 		"1: prefetch 320(%0)\n"
 		"2: movq (%0), %%mm0\n"
@@ -203,13 +201,13 @@
 		"3: movw $0x05EB, 1b\n"	/* jmp on 5 bytes */
 		"   jmp 2b\n"
 		".previous\n"
-		_ASM_EXTABLE(1b,3b)
-		: : "r" (from), "r" (to) : "memory");
-		from+=64;
-		to+=64;
+		_ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
+
+		from += 64;
+		to += 64;
 	}
-	for(i=(4096-320)/64; i<4096/64; i++)
-	{
+
+	for (i = (4096-320)/64; i < 4096/64; i++) {
 		__asm__ __volatile__ (
 		"2: movq (%0), %%mm0\n"
 		"   movntq %%mm0, (%1)\n"
@@ -227,37 +225,34 @@
 		"   movntq %%mm6, 48(%1)\n"
 		"   movq 56(%0), %%mm7\n"
 		"   movntq %%mm7, 56(%1)\n"
-		: : "r" (from), "r" (to) : "memory");
-		from+=64;
-		to+=64;
+			: : "r" (from), "r" (to) : "memory");
+		from += 64;
+		to += 64;
 	}
-	/* since movntq is weakly-ordered, a "sfence" is needed to become
-	 * ordered again.
+	/*
+	 * Since movntq is weakly-ordered, a "sfence" is needed to become
+	 * ordered again:
 	 */
-	__asm__ __volatile__ (
-		"  sfence \n" : :
-	);
+	__asm__ __volatile__("sfence \n"::);
 	kernel_fpu_end();
 }
 
-#else
+#else /* CONFIG_MK7 */
 
 /*
  *	Generic MMX implementation without K7 specific streaming
  */
- 
 static void fast_clear_page(void *page)
 {
 	int i;
-	
+
 	kernel_fpu_begin();
-	
+
 	__asm__ __volatile__ (
 		"  pxor %%mm0, %%mm0\n" : :
 	);
 
-	for(i=0;i<4096/128;i++)
-	{
+	for (i = 0; i < 4096/128; i++) {
 		__asm__ __volatile__ (
 		"  movq %%mm0, (%0)\n"
 		"  movq %%mm0, 8(%0)\n"
@@ -275,8 +270,8 @@
 		"  movq %%mm0, 104(%0)\n"
 		"  movq %%mm0, 112(%0)\n"
 		"  movq %%mm0, 120(%0)\n"
-		: : "r" (page) : "memory");
-		page+=128;
+			: : "r" (page) : "memory");
+		page += 128;
 	}
 
 	kernel_fpu_end();
@@ -285,8 +280,7 @@
 static void fast_copy_page(void *to, void *from)
 {
 	int i;
-	
-	
+
 	kernel_fpu_begin();
 
 	__asm__ __volatile__ (
@@ -300,11 +294,9 @@
 		"3: movw $0x1AEB, 1b\n"	/* jmp on 26 bytes */
 		"   jmp 2b\n"
 		".previous\n"
-		_ASM_EXTABLE(1b,3b)
-		: : "r" (from) );
+			_ASM_EXTABLE(1b, 3b) : : "r" (from));
 
-	for(i=0; i<4096/64; i++)
-	{
+	for (i = 0; i < 4096/64; i++) {
 		__asm__ __volatile__ (
 		"1: prefetch 320(%0)\n"
 		"2: movq (%0), %%mm0\n"
@@ -327,60 +319,59 @@
 		"3: movw $0x05EB, 1b\n"	/* jmp on 5 bytes */
 		"   jmp 2b\n"
 		".previous\n"
-		_ASM_EXTABLE(1b,3b)
-		: : "r" (from), "r" (to) : "memory");
-		from+=64;
-		to+=64;
+			_ASM_EXTABLE(1b, 3b)
+			: : "r" (from), "r" (to) : "memory");
+
+		from += 64;
+		to += 64;
 	}
 	kernel_fpu_end();
 }
 
-
-#endif
+#endif /* !CONFIG_MK7 */
 
 /*
- *	Favour MMX for page clear and copy. 
+ * Favour MMX for page clear and copy:
  */
-
-static void slow_zero_page(void * page)
+static void slow_zero_page(void *page)
 {
 	int d0, d1;
-	__asm__ __volatile__( \
-		"cld\n\t" \
-		"rep ; stosl" \
-		: "=&c" (d0), "=&D" (d1)
-		:"a" (0),"1" (page),"0" (1024)
-		:"memory");
+
+	__asm__ __volatile__(
+		"cld\n\t"
+		"rep ; stosl"
+
+			: "=&c" (d0), "=&D" (d1)
+			:"a" (0), "1" (page), "0" (1024)
+			:"memory");
 }
- 
-void mmx_clear_page(void * page)
+
+void mmx_clear_page(void *page)
 {
-	if(unlikely(in_interrupt()))
+	if (unlikely(in_interrupt()))
 		slow_zero_page(page);
 	else
 		fast_clear_page(page);
 }
+EXPORT_SYMBOL(mmx_clear_page);
 
 static void slow_copy_page(void *to, void *from)
 {
 	int d0, d1, d2;
-	__asm__ __volatile__( \
-		"cld\n\t" \
-		"rep ; movsl" \
-		: "=&c" (d0), "=&D" (d1), "=&S" (d2) \
-		: "0" (1024),"1" ((long) to),"2" ((long) from) \
+
+	__asm__ __volatile__(
+		"cld\n\t"
+		"rep ; movsl"
+		: "=&c" (d0), "=&D" (d1), "=&S" (d2)
+		: "0" (1024), "1" ((long) to), "2" ((long) from)
 		: "memory");
 }
-  
 
 void mmx_copy_page(void *to, void *from)
 {
-	if(unlikely(in_interrupt()))
+	if (unlikely(in_interrupt()))
 		slow_copy_page(to, from);
 	else
 		fast_copy_page(to, from);
 }
-
-EXPORT_SYMBOL(_mmx_memcpy);
-EXPORT_SYMBOL(mmx_clear_page);
 EXPORT_SYMBOL(mmx_copy_page);
diff --git a/arch/x86/lib/semaphore_32.S b/arch/x86/lib/semaphore_32.S
index 3899bd3..648fe47 100644
--- a/arch/x86/lib/semaphore_32.S
+++ b/arch/x86/lib/semaphore_32.S
@@ -30,89 +30,6 @@
  * value or just clobbered..
  */
 	.section .sched.text, "ax"
-ENTRY(__down_failed)
-	CFI_STARTPROC
-	FRAME
-	pushl %edx
-	CFI_ADJUST_CFA_OFFSET 4
-	CFI_REL_OFFSET edx,0
-	pushl %ecx
-	CFI_ADJUST_CFA_OFFSET 4
-	CFI_REL_OFFSET ecx,0
-	call __down
-	popl %ecx
-	CFI_ADJUST_CFA_OFFSET -4
-	CFI_RESTORE ecx
-	popl %edx
-	CFI_ADJUST_CFA_OFFSET -4
-	CFI_RESTORE edx
-	ENDFRAME
-	ret
-	CFI_ENDPROC
-	ENDPROC(__down_failed)
-
-ENTRY(__down_failed_interruptible)
-	CFI_STARTPROC
-	FRAME
-	pushl %edx
-	CFI_ADJUST_CFA_OFFSET 4
-	CFI_REL_OFFSET edx,0
-	pushl %ecx
-	CFI_ADJUST_CFA_OFFSET 4
-	CFI_REL_OFFSET ecx,0
-	call __down_interruptible
-	popl %ecx
-	CFI_ADJUST_CFA_OFFSET -4
-	CFI_RESTORE ecx
-	popl %edx
-	CFI_ADJUST_CFA_OFFSET -4
-	CFI_RESTORE edx
-	ENDFRAME
-	ret
-	CFI_ENDPROC
-	ENDPROC(__down_failed_interruptible)
-
-ENTRY(__down_failed_trylock)
-	CFI_STARTPROC
-	FRAME
-	pushl %edx
-	CFI_ADJUST_CFA_OFFSET 4
-	CFI_REL_OFFSET edx,0
-	pushl %ecx
-	CFI_ADJUST_CFA_OFFSET 4
-	CFI_REL_OFFSET ecx,0
-	call __down_trylock
-	popl %ecx
-	CFI_ADJUST_CFA_OFFSET -4
-	CFI_RESTORE ecx
-	popl %edx
-	CFI_ADJUST_CFA_OFFSET -4
-	CFI_RESTORE edx
-	ENDFRAME
-	ret
-	CFI_ENDPROC
-	ENDPROC(__down_failed_trylock)
-
-ENTRY(__up_wakeup)
-	CFI_STARTPROC
-	FRAME
-	pushl %edx
-	CFI_ADJUST_CFA_OFFSET 4
-	CFI_REL_OFFSET edx,0
-	pushl %ecx
-	CFI_ADJUST_CFA_OFFSET 4
-	CFI_REL_OFFSET ecx,0
-	call __up
-	popl %ecx
-	CFI_ADJUST_CFA_OFFSET -4
-	CFI_RESTORE ecx
-	popl %edx
-	CFI_ADJUST_CFA_OFFSET -4
-	CFI_RESTORE edx
-	ENDFRAME
-	ret
-	CFI_ENDPROC
-	ENDPROC(__up_wakeup)
 
 /*
  * rw spinlock fallbacks
diff --git a/arch/x86/lib/string_32.c b/arch/x86/lib/string_32.c
index c2c0504..94972e7 100644
--- a/arch/x86/lib/string_32.c
+++ b/arch/x86/lib/string_32.c
@@ -14,25 +14,25 @@
 #include <linux/module.h>
 
 #ifdef __HAVE_ARCH_STRCPY
-char *strcpy(char * dest,const char *src)
+char *strcpy(char *dest, const char *src)
 {
 	int d0, d1, d2;
-	asm volatile( "1:\tlodsb\n\t"
+	asm volatile("1:\tlodsb\n\t"
 		"stosb\n\t"
 		"testb %%al,%%al\n\t"
 		"jne 1b"
 		: "=&S" (d0), "=&D" (d1), "=&a" (d2)
-		:"0" (src),"1" (dest) : "memory");
+		:"0" (src), "1" (dest) : "memory");
 	return dest;
 }
 EXPORT_SYMBOL(strcpy);
 #endif
 
 #ifdef __HAVE_ARCH_STRNCPY
-char *strncpy(char * dest,const char *src,size_t count)
+char *strncpy(char *dest, const char *src, size_t count)
 {
 	int d0, d1, d2, d3;
-	asm volatile( "1:\tdecl %2\n\t"
+	asm volatile("1:\tdecl %2\n\t"
 		"js 2f\n\t"
 		"lodsb\n\t"
 		"stosb\n\t"
@@ -42,17 +42,17 @@
 		"stosb\n"
 		"2:"
 		: "=&S" (d0), "=&D" (d1), "=&c" (d2), "=&a" (d3)
-		:"0" (src),"1" (dest),"2" (count) : "memory");
+		:"0" (src), "1" (dest), "2" (count) : "memory");
 	return dest;
 }
 EXPORT_SYMBOL(strncpy);
 #endif
 
 #ifdef __HAVE_ARCH_STRCAT
-char *strcat(char * dest,const char * src)
+char *strcat(char *dest, const char *src)
 {
 	int d0, d1, d2, d3;
-	asm volatile( "repne\n\t"
+	asm volatile("repne\n\t"
 		"scasb\n\t"
 		"decl %1\n"
 		"1:\tlodsb\n\t"
@@ -67,10 +67,10 @@
 #endif
 
 #ifdef __HAVE_ARCH_STRNCAT
-char *strncat(char * dest,const char * src,size_t count)
+char *strncat(char *dest, const char *src, size_t count)
 {
 	int d0, d1, d2, d3;
-	asm volatile( "repne\n\t"
+	asm volatile("repne\n\t"
 		"scasb\n\t"
 		"decl %1\n\t"
 		"movl %8,%3\n"
@@ -83,7 +83,7 @@
 		"2:\txorl %2,%2\n\t"
 		"stosb"
 		: "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3)
-		: "0" (src),"1" (dest),"2" (0),"3" (0xffffffffu), "g" (count)
+		: "0" (src), "1" (dest), "2" (0), "3" (0xffffffffu), "g" (count)
 		: "memory");
 	return dest;
 }
@@ -91,11 +91,11 @@
 #endif
 
 #ifdef __HAVE_ARCH_STRCMP
-int strcmp(const char * cs,const char * ct)
+int strcmp(const char *cs, const char *ct)
 {
 	int d0, d1;
 	int res;
-	asm volatile( "1:\tlodsb\n\t"
+	asm volatile("1:\tlodsb\n\t"
 		"scasb\n\t"
 		"jne 2f\n\t"
 		"testb %%al,%%al\n\t"
@@ -106,7 +106,7 @@
 		"orb $1,%%al\n"
 		"3:"
 		:"=a" (res), "=&S" (d0), "=&D" (d1)
-		:"1" (cs),"2" (ct)
+		:"1" (cs), "2" (ct)
 		:"memory");
 	return res;
 }
@@ -114,11 +114,11 @@
 #endif
 
 #ifdef __HAVE_ARCH_STRNCMP
-int strncmp(const char * cs,const char * ct,size_t count)
+int strncmp(const char *cs, const char *ct, size_t count)
 {
 	int res;
 	int d0, d1, d2;
-	asm volatile( "1:\tdecl %3\n\t"
+	asm volatile("1:\tdecl %3\n\t"
 		"js 2f\n\t"
 		"lodsb\n\t"
 		"scasb\n\t"
@@ -131,7 +131,7 @@
 		"orb $1,%%al\n"
 		"4:"
 		:"=a" (res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
-		:"1" (cs),"2" (ct),"3" (count)
+		:"1" (cs), "2" (ct), "3" (count)
 		:"memory");
 	return res;
 }
@@ -139,11 +139,11 @@
 #endif
 
 #ifdef __HAVE_ARCH_STRCHR
-char *strchr(const char * s, int c)
+char *strchr(const char *s, int c)
 {
 	int d0;
-	char * res;
-	asm volatile( "movb %%al,%%ah\n"
+	char *res;
+	asm volatile("movb %%al,%%ah\n"
 		"1:\tlodsb\n\t"
 		"cmpb %%ah,%%al\n\t"
 		"je 2f\n\t"
@@ -153,7 +153,7 @@
 		"2:\tmovl %1,%0\n\t"
 		"decl %0"
 		:"=a" (res), "=&S" (d0)
-		:"1" (s),"0" (c)
+		:"1" (s), "0" (c)
 		:"memory");
 	return res;
 }
@@ -161,16 +161,16 @@
 #endif
 
 #ifdef __HAVE_ARCH_STRLEN
-size_t strlen(const char * s)
+size_t strlen(const char *s)
 {
 	int d0;
 	int res;
-	asm volatile( "repne\n\t"
+	asm volatile("repne\n\t"
 		"scasb\n\t"
 		"notl %0\n\t"
 		"decl %0"
 		:"=c" (res), "=&D" (d0)
-		:"1" (s),"a" (0), "0" (0xffffffffu)
+		:"1" (s), "a" (0), "0" (0xffffffffu)
 		:"memory");
 	return res;
 }
@@ -178,19 +178,19 @@
 #endif
 
 #ifdef __HAVE_ARCH_MEMCHR
-void *memchr(const void *cs,int c,size_t count)
+void *memchr(const void *cs, int c, size_t count)
 {
 	int d0;
 	void *res;
 	if (!count)
 		return NULL;
-	asm volatile( "repne\n\t"
+	asm volatile("repne\n\t"
 		"scasb\n\t"
 		"je 1f\n\t"
 		"movl $1,%0\n"
 		"1:\tdecl %0"
 		:"=D" (res), "=&c" (d0)
-		:"a" (c),"0" (cs),"1" (count)
+		:"a" (c), "0" (cs), "1" (count)
 		:"memory");
 	return res;
 }
@@ -198,7 +198,7 @@
 #endif
 
 #ifdef __HAVE_ARCH_MEMSCAN
-void *memscan(void * addr, int c, size_t size)
+void *memscan(void *addr, int c, size_t size)
 {
 	if (!size)
 		return addr;
@@ -219,7 +219,7 @@
 {
 	int d0;
 	int res;
-	asm volatile( "movl %2,%0\n\t"
+	asm volatile("movl %2,%0\n\t"
 		"jmp 2f\n"
 		"1:\tcmpb $0,(%0)\n\t"
 		"je 3f\n\t"
@@ -229,7 +229,7 @@
 		"jne 1b\n"
 		"3:\tsubl %2,%0"
 		:"=a" (res), "=&d" (d0)
-		:"c" (s),"1" (count)
+		:"c" (s), "1" (count)
 		:"memory");
 	return res;
 }
diff --git a/arch/x86/lib/strstr_32.c b/arch/x86/lib/strstr_32.c
index a3dafbf..42e8a50 100644
--- a/arch/x86/lib/strstr_32.c
+++ b/arch/x86/lib/strstr_32.c
@@ -1,9 +1,9 @@
 #include <linux/string.h>
 
-char * strstr(const char * cs,const char * ct)
+char *strstr(const char *cs, const char *ct)
 {
 int	d0, d1;
-register char * __res;
+register char *__res;
 __asm__ __volatile__(
 	"movl %6,%%edi\n\t"
 	"repne\n\t"
diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
index 8b92d42..e009251 100644
--- a/arch/x86/lib/thunk_64.S
+++ b/arch/x86/lib/thunk_64.S
@@ -41,11 +41,6 @@
 	thunk rwsem_downgrade_thunk,rwsem_downgrade_wake
 #endif	
 	
-	thunk __down_failed,__down
-	thunk_retrax __down_failed_interruptible,__down_interruptible
-	thunk_retrax __down_failed_trylock,__down_trylock
-	thunk __up_wakeup,__up
-
 #ifdef CONFIG_TRACE_IRQFLAGS
 	thunk trace_hardirqs_on_thunk,trace_hardirqs_on
 	thunk trace_hardirqs_off_thunk,trace_hardirqs_off
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index e849b99..24e6094 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -1,4 +1,4 @@
-/* 
+/*
  * User address space access functions.
  * The non inlined parts of asm-i386/uaccess.h are here.
  *
@@ -22,14 +22,14 @@
 #endif
 	return 1;
 }
-#define movsl_is_ok(a1,a2,n) \
-	__movsl_is_ok((unsigned long)(a1),(unsigned long)(a2),(n))
+#define movsl_is_ok(a1, a2, n) \
+	__movsl_is_ok((unsigned long)(a1), (unsigned long)(a2), (n))
 
 /*
  * Copy a null terminated string from userspace.
  */
 
-#define __do_strncpy_from_user(dst,src,count,res)			   \
+#define __do_strncpy_from_user(dst, src, count, res)			   \
 do {									   \
 	int __d0, __d1, __d2;						   \
 	might_sleep();							   \
@@ -61,7 +61,7 @@
  *         least @count bytes long.
  * @src:   Source address, in user space.
  * @count: Maximum number of bytes to copy, including the trailing NUL.
- * 
+ *
  * Copies a NUL-terminated string from userspace to kernel space.
  * Caller must check the specified block with access_ok() before calling
  * this function.
@@ -90,7 +90,7 @@
  *         least @count bytes long.
  * @src:   Source address, in user space.
  * @count: Maximum number of bytes to copy, including the trailing NUL.
- * 
+ *
  * Copies a NUL-terminated string from userspace to kernel space.
  *
  * On success, returns the length of the string (not including the trailing
@@ -120,7 +120,7 @@
 do {									\
 	int __d0;							\
 	might_sleep();							\
-  	__asm__ __volatile__(						\
+	__asm__ __volatile__(						\
 		"0:	rep; stosl\n"					\
 		"	movl %2,%0\n"					\
 		"1:	rep; stosb\n"					\
@@ -333,17 +333,17 @@
 	__asm__ __volatile__(
 		       "        .align 2,0x90\n"
 		       "0:      movl 32(%4), %%eax\n"
-		       "        cmpl $67, %0\n"      
-		       "        jbe 2f\n"            
+		       "        cmpl $67, %0\n"
+		       "        jbe 2f\n"
 		       "1:      movl 64(%4), %%eax\n"
-		       "        .align 2,0x90\n"     
-		       "2:      movl 0(%4), %%eax\n" 
-		       "21:     movl 4(%4), %%edx\n" 
-		       "        movl %%eax, 0(%3)\n" 
-		       "        movl %%edx, 4(%3)\n" 
-		       "3:      movl 8(%4), %%eax\n" 
-		       "31:     movl 12(%4),%%edx\n" 
-		       "        movl %%eax, 8(%3)\n" 
+		       "        .align 2,0x90\n"
+		       "2:      movl 0(%4), %%eax\n"
+		       "21:     movl 4(%4), %%edx\n"
+		       "        movl %%eax, 0(%3)\n"
+		       "        movl %%edx, 4(%3)\n"
+		       "3:      movl 8(%4), %%eax\n"
+		       "31:     movl 12(%4),%%edx\n"
+		       "        movl %%eax, 8(%3)\n"
 		       "        movl %%edx, 12(%3)\n"
 		       "4:      movl 16(%4), %%eax\n"
 		       "41:     movl 20(%4), %%edx\n"
@@ -369,38 +369,38 @@
 		       "91:     movl 60(%4), %%edx\n"
 		       "        movl %%eax, 56(%3)\n"
 		       "        movl %%edx, 60(%3)\n"
-		       "        addl $-64, %0\n"     
-		       "        addl $64, %4\n"      
-		       "        addl $64, %3\n"      
-		       "        cmpl $63, %0\n"      
-		       "        ja  0b\n"            
-		       "5:      movl  %0, %%eax\n"   
-		       "        shrl  $2, %0\n"      
-		       "        andl $3, %%eax\n"    
-		       "        cld\n"               
-		       "6:      rep; movsl\n"   
+		       "        addl $-64, %0\n"
+		       "        addl $64, %4\n"
+		       "        addl $64, %3\n"
+		       "        cmpl $63, %0\n"
+		       "        ja  0b\n"
+		       "5:      movl  %0, %%eax\n"
+		       "        shrl  $2, %0\n"
+		       "        andl $3, %%eax\n"
+		       "        cld\n"
+		       "6:      rep; movsl\n"
 		       "        movl %%eax,%0\n"
-		       "7:      rep; movsb\n"	
-		       "8:\n"			
+		       "7:      rep; movsb\n"
+		       "8:\n"
 		       ".section .fixup,\"ax\"\n"
-		       "9:      lea 0(%%eax,%0,4),%0\n"	
-		       "16:     pushl %0\n"	
-		       "        pushl %%eax\n"	
+		       "9:      lea 0(%%eax,%0,4),%0\n"
+		       "16:     pushl %0\n"
+		       "        pushl %%eax\n"
 		       "        xorl %%eax,%%eax\n"
-		       "        rep; stosb\n"	
-		       "        popl %%eax\n"	
-		       "        popl %0\n"	
-		       "        jmp 8b\n"	
-		       ".previous\n"		
+		       "        rep; stosb\n"
+		       "        popl %%eax\n"
+		       "        popl %0\n"
+		       "        jmp 8b\n"
+		       ".previous\n"
 		       ".section __ex_table,\"a\"\n"
-		       "	.align 4\n"	   
-		       "	.long 0b,16b\n"	 
+		       "	.align 4\n"
+		       "	.long 0b,16b\n"
 		       "	.long 1b,16b\n"
 		       "	.long 2b,16b\n"
 		       "	.long 21b,16b\n"
-		       "	.long 3b,16b\n"	
+		       "	.long 3b,16b\n"
 		       "	.long 31b,16b\n"
-		       "	.long 4b,16b\n"	
+		       "	.long 4b,16b\n"
 		       "	.long 41b,16b\n"
 		       "	.long 10b,16b\n"
 		       "	.long 51b,16b\n"
@@ -412,9 +412,9 @@
 		       "	.long 81b,16b\n"
 		       "	.long 14b,16b\n"
 		       "	.long 91b,16b\n"
-		       "	.long 6b,9b\n"	
-		       "        .long 7b,16b\n" 
-		       ".previous"		
+		       "	.long 6b,9b\n"
+		       "        .long 7b,16b\n"
+		       ".previous"
 		       : "=&c"(size), "=&D" (d0), "=&S" (d1)
 		       :  "1"(to), "2"(from), "0"(size)
 		       : "eax", "edx", "memory");
@@ -429,7 +429,7 @@
 static unsigned long __copy_user_zeroing_intel_nocache(void *to,
 				const void __user *from, unsigned long size)
 {
-        int d0, d1;
+	int d0, d1;
 
 	__asm__ __volatile__(
 	       "        .align 2,0x90\n"
@@ -526,7 +526,7 @@
 static unsigned long __copy_user_intel_nocache(void *to,
 				const void __user *from, unsigned long size)
 {
-        int d0, d1;
+	int d0, d1;
 
 	__asm__ __volatile__(
 	       "        .align 2,0x90\n"
@@ -629,7 +629,7 @@
 #endif /* CONFIG_X86_INTEL_USERCOPY */
 
 /* Generic arbitrary sized copy.  */
-#define __copy_user(to,from,size)					\
+#define __copy_user(to, from, size)					\
 do {									\
 	int __d0, __d1, __d2;						\
 	__asm__ __volatile__(						\
@@ -665,7 +665,7 @@
 		: "memory");						\
 } while (0)
 
-#define __copy_user_zeroing(to,from,size)				\
+#define __copy_user_zeroing(to, from, size)				\
 do {									\
 	int __d0, __d1, __d2;						\
 	__asm__ __volatile__(						\
@@ -712,7 +712,7 @@
 {
 #ifndef CONFIG_X86_WP_WORKS_OK
 	if (unlikely(boot_cpu_data.wp_works_ok == 0) &&
-			((unsigned long )to) < TASK_SIZE) {
+			((unsigned long)to) < TASK_SIZE) {
 		/*
 		 * When we are in an atomic section (see
 		 * mm/filemap.c:file_read_actor), return the full
@@ -721,26 +721,26 @@
 		if (in_atomic())
 			return n;
 
-		/* 
+		/*
 		 * CPU does not honor the WP bit when writing
 		 * from supervisory mode, and due to preemption or SMP,
 		 * the page tables can change at any time.
 		 * Do it manually.	Manfred <manfred@colorfullife.com>
 		 */
 		while (n) {
-		      	unsigned long offset = ((unsigned long)to)%PAGE_SIZE;
+			unsigned long offset = ((unsigned long)to)%PAGE_SIZE;
 			unsigned long len = PAGE_SIZE - offset;
 			int retval;
 			struct page *pg;
 			void *maddr;
-			
+
 			if (len > n)
 				len = n;
 
 survive:
 			down_read(&current->mm->mmap_sem);
 			retval = get_user_pages(current, current->mm,
-					(unsigned long )to, 1, 1, 0, &pg, NULL);
+					(unsigned long)to, 1, 1, 0, &pg, NULL);
 
 			if (retval == -ENOMEM && is_global_init(current)) {
 				up_read(&current->mm->mmap_sem);
@@ -750,8 +750,8 @@
 
 			if (retval != 1) {
 				up_read(&current->mm->mmap_sem);
-		       		break;
-		       	}
+				break;
+			}
 
 			maddr = kmap_atomic(pg, KM_USER0);
 			memcpy(maddr + offset, from, len);
@@ -802,12 +802,12 @@
 					unsigned long n)
 {
 #ifdef CONFIG_X86_INTEL_USERCOPY
-	if ( n > 64 && cpu_has_xmm2)
-                n = __copy_user_zeroing_intel_nocache(to, from, n);
+	if (n > 64 && cpu_has_xmm2)
+		n = __copy_user_zeroing_intel_nocache(to, from, n);
 	else
 		__copy_user_zeroing(to, from, n);
 #else
-        __copy_user_zeroing(to, from, n);
+	__copy_user_zeroing(to, from, n);
 #endif
 	return n;
 }
@@ -817,12 +817,12 @@
 					unsigned long n)
 {
 #ifdef CONFIG_X86_INTEL_USERCOPY
-	if ( n > 64 && cpu_has_xmm2)
-                n = __copy_user_intel_nocache(to, from, n);
+	if (n > 64 && cpu_has_xmm2)
+		n = __copy_user_intel_nocache(to, from, n);
 	else
 		__copy_user(to, from, n);
 #else
-        __copy_user(to, from, n);
+	__copy_user(to, from, n);
 #endif
 	return n;
 }
diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c
index 292a225..95fc463 100644
--- a/arch/x86/mach-generic/bigsmp.c
+++ b/arch/x86/mach-generic/bigsmp.c
@@ -1,4 +1,4 @@
-/* 
+/*
  * APIC driver for "bigsmp" XAPIC machines with more than 8 virtual CPUs.
  * Drives the local APIC in "clustered mode".
  */
@@ -32,26 +32,26 @@
 
 
 static const struct dmi_system_id bigsmp_dmi_table[] = {
-	{ hp_ht_bigsmp, "HP ProLiant DL760 G2", {
-		DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
-		DMI_MATCH(DMI_BIOS_VERSION, "P44-"),
-	}},
+	{ hp_ht_bigsmp, "HP ProLiant DL760 G2",
+	{ DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
+	DMI_MATCH(DMI_BIOS_VERSION, "P44-"),}
+	},
 
-	{ hp_ht_bigsmp, "HP ProLiant DL740", {
-		DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
-		DMI_MATCH(DMI_BIOS_VERSION, "P47-"),
-	 }},
+	{ hp_ht_bigsmp, "HP ProLiant DL740",
+	{ DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
+	DMI_MATCH(DMI_BIOS_VERSION, "P47-"),}
+	},
 	 { }
 };
 
 
 static int probe_bigsmp(void)
-{ 
+{
 	if (def_to_bigsmp)
-        	dmi_bigsmp = 1;
+		dmi_bigsmp = 1;
 	else
 		dmi_check_system(bigsmp_dmi_table);
-	return dmi_bigsmp; 
-} 
+	return dmi_bigsmp;
+}
 
-struct genapic apic_bigsmp = APIC_INIT("bigsmp", probe_bigsmp); 
+struct genapic apic_bigsmp = APIC_INIT("bigsmp", probe_bigsmp);
diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c
index 1af0cc7..9e835a1 100644
--- a/arch/x86/mach-generic/default.c
+++ b/arch/x86/mach-generic/default.c
@@ -1,4 +1,4 @@
-/* 
+/*
  * Default generic APIC driver. This handles up to 8 CPUs.
  */
 #define APIC_DEFINITION 1
@@ -19,8 +19,8 @@
 
 /* should be called last. */
 static int probe_default(void)
-{ 
+{
 	return 1;
-} 
+}
 
-struct genapic apic_default = APIC_INIT("default", probe_default); 
+struct genapic apic_default = APIC_INIT("default", probe_default);
diff --git a/arch/x86/mach-generic/probe.c b/arch/x86/mach-generic/probe.c
index f410d3c..c5ae751 100644
--- a/arch/x86/mach-generic/probe.c
+++ b/arch/x86/mach-generic/probe.c
@@ -1,8 +1,9 @@
-/* Copyright 2003 Andi Kleen, SuSE Labs. 
- * Subject to the GNU Public License, v.2 
- * 
+/*
+ * Copyright 2003 Andi Kleen, SuSE Labs.
+ * Subject to the GNU Public License, v.2
+ *
  * Generic x86 APIC driver probe layer.
- */  
+ */
 #include <linux/threads.h>
 #include <linux/cpumask.h>
 #include <linux/string.h>
@@ -24,7 +25,7 @@
 
 static struct genapic *apic_probe[] __initdata = {
 	&apic_summit,
-	&apic_bigsmp, 
+	&apic_bigsmp,
 	&apic_es7000,
 	&apic_default,	/* must be last */
 	NULL,
@@ -69,7 +70,7 @@
 }
 
 void __init generic_apic_probe(void)
-{ 
+{
 	if (!cmdline_apic) {
 		int i;
 		for (i = 0; apic_probe[i]; i++) {
@@ -83,40 +84,40 @@
 			panic("Didn't find an APIC driver");
 	}
 	printk(KERN_INFO "Using APIC driver %s\n", genapic->name);
-} 
+}
 
 /* These functions can switch the APIC even after the initial ->probe() */
 
 int __init mps_oem_check(struct mp_config_table *mpc, char *oem, char *productid)
-{ 
+{
 	int i;
-	for (i = 0; apic_probe[i]; ++i) { 
-		if (apic_probe[i]->mps_oem_check(mpc,oem,productid)) { 
+	for (i = 0; apic_probe[i]; ++i) {
+		if (apic_probe[i]->mps_oem_check(mpc, oem, productid)) {
 			if (!cmdline_apic) {
 				genapic = apic_probe[i];
 				printk(KERN_INFO "Switched to APIC driver `%s'.\n",
 				       genapic->name);
 			}
 			return 1;
-		} 
-	} 
+		}
+	}
 	return 0;
-} 
+}
 
 int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 {
 	int i;
-	for (i = 0; apic_probe[i]; ++i) { 
-		if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) { 
+	for (i = 0; apic_probe[i]; ++i) {
+		if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) {
 			if (!cmdline_apic) {
 				genapic = apic_probe[i];
 				printk(KERN_INFO "Switched to APIC driver `%s'.\n",
 				       genapic->name);
 			}
 			return 1;
-		} 
-	} 
-	return 0;	
+		}
+	}
+	return 0;
 }
 
 int hard_smp_processor_id(void)
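The probe layer above is a first-match scan over a NULL-terminated table of
driver descriptors, with the catch-all apic_default entry required to sit
last. A minimal user-space sketch of that pattern follows; the names are
illustrative stand-ins, only the shape of the scan matches the kernel code:

#include <stdio.h>

/* Hypothetical stand-in for struct genapic: a name plus a probe hook. */
struct apic_driver {
	const char *name;
	int (*probe)(void);
};

static int probe_never(void)  { return 0; }	/* summit/bigsmp: probed later */
static int probe_always(void) { return 1; }	/* default: catch-all */

static struct apic_driver summit = { "summit",  probe_never };
static struct apic_driver bigsmp = { "bigsmp",  probe_never };
static struct apic_driver dfl    = { "default", probe_always };

static struct apic_driver *apic_probe[] = {
	&summit,
	&bigsmp,
	&dfl,		/* must be last */
	NULL,		/* sentinel terminates the scan */
};

int main(void)
{
	/* First driver whose probe() succeeds wins, as in generic_apic_probe(). */
	for (int i = 0; apic_probe[i]; i++) {
		if (apic_probe[i]->probe()) {
			printf("Using APIC driver %s\n", apic_probe[i]->name);
			return 0;
		}
	}
	fprintf(stderr, "Didn't find an APIC driver\n");
	return 1;
}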
diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c
index 74883cc..a97ea0f 100644
--- a/arch/x86/mach-generic/summit.c
+++ b/arch/x86/mach-generic/summit.c
@@ -1,4 +1,4 @@
-/* 
+/*
  * APIC driver for the IBM "Summit" chipset.
  */
 #define APIC_DEFINITION 1
@@ -19,9 +19,9 @@
 #include <asm/mach-summit/mach_mpparse.h>
 
 static int probe_summit(void)
-{ 
+{
 	/* probed later in mptable/ACPI hooks */
 	return 0;
-} 
+}
 
-struct genapic apic_summit = APIC_INIT("summit", probe_summit); 
+struct genapic apic_summit = APIC_INIT("summit", probe_summit);
diff --git a/arch/x86/mach-rdc321x/Makefile b/arch/x86/mach-rdc321x/Makefile
index 1faac81..8325b4c 100644
--- a/arch/x86/mach-rdc321x/Makefile
+++ b/arch/x86/mach-rdc321x/Makefile
@@ -1,5 +1,5 @@
 #
 # Makefile for the RDC321x specific parts of the kernel
 #
-obj-$(CONFIG_X86_RDC321X)        := gpio.o platform.o wdt.o
+obj-$(CONFIG_X86_RDC321X)        := gpio.o platform.o
 
diff --git a/arch/x86/mach-rdc321x/wdt.c b/arch/x86/mach-rdc321x/wdt.c
deleted file mode 100644
index ec5625a..0000000
--- a/arch/x86/mach-rdc321x/wdt.c
+++ /dev/null
@@ -1,275 +0,0 @@
-/*
- * RDC321x watchdog driver
- *
- * Copyright (C) 2007 Florian Fainelli <florian@openwrt.org>
- *
- * This driver is highly inspired from the cpu5_wdt driver
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/miscdevice.h>
-#include <linux/fs.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/timer.h>
-#include <linux/completion.h>
-#include <linux/jiffies.h>
-#include <linux/platform_device.h>
-#include <linux/watchdog.h>
-#include <linux/io.h>
-#include <linux/uaccess.h>
-
-#include <asm/mach-rdc321x/rdc321x_defs.h>
-
-#define RDC_WDT_MASK	0x80000000 /* Mask */
-#define RDC_WDT_EN	0x00800000 /* Enable bit */
-#define RDC_WDT_WTI	0x00200000 /* Generate CPU reset/NMI/WDT on timeout */
-#define RDC_WDT_RST	0x00100000 /* Reset bit */
-#define RDC_WDT_WIF	0x00040000 /* WDT IRQ Flag */
-#define RDC_WDT_IRT	0x00000100 /* IRQ Routing table */
-#define RDC_WDT_CNT	0x00000001 /* WDT count */
-
-#define RDC_CLS_TMR	0x80003844 /* Clear timer */
-
-#define RDC_WDT_INTERVAL	(HZ/10+1)
-
-int nowayout = WATCHDOG_NOWAYOUT;
-module_param(nowayout, int, 0);
-MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-
-static int ticks = 1000;
-
-/* some device data */
-
-static struct {
-	struct completion stop;
-	volatile int running;
-	struct timer_list timer;
-	volatile int queue;
-	int default_ticks;
-	unsigned long inuse;
-} rdc321x_wdt_device;
-
-/* generic helper functions */
-
-static void rdc321x_wdt_trigger(unsigned long unused)
-{
-	if (rdc321x_wdt_device.running)
-		ticks--;
-
-	/* keep watchdog alive */
-	outl(RDC_WDT_EN|inl(RDC3210_CFGREG_DATA), RDC3210_CFGREG_DATA);
-
-	/* requeue?? */
-	if (rdc321x_wdt_device.queue && ticks)
-		mod_timer(&rdc321x_wdt_device.timer,
-				jiffies + RDC_WDT_INTERVAL);
-	else {
-		/* ticks doesn't matter anyway */
-		complete(&rdc321x_wdt_device.stop);
-	}
-
-}
-
-static void rdc321x_wdt_reset(void)
-{
-	ticks = rdc321x_wdt_device.default_ticks;
-}
-
-static void rdc321x_wdt_start(void)
-{
-	if (!rdc321x_wdt_device.queue) {
-		rdc321x_wdt_device.queue = 1;
-
-		/* Clear the timer */
-		outl(RDC_CLS_TMR, RDC3210_CFGREG_ADDR);
-
-		/* Enable watchdog and set the timeout to 81.92 us */
-		outl(RDC_WDT_EN|RDC_WDT_CNT, RDC3210_CFGREG_DATA);
-
-		mod_timer(&rdc321x_wdt_device.timer,
-				jiffies + RDC_WDT_INTERVAL);
-	}
-
-	/* if process dies, counter is not decremented */
-	rdc321x_wdt_device.running++;
-}
-
-static int rdc321x_wdt_stop(void)
-{
-	if (rdc321x_wdt_device.running)
-		rdc321x_wdt_device.running = 0;
-
-	ticks = rdc321x_wdt_device.default_ticks;
-
-	return -EIO;
-}
-
-/* filesystem operations */
-
-static int rdc321x_wdt_open(struct inode *inode, struct file *file)
-{
-	if (test_and_set_bit(0, &rdc321x_wdt_device.inuse))
-		return -EBUSY;
-
-	return nonseekable_open(inode, file);
-}
-
-static int rdc321x_wdt_release(struct inode *inode, struct file *file)
-{
-	clear_bit(0, &rdc321x_wdt_device.inuse);
-	return 0;
-}
-
-static int rdc321x_wdt_ioctl(struct inode *inode, struct file *file,
-				unsigned int cmd, unsigned long arg)
-{
-	void __user *argp = (void __user *)arg;
-	unsigned int value;
-	static struct watchdog_info ident = {
-		.options = WDIOF_CARDRESET,
-		.identity = "RDC321x WDT",
-	};
-
-	switch (cmd) {
-	case WDIOC_KEEPALIVE:
-		rdc321x_wdt_reset();
-		break;
-	case WDIOC_GETSTATUS:
-		/* Read the value from the DATA register */
-		value = inl(RDC3210_CFGREG_DATA);
-		if (copy_to_user(argp, &value, sizeof(int)))
-			return -EFAULT;
-		break;
-	case WDIOC_GETSUPPORT:
-		if (copy_to_user(argp, &ident, sizeof(ident)))
-			return -EFAULT;
-		break;
-	case WDIOC_SETOPTIONS:
-		if (copy_from_user(&value, argp, sizeof(int)))
-			return -EFAULT;
-		switch (value) {
-		case WDIOS_ENABLECARD:
-			rdc321x_wdt_start();
-			break;
-		case WDIOS_DISABLECARD:
-			return rdc321x_wdt_stop();
-		default:
-			return -EINVAL;
-		}
-		break;
-	default:
-		return -ENOTTY;
-	}
-	return 0;
-}
-
-static ssize_t rdc321x_wdt_write(struct file *file, const char __user *buf,
-				size_t count, loff_t *ppos)
-{
-	if (!count)
-		return -EIO;
-
-	rdc321x_wdt_reset();
-
-	return count;
-}
-
-static const struct file_operations rdc321x_wdt_fops = {
-	.owner		= THIS_MODULE,
-	.llseek		= no_llseek,
-	.ioctl		= rdc321x_wdt_ioctl,
-	.open		= rdc321x_wdt_open,
-	.write		= rdc321x_wdt_write,
-	.release	= rdc321x_wdt_release,
-};
-
-static struct miscdevice rdc321x_wdt_misc = {
-	.minor	= WATCHDOG_MINOR,
-	.name	= "watchdog",
-	.fops	= &rdc321x_wdt_fops,
-};
-
-static int __devinit rdc321x_wdt_probe(struct platform_device *pdev)
-{
-	int err;
-
-	err = misc_register(&rdc321x_wdt_misc);
-	if (err < 0) {
-		printk(KERN_ERR PFX "watchdog misc_register failed\n");
-		return err;
-	}
-
-	/* Reset the watchdog */
-	outl(RDC_WDT_RST, RDC3210_CFGREG_DATA);
-
-	init_completion(&rdc321x_wdt_device.stop);
-	rdc321x_wdt_device.queue = 0;
-
-	clear_bit(0, &rdc321x_wdt_device.inuse);
-
-	setup_timer(&rdc321x_wdt_device.timer, rdc321x_wdt_trigger, 0);
-
-	rdc321x_wdt_device.default_ticks = ticks;
-
-	printk(KERN_INFO PFX "watchdog init success\n");
-
-	return 0;
-}
-
-static int rdc321x_wdt_remove(struct platform_device *pdev)
-{
-	if (rdc321x_wdt_device.queue) {
-		rdc321x_wdt_device.queue = 0;
-		wait_for_completion(&rdc321x_wdt_device.stop);
-	}
-
-	misc_deregister(&rdc321x_wdt_misc);
-
-	return 0;
-}
-
-static struct platform_driver rdc321x_wdt_driver = {
-	.probe = rdc321x_wdt_probe,
-	.remove = rdc321x_wdt_remove,
-	.driver = {
-		.owner = THIS_MODULE,
-		.name = "rdc321x-wdt",
-	},
-};
-
-static int __init rdc321x_wdt_init(void)
-{
-	return platform_driver_register(&rdc321x_wdt_driver);
-}
-
-static void __exit rdc321x_wdt_exit(void)
-{
-	platform_driver_unregister(&rdc321x_wdt_driver);
-}
-
-module_init(rdc321x_wdt_init);
-module_exit(rdc321x_wdt_exit);
-
-MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
-MODULE_DESCRIPTION("RDC321x watchdog driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
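The deleted driver's trigger routine decides on every tick whether to re-arm
the kernel timer or to signal the completion that rdc321x_wdt_remove() waits
on. A stand-alone sketch of that requeue decision, with the timer and
completion machinery stripped out and illustrative names:

#include <stdbool.h>
#include <stdio.h>

/* Models the requeue decision in the deleted rdc321x_wdt_trigger():
 * keep re-arming while userspace is feeding the dog (queue set) and
 * the tick budget is not exhausted; otherwise signal the completion
 * that the remove path waits on. */
static bool should_requeue(bool queue, int ticks)
{
	return queue && ticks > 0;
}

int main(void)
{
	int ticks = 3;		/* stands in for the module's tick budget */
	bool queue = true;	/* set by rdc321x_wdt_start() */

	while (should_requeue(queue, ticks)) {
		printf("re-arm timer, %d ticks left\n", ticks);
		ticks--;	/* decremented only while the wdt runs */
	}
	printf("not requeued: completion signalled\n");
	return 0;
}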
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 3cc8eb2..be7235b 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -27,6 +27,7 @@
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include <asm/arch_hooks.h>
+#include <asm/trampoline.h>
 
 /* TLB state -- visible externally, indexed physically */
 DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { &init_mm, 0 };
@@ -210,7 +211,7 @@
 /* steal a page from the bottom of memory for the trampoline and
  * squirrel its address away here.  This will be in kernel virtual
  * space */
-static __u32 trampoline_base;
+unsigned char *trampoline_base;
 
 /* The per cpu profile stuff - used in smp_local_timer_interrupt */
 static DEFINE_PER_CPU(int, prof_multiplier) = 1;
@@ -429,15 +430,15 @@
 }
 
 /* set up the trampoline and return the physical address of the code */
-static __u32 __init setup_trampoline(void)
+unsigned long __init setup_trampoline(void)
 {
 	/* these two are global symbols in trampoline.S */
 	extern const __u8 trampoline_end[];
 	extern const __u8 trampoline_data[];
 
-	memcpy((__u8 *) trampoline_base, trampoline_data,
+	memcpy(trampoline_base, trampoline_data,
 	       trampoline_end - trampoline_data);
-	return virt_to_phys((__u8 *) trampoline_base);
+	return virt_to_phys(trampoline_base);
 }
 
 /* Routine initially called when a non-boot CPU is brought online */
@@ -520,13 +521,6 @@
 	    & ~(voyager_extended_vic_processors
 		& voyager_allowed_boot_processors);
 
-	/* This is an area in head.S which was used to set up the
-	 * initial kernel stack.  We need to alter this to give the
-	 * booting CPU a new stack (taken from its idle process) */
-	extern struct {
-		__u8 *sp;
-		unsigned short ss;
-	} stack_start;
 	/* This is the format of the CPI IDT gate (in real mode) which
 	 * we're hijacking to boot the CPU */
 	union IDTFormat {
@@ -1166,7 +1160,7 @@
  * is sorted out */
 void __init smp_alloc_memory(void)
 {
-	trampoline_base = (__u32) alloc_bootmem_low_pages(PAGE_SIZE);
+	trampoline_base = alloc_bootmem_low_pages(PAGE_SIZE);
 	if (__pa(trampoline_base) >= 0x93000)
 		BUG();
 }
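With trampoline_base now a plain pointer, setup_trampoline() reduces to a
memcpy of the blob delimited by the trampoline_data/trampoline_end symbols,
followed by a virtual-to-physical translation. A user-space model of the copy
step; the byte values are placeholders for the real-mode code in
trampoline.S:

#include <stdio.h>
#include <string.h>

/* Placeholder bytes standing in for the real-mode startup code that
 * trampoline.S brackets with trampoline_data/trampoline_end. */
static const unsigned char trampoline_data[] = { 0xfa, 0xfc, 0xeb, 0xfe };
static const unsigned char * const trampoline_end =
	trampoline_data + sizeof(trampoline_data);

static unsigned char trampoline_base[4096];	/* one low page */

int main(void)
{
	/* setup_trampoline() is now just this copy plus virt_to_phys()
	 * on the destination, which only exists in-kernel. */
	size_t len = trampoline_end - trampoline_data;

	memcpy(trampoline_base, trampoline_data, len);
	printf("copied %zu trampoline bytes to %p\n", len,
	       (void *)trampoline_base);
	return 0;
}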
diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c
index 760baee..4bab3b1 100644
--- a/arch/x86/math-emu/fpu_entry.c
+++ b/arch/x86/math-emu/fpu_entry.c
@@ -276,6 +276,7 @@
 	entry_sel_off.offset = FPU_ORIG_EIP;
 	entry_sel_off.selector = FPU_CS;
 	entry_sel_off.opcode = (byte1 << 8) | FPU_modrm;
+	entry_sel_off.empty = 0;
 
 	FPU_rm = FPU_modrm & 7;
 
diff --git a/arch/x86/math-emu/reg_ld_str.c b/arch/x86/math-emu/reg_ld_str.c
index 799d4af..02af772 100644
--- a/arch/x86/math-emu/reg_ld_str.c
+++ b/arch/x86/math-emu/reg_ld_str.c
@@ -383,15 +383,15 @@
 	int exp;
 	FPU_REG tmp;
 
+	l[0] = 0;
+	l[1] = 0;
 	if (st0_tag == TAG_Valid) {
 		reg_copy(st0_ptr, &tmp);
 		exp = exponent(&tmp);
 
 		if (exp < DOUBLE_Emin) {	/* It may be a denormal */
 			addexponent(&tmp, -DOUBLE_Emin + 52);	/* largest exp to be 51 */
-
-		      denormal_arg:
-
+denormal_arg:
 			if ((precision_loss = FPU_round_to_int(&tmp, st0_tag))) {
 #ifdef PECULIAR_486
 				/* Did it round to a non-denormal ? */
@@ -477,8 +477,7 @@
 
 				/* This is a special case: see sec 16.2.5.1 of the 80486 book */
 				/* Overflow to infinity */
-				l[0] = 0x00000000;	/* Set to */
-				l[1] = 0x7ff00000;	/* + INF */
+				l[1] = 0x7ff00000;	/* Set to + INF */
 			} else {
 				if (precision_loss) {
 					if (increment)
@@ -492,8 +491,6 @@
 		}
 	} else if (st0_tag == TAG_Zero) {
 		/* Number is zero */
-		l[0] = 0;
-		l[1] = 0;
 	} else if (st0_tag == TAG_Special) {
 		st0_tag = FPU_Special(st0_ptr);
 		if (st0_tag == TW_Denormal) {
@@ -508,7 +505,6 @@
 			reg_copy(st0_ptr, &tmp);
 			goto denormal_arg;
 		} else if (st0_tag == TW_Infinity) {
-			l[0] = 0;
 			l[1] = 0x7ff00000;
 		} else if (st0_tag == TW_NaN) {
 			/* Is it really a NaN ? */
@@ -532,7 +528,6 @@
 				EXCEPTION(EX_Invalid);
 				if (!(control_word & CW_Invalid))
 					return 0;
-				l[0] = 0;
 				l[1] = 0xfff80000;
 			}
 		}
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 9832910..20941d2 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -1,5 +1,17 @@
+obj-y	:=  init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
+	    pat.o
+
+obj-$(CONFIG_X86_32)		+= pgtable_32.o
+
+obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
+obj-$(CONFIG_X86_PTDUMP)	+= dump_pagetables.o
+
+obj-$(CONFIG_HIGHMEM)		+= highmem_32.o
+
 ifeq ($(CONFIG_X86_32),y)
-include ${srctree}/arch/x86/mm/Makefile_32
+obj-$(CONFIG_NUMA)		+= discontig_32.o
 else
-include ${srctree}/arch/x86/mm/Makefile_64
+obj-$(CONFIG_NUMA)		+= numa_64.o
+obj-$(CONFIG_K8_NUMA)		+= k8topology_64.o
+obj-$(CONFIG_ACPI_NUMA)		+= srat_64.o
 endif
diff --git a/arch/x86/mm/Makefile_32 b/arch/x86/mm/Makefile_32
deleted file mode 100644
index c36ae88..0000000
--- a/arch/x86/mm/Makefile_32
+++ /dev/null
@@ -1,9 +0,0 @@
-#
-# Makefile for the linux i386-specific parts of the memory manager.
-#
-
-obj-y	:= init_32.o pgtable_32.o fault.o ioremap.o extable.o pageattr.o mmap.o
-
-obj-$(CONFIG_NUMA) += discontig_32.o
-obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
-obj-$(CONFIG_HIGHMEM) += highmem_32.o
diff --git a/arch/x86/mm/Makefile_64 b/arch/x86/mm/Makefile_64
deleted file mode 100644
index 688c8c2..0000000
--- a/arch/x86/mm/Makefile_64
+++ /dev/null
@@ -1,9 +0,0 @@
-#
-# Makefile for the linux x86_64-specific parts of the memory manager.
-#
-
-obj-y	 := init_64.o fault.o ioremap.o extable.o pageattr.o mmap.o
-obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
-obj-$(CONFIG_NUMA) += numa_64.o
-obj-$(CONFIG_K8_NUMA) += k8topology_64.o
-obj-$(CONFIG_ACPI_NUMA) += srat_64.o
diff --git a/arch/x86/mm/discontig_32.c b/arch/x86/mm/discontig_32.c
index 8e25e06..eba0bbe 100644
--- a/arch/x86/mm/discontig_32.c
+++ b/arch/x86/mm/discontig_32.c
@@ -37,7 +37,7 @@
 #include <asm/e820.h>
 #include <asm/setup.h>
 #include <asm/mmzone.h>
-#include <bios_ebda.h>
+#include <asm/bios_ebda.h>
 
 struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
 EXPORT_SYMBOL(node_data);
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
new file mode 100644
index 0000000..6791b83
--- /dev/null
+++ b/arch/x86/mm/dump_pagetables.c
@@ -0,0 +1,354 @@
+/*
+ * Debug helper to dump the current kernel pagetables of the system
+ * so that we can see what the various memory ranges are set to.
+ *
+ * (C) Copyright 2008 Intel Corporation
+ *
+ * Author: Arjan van de Ven <arjan@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+
+#include <asm/pgtable.h>
+
+/*
+ * The dumper groups pagetable entries of the same type into one, and for
+ * that it needs to keep some state when walking, and flush this state
+ * when a "break" in the continuity is found.
+ */
+struct pg_state {
+	int level;
+	pgprot_t current_prot;
+	unsigned long start_address;
+	unsigned long current_address;
+	const struct addr_marker *marker;
+};
+
+struct addr_marker {
+	unsigned long start_address;
+	const char *name;
+};
+
+/* Address space marker hints */
+static struct addr_marker address_markers[] = {
+	{ 0, "User Space" },
+#ifdef CONFIG_X86_64
+	{ 0x8000000000000000UL, "Kernel Space" },
+	{ 0xffff810000000000UL, "Low Kernel Mapping" },
+	{ VMALLOC_START,        "vmalloc() Area" },
+	{ VMEMMAP_START,        "Vmemmap" },
+	{ __START_KERNEL_map,   "High Kernel Mapping" },
+	{ MODULES_VADDR,        "Modules" },
+	{ MODULES_END,          "End Modules" },
+#else
+	{ PAGE_OFFSET,          "Kernel Mapping" },
+	{ 0/* VMALLOC_START */, "vmalloc() Area" },
+	{ 0/*VMALLOC_END*/,     "vmalloc() End" },
+# ifdef CONFIG_HIGHMEM
+	{ 0/*PKMAP_BASE*/,      "Persistent kmap() Area" },
+# endif
+	{ 0/*FIXADDR_START*/,   "Fixmap Area" },
+#endif
+	{ -1, NULL }		/* End of list */
+};
+
+/* Multipliers for offsets within the PTEs */
+#define PTE_LEVEL_MULT (PAGE_SIZE)
+#define PMD_LEVEL_MULT (PTRS_PER_PTE * PTE_LEVEL_MULT)
+#define PUD_LEVEL_MULT (PTRS_PER_PMD * PMD_LEVEL_MULT)
+#define PGD_LEVEL_MULT (PTRS_PER_PUD * PUD_LEVEL_MULT)
+
+/*
+ * Print a readable form of a pgprot_t to the seq_file
+ */
+static void printk_prot(struct seq_file *m, pgprot_t prot, int level)
+{
+	pgprotval_t pr = pgprot_val(prot);
+	static const char * const level_name[] =
+		{ "cr3", "pgd", "pud", "pmd", "pte" };
+
+	if (!pgprot_val(prot)) {
+		/* Not present */
+		seq_printf(m, "                          ");
+	} else {
+		if (pr & _PAGE_USER)
+			seq_printf(m, "USR ");
+		else
+			seq_printf(m, "    ");
+		if (pr & _PAGE_RW)
+			seq_printf(m, "RW ");
+		else
+			seq_printf(m, "ro ");
+		if (pr & _PAGE_PWT)
+			seq_printf(m, "PWT ");
+		else
+			seq_printf(m, "    ");
+		if (pr & _PAGE_PCD)
+			seq_printf(m, "PCD ");
+		else
+			seq_printf(m, "    ");
+
+		/* Bit 9 has a different meaning on level 3 vs 4 */
+		if (level <= 3) {
+			if (pr & _PAGE_PSE)
+				seq_printf(m, "PSE ");
+			else
+				seq_printf(m, "    ");
+		} else {
+			if (pr & _PAGE_PAT)
+				seq_printf(m, "pat ");
+			else
+				seq_printf(m, "    ");
+		}
+		if (pr & _PAGE_GLOBAL)
+			seq_printf(m, "GLB ");
+		else
+			seq_printf(m, "    ");
+		if (pr & _PAGE_NX)
+			seq_printf(m, "NX ");
+		else
+			seq_printf(m, "x  ");
+	}
+	seq_printf(m, "%s\n", level_name[level]);
+}
+
+/*
+ * On 64 bits, sign-extend the 48-bit address to 64 bits
+ */
+static unsigned long normalize_addr(unsigned long u)
+{
+#ifdef CONFIG_X86_64
+	return (signed long)(u << 16) >> 16;
+#else
+	return u;
+#endif
+}
+
+/*
+ * This function gets called on a break in a continuous series
+ * of PTE entries; the next one is different so we need to
+ * print what we collected so far.
+ */
+static void note_page(struct seq_file *m, struct pg_state *st,
+		      pgprot_t new_prot, int level)
+{
+	pgprotval_t prot, cur;
+	static const char units[] = "KMGTPE";
+
+	/*
+	 * If we have a "break" in the series, we need to flush the state that
+	 * we have now. "break" is either changing perms, levels or
+	 * address space marker.
+	 */
+	prot = pgprot_val(new_prot) & ~(PTE_MASK);
+	cur = pgprot_val(st->current_prot) & ~(PTE_MASK);
+
+	if (!st->level) {
+		/* First entry */
+		st->current_prot = new_prot;
+		st->level = level;
+		st->marker = address_markers;
+		seq_printf(m, "---[ %s ]---\n", st->marker->name);
+	} else if (prot != cur || level != st->level ||
+		   st->current_address >= st->marker[1].start_address) {
+		const char *unit = units;
+		unsigned long delta;
+
+		/*
+		 * Now print the actual finished series
+		 */
+		seq_printf(m, "0x%p-0x%p   ",
+			   (void *)st->start_address,
+			   (void *)st->current_address);
+
+		delta = (st->current_address - st->start_address) >> 10;
+		while (!(delta & 1023) && unit[1]) {
+			delta >>= 10;
+			unit++;
+		}
+		seq_printf(m, "%9lu%c ", delta, *unit);
+		printk_prot(m, st->current_prot, st->level);
+
+		/*
+		 * We print markers for special areas of address space,
+		 * such as the start of vmalloc space etc.
+		 * This helps in the interpretation.
+		 */
+		if (st->current_address >= st->marker[1].start_address) {
+			st->marker++;
+			seq_printf(m, "---[ %s ]---\n", st->marker->name);
+		}
+
+		st->start_address = st->current_address;
+		st->current_prot = new_prot;
+		st->level = level;
+	}
+}
+
+static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr,
+							unsigned long P)
+{
+	int i;
+	pte_t *start;
+
+	start = (pte_t *) pmd_page_vaddr(addr);
+	for (i = 0; i < PTRS_PER_PTE; i++) {
+		pgprot_t prot = pte_pgprot(*start);
+
+		st->current_address = normalize_addr(P + i * PTE_LEVEL_MULT);
+		note_page(m, st, prot, 4);
+		start++;
+	}
+}
+
+#if PTRS_PER_PMD > 1
+
+static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t addr,
+							unsigned long P)
+{
+	int i;
+	pmd_t *start;
+
+	start = (pmd_t *) pud_page_vaddr(addr);
+	for (i = 0; i < PTRS_PER_PMD; i++) {
+		st->current_address = normalize_addr(P + i * PMD_LEVEL_MULT);
+		if (!pmd_none(*start)) {
+			pgprotval_t prot = pmd_val(*start) & ~PTE_MASK;
+
+			if (pmd_large(*start) || !pmd_present(*start))
+				note_page(m, st, __pgprot(prot), 3);
+			else
+				walk_pte_level(m, st, *start,
+					       P + i * PMD_LEVEL_MULT);
+		} else
+			note_page(m, st, __pgprot(0), 3);
+		start++;
+	}
+}
+
+#else
+#define walk_pmd_level(m,s,a,p) walk_pte_level(m,s,__pmd(pud_val(a)),p)
+#define pud_large(a) pmd_large(__pmd(pud_val(a)))
+#define pud_none(a)  pmd_none(__pmd(pud_val(a)))
+#endif
+
+#if PTRS_PER_PUD > 1
+
+static void walk_pud_level(struct seq_file *m, struct pg_state *st, pgd_t addr,
+							unsigned long P)
+{
+	int i;
+	pud_t *start;
+
+	start = (pud_t *) pgd_page_vaddr(addr);
+
+	for (i = 0; i < PTRS_PER_PUD; i++) {
+		st->current_address = normalize_addr(P + i * PUD_LEVEL_MULT);
+		if (!pud_none(*start)) {
+			pgprotval_t prot = pud_val(*start) & ~PTE_MASK;
+
+			if (pud_large(*start) || !pud_present(*start))
+				note_page(m, st, __pgprot(prot), 2);
+			else
+				walk_pmd_level(m, st, *start,
+					       P + i * PUD_LEVEL_MULT);
+		} else
+			note_page(m, st, __pgprot(0), 2);
+
+		start++;
+	}
+}
+
+#else
+#define walk_pud_level(m,s,a,p) walk_pmd_level(m,s,__pud(pgd_val(a)),p)
+#define pgd_large(a) pud_large(__pud(pgd_val(a)))
+#define pgd_none(a)  pud_none(__pud(pgd_val(a)))
+#endif
+
+static void walk_pgd_level(struct seq_file *m)
+{
+#ifdef CONFIG_X86_64
+	pgd_t *start = (pgd_t *) &init_level4_pgt;
+#else
+	pgd_t *start = swapper_pg_dir;
+#endif
+	int i;
+	struct pg_state st;
+
+	memset(&st, 0, sizeof(st));
+
+	for (i = 0; i < PTRS_PER_PGD; i++) {
+		st.current_address = normalize_addr(i * PGD_LEVEL_MULT);
+		if (!pgd_none(*start)) {
+			pgprotval_t prot = pgd_val(*start) & ~PTE_MASK;
+
+			if (pgd_large(*start) || !pgd_present(*start))
+				note_page(m, &st, __pgprot(prot), 1);
+			else
+				walk_pud_level(m, &st, *start,
+					       i * PGD_LEVEL_MULT);
+		} else
+			note_page(m, &st, __pgprot(0), 1);
+
+		start++;
+	}
+
+	/* Flush out the last page */
+	st.current_address = normalize_addr(PTRS_PER_PGD*PGD_LEVEL_MULT);
+	note_page(m, &st, __pgprot(0), 0);
+}
+
+static int ptdump_show(struct seq_file *m, void *v)
+{
+	walk_pgd_level(m);
+	return 0;
+}
+
+static int ptdump_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, ptdump_show, NULL);
+}
+
+static const struct file_operations ptdump_fops = {
+	.open		= ptdump_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+int pt_dump_init(void)
+{
+	struct dentry *pe;
+
+#ifdef CONFIG_X86_32
+	/* Not a compile-time constant on x86-32 */
+	address_markers[2].start_address = VMALLOC_START;
+	address_markers[3].start_address = VMALLOC_END;
+# ifdef CONFIG_HIGHMEM
+	address_markers[4].start_address = PKMAP_BASE;
+	address_markers[5].start_address = FIXADDR_START;
+# else
+	address_markers[4].start_address = FIXADDR_START;
+# endif
+#endif
+
+	pe = debugfs_create_file("kernel_page_tables", 0600, NULL, NULL,
+				 &ptdump_fops);
+	if (!pe)
+		return -ENOMEM;
+
+	return 0;
+}
+
+__initcall(pt_dump_init);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Arjan van de Ven <arjan@linux.intel.com>");
+MODULE_DESCRIPTION("Kernel debugging helper that dumps pagetables");
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index ec08d83..fd7e179 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -639,7 +639,7 @@
 #ifdef CONFIG_X86_32
 	/* It's safe to allow irq's after cr2 has been saved and the vmalloc
 	   fault has been handled. */
-	if (regs->flags & (X86_EFLAGS_IF|VM_MASK))
+	if (regs->flags & (X86_EFLAGS_IF | X86_VM_MASK))
 		local_irq_enable();
 
 	/*
@@ -976,9 +976,5 @@
 		if (address == start)
 			start = address + PGDIR_SIZE;
 	}
-	/* Check that there is no need to do the same for the modules area. */
-	BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
-	BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
-				(__START_KERNEL & PGDIR_MASK)));
 #endif
 }
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index ee1091a..1500dc8 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -51,6 +51,8 @@
 
 unsigned int __VMALLOC_RESERVE = 128 << 20;
 
+unsigned long max_pfn_mapped;
+
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 unsigned long highstart_pfn, highend_pfn;
 
@@ -179,8 +181,13 @@
 			/*
 			 * Map with big pages if possible, otherwise
 			 * create normal page tables:
+			 *
+			 * Don't use a large page for the first 2/4MB of memory
+			 * because there are often fixed size MTRRs in there
+			 * and overlapping MTRRs into large pages can cause
+			 * slowdowns.
 			 */
-			if (cpu_has_pse) {
+			if (cpu_has_pse && !(pgd_idx == 0 && pmd_idx == 0)) {
 				unsigned int addr2;
 				pgprot_t prot = PAGE_KERNEL_LARGE;
 
@@ -194,6 +201,7 @@
 				set_pmd(pmd, pfn_pmd(pfn, prot));
 
 				pfn += PTRS_PER_PTE;
+				max_pfn_mapped = pfn;
 				continue;
 			}
 			pte = one_page_table_init(pmd);
@@ -208,6 +216,7 @@
 
 				set_pte(pte, pfn_pte(pfn, prot));
 			}
+			max_pfn_mapped = pfn;
 		}
 	}
 }
@@ -723,25 +732,17 @@
 	unsigned long start = PFN_ALIGN(_text);
 	unsigned long size = PFN_ALIGN(_etext) - start;
 
-#ifndef CONFIG_KPROBES
-#ifdef CONFIG_HOTPLUG_CPU
-	/* It must still be possible to apply SMP alternatives. */
-	if (num_possible_cpus() <= 1)
-#endif
-	{
-		set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
-		printk(KERN_INFO "Write protecting the kernel text: %luk\n",
-			size >> 10);
+	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
+	printk(KERN_INFO "Write protecting the kernel text: %luk\n",
+		size >> 10);
 
 #ifdef CONFIG_CPA_DEBUG
-		printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
-			start, start+size);
-		set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);
+	printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
+		start, start+size);
+	set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);
 
-		printk(KERN_INFO "Testing CPA: write protecting again\n");
-		set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
-#endif
-	}
+	printk(KERN_INFO "Testing CPA: write protecting again\n");
+	set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
 #endif
 	start += size;
 	size = (unsigned long)__end_rodata - start;
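The interesting change in this file is the condition guarding the PSE fast
path. A stand-alone restatement of just that predicate, mirroring the hunk
above:

#include <stdbool.h>
#include <stdio.h>

/* Take the large-page path only when the CPU supports PSE and this is
 * not the very first PMD slot, where fixed-size MTRRs commonly live;
 * overlapping MTRRs into a large page can cause slowdowns. */
static bool use_large_page(bool has_pse, int pgd_idx, int pmd_idx)
{
	return has_pse && !(pgd_idx == 0 && pmd_idx == 0);
}

int main(void)
{
	printf("slot (0,0): %s\n", use_large_page(true, 0, 0) ? "PSE" : "4k");
	printf("slot (0,1): %s\n", use_large_page(true, 0, 1) ? "PSE" : "4k");
	return 0;
}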
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index a02a14f..1076097 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -54,6 +54,26 @@
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
+int direct_gbpages __meminitdata
+#ifdef CONFIG_DIRECT_GBPAGES
+				= 1
+#endif
+;
+
+static int __init parse_direct_gbpages_off(char *arg)
+{
+	direct_gbpages = 0;
+	return 0;
+}
+early_param("nogbpages", parse_direct_gbpages_off);
+
+static int __init parse_direct_gbpages_on(char *arg)
+{
+	direct_gbpages = 1;
+	return 0;
+}
+early_param("gbpages", parse_direct_gbpages_on);
+
 /*
  * NOTE: pagetable_init alloc all the fixmap pagetables contiguous on the
  * physical space so we can cache the place of the first one and move
@@ -69,9 +89,6 @@
 
 	printk(KERN_INFO "Mem-info:\n");
 	show_free_areas();
-	printk(KERN_INFO "Free swap:       %6ldkB\n",
-		nr_swap_pages << (PAGE_SHIFT-10));
-
 	for_each_online_pgdat(pgdat) {
 		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
 			/*
@@ -296,7 +313,7 @@
 	__flush_tlb_all();
 }
 
-static void __meminit
+static unsigned long __meminit
 phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
 {
 	int i = pmd_index(address);
@@ -318,21 +335,26 @@
 		set_pte((pte_t *)pmd,
 			pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
 	}
+	return address;
 }
 
-static void __meminit
+static unsigned long __meminit
 phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
 {
 	pmd_t *pmd = pmd_offset(pud, 0);
+	unsigned long last_map_addr;
+
 	spin_lock(&init_mm.page_table_lock);
-	phys_pmd_init(pmd, address, end);
+	last_map_addr = phys_pmd_init(pmd, address, end);
 	spin_unlock(&init_mm.page_table_lock);
 	__flush_tlb_all();
+	return last_map_addr;
 }
 
-static void __meminit
+static unsigned long __meminit
 phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
 {
+	unsigned long last_map_addr = end;
 	int i = pud_index(addr);
 
 	for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
@@ -350,7 +372,15 @@
 		}
 
 		if (pud_val(*pud)) {
-			phys_pmd_update(pud, addr, end);
+			if (!pud_large(*pud))
+				last_map_addr = phys_pmd_update(pud, addr, end);
+			continue;
+		}
+
+		if (direct_gbpages) {
+			set_pte((pte_t *)pud,
+				pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
+			last_map_addr = (addr & PUD_MASK) + PUD_SIZE;
 			continue;
 		}
 
@@ -358,12 +388,14 @@
 
 		spin_lock(&init_mm.page_table_lock);
 		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
-		phys_pmd_init(pmd, addr, end);
+		last_map_addr = phys_pmd_init(pmd, addr, end);
 		spin_unlock(&init_mm.page_table_lock);
 
 		unmap_low_page(pmd);
 	}
 	__flush_tlb_all();
+
+	return last_map_addr >> PAGE_SHIFT;
 }
 
 static void __init find_early_table_space(unsigned long end)
@@ -371,9 +403,11 @@
 	unsigned long puds, pmds, tables, start;
 
 	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
-	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
-	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
-		 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);
+	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE);
+	if (!direct_gbpages) {
+		pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
+		tables += round_up(pmds * sizeof(pmd_t), PAGE_SIZE);
+	}
 
 	/*
 	 * RED-PEN putting page tables only on node 0 could
@@ -393,16 +427,135 @@
 		(table_start << PAGE_SHIFT) + tables);
 }
 
+static void __init init_gbpages(void)
+{
+	if (direct_gbpages && cpu_has_gbpages)
+		printk(KERN_INFO "Using GB pages for direct mapping\n");
+	else
+		direct_gbpages = 0;
+}
+
+#ifdef CONFIG_MEMTEST_BOOTPARAM
+
+static void __init memtest(unsigned long start_phys, unsigned long size,
+				 unsigned pattern)
+{
+	unsigned long i;
+	unsigned long *start;
+	unsigned long start_bad;
+	unsigned long last_bad;
+	unsigned long val;
+	unsigned long start_phys_aligned;
+	unsigned long count;
+	unsigned long incr;
+
+	switch (pattern) {
+	case 0:
+		val = 0UL;
+		break;
+	case 1:
+		val = -1UL;
+		break;
+	case 2:
+		val = 0x5555555555555555UL;
+		break;
+	case 3:
+		val = 0xaaaaaaaaaaaaaaaaUL;
+		break;
+	default:
+		return;
+	}
+
+	incr = sizeof(unsigned long);
+	start_phys_aligned = ALIGN(start_phys, incr);
+	count = (size - (start_phys_aligned - start_phys))/incr;
+	start = __va(start_phys_aligned);
+	start_bad = 0;
+	last_bad = 0;
+
+	for (i = 0; i < count; i++)
+		start[i] = val;
+	for (i = 0; i < count; i++, start++, start_phys_aligned += incr) {
+		if (*start != val) {
+			if (start_phys_aligned == last_bad + incr) {
+				last_bad += incr;
+			} else {
+				if (start_bad) {
+					printk(KERN_CONT "\n  %016lx bad mem addr %016lx - %016lx reserved",
+						val, start_bad, last_bad + incr);
+					reserve_early(start_bad, last_bad - start_bad, "BAD RAM");
+				}
+				start_bad = last_bad = start_phys_aligned;
+			}
+		}
+	}
+	if (start_bad) {
+		printk(KERN_CONT "\n  %016lx bad mem addr %016lx - %016lx reserved",
+			val, start_bad, last_bad + incr);
+		reserve_early(start_bad, last_bad - start_bad, "BAD RAM");
+	}
+
+}
+
+static int memtest_pattern __initdata = CONFIG_MEMTEST_BOOTPARAM_VALUE;
+
+static int __init parse_memtest(char *arg)
+{
+	if (arg)
+		memtest_pattern = simple_strtoul(arg, NULL, 0);
+	return 0;
+}
+
+early_param("memtest", parse_memtest);
+
+static void __init early_memtest(unsigned long start, unsigned long end)
+{
+	unsigned long t_start, t_size;
+	unsigned pattern;
+
+	if (!memtest_pattern)
+		return;
+
+	printk(KERN_INFO "early_memtest: pattern num %d", memtest_pattern);
+	for (pattern = 0; pattern < memtest_pattern; pattern++) {
+		t_start = start;
+		t_size = 0;
+		while (t_start < end) {
+			t_start = find_e820_area_size(t_start, &t_size, 1);
+
+			/* done ? */
+			if (t_start >= end)
+				break;
+			if (t_start + t_size > end)
+				t_size = end - t_start;
+
+			printk(KERN_CONT "\n  %016lx - %016lx pattern %d",
+				t_start, t_start + t_size, pattern);
+
+			memtest(t_start, t_size, pattern);
+
+			t_start += t_size;
+		}
+	}
+	printk(KERN_CONT "\n");
+}
+#else
+static void __init early_memtest(unsigned long start, unsigned long end)
+{
+}
+#endif
+
 /*
  * Setup the direct mapping of the physical memory at PAGE_OFFSET.
  * This runs before bootmem is initialized and gets pages directly from
  * the physical memory. To access them they are temporarily mapped.
  */
-void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
+unsigned long __init_refok init_memory_mapping(unsigned long start, unsigned long end)
 {
-	unsigned long next;
+	unsigned long next, last_map_addr = end;
+	unsigned long start_phys = start, end_phys = end;
 
-	pr_debug("init_memory_mapping\n");
+	printk(KERN_INFO "init_memory_mapping\n");
 
 	/*
 	 * Find space for the kernel direct mapping tables.
@@ -411,8 +564,10 @@
 	 * memory mapped. Unfortunately this is done currently before the
 	 * nodes are discovered.
 	 */
-	if (!after_bootmem)
+	if (!after_bootmem) {
+		init_gbpages();
 		find_early_table_space(end);
+	}
 
 	start = (unsigned long)__va(start);
 	end = (unsigned long)__va(end);
@@ -430,7 +585,7 @@
 		next = start + PGDIR_SIZE;
 		if (next > end)
 			next = end;
-		phys_pud_init(pud, __pa(start), __pa(next));
+		last_map_addr = phys_pud_init(pud, __pa(start), __pa(next));
 		if (!after_bootmem)
 			set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
 		unmap_low_page(pud);
@@ -443,6 +598,11 @@
 	if (!after_bootmem)
 		reserve_early(table_start << PAGE_SHIFT,
 				 table_end << PAGE_SHIFT, "PGTABLE");
+
+	if (!after_bootmem)
+		early_memtest(start_phys, end_phys);
+
+	return last_map_addr;
 }
 
 #ifndef CONFIG_NUMA
@@ -482,11 +642,13 @@
 {
 	struct pglist_data *pgdat = NODE_DATA(nid);
 	struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
-	unsigned long start_pfn = start >> PAGE_SHIFT;
+	unsigned long last_mapped_pfn, start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 	int ret;
 
-	init_memory_mapping(start, start + size-1);
+	last_mapped_pfn = init_memory_mapping(start, start + size-1);
+	if (last_mapped_pfn > max_pfn_mapped)
+		max_pfn_mapped = last_mapped_pfn;
 
 	ret = __add_pages(zone, start_pfn, nr_pages);
 	WARN_ON(1);
@@ -596,24 +758,7 @@
 
 void mark_rodata_ro(void)
 {
-	unsigned long start = (unsigned long)_stext, end;
-
-#ifdef CONFIG_HOTPLUG_CPU
-	/* It must still be possible to apply SMP alternatives. */
-	if (num_possible_cpus() > 1)
-		start = (unsigned long)_etext;
-#endif
-
-#ifdef CONFIG_KPROBES
-	start = (unsigned long)__start_rodata;
-#endif
-
-	end = (unsigned long)__end_rodata;
-	start = (start + PAGE_SIZE - 1) & PAGE_MASK;
-	end &= PAGE_MASK;
-	if (end <= start)
-		return;
-
+	unsigned long start = PFN_ALIGN(_stext), end = PFN_ALIGN(__end_rodata);
 
 	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
 	       (end - start) >> 10);
@@ -636,6 +781,7 @@
 	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
 #endif
 }
+
 #endif
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -657,7 +803,7 @@
 		 * This can happen with kdump kernels when accessing
 		 * firmware tables:
 		 */
-		if (pfn < end_pfn_map)
+		if (pfn < max_pfn_mapped)
 			return;
 
 		printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %u\n",
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 794895c..c590fd2 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -19,11 +19,7 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/pgalloc.h>
-
-enum ioremap_mode {
-	IOR_MODE_UNCACHED,
-	IOR_MODE_CACHED,
-};
+#include <asm/pat.h>
 
 #ifdef CONFIG_X86_64
 
@@ -35,11 +31,23 @@
 }
 EXPORT_SYMBOL(__phys_addr);
 
+static inline int phys_addr_valid(unsigned long addr)
+{
+	return addr < (1UL << boot_cpu_data.x86_phys_bits);
+}
+
+#else
+
+static inline int phys_addr_valid(unsigned long addr)
+{
+	return 1;
+}
+
 #endif
 
 int page_is_ram(unsigned long pagenr)
 {
-	unsigned long addr, end;
+	resource_size_t addr, end;
 	int i;
 
 	/*
@@ -78,19 +86,22 @@
  * Fix up the linear direct mapping of the kernel to avoid cache attribute
  * conflicts.
  */
-static int ioremap_change_attr(unsigned long vaddr, unsigned long size,
-			       enum ioremap_mode mode)
+int ioremap_change_attr(unsigned long vaddr, unsigned long size,
+			       unsigned long prot_val)
 {
 	unsigned long nrpages = size >> PAGE_SHIFT;
 	int err;
 
-	switch (mode) {
-	case IOR_MODE_UNCACHED:
+	switch (prot_val) {
+	case _PAGE_CACHE_UC:
 	default:
-		err = set_memory_uc(vaddr, nrpages);
+		err = _set_memory_uc(vaddr, nrpages);
 		break;
-	case IOR_MODE_CACHED:
-		err = set_memory_wb(vaddr, nrpages);
+	case _PAGE_CACHE_WC:
+		err = _set_memory_wc(vaddr, nrpages);
+		break;
+	case _PAGE_CACHE_WB:
+		err = _set_memory_wb(vaddr, nrpages);
 		break;
 	}
 
@@ -107,17 +118,27 @@
  * caller shouldn't need to know that small detail.
  */
 static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
-			       enum ioremap_mode mode)
+			       unsigned long prot_val)
 {
-	unsigned long pfn, offset, last_addr, vaddr;
+	unsigned long pfn, offset, vaddr;
+	resource_size_t last_addr;
 	struct vm_struct *area;
+	unsigned long new_prot_val;
 	pgprot_t prot;
+	int retval;
 
 	/* Don't allow wraparound or zero size */
 	last_addr = phys_addr + size - 1;
 	if (!size || last_addr < phys_addr)
 		return NULL;
 
+	if (!phys_addr_valid(phys_addr)) {
+		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
+		       phys_addr);
+		WARN_ON_ONCE(1);
+		return NULL;
+	}
+
 	/*
 	 * Don't remap the low PCI/ISA area, it's always mapped..
 	 */
@@ -127,25 +148,14 @@
 	/*
 	 * Don't allow anybody to remap normal RAM that we're using..
 	 */
-	for (pfn = phys_addr >> PAGE_SHIFT; pfn < max_pfn_mapped &&
-	     (pfn << PAGE_SHIFT) < last_addr; pfn++) {
-		if (page_is_ram(pfn) && pfn_valid(pfn) &&
-		    !PageReserved(pfn_to_page(pfn)))
-			return NULL;
-	}
+	for (pfn = phys_addr >> PAGE_SHIFT;
+				(pfn << PAGE_SHIFT) < last_addr; pfn++) {
 
-	switch (mode) {
-	case IOR_MODE_UNCACHED:
-	default:
-		/*
-		 * FIXME: we will use UC MINUS for now, as video fb drivers
-		 * depend on it. Upcoming ioremap_wc() will fix this behavior.
-		 */
-		prot = PAGE_KERNEL_UC_MINUS;
-		break;
-	case IOR_MODE_CACHED:
-		prot = PAGE_KERNEL;
-		break;
+		int is_ram = page_is_ram(pfn);
+
+		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
+			return NULL;
+		WARN_ON_ONCE(is_ram);
 	}
 
 	/*
@@ -155,6 +165,49 @@
 	phys_addr &= PAGE_MASK;
 	size = PAGE_ALIGN(last_addr+1) - phys_addr;
 
+	retval = reserve_memtype(phys_addr, phys_addr + size,
+						prot_val, &new_prot_val);
+	if (retval) {
+		pr_debug("Warning: reserve_memtype returned %d\n", retval);
+		return NULL;
+	}
+
+	if (prot_val != new_prot_val) {
+		/*
+		 * Do not fall back to certain memory types for certain
+		 * requested types:
+		 * - request is uncached, return cannot be write-back
+		 * - request is uncached, return cannot be write-combine
+		 * - request is write-combine, return cannot be write-back
+		 */
+		if ((prot_val == _PAGE_CACHE_UC &&
+		     (new_prot_val == _PAGE_CACHE_WB ||
+		      new_prot_val == _PAGE_CACHE_WC)) ||
+		    (prot_val == _PAGE_CACHE_WC &&
+		     new_prot_val == _PAGE_CACHE_WB)) {
+			pr_debug(
+		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
+				phys_addr, phys_addr + size,
+				prot_val, new_prot_val);
+			free_memtype(phys_addr, phys_addr + size);
+			return NULL;
+		}
+		prot_val = new_prot_val;
+	}
+
+	switch (prot_val) {
+	case _PAGE_CACHE_UC:
+	default:
+		prot = PAGE_KERNEL_NOCACHE;
+		break;
+	case _PAGE_CACHE_WC:
+		prot = PAGE_KERNEL_WC;
+		break;
+	case _PAGE_CACHE_WB:
+		prot = PAGE_KERNEL;
+		break;
+	}
+
 	/*
 	 * Ok, go for it..
 	 */
@@ -164,11 +217,13 @@
 	area->phys_addr = phys_addr;
 	vaddr = (unsigned long) area->addr;
 	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
+		free_memtype(phys_addr, phys_addr + size);
 		free_vm_area(area);
 		return NULL;
 	}
 
-	if (ioremap_change_attr(vaddr, size, mode) < 0) {
+	if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
+		free_memtype(phys_addr, phys_addr + size);
 		vunmap(area->addr);
 		return NULL;
 	}
@@ -199,13 +254,32 @@
  */
 void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
 {
-	return __ioremap(phys_addr, size, IOR_MODE_UNCACHED);
+	return __ioremap(phys_addr, size, _PAGE_CACHE_UC);
 }
 EXPORT_SYMBOL(ioremap_nocache);
 
+/**
+ * ioremap_wc	-	map memory into CPU space write combined
+ * @offset:	bus address of the memory
+ * @size:	size of the resource to map
+ *
+ * This version of ioremap ensures that the memory is marked write combining.
+ * Write combining allows faster writes to some hardware devices.
+ *
+ * Must be freed with iounmap.
+ */
+void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
+{
+	if (pat_wc_enabled)
+		return __ioremap(phys_addr, size, _PAGE_CACHE_WC);
+	else
+		return ioremap_nocache(phys_addr, size);
+}
+EXPORT_SYMBOL(ioremap_wc);
+
 void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
 {
-	return __ioremap(phys_addr, size, IOR_MODE_CACHED);
+	return __ioremap(phys_addr, size, _PAGE_CACHE_WB);
 }
 EXPORT_SYMBOL(ioremap_cache);
 
@@ -252,6 +326,8 @@
 		return;
 	}
 
+	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));
+
 	/* Finally remove it */
 	o = remove_vm_area((void *)addr);
 	BUG_ON(p != o || o == NULL);
@@ -272,8 +348,8 @@
 early_param("early_ioremap_debug", early_ioremap_debug_setup);
 
 static __initdata int after_paging_init;
-static __initdata pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)]
-				__attribute__((aligned(PAGE_SIZE)));
+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)]
+		__section(.bss.page_aligned);
 
 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
 {
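The newly exported ioremap_wc() is meant to be called unconditionally by
drivers. A minimal kernel-module sketch of that usage; FB_PHYS/FB_SIZE are
made-up stand-ins for a framebuffer BAR that a real driver would read from
its pci_dev:

#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>

#define FB_PHYS	0xd0000000UL	/* illustrative physical address */
#define FB_SIZE	0x100000UL

static void __iomem *fb;

static int __init wc_demo_init(void)
{
	/* ioremap_wc() yields a write-combining mapping when PAT is
	 * enabled and quietly degrades to uncached otherwise. */
	fb = ioremap_wc(FB_PHYS, FB_SIZE);
	return fb ? 0 : -ENOMEM;
}

static void __exit wc_demo_exit(void)
{
	iounmap(fb);	/* also releases the memtype reservation */
}

module_init(wc_demo_init);
module_exit(wc_demo_exit);
MODULE_LICENSE("GPL");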
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 16b82ad..2ea56f4 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -31,13 +31,15 @@
 
 struct memnode memnode;
 
+#ifdef CONFIG_SMP
 int x86_cpu_to_node_map_init[NR_CPUS] = {
 	[0 ... NR_CPUS-1] = NUMA_NO_NODE
 };
 void *x86_cpu_to_node_map_early_ptr;
+EXPORT_SYMBOL(x86_cpu_to_node_map_early_ptr);
+#endif
 DEFINE_PER_CPU(int, x86_cpu_to_node_map) = NUMA_NO_NODE;
 EXPORT_PER_CPU_SYMBOL(x86_cpu_to_node_map);
-EXPORT_SYMBOL(x86_cpu_to_node_map_early_ptr);
 
 s16 apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
 	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
@@ -548,8 +550,6 @@
 {
 	int *cpu_to_node_map = x86_cpu_to_node_map_early_ptr;
 
-	cpu_pda(cpu)->nodenumber = node;
-
 	if(cpu_to_node_map)
 		cpu_to_node_map[cpu] = node;
 	else if(per_cpu_offset(cpu))
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 7b79f6b..f7823a1 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -9,6 +9,8 @@
 #include <linux/slab.h>
 #include <linux/mm.h>
 #include <linux/interrupt.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
 
 #include <asm/e820.h>
 #include <asm/processor.h>
@@ -17,6 +19,7 @@
 #include <asm/uaccess.h>
 #include <asm/pgalloc.h>
 #include <asm/proto.h>
+#include <asm/pat.h>
 
 /*
  * The current flushing context - we pass it instead of 5 arguments:
@@ -28,6 +31,7 @@
 	int		numpages;
 	int		flushtlb;
 	unsigned long	pfn;
+	unsigned	force_split : 1;
 };
 
 #ifdef CONFIG_X86_64
@@ -259,6 +263,9 @@
 	int i, do_split = 1;
 	unsigned int level;
 
+	if (cpa->force_split)
+		return 1;
+
 	spin_lock_irqsave(&pgd_lock, flags);
 	/*
 	 * Check for races, another CPU might have split this page
@@ -535,7 +542,7 @@
 repeat:
 	kpte = lookup_address(address, &level);
 	if (!kpte)
-		return primary ? -EINVAL : 0;
+		return 0;
 
 	old_pte = *kpte;
 	if (!pte_val(old_pte)) {
@@ -693,7 +700,8 @@
 }
 
 static int change_page_attr_set_clr(unsigned long addr, int numpages,
-				    pgprot_t mask_set, pgprot_t mask_clr)
+				    pgprot_t mask_set, pgprot_t mask_clr,
+				    int force_split)
 {
 	struct cpa_data cpa;
 	int ret, cache, checkalias;
@@ -704,7 +712,7 @@
 	 */
 	mask_set = canon_pgprot(mask_set);
 	mask_clr = canon_pgprot(mask_clr);
-	if (!pgprot_val(mask_set) && !pgprot_val(mask_clr))
+	if (!pgprot_val(mask_set) && !pgprot_val(mask_clr) && !force_split)
 		return 0;
 
 	/* Ensure we are PAGE_SIZE aligned */
@@ -721,6 +729,7 @@
 	cpa.mask_set = mask_set;
 	cpa.mask_clr = mask_clr;
 	cpa.flushtlb = 0;
+	cpa.force_split = force_split;
 
 	/* No alias checking for _NX bit modifications */
 	checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;
@@ -759,26 +768,61 @@
 static inline int change_page_attr_set(unsigned long addr, int numpages,
 				       pgprot_t mask)
 {
-	return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
+	return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0);
 }
 
 static inline int change_page_attr_clear(unsigned long addr, int numpages,
 					 pgprot_t mask)
 {
-	return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
+	return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0);
+}
+
+int _set_memory_uc(unsigned long addr, int numpages)
+{
+	return change_page_attr_set(addr, numpages,
+				    __pgprot(_PAGE_CACHE_UC));
 }
 
 int set_memory_uc(unsigned long addr, int numpages)
 {
-	return change_page_attr_set(addr, numpages,
-				    __pgprot(_PAGE_PCD));
+	if (reserve_memtype(addr, addr + numpages * PAGE_SIZE,
+	                    _PAGE_CACHE_UC, NULL))
+		return -EINVAL;
+
+	return _set_memory_uc(addr, numpages);
 }
 EXPORT_SYMBOL(set_memory_uc);
 
-int set_memory_wb(unsigned long addr, int numpages)
+int _set_memory_wc(unsigned long addr, int numpages)
+{
+	return change_page_attr_set(addr, numpages,
+				    __pgprot(_PAGE_CACHE_WC));
+}
+
+int set_memory_wc(unsigned long addr, int numpages)
+{
+	if (!pat_wc_enabled)
+		return set_memory_uc(addr, numpages);
+
+	if (reserve_memtype(addr, addr + numpages * PAGE_SIZE,
+		_PAGE_CACHE_WC, NULL))
+		return -EINVAL;
+
+	return _set_memory_wc(addr, numpages);
+}
+EXPORT_SYMBOL(set_memory_wc);
+
+int _set_memory_wb(unsigned long addr, int numpages)
 {
 	return change_page_attr_clear(addr, numpages,
-				      __pgprot(_PAGE_PCD | _PAGE_PWT));
+				      __pgprot(_PAGE_CACHE_MASK));
+}
+
+int set_memory_wb(unsigned long addr, int numpages)
+{
+	free_memtype(addr, addr + numpages * PAGE_SIZE);
+
+	return _set_memory_wb(addr, numpages);
 }
 EXPORT_SYMBOL(set_memory_wb);
 
@@ -809,6 +853,12 @@
 	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
 }
 
+int set_memory_4k(unsigned long addr, int numpages)
+{
+	return change_page_attr_set_clr(addr, numpages, __pgprot(0),
+					__pgprot(0), 1);
+}
+
 int set_pages_uc(struct page *page, int numpages)
 {
 	unsigned long addr = (unsigned long)page_address(page);
@@ -918,6 +968,45 @@
 	cpa_fill_pool(NULL);
 }
 
+#ifdef CONFIG_DEBUG_FS
+static int dpa_show(struct seq_file *m, void *v)
+{
+	seq_puts(m, "DEBUG_PAGEALLOC\n");
+	seq_printf(m, "pool_size     : %lu\n", pool_size);
+	seq_printf(m, "pool_pages    : %lu\n", pool_pages);
+	seq_printf(m, "pool_low      : %lu\n", pool_low);
+	seq_printf(m, "pool_used     : %lu\n", pool_used);
+	seq_printf(m, "pool_failed   : %lu\n", pool_failed);
+
+	return 0;
+}
+
+static int dpa_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, dpa_show, NULL);
+}
+
+static const struct file_operations dpa_fops = {
+	.open		= dpa_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+int __init debug_pagealloc_proc_init(void)
+{
+	struct dentry *de;
+
+	de = debugfs_create_file("debug_pagealloc", 0600, NULL, NULL,
+				 &dpa_fops);
+	if (!de)
+		return -ENOMEM;
+
+	return 0;
+}
+__initcall(debug_pagealloc_proc_init);
+#endif
+
 #ifdef CONFIG_HIBERNATION
 
 bool kernel_page_present(struct page *page)
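The new set_memory_wc()/set_memory_wb() exports imply a pairing discipline:
the WC call reserves a memtype for the linear-map range, so it must be undone
with set_memory_wb() before the pages return to the allocator. A hedged
sketch of that lifecycle (the order-2 allocation is arbitrary):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>

static unsigned long buf;

static int wc_buf_get(void)
{
	buf = __get_free_pages(GFP_KERNEL, 2);	/* 4 pages */
	if (!buf)
		return -ENOMEM;
	if (set_memory_wc(buf, 4)) {	/* falls back to UC without PAT */
		free_pages(buf, 2);
		return -EINVAL;
	}
	return 0;
}

static void wc_buf_put(void)
{
	set_memory_wb(buf, 4);	/* frees the memtype reservation */
	free_pages(buf, 2);
}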
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
new file mode 100644
index 0000000..72c0f60
--- /dev/null
+++ b/arch/x86/mm/pat.c
@@ -0,0 +1,421 @@
+/*
+ * Handle caching attributes in page tables (PAT)
+ *
+ * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+ *          Suresh B Siddha <suresh.b.siddha@intel.com>
+ *
+ * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
+ */
+
+#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/fs.h>
+
+#include <asm/msr.h>
+#include <asm/tlbflush.h>
+#include <asm/processor.h>
+#include <asm/pgtable.h>
+#include <asm/pat.h>
+#include <asm/e820.h>
+#include <asm/cacheflush.h>
+#include <asm/fcntl.h>
+#include <asm/mtrr.h>
+
+int pat_wc_enabled = 1;
+
+static u64 __read_mostly boot_pat_state;
+
+static int nopat(char *str)
+{
+	pat_wc_enabled = 0;
+	printk(KERN_INFO "x86: PAT support disabled.\n");
+
+	return 0;
+}
+early_param("nopat", nopat);
+
+static int pat_known_cpu(void)
+{
+	if (!pat_wc_enabled)
+		return 0;
+
+	if (cpu_has_pat)
+		return 1;
+
+	pat_wc_enabled = 0;
+	printk(KERN_INFO "CPU and/or kernel does not support PAT.\n");
+	return 0;
+}
+
+enum {
+	PAT_UC = 0,		/* uncached */
+	PAT_WC = 1,		/* Write combining */
+	PAT_WT = 4,		/* Write Through */
+	PAT_WP = 5,		/* Write Protected */
+	PAT_WB = 6,		/* Write Back (default) */
+	PAT_UC_MINUS = 7,	/* UC, but can be overridden by MTRR */
+};
+
+#define PAT(x,y)	((u64)PAT_ ## y << ((x)*8))
+
+void pat_init(void)
+{
+	u64 pat;
+
+#ifndef CONFIG_X86_PAT
+	nopat(NULL);
+#endif
+
+	/* Boot CPU enables PAT based on CPU feature */
+	if (!smp_processor_id() && !pat_known_cpu())
+		return;
+
+	/* APs enable PAT iff boot CPU has enabled it before */
+	if (smp_processor_id() && !pat_wc_enabled)
+		return;
+
+	/* Set PWT to Write-Combining. All other bits stay the same */
+	/*
+	 * PTE encoding used in Linux:
+	 *      PAT
+	 *      |PCD
+	 *      ||PWT
+	 *      |||
+	 *      000 WB		_PAGE_CACHE_WB
+	 *      001 WC		_PAGE_CACHE_WC
+	 *      010 UC-		_PAGE_CACHE_UC_MINUS
+	 *      011 UC		_PAGE_CACHE_UC
+	 * PAT bit unused
+	 */
+	pat = PAT(0,WB) | PAT(1,WC) | PAT(2,UC_MINUS) | PAT(3,UC) |
+	      PAT(4,WB) | PAT(5,WC) | PAT(6,UC_MINUS) | PAT(7,UC);
+
+	/* Boot CPU check */
+	if (!smp_processor_id()) {
+		rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);
+	}
+
+	wrmsrl(MSR_IA32_CR_PAT, pat);
+	printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
+	       smp_processor_id(), boot_pat_state, pat);
+}
+
+#undef PAT
+
+static char *cattr_name(unsigned long flags)
+{
+	switch (flags & _PAGE_CACHE_MASK) {
+		case _PAGE_CACHE_UC:		return "uncached";
+		case _PAGE_CACHE_UC_MINUS:	return "uncached-minus";
+		case _PAGE_CACHE_WB:		return "write-back";
+		case _PAGE_CACHE_WC:		return "write-combining";
+		default:			return "broken";
+	}
+}
+
+/*
+ * The global memtype list keeps track of memory type for specific
+ * physical memory areas. Conflicting memory types in different
+ * mappings can cause CPU cache corruption. To avoid this we keep track.
+ *
+ * The list is sorted based on starting address and can contain multiple
+ * entries for each address (this allows reference counting for overlapping
+ * areas). All the aliases have the same cache attributes of course.
+ * Zero attributes are represented as holes.
+ *
+ * Currently the data structure is a list because the number of mappings
+ * is expected to be relatively small. If this should become a problem
+ * it could be changed to an rbtree or similar.
+ *
+ * memtype_lock protects the whole list.
+ */
+
+struct memtype {
+	u64 start;
+	u64 end;
+	unsigned long type;
+	struct list_head nd;
+};
+
+static LIST_HEAD(memtype_list);
+static DEFINE_SPINLOCK(memtype_lock); 	/* protects memtype list */
+
+/*
+ * Takes the intersection of the PAT memory type and the MTRR memory type
+ * and returns the resulting memory type as PAT understands it.
+ * (PAT and MTRR types do not use the same numeric values.)
+ * The intersection is based on the "Effective Memory Type" tables in the
+ * IA-32 SDM vol 3a.
+ */
+static int pat_x_mtrr_type(u64 start, u64 end, unsigned long prot,
+				unsigned long *ret_prot)
+{
+	unsigned long pat_type;
+	u8 mtrr_type;
+
+	mtrr_type = mtrr_type_lookup(start, end);
+	if (mtrr_type == 0xFF) {		/* MTRR not enabled */
+		*ret_prot = prot;
+		return 0;
+	}
+	if (mtrr_type == 0xFE) {		/* MTRR match error */
+		*ret_prot = _PAGE_CACHE_UC;
+		return -1;
+	}
+	if (mtrr_type != MTRR_TYPE_UNCACHABLE &&
+	    mtrr_type != MTRR_TYPE_WRBACK &&
+	    mtrr_type != MTRR_TYPE_WRCOMB) {	/* MTRR type unhandled */
+		*ret_prot = _PAGE_CACHE_UC;
+		return -1;
+	}
+
+	pat_type = prot & _PAGE_CACHE_MASK;
+	prot &= (~_PAGE_CACHE_MASK);
+
+	/* Currently doing intersection by hand. Optimize it later. */
+	if (pat_type == _PAGE_CACHE_WC) {
+		*ret_prot = prot | _PAGE_CACHE_WC;
+	} else if (pat_type == _PAGE_CACHE_UC_MINUS) {
+		*ret_prot = prot | _PAGE_CACHE_UC_MINUS;
+	} else if (pat_type == _PAGE_CACHE_UC ||
+	           mtrr_type == MTRR_TYPE_UNCACHABLE) {
+		*ret_prot = prot | _PAGE_CACHE_UC;
+	} else if (mtrr_type == MTRR_TYPE_WRCOMB) {
+		*ret_prot = prot | _PAGE_CACHE_WC;
+	} else {
+		*ret_prot = prot | _PAGE_CACHE_WB;
+	}
+
+	return 0;
+}
+
+int reserve_memtype(u64 start, u64 end, unsigned long req_type,
+			unsigned long *ret_type)
+{
+	struct memtype *new_entry = NULL;
+	struct memtype *parse;
+	unsigned long actual_type;
+	int err = 0;
+
+	/* Only track when pat_wc_enabled */
+	if (!pat_wc_enabled) {
+		if (ret_type)
+			*ret_type = req_type;
+
+		return 0;
+	}
+
+	/* Low ISA region is always mapped WB in page table. No need to track */
+	if (start >= ISA_START_ADDRESS && (end - 1) <= ISA_END_ADDRESS) {
+		if (ret_type)
+			*ret_type = _PAGE_CACHE_WB;
+
+		return 0;
+	}
+
+	req_type &= _PAGE_CACHE_MASK;
+	err = pat_x_mtrr_type(start, end, req_type, &actual_type);
+	if (err) {
+		if (ret_type)
+			*ret_type = actual_type;
+
+		return -EINVAL;
+	}
+
+	new_entry  = kmalloc(sizeof(struct memtype), GFP_KERNEL);
+	if (!new_entry)
+		return -ENOMEM;
+
+	new_entry->start = start;
+	new_entry->end = end;
+	new_entry->type = actual_type;
+
+	if (ret_type)
+		*ret_type = actual_type;
+
+	spin_lock(&memtype_lock);
+
+	/* Search for existing mapping that overlaps the current range */
+	list_for_each_entry(parse, &memtype_list, nd) {
+		struct memtype *saved_ptr;
+
+		if (parse->start >= end) {
+			printk("New Entry\n");
+			list_add(&new_entry->nd, parse->nd.prev);
+			new_entry = NULL;
+			break;
+		}
+
+		if (start <= parse->start && end >= parse->start) {
+			if (actual_type != parse->type && ret_type) {
+				actual_type = parse->type;
+				*ret_type = actual_type;
+				new_entry->type = actual_type;
+			}
+
+			if (actual_type != parse->type) {
+				printk(
+		KERN_INFO "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
+					current->comm, current->pid,
+					start, end,
+					cattr_name(actual_type),
+					cattr_name(parse->type));
+				err = -EBUSY;
+				break;
+			}
+
+			saved_ptr = parse;
+			/*
+			 * Check to see whether the request overlaps more
+			 * than one entry in the list
+			 */
+			list_for_each_entry_continue(parse, &memtype_list, nd) {
+				if (end <= parse->start) {
+					break;
+				}
+
+				if (actual_type != parse->type) {
+					printk(
+		KERN_INFO "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
+						current->comm, current->pid,
+						start, end,
+						cattr_name(actual_type),
+						cattr_name(parse->type));
+					err = -EBUSY;
+					break;
+				}
+			}
+
+			if (err) {
+				break;
+			}
+
+			printk("Overlap at 0x%Lx-0x%Lx\n",
+			       saved_ptr->start, saved_ptr->end);
+			/* No conflict. Go ahead and add this new entry */
+			list_add(&new_entry->nd, saved_ptr->nd.prev);
+			new_entry = NULL;
+			break;
+		}
+
+		if (start < parse->end) {
+			if (actual_type != parse->type && ret_type) {
+				actual_type = parse->type;
+				*ret_type = actual_type;
+				new_entry->type = actual_type;
+			}
+
+			if (actual_type != parse->type) {
+				printk(
+		KERN_INFO "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
+					current->comm, current->pid,
+					start, end,
+					cattr_name(actual_type),
+					cattr_name(parse->type));
+				err = -EBUSY;
+				break;
+			}
+
+			saved_ptr = parse;
+			/*
+			 * Check to see whether the request overlaps more
+			 * than one entry in the list
+			 */
+			list_for_each_entry_continue(parse, &memtype_list, nd) {
+				if (end <= parse->start) {
+					break;
+				}
+
+				if (actual_type != parse->type) {
+					printk(
+		KERN_INFO "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
+						current->comm, current->pid,
+						start, end,
+						cattr_name(actual_type),
+						cattr_name(parse->type));
+					err = -EBUSY;
+					break;
+				}
+			}
+
+			if (err) {
+				break;
+			}
+
+			printk("Overlap at 0x%Lx-0x%Lx\n",
+			       saved_ptr->start, saved_ptr->end);
+			/* No conflict. Go ahead and add this new entry */
+			list_add(&new_entry->nd, &saved_ptr->nd);
+			new_entry = NULL;
+			break;
+		}
+	}
+
+	if (err) {
+		printk(KERN_INFO
+	"reserve_memtype failed 0x%Lx-0x%Lx, track %s, req %s\n",
+			start, end, cattr_name(new_entry->type),
+			cattr_name(req_type));
+		kfree(new_entry);
+		spin_unlock(&memtype_lock);
+		return err;
+	}
+
+	if (new_entry) {
+		/* No conflict. Not yet added to the list. Add to the tail */
+		list_add_tail(&new_entry->nd, &memtype_list);
+		printk("New Entry\n");
+  	}
+
+	if (ret_type) {
+		pr_debug(
+	"reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
+			start, end, cattr_name(actual_type),
+			cattr_name(req_type), cattr_name(*ret_type));
+	} else {
+		pr_debug(
+	"reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s\n",
+			start, end, cattr_name(actual_type),
+			cattr_name(req_type));
+	}
+
+	spin_unlock(&memtype_lock);
+	return err;
+}
+
+int free_memtype(u64 start, u64 end)
+{
+	struct memtype *ml;
+	int err = -EINVAL;
+
+	/* Only track when pat_wc_enabled */
+	if (!pat_wc_enabled) {
+		return 0;
+	}
+
+	/* Low ISA region is always mapped WB. No need to track */
+	if (start >= ISA_START_ADDRESS && end <= ISA_END_ADDRESS) {
+		return 0;
+	}
+
+	spin_lock(&memtype_lock);
+	list_for_each_entry(ml, &memtype_list, nd) {
+		if (ml->start == start && ml->end == end) {
+			list_del(&ml->nd);
+			kfree(ml);
+			err = 0;
+			break;
+		}
+	}
+	spin_unlock(&memtype_lock);
+
+	if (err) {
+		printk(KERN_DEBUG "%s:%d freeing invalid memtype %Lx-%Lx\n",
+			current->comm, current->pid, start, end);
+	}
+
+	printk( "free_memtype request 0x%Lx-0x%Lx\n", start, end);
+	return err;
+}
+
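For orientation, a minimal sketch of how a caller might pair the two interfaces above — hypothetical driver code, not part of this patch (cattr_name() is the message helper used earlier in the file):

	static int example_reserve_wc(u64 base, u64 size)
	{
		unsigned long type;
		int err;

		err = reserve_memtype(base, base + size, _PAGE_CACHE_WC, &type);
		if (err)
			return err;	/* conflicts with an existing reservation */

		/* the request may be downgraded; honour the type returned */
		if (type != _PAGE_CACHE_WC)
			pr_info("range tracked as %s, not WC\n", cattr_name(type));

		/* ... map and use the range with 'type' ... */

		free_memtype(base, base + size);
		return 0;
	}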
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index 2f9e9af..3165ec0 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -36,7 +36,6 @@
 
 	printk(KERN_INFO "Mem-info:\n");
 	show_free_areas();
-	printk(KERN_INFO "Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
 	for_each_online_pgdat(pgdat) {
 		pgdat_resize_lock(pgdat, &flags);
 		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
@@ -381,3 +380,10 @@
 }
 
 #endif
+
+int pmd_bad(pmd_t pmd)
+{
+	WARN_ON_ONCE(pmd_bad_v1(pmd) != pmd_bad_v2(pmd));
+
+	return pmd_bad_v1(pmd);
+}
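The pmd_bad() shim just added is a soak-test idiom: evaluate the old and the new predicate side by side, warn once on any divergence, and keep returning the established result. A generic sketch of the same pattern, with hypothetical names:

	static inline int checked_predicate(unsigned long v)
	{
		int r1 = predicate_v1(v);	/* established implementation */
		int r2 = predicate_v2(v);	/* candidate replacement */

		WARN_ON_ONCE(r1 != r2);		/* report divergence once */
		return r1;			/* keep trusting the old code */
	}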
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index 845001c..1bae9c8 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -20,6 +20,7 @@
 #include <asm/proto.h>
 #include <asm/numa.h>
 #include <asm/e820.h>
+#include <asm/genapic.h>
 
 int acpi_numa __initdata;
 
@@ -132,7 +133,6 @@
 	int pxm, node;
 	int apic_id;
 
-	apic_id = pa->apic_id;
 	if (srat_disabled())
 		return;
 	if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
@@ -148,6 +148,11 @@
 		bad_srat();
 		return;
 	}
+
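+	/* UV firmware splits the extended APIC ID across two SRAT fields */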
+	if (is_uv_system())
+		apic_id = (pa->apic_id << 8) | pa->local_sapic_eid;
+	else
+		apic_id = pa->apic_id;
 	apicid_to_node[apic_id] = node;
 	acpi_numa = 1;
 	printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n",
diff --git a/arch/x86/oprofile/init.c b/arch/x86/oprofile/init.c
index 5341d48..cdfe4c5 100644
--- a/arch/x86/oprofile/init.c
+++ b/arch/x86/oprofile/init.c
@@ -10,18 +10,19 @@
 #include <linux/oprofile.h>
 #include <linux/init.h>
 #include <linux/errno.h>
- 
-/* We support CPUs that have performance counters like the Pentium Pro
+
+/*
+ * We support CPUs that have performance counters like the Pentium Pro
  * with the NMI mode driver.
  */
- 
-extern int op_nmi_init(struct oprofile_operations * ops);
-extern int op_nmi_timer_init(struct oprofile_operations * ops);
+
+extern int op_nmi_init(struct oprofile_operations *ops);
+extern int op_nmi_timer_init(struct oprofile_operations *ops);
 extern void op_nmi_exit(void);
 extern void x86_backtrace(struct pt_regs * const regs, unsigned int depth);
 
 
-int __init oprofile_arch_init(struct oprofile_operations * ops)
+int __init oprofile_arch_init(struct oprofile_operations *ops)
 {
 	int ret;
 
diff --git a/arch/x86/oprofile/nmi_timer_int.c b/arch/x86/oprofile/nmi_timer_int.c
index 1418e36..e3ecb71 100644
--- a/arch/x86/oprofile/nmi_timer_int.c
+++ b/arch/x86/oprofile/nmi_timer_int.c
@@ -17,14 +17,14 @@
 #include <asm/nmi.h>
 #include <asm/apic.h>
 #include <asm/ptrace.h>
- 
+
 static int profile_timer_exceptions_notify(struct notifier_block *self,
 					   unsigned long val, void *data)
 {
 	struct die_args *args = (struct die_args *)data;
 	int ret = NOTIFY_DONE;
 
-	switch(val) {
+	switch (val) {
 	case DIE_NMI:
 		oprofile_add_sample(args->regs, 0);
 		ret = NOTIFY_STOP;
@@ -56,7 +56,7 @@
 }
 
 
-int __init op_nmi_timer_init(struct oprofile_operations * ops)
+int __init op_nmi_timer_init(struct oprofile_operations *ops)
 {
 	if ((nmi_watchdog != NMI_IO_APIC) || (atomic_read(&nmi_active) <= 0))
 		return -ENODEV;
diff --git a/arch/x86/oprofile/op_model_athlon.c b/arch/x86/oprofile/op_model_athlon.c
index c3ee433..3d53487 100644
--- a/arch/x86/oprofile/op_model_athlon.c
+++ b/arch/x86/oprofile/op_model_athlon.c
@@ -1,4 +1,4 @@
-/**
+/*
  * @file op_model_athlon.h
  * athlon / K7 / K8 / Family 10h model-specific MSR operations
  *
@@ -14,28 +14,28 @@
 #include <asm/ptrace.h>
 #include <asm/msr.h>
 #include <asm/nmi.h>
- 
+
 #include "op_x86_model.h"
 #include "op_counter.h"
 
 #define NUM_COUNTERS 4
 #define NUM_CONTROLS 4
 
-#define CTR_IS_RESERVED(msrs,c) (msrs->counters[(c)].addr ? 1 : 0)
-#define CTR_READ(l,h,msrs,c) do {rdmsr(msrs->counters[(c)].addr, (l), (h));} while (0)
-#define CTR_WRITE(l,msrs,c) do {wrmsr(msrs->counters[(c)].addr, -(unsigned int)(l), -1);} while (0)
+#define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0)
+#define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0)
+#define CTR_WRITE(l, msrs, c) do {wrmsr(msrs->counters[(c)].addr, -(unsigned int)(l), -1); } while (0)
 #define CTR_OVERFLOWED(n) (!((n) & (1U<<31)))
 
-#define CTRL_IS_RESERVED(msrs,c) (msrs->controls[(c)].addr ? 1 : 0)
-#define CTRL_READ(l,h,msrs,c) do {rdmsr(msrs->controls[(c)].addr, (l), (h));} while (0)
-#define CTRL_WRITE(l,h,msrs,c) do {wrmsr(msrs->controls[(c)].addr, (l), (h));} while (0)
+#define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0)
+#define CTRL_READ(l, h, msrs, c) do {rdmsr(msrs->controls[(c)].addr, (l), (h)); } while (0)
+#define CTRL_WRITE(l, h, msrs, c) do {wrmsr(msrs->controls[(c)].addr, (l), (h)); } while (0)
 #define CTRL_SET_ACTIVE(n) (n |= (1<<22))
 #define CTRL_SET_INACTIVE(n) (n &= ~(1<<22))
 #define CTRL_CLEAR_LO(x) (x &= (1<<21))
 #define CTRL_CLEAR_HI(x) (x &= 0xfffffcf0)
 #define CTRL_SET_ENABLE(val) (val |= 1<<20)
-#define CTRL_SET_USR(val,u) (val |= ((u & 1) << 16))
-#define CTRL_SET_KERN(val,k) (val |= ((k & 1) << 17))
+#define CTRL_SET_USR(val, u) (val |= ((u & 1) << 16))
+#define CTRL_SET_KERN(val, k) (val |= ((k & 1) << 17))
 #define CTRL_SET_UM(val, m) (val |= (m << 8))
 #define CTRL_SET_EVENT_LOW(val, e) (val |= (e & 0xff))
 #define CTRL_SET_EVENT_HIGH(val, e) (val |= ((e >> 8) & 0xf))
@@ -43,19 +43,19 @@
 #define CTRL_SET_GUEST_ONLY(val, h) (val |= ((h & 1) << 8))
 
 static unsigned long reset_value[NUM_COUNTERS];
- 
+
 static void athlon_fill_in_addresses(struct op_msrs * const msrs)
 {
 	int i;
 
-	for (i=0; i < NUM_COUNTERS; i++) {
+	for (i = 0; i < NUM_COUNTERS; i++) {
 		if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
 			msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
 		else
 			msrs->counters[i].addr = 0;
 	}
 
-	for (i=0; i < NUM_CONTROLS; i++) {
+	for (i = 0; i < NUM_CONTROLS; i++) {
 		if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i))
 			msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
 		else
@@ -63,15 +63,15 @@
 	}
 }
 
- 
+
 static void athlon_setup_ctrs(struct op_msrs const * const msrs)
 {
 	unsigned int low, high;
 	int i;
- 
+
 	/* clear all counters */
 	for (i = 0 ; i < NUM_CONTROLS; ++i) {
-		if (unlikely(!CTRL_IS_RESERVED(msrs,i)))
+		if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
 			continue;
 		CTRL_READ(low, high, msrs, i);
 		CTRL_CLEAR_LO(low);
@@ -81,14 +81,14 @@
 
 	/* avoid a false detection of ctr overflows in NMI handler */
 	for (i = 0; i < NUM_COUNTERS; ++i) {
-		if (unlikely(!CTR_IS_RESERVED(msrs,i)))
+		if (unlikely(!CTR_IS_RESERVED(msrs, i)))
 			continue;
 		CTR_WRITE(1, msrs, i);
 	}
 
 	/* enable active counters */
 	for (i = 0; i < NUM_COUNTERS; ++i) {
-		if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs,i))) {
+		if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) {
 			reset_value[i] = counter_config[i].count;
 
 			CTR_WRITE(counter_config[i].count, msrs, i);
@@ -112,7 +112,7 @@
 	}
 }
 
- 
+
 static int athlon_check_ctrs(struct pt_regs * const regs,
 			     struct op_msrs const * const msrs)
 {
@@ -133,7 +133,7 @@
 	return 1;
 }
 
- 
+
 static void athlon_start(struct op_msrs const * const msrs)
 {
 	unsigned int low, high;
@@ -150,7 +150,7 @@
 
 static void athlon_stop(struct op_msrs const * const msrs)
 {
-	unsigned int low,high;
+	unsigned int low, high;
 	int i;
 
 	/* Subtle: stop on all counters to avoid race with
@@ -169,11 +169,11 @@
 	int i;
 
 	for (i = 0 ; i < NUM_COUNTERS ; ++i) {
-		if (CTR_IS_RESERVED(msrs,i))
+		if (CTR_IS_RESERVED(msrs, i))
 			release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
 	}
 	for (i = 0 ; i < NUM_CONTROLS ; ++i) {
-		if (CTRL_IS_RESERVED(msrs,i))
+		if (CTRL_IS_RESERVED(msrs, i))
 			release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
 	}
 }
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index c554f52..eff431f 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -1,4 +1,4 @@
-/**
+/*
  * @file op_model_ppro.h
  * pentium pro / P6 model-specific MSR operations
  *
@@ -15,45 +15,45 @@
 #include <asm/msr.h>
 #include <asm/apic.h>
 #include <asm/nmi.h>
- 
+
 #include "op_x86_model.h"
 #include "op_counter.h"
 
 #define NUM_COUNTERS 2
 #define NUM_CONTROLS 2
 
-#define CTR_IS_RESERVED(msrs,c) (msrs->counters[(c)].addr ? 1 : 0)
-#define CTR_READ(l,h,msrs,c) do {rdmsr(msrs->counters[(c)].addr, (l), (h));} while (0)
-#define CTR_32BIT_WRITE(l,msrs,c)	\
-	do {wrmsr(msrs->counters[(c)].addr, -(u32)(l), 0);} while (0)
+#define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0)
+#define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0)
+#define CTR_32BIT_WRITE(l, msrs, c)	\
+	do {wrmsr(msrs->counters[(c)].addr, -(u32)(l), 0); } while (0)
 #define CTR_OVERFLOWED(n) (!((n) & (1U<<31)))
 
-#define CTRL_IS_RESERVED(msrs,c) (msrs->controls[(c)].addr ? 1 : 0)
-#define CTRL_READ(l,h,msrs,c) do {rdmsr((msrs->controls[(c)].addr), (l), (h));} while (0)
-#define CTRL_WRITE(l,h,msrs,c) do {wrmsr((msrs->controls[(c)].addr), (l), (h));} while (0)
+#define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0)
+#define CTRL_READ(l, h, msrs, c) do {rdmsr((msrs->controls[(c)].addr), (l), (h)); } while (0)
+#define CTRL_WRITE(l, h, msrs, c) do {wrmsr((msrs->controls[(c)].addr), (l), (h)); } while (0)
 #define CTRL_SET_ACTIVE(n) (n |= (1<<22))
 #define CTRL_SET_INACTIVE(n) (n &= ~(1<<22))
 #define CTRL_CLEAR(x) (x &= (1<<21))
 #define CTRL_SET_ENABLE(val) (val |= 1<<20)
-#define CTRL_SET_USR(val,u) (val |= ((u & 1) << 16))
-#define CTRL_SET_KERN(val,k) (val |= ((k & 1) << 17))
+#define CTRL_SET_USR(val, u) (val |= ((u & 1) << 16))
+#define CTRL_SET_KERN(val, k) (val |= ((k & 1) << 17))
 #define CTRL_SET_UM(val, m) (val |= (m << 8))
 #define CTRL_SET_EVENT(val, e) (val |= e)
 
 static unsigned long reset_value[NUM_COUNTERS];
- 
+
 static void ppro_fill_in_addresses(struct op_msrs * const msrs)
 {
 	int i;
 
-	for (i=0; i < NUM_COUNTERS; i++) {
+	for (i = 0; i < NUM_COUNTERS; i++) {
 		if (reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i))
 			msrs->counters[i].addr = MSR_P6_PERFCTR0 + i;
 		else
 			msrs->counters[i].addr = 0;
 	}
-	
-	for (i=0; i < NUM_CONTROLS; i++) {
+
+	for (i = 0; i < NUM_CONTROLS; i++) {
 		if (reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i))
 			msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i;
 		else
@@ -69,23 +69,23 @@
 
 	/* clear all counters */
 	for (i = 0 ; i < NUM_CONTROLS; ++i) {
-		if (unlikely(!CTRL_IS_RESERVED(msrs,i)))
+		if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
 			continue;
 		CTRL_READ(low, high, msrs, i);
 		CTRL_CLEAR(low);
 		CTRL_WRITE(low, high, msrs, i);
 	}
-	
+
 	/* avoid a false detection of ctr overflows in NMI handler */
 	for (i = 0; i < NUM_COUNTERS; ++i) {
-		if (unlikely(!CTR_IS_RESERVED(msrs,i)))
+		if (unlikely(!CTR_IS_RESERVED(msrs, i)))
 			continue;
 		CTR_32BIT_WRITE(1, msrs, i);
 	}
 
 	/* enable active counters */
 	for (i = 0; i < NUM_COUNTERS; ++i) {
-		if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs,i))) {
+		if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) {
 			reset_value[i] = counter_config[i].count;
 
 			CTR_32BIT_WRITE(counter_config[i].count, msrs, i);
@@ -104,13 +104,13 @@
 	}
 }
 
- 
+
 static int ppro_check_ctrs(struct pt_regs * const regs,
 			   struct op_msrs const * const msrs)
 {
 	unsigned int low, high;
 	int i;
- 
+
 	for (i = 0 ; i < NUM_COUNTERS; ++i) {
 		if (!reset_value[i])
 			continue;
@@ -135,10 +135,10 @@
 	return 1;
 }
 
- 
+
 static void ppro_start(struct op_msrs const * const msrs)
 {
-	unsigned int low,high;
+	unsigned int low, high;
 	int i;
 
 	for (i = 0; i < NUM_COUNTERS; ++i) {
@@ -153,7 +153,7 @@
 
 static void ppro_stop(struct op_msrs const * const msrs)
 {
-	unsigned int low,high;
+	unsigned int low, high;
 	int i;
 
 	for (i = 0; i < NUM_COUNTERS; ++i) {
@@ -170,11 +170,11 @@
 	int i;
 
 	for (i = 0 ; i < NUM_COUNTERS ; ++i) {
-		if (CTR_IS_RESERVED(msrs,i))
+		if (CTR_IS_RESERVED(msrs, i))
 			release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
 	}
 	for (i = 0 ; i < NUM_CONTROLS ; ++i) {
-		if (CTRL_IS_RESERVED(msrs,i))
+		if (CTRL_IS_RESERVED(msrs, i))
 			release_evntsel_nmi(MSR_P6_EVNTSEL0 + i);
 	}
 }
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index 103b9df..2ead723 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -30,6 +30,9 @@
 #include <linux/init.h>
 #include <linux/ioport.h>
 #include <linux/errno.h>
+#include <linux/bootmem.h>
+
+#include <asm/pat.h>
 
 #include "pci.h"
 
@@ -297,10 +300,35 @@
 	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
 }
 
+static void pci_unmap_page_range(struct vm_area_struct *vma)
+{
+	u64 addr = (u64)vma->vm_pgoff << PAGE_SHIFT;
+	free_memtype(addr, addr + vma->vm_end - vma->vm_start);
+}
+
+static void pci_track_mmap_page_range(struct vm_area_struct *vma)
+{
+	u64 addr = (u64)vma->vm_pgoff << PAGE_SHIFT;
+	unsigned long flags = pgprot_val(vma->vm_page_prot)
+						& _PAGE_CACHE_MASK;
+
+	reserve_memtype(addr, addr + vma->vm_end - vma->vm_start, flags, NULL);
+}
+
+static struct vm_operations_struct pci_mmap_ops = {
+	.open  = pci_track_mmap_page_range,
+	.close = pci_unmap_page_range,
+};
+
 int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 			enum pci_mmap_state mmap_state, int write_combine)
 {
 	unsigned long prot;
+	u64 addr = (u64)vma->vm_pgoff << PAGE_SHIFT;
+	unsigned long len = vma->vm_end - vma->vm_start;
+	unsigned long flags;
+	unsigned long new_flags;
+	int retval;
 
 	/* I/O space cannot be accessed via normal processor loads and
 	 * stores on this platform.
@@ -308,21 +336,50 @@
 	if (mmap_state == pci_mmap_io)
 		return -EINVAL;
 
-	/* Leave vm_pgoff as-is, the PCI space address is the physical
-	 * address on this platform.
-	 */
 	prot = pgprot_val(vma->vm_page_prot);
-	if (boot_cpu_data.x86 > 3)
-		prot |= _PAGE_PCD | _PAGE_PWT;
+	if (pat_wc_enabled && write_combine)
+		prot |= _PAGE_CACHE_WC;
+	else if (boot_cpu_data.x86 > 3)
+		prot |= _PAGE_CACHE_UC;
+
 	vma->vm_page_prot = __pgprot(prot);
 
-	/* Write-combine setting is ignored, it is changed via the mtrr
-	 * interfaces on this platform.
-	 */
+	flags = pgprot_val(vma->vm_page_prot) & _PAGE_CACHE_MASK;
+	retval = reserve_memtype(addr, addr + len, flags, &new_flags);
+	if (retval)
+		return retval;
+
+	if (flags != new_flags) {
+		/*
+		 * Do not fallback to certain memory types with certain
+		 * requested type:
+		 * - request is uncached, return cannot be write-back
+		 * - request is uncached, return cannot be write-combine
+		 * - request is write-combine, return cannot be write-back
+		 */
+		if ((flags == _PAGE_CACHE_UC &&
+		     (new_flags == _PAGE_CACHE_WB ||
+		      new_flags == _PAGE_CACHE_WC)) ||
+		    (flags == _PAGE_CACHE_WC &&
+		     new_flags == _PAGE_CACHE_WB)) {
+			free_memtype(addr, addr+len);
+			return -EINVAL;
+		}
+		flags = new_flags;
+	}
+
+	if (vma->vm_pgoff <= max_pfn_mapped &&
+	    ioremap_change_attr((unsigned long)__va(addr), len, flags)) {
+		free_memtype(addr, addr + len);
+		return -EINVAL;
+	}
+
 	if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
 			       vma->vm_end - vma->vm_start,
 			       vma->vm_page_prot))
 		return -EAGAIN;
 
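+	/* vm_ops open/close keep the reservation paired with fork/unmap */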
+	vma->vm_ops = &pci_mmap_ops;
+
 	return 0;
 }
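The substitution rules above reduce to a small predicate; restated standalone for clarity (illustrative only, not code from the patch):

	/* may the requested type 'want' be silently served as 'got'? */
	static int cache_fallback_ok(unsigned long want, unsigned long got)
	{
		if (want == _PAGE_CACHE_UC)
			return got != _PAGE_CACHE_WB && got != _PAGE_CACHE_WC;
		if (want == _PAGE_CACHE_WC)
			return got != _PAGE_CACHE_WB;
		return 1;	/* every other substitution is tolerated */
	}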
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index a871586..579745c 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -200,7 +200,7 @@
 {
 	static const unsigned char irqmap[16] = { 0, 9, 3, 10, 4, 5, 7, 6, 1, 11, 0, 12, 0, 14, 0, 15 };
 
-	WARN_ON_ONCE(pirq >= 16);
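+	/* pirq is 1-based here (pirq-1 selects the nybble), so 16 is valid */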
+	WARN_ON_ONCE(pirq > 16);
 	return irqmap[read_config_nybble(router, 0x48, pirq-1)];
 }
 
@@ -209,7 +209,7 @@
 	static const unsigned char irqmap[16] = { 0, 8, 0, 2, 4, 5, 7, 6, 0, 1, 3, 9, 11, 0, 13, 15 };
 	unsigned int val = irqmap[irq];
 
-	WARN_ON_ONCE(pirq >= 16);
+	WARN_ON_ONCE(pirq > 16);
 	if (val) {
 		write_config_nybble(router, 0x48, pirq-1, val);
 		return 1;
@@ -260,7 +260,7 @@
 {
 	static const unsigned int pirqmap[5] = { 3, 2, 5, 1, 1 };
 
-	WARN_ON_ONCE(pirq >= 5);
+	WARN_ON_ONCE(pirq > 5);
 	return read_config_nybble(router, 0x55, pirqmap[pirq-1]);
 }
 
@@ -268,7 +268,7 @@
 {
 	static const unsigned int pirqmap[5] = { 3, 2, 5, 1, 1 };
 
-	WARN_ON_ONCE(pirq >= 5);
+	WARN_ON_ONCE(pirq > 5);
 	write_config_nybble(router, 0x55, pirqmap[pirq-1], irq);
 	return 1;
 }
@@ -282,7 +282,7 @@
 {
 	static const unsigned char pirqmap[4] = { 1, 0, 2, 3 };
 
-	WARN_ON_ONCE(pirq >= 4);
+	WARN_ON_ONCE(pirq > 4);
 	return read_config_nybble(router,0x43, pirqmap[pirq-1]);
 }
 
@@ -290,7 +290,7 @@
 {
 	static const unsigned char pirqmap[4] = { 1, 0, 2, 3 };
 
-	WARN_ON_ONCE(pirq >= 4);
+	WARN_ON_ONCE(pirq > 4);
 	write_config_nybble(router, 0x43, pirqmap[pirq-1], irq);
 	return 1;
 }
diff --git a/arch/x86/pci/numa.c b/arch/x86/pci/numa.c
index 55270c2..d9afbae 100644
--- a/arch/x86/pci/numa.c
+++ b/arch/x86/pci/numa.c
@@ -11,11 +11,41 @@
 #define XQUAD_PORTIO_BASE 0xfe400000
 #define XQUAD_PORTIO_QUAD 0x40000  /* 256k per quad. */
 
+int mp_bus_id_to_node[MAX_MP_BUSSES];
 #define BUS2QUAD(global) (mp_bus_id_to_node[global])
-#define BUS2LOCAL(global) (mp_bus_id_to_local[global])
-#define QUADLOCAL2BUS(quad,local) (quad_local_to_mp_bus_id[quad][local])
 
-extern void *xquad_portio;    /* Where the IO area was mapped */
+int mp_bus_id_to_local[MAX_MP_BUSSES];
+#define BUS2LOCAL(global) (mp_bus_id_to_local[global])
+
+void mpc_oem_bus_info(struct mpc_config_bus *m, char *name,
+	struct mpc_config_translation *translation)
+{
+	int quad = translation->trans_quad;
+	int local = translation->trans_local;
+
+	mp_bus_id_to_node[m->mpc_busid] = quad;
+	mp_bus_id_to_local[m->mpc_busid] = local;
+	printk(KERN_INFO "Bus #%d is %s (node %d)\n",
+	       m->mpc_busid, name, quad);
+}
+
+int quad_local_to_mp_bus_id[NR_CPUS/4][4];
+#define QUADLOCAL2BUS(quad, local) (quad_local_to_mp_bus_id[quad][local])
+void mpc_oem_pci_bus(struct mpc_config_bus *m,
+	struct mpc_config_translation *translation)
+{
+	int quad = translation->trans_quad;
+	int local = translation->trans_local;
+
+	quad_local_to_mp_bus_id[quad][local] = m->mpc_busid;
+}
+
+/* Where the IO area was mapped on multiquad, always 0 otherwise */
+void *xquad_portio;
+#ifdef CONFIG_X86_NUMAQ
+EXPORT_SYMBOL(xquad_portio);
+#endif
+
 #define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port)
 
 #define PCI_CONF1_MQ_ADDRESS(bus, devfn, reg) \
diff --git a/arch/x86/power/cpu_32.c b/arch/x86/power/cpu_32.c
index 7f9c6da..7dc5d5c 100644
--- a/arch/x86/power/cpu_32.c
+++ b/arch/x86/power/cpu_32.c
@@ -27,17 +27,17 @@
 	/*
 	 * descriptor tables
 	 */
- 	store_gdt(&ctxt->gdt);
- 	store_idt(&ctxt->idt);
- 	store_tr(ctxt->tr);
+	store_gdt(&ctxt->gdt);
+	store_idt(&ctxt->idt);
+	store_tr(ctxt->tr);
 
 	/*
 	 * segment registers
 	 */
- 	savesegment(es, ctxt->es);
- 	savesegment(fs, ctxt->fs);
- 	savesegment(gs, ctxt->gs);
- 	savesegment(ss, ctxt->ss);
+	savesegment(es, ctxt->es);
+	savesegment(fs, ctxt->fs);
+	savesegment(gs, ctxt->gs);
+	savesegment(ss, ctxt->ss);
 
 	/*
 	 * control registers
@@ -48,10 +48,12 @@
 	ctxt->cr4 = read_cr4();
 }
 
+/* Needed by apm.c */
 void save_processor_state(void)
 {
 	__save_processor_state(&saved_context);
 }
+EXPORT_SYMBOL(save_processor_state);
 
 static void do_fpu_end(void)
 {
@@ -64,9 +66,14 @@
 static void fix_processor_context(void)
 {
 	int cpu = smp_processor_id();
-	struct tss_struct * t = &per_cpu(init_tss, cpu);
+	struct tss_struct *t = &per_cpu(init_tss, cpu);
 
-	set_tss_desc(cpu,t);	/* This just modifies memory; should not be necessary. But... This is necessary, because 386 hardware has concept of busy TSS or some similar stupidity. */
+	set_tss_desc(cpu, t);	/*
+				 * This just modifies memory; should not be
+				 * necessary. But... This is necessary, because
+				 * 386 hardware has concept of busy TSS or some
+				 * similar stupidity.
+				 */
 
 	load_TR_desc();				/* This does ltr */
 	load_LDT(&current->active_mm->context);	/* This does lldt */
@@ -100,16 +107,16 @@
 	 * now restore the descriptor tables to their proper values
 	 * ltr is done in fix_processor_context().
 	 */
- 	load_gdt(&ctxt->gdt);
- 	load_idt(&ctxt->idt);
+	load_gdt(&ctxt->gdt);
+	load_idt(&ctxt->idt);
 
 	/*
 	 * segment registers
 	 */
- 	loadsegment(es, ctxt->es);
- 	loadsegment(fs, ctxt->fs);
- 	loadsegment(gs, ctxt->gs);
- 	loadsegment(ss, ctxt->ss);
+	loadsegment(es, ctxt->es);
+	loadsegment(fs, ctxt->fs);
+	loadsegment(gs, ctxt->gs);
+	loadsegment(ss, ctxt->ss);
 
 	/*
 	 * sysenter MSRs
@@ -123,11 +130,9 @@
 	mcheck_init(&boot_cpu_data);
 }
 
+/* Needed by apm.c */
 void restore_processor_state(void)
 {
 	__restore_processor_state(&saved_context);
 }
-
-/* Needed by apm.c */
-EXPORT_SYMBOL(save_processor_state);
 EXPORT_SYMBOL(restore_processor_state);
diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
index 0a8f474..17a6b05 100644
--- a/arch/x86/vdso/Makefile
+++ b/arch/x86/vdso/Makefile
@@ -39,7 +39,7 @@
 
 CFL := $(PROFILING) -mcmodel=small -fPIC -g0 -O2 -fasynchronous-unwind-tables -m64
 
-$(vobjs): KBUILD_CFLAGS = $(CFL)
+$(vobjs): KBUILD_CFLAGS += $(CFL)
 
 targets += vdso-syms.lds
 obj-$(VDSO64-y)			+= vdso-syms.lds
diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
index 348f134..e2af8ee 100644
--- a/arch/x86/vdso/vdso32-setup.c
+++ b/arch/x86/vdso/vdso32-setup.c
@@ -210,8 +210,12 @@
 /* May not be __init: called during resume */
 void syscall32_cpu_init(void)
 {
-	if (use_sysenter < 0)
-		use_sysenter = (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL);
+	if (use_sysenter < 0) {
+		if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+			use_sysenter = 1;
+		if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR)
+			use_sysenter = 1;
+	}
 
 	/* Load these always in case some future AMD CPU supports
 	   SYSENTER from compat mode too. */
@@ -325,6 +329,9 @@
 	int ret = 0;
 	bool compat;
 
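+	/* honour vdso=0 on the command line: map nothing at all */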
+	if (vdso_enabled == VDSO_DISABLED)
+		return 0;
+
 	down_write(&mm->mmap_sem);
 
 	/* Test compat mode once here, in case someone
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 27ee26a..c038822 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -25,6 +25,7 @@
 #include <linux/mm.h>
 #include <linux/page-flags.h>
 #include <linux/highmem.h>
+#include <linux/console.h>
 
 #include <xen/interface/xen.h>
 #include <xen/interface/physdev.h>
@@ -889,7 +890,6 @@
 		pv_irq_ops.irq_disable = xen_irq_disable_direct;
 		pv_irq_ops.irq_enable = xen_irq_enable_direct;
 		pv_mmu_ops.read_cr2 = xen_read_cr2_direct;
-		pv_cpu_ops.iret = xen_iret_direct;
 	}
 }
 
@@ -993,7 +993,7 @@
 	.read_tsc = native_read_tsc,
 	.read_pmc = native_read_pmc,
 
-	.iret = (void *)&hypercall_page[__HYPERVISOR_iret],
+	.iret = xen_iret,
 	.irq_enable_syscall_ret = NULL,  /* never called */
 
 	.load_tr_desc = paravirt_nop,
@@ -1228,6 +1228,9 @@
 		? __pa(xen_start_info->mod_start) : 0;
 	boot_params.hdr.ramdisk_size = xen_start_info->mod_len;
 
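+	/* domU has no VGA; steer console output to the Xen hvc console */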
+	if (!is_initial_xendomain())
+		add_preferred_console("hvc", 0, NULL);
+
 	/* Start the world */
 	start_kernel();
 }
diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c
index 5e6f36f..5791eb2 100644
--- a/arch/x86/xen/multicalls.c
+++ b/arch/x86/xen/multicalls.c
@@ -76,7 +76,7 @@
 		if (ret) {
 			printk(KERN_ERR "%d multicall(s) failed: cpu %d\n",
 			       ret, smp_processor_id());
-			for(i = 0; i < b->mcidx; i++) {
+			for (i = 0; i < b->mcidx; i++) {
 				printk("  call %2d/%d: op=%lu arg=[%lx] result=%ld\n",
 				       i+1, b->mcidx,
 				       b->debug[i].op,
@@ -93,7 +93,7 @@
 
 	local_irq_restore(flags);
 
-	for(i = 0; i < b->cbidx; i++) {
+	for (i = 0; i < b->cbidx; i++) {
 		struct callback *cb = &b->callbacks[i];
 
 		(*cb->fn)(cb->data);
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index aafc544..e340ff9 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -35,7 +35,7 @@
 #include "xen-ops.h"
 #include "mmu.h"
 
-static cpumask_t cpu_initialized_map;
+static cpumask_t xen_cpu_initialized_map;
 static DEFINE_PER_CPU(int, resched_irq);
 static DEFINE_PER_CPU(int, callfunc_irq);
 
@@ -179,7 +179,7 @@
 	if (xen_smp_intr_init(0))
 		BUG();
 
-	cpu_initialized_map = cpumask_of_cpu(0);
+	xen_cpu_initialized_map = cpumask_of_cpu(0);
 
 	/* Restrict the possible_map according to max_cpus. */
 	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
@@ -210,7 +210,7 @@
 	struct vcpu_guest_context *ctxt;
 	struct gdt_page *gdt = &per_cpu(gdt_page, cpu);
 
-	if (cpu_test_and_set(cpu, cpu_initialized_map))
+	if (cpu_test_and_set(cpu, xen_cpu_initialized_map))
 		return 0;
 
 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
index 6b71904..fe161ed 100644
--- a/arch/x86/xen/xen-asm.S
+++ b/arch/x86/xen/xen-asm.S
@@ -135,13 +135,8 @@
 	current stack state in whatever form its in, we keep things
 	simple by only using a single register which is pushed/popped
 	on the stack.
-
-	Non-direct iret could be done in the same way, but it would
-	require an annoying amount of code duplication.  We'll assume
-	that direct mode will be the common case once the hypervisor
-	support becomes commonplace.
  */
-ENTRY(xen_iret_direct)
+ENTRY(xen_iret)
 	/* test eflags for special cases */
 	testl $(X86_EFLAGS_VM | XEN_EFLAGS_NMI), 8(%esp)
 	jnz hyper_iret
@@ -155,9 +150,9 @@
 	GET_THREAD_INFO(%eax)
 	movl TI_cpu(%eax),%eax
 	movl __per_cpu_offset(,%eax,4),%eax
-	lea per_cpu__xen_vcpu_info(%eax),%eax
+	mov per_cpu__xen_vcpu(%eax),%eax
 #else
-	movl $per_cpu__xen_vcpu_info, %eax
+	movl per_cpu__xen_vcpu, %eax
 #endif
 
 	/* check IF state we're restoring */
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index b02a909..956a491 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -63,5 +63,5 @@
 DECL_ASM(unsigned long, xen_save_fl_direct, void);
 DECL_ASM(void, xen_restore_fl_direct, unsigned long);
 
-void xen_iret_direct(void);
+void xen_iret(void);
 #endif /* XEN_OPS_H */
diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile
index f582d6a..7419dbc 100644
--- a/arch/xtensa/kernel/Makefile
+++ b/arch/xtensa/kernel/Makefile
@@ -5,7 +5,7 @@
 extra-y := head.o vmlinux.lds
 
 
-obj-y := align.o entry.o irq.o coprocessor.o process.o ptrace.o semaphore.o  \
+obj-y := align.o entry.o irq.o coprocessor.o process.o ptrace.o \
 	 setup.o signal.o syscall.o time.o traps.o vectors.o platform.o  \
 	 pci-dma.o init_task.o io.o
 
diff --git a/arch/xtensa/kernel/semaphore.c b/arch/xtensa/kernel/semaphore.c
deleted file mode 100644
index 995c641..0000000
--- a/arch/xtensa/kernel/semaphore.c
+++ /dev/null
@@ -1,226 +0,0 @@
-/*
- * arch/xtensa/kernel/semaphore.c
- *
- * Generic semaphore code. Buyer beware. Do your own specific changes
- * in <asm/semaphore-helper.h>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- *
- * Joe Taylor	<joe@tensilica.com, joetylr@yahoo.com>
- * Chris Zankel	<chris@zankel.net>
- * Marc Gauthier<marc@tensilica.com, marc@alumni.uwaterloo.ca>
- * Kevin Chea
- */
-
-#include <linux/sched.h>
-#include <linux/wait.h>
-#include <linux/init.h>
-#include <asm/semaphore.h>
-#include <asm/errno.h>
-
-/*
- * These two _must_ execute atomically wrt each other.
- */
-
-static __inline__ void wake_one_more(struct semaphore * sem)
-{
-	atomic_inc((atomic_t *)&sem->sleepers);
-}
-
-static __inline__ int waking_non_zero(struct semaphore *sem)
-{
-	unsigned long flags;
-	int ret = 0;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	if (sem->sleepers > 0) {
-		sem->sleepers--;
-		ret = 1;
-	}
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-	return ret;
-}
-
-/*
- * waking_non_zero_interruptible:
- *	1	got the lock
- *	0	go to sleep
- *	-EINTR	interrupted
- *
- * We must undo the sem->count down_interruptible() increment while we are
- * protected by the spinlock in order to make atomic this atomic_inc() with the
- * atomic_read() in wake_one_more(), otherwise we can race. -arca
- */
-
-static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
-						struct task_struct *tsk)
-{
-	unsigned long flags;
-	int ret = 0;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	if (sem->sleepers > 0) {
-		sem->sleepers--;
-		ret = 1;
-	} else if (signal_pending(tsk)) {
-		atomic_inc(&sem->count);
-		ret = -EINTR;
-	}
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-	return ret;
-}
-
-/*
- * waking_non_zero_trylock:
- *	1	failed to lock
- *	0	got the lock
- *
- * We must undo the sem->count down_trylock() increment while we are
- * protected by the spinlock in order to make atomic this atomic_inc() with the
- * atomic_read() in wake_one_more(), otherwise we can race. -arca
- */
-
-static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
-{
-	unsigned long flags;
-	int ret = 1;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	if (sem->sleepers <= 0)
-		atomic_inc(&sem->count);
-	else {
-		sem->sleepers--;
-		ret = 0;
-	}
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-	return ret;
-}
-
-DEFINE_SPINLOCK(semaphore_wake_lock);
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to sleep, while the "waking" variable is
- * incremented when the "up()" code goes to wake up waiting
- * processes.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * waking_non_zero() (from asm/semaphore.h) must execute
- * atomically.
- *
- * When __up() is called, the count was negative before
- * incrementing it, and we need to wake up somebody.
- *
- * This routine adds one to the count of processes that need to
- * wake up and exit.  ALL waiting processes actually wake up but
- * only the one that gets to the "waking" field first will gate
- * through and acquire the semaphore.  The others will go back
- * to sleep.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-
-void __up(struct semaphore *sem)
-{
-	wake_one_more(sem);
-	wake_up(&sem->wait);
-}
-
-/*
- * Perform the "down" function.  Return zero for semaphore acquired,
- * return negative for signalled out of the function.
- *
- * If called from __down, the return is ignored and the wait loop is
- * not interruptible.  This means that a task waiting on a semaphore
- * using "down()" cannot be killed until someone does an "up()" on
- * the semaphore.
- *
- * If called from __down_interruptible, the return value gets checked
- * upon return.  If the return value is negative then the task continues
- * with the negative value in the return register (it can be tested by
- * the caller).
- *
- * Either form may be used in conjunction with "up()".
- *
- */
-
-#define DOWN_VAR				\
-	struct task_struct *tsk = current;	\
-	wait_queue_t wait;			\
-	init_waitqueue_entry(&wait, tsk);
-
-#define DOWN_HEAD(task_state)						\
-									\
-									\
-	tsk->state = (task_state);					\
-	add_wait_queue(&sem->wait, &wait);				\
-									\
-	/*								\
-	 * Ok, we're set up.  sem->count is known to be less than zero	\
-	 * so we must wait.						\
-	 *								\
-	 * We can let go the lock for purposes of waiting.		\
-	 * We re-acquire it after awaking so as to protect		\
-	 * all semaphore operations.					\
-	 *								\
-	 * If "up()" is called before we call waking_non_zero() then	\
-	 * we will catch it right away.  If it is called later then	\
-	 * we will have to go through a wakeup cycle to catch it.	\
-	 *								\
-	 * Multiple waiters contend for the semaphore lock to see	\
-	 * who gets to gate through and who has to wait some more.	\
-	 */								\
-	for (;;) {
-
-#define DOWN_TAIL(task_state)			\
-		tsk->state = (task_state);	\
-	}					\
-	tsk->state = TASK_RUNNING;		\
-	remove_wait_queue(&sem->wait, &wait);
-
-void __sched __down(struct semaphore * sem)
-{
-	DOWN_VAR
-	DOWN_HEAD(TASK_UNINTERRUPTIBLE)
-	if (waking_non_zero(sem))
-		break;
-	schedule();
-	DOWN_TAIL(TASK_UNINTERRUPTIBLE)
-}
-
-int __sched __down_interruptible(struct semaphore * sem)
-{
-	int ret = 0;
-	DOWN_VAR
-	DOWN_HEAD(TASK_INTERRUPTIBLE)
-
-	ret = waking_non_zero_interruptible(sem, tsk);
-	if (ret)
-	{
-		if (ret == 1)
-			/* ret != 0 only if we get interrupted -arca */
-			ret = 0;
-		break;
-	}
-	schedule();
-	DOWN_TAIL(TASK_INTERRUPTIBLE)
-	return ret;
-}
-
-int __down_trylock(struct semaphore * sem)
-{
-	return waking_non_zero_trylock(sem);
-}
diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
index 60dbdb4..6e52cdd 100644
--- a/arch/xtensa/kernel/xtensa_ksyms.c
+++ b/arch/xtensa/kernel/xtensa_ksyms.c
@@ -26,7 +26,6 @@
 #include <asm/io.h>
 #include <asm/page.h>
 #include <asm/pgalloc.h>
-#include <asm/semaphore.h>
 #ifdef CONFIG_BLK_DEV_FD
 #include <asm/floppy.h>
 #endif
@@ -71,14 +70,6 @@
 EXPORT_SYMBOL(__udivdi3);
 EXPORT_SYMBOL(__umoddi3);
 
-/*
- * Semaphore operations
- */
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__down_trylock);
-EXPORT_SYMBOL(__up);
-
 #ifdef CONFIG_NET
 /*
  * Networking support
diff --git a/block/bsg.c b/block/bsg.c
index 8917c51..302ac1f 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -37,7 +37,6 @@
 	struct list_head done_list;
 	struct hlist_node dev_list;
 	atomic_t ref_count;
-	int minor;
 	int queued_cmds;
 	int done_cmds;
 	wait_queue_head_t wq_done;
@@ -368,7 +367,7 @@
 
 	spin_lock_irq(&bd->lock);
 	if (bd->done_cmds) {
-		bc = list_entry(bd->done_list.next, struct bsg_command, list);
+		bc = list_first_entry(&bd->done_list, struct bsg_command, list);
 		list_del(&bc->list);
 		bd->done_cmds--;
 	}
@@ -468,8 +467,6 @@
 
 	dprintk("%s: entered\n", bd->name);
 
-	set_bit(BSG_F_BLOCK, &bd->flags);
-
 	/*
 	 * wait for all commands to complete
 	 */
@@ -705,6 +702,7 @@
 static int bsg_put_device(struct bsg_device *bd)
 {
 	int ret = 0;
+	struct device *dev = bd->queue->bsg_dev.dev;
 
 	mutex_lock(&bsg_mutex);
 
@@ -730,6 +728,7 @@
 	kfree(bd);
 out:
 	mutex_unlock(&bsg_mutex);
+	put_device(dev);
 	return ret;
 }
 
@@ -738,22 +737,26 @@
 					 struct file *file)
 {
 	struct bsg_device *bd;
+	int ret;
 #ifdef BSG_DEBUG
 	unsigned char buf[32];
 #endif
+	ret = blk_get_queue(rq);
+	if (ret)
+		return ERR_PTR(-ENXIO);
 
 	bd = bsg_alloc_device();
-	if (!bd)
+	if (!bd) {
+		blk_put_queue(rq);
 		return ERR_PTR(-ENOMEM);
+	}
 
 	bd->queue = rq;
-	kobject_get(&rq->kobj);
 	bsg_set_block(bd, file);
 
 	atomic_set(&bd->ref_count, 1);
-	bd->minor = iminor(inode);
 	mutex_lock(&bsg_mutex);
-	hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(bd->minor));
+	hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));
 
 	strncpy(bd->name, rq->bsg_dev.class_dev->class_id, sizeof(bd->name) - 1);
 	dprintk("bound to <%s>, max queue %d\n",
@@ -763,23 +766,21 @@
 	return bd;
 }
 
-static struct bsg_device *__bsg_get_device(int minor)
+static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
 {
-	struct bsg_device *bd = NULL;
+	struct bsg_device *bd;
 	struct hlist_node *entry;
 
 	mutex_lock(&bsg_mutex);
 
-	hlist_for_each(entry, bsg_dev_idx_hash(minor)) {
-		bd = hlist_entry(entry, struct bsg_device, dev_list);
-		if (bd->minor == minor) {
+	hlist_for_each_entry(bd, entry, bsg_dev_idx_hash(minor), dev_list) {
+		if (bd->queue == q) {
 			atomic_inc(&bd->ref_count);
-			break;
+			goto found;
 		}
-
-		bd = NULL;
 	}
-
+	bd = NULL;
+found:
 	mutex_unlock(&bsg_mutex);
 	return bd;
 }
@@ -789,21 +790,27 @@
 	struct bsg_device *bd;
 	struct bsg_class_device *bcd;
 
-	bd = __bsg_get_device(iminor(inode));
-	if (bd)
-		return bd;
-
 	/*
 	 * find the class device
 	 */
 	mutex_lock(&bsg_mutex);
 	bcd = idr_find(&bsg_minor_idr, iminor(inode));
+	if (bcd)
+		get_device(bcd->dev);
 	mutex_unlock(&bsg_mutex);
 
 	if (!bcd)
 		return ERR_PTR(-ENODEV);
 
-	return bsg_add_device(inode, bcd->queue, file);
+	bd = __bsg_get_device(iminor(inode), bcd->queue);
+	if (bd)
+		return bd;
+
+	bd = bsg_add_device(inode, bcd->queue, file);
+	if (IS_ERR(bd))
+		put_device(bcd->dev);
+
+	return bd;
 }
 
 static int bsg_open(struct inode *inode, struct file *file)
@@ -942,7 +949,6 @@
 	class_device_unregister(bcd->class_dev);
 	put_device(bcd->dev);
 	bcd->class_dev = NULL;
-	bcd->dev = NULL;
 	mutex_unlock(&bsg_mutex);
 }
 EXPORT_SYMBOL_GPL(bsg_unregister_queue);
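The lifetime fix pairs every reference taken on the open path with a release on the close path; schematically (the blk_put_queue half lives in teardown code outside the hunks shown, so that line is inferred):

	/* bsg_get_device() / bsg_add_device(): */
	get_device(bcd->dev);		/* pin the class device  */
	blk_get_queue(bcd->queue);	/* pin the request queue */

	/* bsg_put_device(), on the final reference: */
	blk_put_queue(bd->queue);	/* inferred, not shown above */
	put_device(dev);		/* shown above */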
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index b733732..c70d0b6 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -624,7 +624,6 @@
 	case HDIO_GET_IDENTITY:
 	case HDIO_DRIVE_TASK:
 	case HDIO_DRIVE_CMD:
-	case HDIO_SCAN_HWIF:
 	/* 0x330 is reserved -- it used to be HDIO_GETGEO_BIG */
 	case 0x330:
 	/* 0x02 -- Floppy ioctls */
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index 84caa4e..a5eda80 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -77,7 +77,7 @@
 			/* if ack is already set then we cannot be sure
 			 * we are referring to the correct operation
 			 */
-			BUG_ON(depend_tx->ack);
+			BUG_ON(async_tx_test_ack(depend_tx));
 			if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
 				panic("%s: DMA_ERROR waiting for depend_tx\n",
 					__func__);
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 2be3bae..c6e772f 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -89,13 +89,19 @@
 		iter = tx;
 
 		/* find the root of the unsubmitted dependency chain */
-		while (iter->cookie == -EBUSY) {
+		do {
 			parent = iter->parent;
-			if (parent && parent->cookie == -EBUSY)
-				iter = iter->parent;
-			else
+			if (!parent)
 				break;
-		}
+			else
+				iter = parent;
+		} while (parent);
+
+		/* there is a small window where ->parent is already NULL
+		 * but ->cookie is still -EBUSY (not yet submitted); spin
+		 * until the submitter catches up
+		 */
+		while (iter->cookie == -EBUSY)
+			cpu_relax();
 
 		status = dma_sync_wait(iter->chan, iter->cookie);
 	} while (status == DMA_IN_PROGRESS || (iter != tx));
@@ -111,24 +117,33 @@
 void
 async_tx_run_dependencies(struct dma_async_tx_descriptor *tx)
 {
-	struct dma_async_tx_descriptor *dep_tx, *_dep_tx;
-	struct dma_device *dev;
+	struct dma_async_tx_descriptor *next = tx->next;
 	struct dma_chan *chan;
 
-	list_for_each_entry_safe(dep_tx, _dep_tx, &tx->depend_list,
-		depend_node) {
-		chan = dep_tx->chan;
-		dev = chan->device;
-		/* we can't depend on ourselves */
-		BUG_ON(chan == tx->chan);
-		list_del(&dep_tx->depend_node);
-		tx->tx_submit(dep_tx);
+	if (!next)
+		return;
 
-		/* we need to poke the engine as client code does not
-		 * know about dependency submission events
-		 */
-		dev->device_issue_pending(chan);
+	tx->next = NULL;
+	chan = next->chan;
+
+	/* keep submitting until a channel switch is detected; in that
+	 * case we will be called again as a result of processing the
+	 * interrupt from async_tx_channel_switch
+	 */
+	while (next && next->chan == chan) {
+		struct dma_async_tx_descriptor *_next;
+
+		spin_lock_bh(&next->lock);
+		next->parent = NULL;
+		_next = next->next;
+		next->next = NULL;
+		spin_unlock_bh(&next->lock);
+
+		next->tx_submit(next);
+		next = _next;
 	}
+
+	chan->device->device_issue_pending(chan);
 }
 EXPORT_SYMBOL_GPL(async_tx_run_dependencies);
 
@@ -397,6 +412,92 @@
 }
 #endif
 
+
+/**
+ * async_tx_channel_switch - queue an interrupt descriptor with a dependency
+ * 	pre-attached.
+ * @depend_tx: the operation that must finish before the new operation runs
+ * @tx: the new operation
+ */
+static void
+async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
+			struct dma_async_tx_descriptor *tx)
+{
+	struct dma_chan *chan;
+	struct dma_device *device;
+	struct dma_async_tx_descriptor *intr_tx = (void *) ~0;
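+	/* ~0 is a sentinel; it is cleared iff the in-place append succeeds */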
+
+	/* first check to see if we can still append to depend_tx */
+	spin_lock_bh(&depend_tx->lock);
+	if (depend_tx->parent && depend_tx->chan == tx->chan) {
+		tx->parent = depend_tx;
+		depend_tx->next = tx;
+		intr_tx = NULL;
+	}
+	spin_unlock_bh(&depend_tx->lock);
+
+	if (!intr_tx)
+		return;
+
+	chan = depend_tx->chan;
+	device = chan->device;
+
+	/* see if we can schedule an interrupt; otherwise poll for
+	 * completion
+	 */
+	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask))
+		intr_tx = device->device_prep_dma_interrupt(chan, 0);
+	else
+		intr_tx = NULL;
+
+	if (intr_tx) {
+		intr_tx->callback = NULL;
+		intr_tx->callback_param = NULL;
+		tx->parent = intr_tx;
+		/* safe to set ->next outside the lock since we know we are
+		 * not submitted yet
+		 */
+		intr_tx->next = tx;
+
+		/* check if we need to append */
+		spin_lock_bh(&depend_tx->lock);
+		if (depend_tx->parent) {
+			intr_tx->parent = depend_tx;
+			depend_tx->next = intr_tx;
+			async_tx_ack(intr_tx);
+			intr_tx = NULL;
+		}
+		spin_unlock_bh(&depend_tx->lock);
+
+		if (intr_tx) {
+			intr_tx->parent = NULL;
+			intr_tx->tx_submit(intr_tx);
+			async_tx_ack(intr_tx);
+		}
+	} else {
+		if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
+			panic("%s: DMA_ERROR waiting for depend_tx\n",
+			      __func__);
+		tx->tx_submit(tx);
+	}
+}
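+
+/*
+ * In short: (1) try to append to depend_tx while it is still pending;
+ * (2) otherwise bridge the two channels with an interrupt descriptor so
+ * that completion on the old channel triggers submission on the new one;
+ * (3) with no interrupt capability, wait synchronously and submit.
+ */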
+
+
+/**
+ * enum submit_disposition - while holding depend_tx->lock we must avoid submitting
+ * 	new operations to prevent a circular locking dependency with
+ * 	drivers that already hold a channel lock when calling
+ * 	async_tx_run_dependencies.
+ * @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock
+ * @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch
+ * @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly
+ */
+enum submit_disposition {
+	ASYNC_TX_SUBMITTED,
+	ASYNC_TX_CHANNEL_SWITCH,
+	ASYNC_TX_DIRECT_SUBMIT,
+};
+
 void
 async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
 	enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
@@ -405,28 +506,55 @@
 	tx->callback = cb_fn;
 	tx->callback_param = cb_param;
 
-	/* set this new tx to run after depend_tx if:
-	 * 1/ a dependency exists (depend_tx is !NULL)
-	 * 2/ the tx can not be submitted to the current channel
-	 */
-	if (depend_tx && depend_tx->chan != chan) {
-		/* if ack is already set then we cannot be sure
-		 * we are referring to the correct operation
-		 */
-		BUG_ON(depend_tx->ack);
+	if (depend_tx) {
+		enum submit_disposition s;
 
-		tx->parent = depend_tx;
+		/* sanity check the dependency chain:
+		 * 1/ if ack is already set then we cannot be sure
+		 * we are referring to the correct operation
+		 * 2/ dependencies are 1:1 i.e. two transactions can
+		 * not depend on the same parent
+		 */
+		BUG_ON(async_tx_test_ack(depend_tx) || depend_tx->next ||
+		       tx->parent);
+
+		/* the lock prevents async_tx_run_dependencies from missing
+		 * the setting of ->next when ->parent != NULL
+		 */
 		spin_lock_bh(&depend_tx->lock);
-		list_add_tail(&tx->depend_node, &depend_tx->depend_list);
-		if (depend_tx->cookie == 0) {
-			struct dma_chan *dep_chan = depend_tx->chan;
-			struct dma_device *dep_dev = dep_chan->device;
-			dep_dev->device_dependency_added(dep_chan);
+		if (depend_tx->parent) {
+			/* we have a parent so we can not submit directly
+			 * if we are staying on the same channel: append
+			 * else: channel switch
+			 */
+			if (depend_tx->chan == chan) {
+				tx->parent = depend_tx;
+				depend_tx->next = tx;
+				s = ASYNC_TX_SUBMITTED;
+			} else
+				s = ASYNC_TX_CHANNEL_SWITCH;
+		} else {
+			/* we do not have a parent so we may be able to submit
+			 * directly if we are staying on the same channel
+			 */
+			if (depend_tx->chan == chan)
+				s = ASYNC_TX_DIRECT_SUBMIT;
+			else
+				s = ASYNC_TX_CHANNEL_SWITCH;
 		}
 		spin_unlock_bh(&depend_tx->lock);
 
-		/* schedule an interrupt to trigger the channel switch */
-		async_trigger_callback(ASYNC_TX_ACK, depend_tx, NULL, NULL);
+		switch (s) {
+		case ASYNC_TX_SUBMITTED:
+			break;
+		case ASYNC_TX_CHANNEL_SWITCH:
+			async_tx_channel_switch(depend_tx, tx);
+			break;
+		case ASYNC_TX_DIRECT_SUBMIT:
+			tx->parent = NULL;
+			tx->tx_submit(tx);
+			break;
+		}
 	} else {
 		tx->parent = NULL;
 		tx->tx_submit(tx);
@@ -467,7 +595,7 @@
 		if (device && !dma_has_cap(DMA_INTERRUPT, device->cap_mask))
 			device = NULL;
 
-		tx = device ? device->device_prep_dma_interrupt(chan) : NULL;
+		tx = device ? device->device_prep_dma_interrupt(chan, 0) : NULL;
 	} else
 		tx = NULL;
 
@@ -483,7 +611,7 @@
 			/* if ack is already set then we cannot be sure
 			 * we are referring to the correct operation
 			 */
-			BUG_ON(depend_tx->ack);
+			BUG_ON(async_tx_test_ack(depend_tx));
 			if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
 				panic("%s: DMA_ERROR waiting for depend_tx\n",
 					__func__);
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 1c445c7..3a0dddc 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -191,7 +191,7 @@
 				/* if ack is already set then we cannot be sure
 				 * we are referring to the correct operation
 				 */
-				BUG_ON(depend_tx->ack);
+				BUG_ON(async_tx_test_ack(depend_tx));
 				if (dma_wait_for_async_tx(depend_tx) ==
 					DMA_ERROR)
 					panic("%s: DMA_ERROR waiting for "
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index a697fb6..a498a6c 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -4,6 +4,8 @@
  *  Copyright (C) 2000       Andrew Henroid
  *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
  *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ *  Copyright (c) 2008 Intel Corporation
+ *   Author: Matthew Wilcox <willy@linux.intel.com>
  *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  *
@@ -37,15 +39,18 @@
 #include <linux/workqueue.h>
 #include <linux/nmi.h>
 #include <linux/acpi.h>
-#include <acpi/acpi.h>
-#include <asm/io.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/processor.h>
-#include <asm/uaccess.h>
-
 #include <linux/efi.h>
 #include <linux/ioport.h>
 #include <linux/list.h>
+#include <linux/jiffies.h>
+#include <linux/semaphore.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+#include <acpi/acpi.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/processor.h>
 
 #define _COMPONENT		ACPI_OS_SERVICES
 ACPI_MODULE_NAME("osl");
@@ -764,7 +769,6 @@
 {
 	struct semaphore *sem = NULL;
 
-
 	sem = acpi_os_allocate(sizeof(struct semaphore));
 	if (!sem)
 		return AE_NO_MEMORY;
@@ -791,12 +795,12 @@
 {
 	struct semaphore *sem = (struct semaphore *)handle;
 
-
 	if (!sem)
 		return AE_BAD_PARAMETER;
 
 	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));
 
+	BUG_ON(!list_empty(&sem->wait_list));
 	kfree(sem);
 	sem = NULL;
 
@@ -804,21 +808,15 @@
 }
 
 /*
- * TODO: The kernel doesn't have a 'down_timeout' function -- had to
- * improvise.  The process is to sleep for one scheduler quantum
- * until the semaphore becomes available.  Downside is that this
- * may result in starvation for timeout-based waits when there's
- * lots of semaphore activity.
- *
  * TODO: Support for units > 1?
  */
 acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
 {
 	acpi_status status = AE_OK;
 	struct semaphore *sem = (struct semaphore *)handle;
+	long jiffies;
 	int ret = 0;
 
-
 	if (!sem || (units < 1))
 		return AE_BAD_PARAMETER;
 
@@ -828,58 +826,14 @@
 	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
 			  handle, units, timeout));
 
-	/*
-	 * This can be called during resume with interrupts off.
-	 * Like boot-time, we should be single threaded and will
-	 * always get the lock if we try -- timeout or not.
-	 * If this doesn't succeed, then we will oops courtesy of
-	 * might_sleep() in down().
-	 */
-	if (!down_trylock(sem))
-		return AE_OK;
-
-	switch (timeout) {
-		/*
-		 * No Wait:
-		 * --------
-		 * A zero timeout value indicates that we shouldn't wait - just
-		 * acquire the semaphore if available otherwise return AE_TIME
-		 * (a.k.a. 'would block').
-		 */
-	case 0:
-		if (down_trylock(sem))
-			status = AE_TIME;
-		break;
-
-		/*
-		 * Wait Indefinitely:
-		 * ------------------
-		 */
-	case ACPI_WAIT_FOREVER:
-		down(sem);
-		break;
-
-		/*
-		 * Wait w/ Timeout:
-		 * ----------------
-		 */
-	default:
-		// TODO: A better timeout algorithm?
-		{
-			int i = 0;
-			static const int quantum_ms = 1000 / HZ;
-
-			ret = down_trylock(sem);
-			for (i = timeout; (i > 0 && ret != 0); i -= quantum_ms) {
-				schedule_timeout_interruptible(1);
-				ret = down_trylock(sem);
-			}
-
-			if (ret != 0)
-				status = AE_TIME;
-		}
-		break;
-	}
+	if (timeout == ACPI_WAIT_FOREVER)
+		jiffies = MAX_SCHEDULE_TIMEOUT;
+	else
+		jiffies = msecs_to_jiffies(timeout);
+
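+	/* down_timeout() returns 0 on acquire, -ETIME on timeout */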
+	ret = down_timeout(sem, jiffies);
+	if (ret)
+		status = AE_TIME;
 
 	if (ACPI_FAILURE(status)) {
 		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
@@ -902,7 +856,6 @@
 {
 	struct semaphore *sem = (struct semaphore *)handle;
 
-
 	if (!sem || (units < 1))
 		return AE_BAD_PARAMETER;
 
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 25aba69..292aa9a 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -28,7 +28,7 @@
        default n
 
 config ATA_ACPI
-	bool
+	bool "ATA ACPI Support"
 	depends on ACPI && PCI
 	select ACPI_DOCK
 	default y
@@ -41,6 +41,13 @@
 	  You can disable this at kernel boot time by using the
 	  option libata.noacpi=1
 
+config SATA_PMP
+	bool "SATA Port Multiplier support"
+	default y
+	help
+	  This option adds support for SATA Port Multipliers
+	  (the SATA version of an Ethernet hub, or SAS expander).
+
 config SATA_AHCI
 	tristate "AHCI SATA support"
 	depends on PCI
@@ -49,6 +56,43 @@
 
 	  If unsure, say N.
 
+config SATA_SIL24
+	tristate "Silicon Image 3124/3132 SATA support"
+	depends on PCI
+	help
+	  This option enables support for Silicon Image 3124/3132 Serial ATA.
+
+	  If unsure, say N.
+
+config SATA_FSL
+	tristate "Freescale 3.0Gbps SATA support"
+	depends on FSL_SOC
+	help
+	  This option enables support for the Freescale 3.0Gbps SATA controller.
+	  It can be found on MPC837x and MPC8315.
+
+	  If unsure, say N.
+
+config ATA_SFF
+	bool "ATA SFF support"
+	default y
+	help
+	  This option adds support for ATA controllers with an
+	  SFF-compliant or similar programming interface.
+
+	  SFF is the legacy IDE interface that has been around since
+	  the dawn of time.  Almost all PATA controllers have an
+	  SFF interface.  Many SATA controllers have an SFF interface
+	  when configured into a legacy compatibility mode.
+
+	  If you have exclusively modern controllers like AHCI,
+	  Silicon Image 3124, or Marvell 6440, you may choose to
+	  disable this unneeded SFF support.
+
+	  If unsure, say Y.
+
+if ATA_SFF
+
 config SATA_SVW
 	tristate "ServerWorks Frodo / Apple K2 SATA support"
 	depends on PCI
@@ -125,14 +169,6 @@
 
 	  If unsure, say N.
 
-config SATA_SIL24
-	tristate "Silicon Image 3124/3132 SATA support"
-	depends on PCI
-	help
-	  This option enables support for Silicon Image 3124/3132 Serial ATA.
-
-	  If unsure, say N.
-
 config SATA_SIS
 	tristate "SiS 964/965/966/180 SATA support"
 	depends on PCI
@@ -183,15 +219,6 @@
 	  firmware in the BIOS. This driver can sometimes handle
 	  otherwise unsupported hardware.
 
-config SATA_FSL
-	tristate "Freescale 3.0Gbps SATA support"
-	depends on FSL_SOC
-	help
-	  This option enables support for Freescale 3.0Gbps SATA controller.
-	  It can be found on MPC837x and MPC8315.
-
-	  If unsure, say N.
-
 config PATA_ALI
 	tristate "ALi PATA support (Experimental)"
 	depends on PCI && EXPERIMENTAL
@@ -679,4 +706,5 @@
 
 	  If unsure, say N.
 
+endif # ATA_SFF
 endif # ATA
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 0511e6f..1fbc2aa 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -78,6 +78,7 @@
 # Should be last libata driver
 obj-$(CONFIG_PATA_LEGACY)	+= pata_legacy.o
 
-libata-objs	:= libata-core.o libata-scsi.o libata-sff.o libata-eh.o \
-		   libata-pmp.o
+libata-objs	:= libata-core.o libata-scsi.o libata-eh.o
+libata-$(CONFIG_ATA_SFF)	+= libata-sff.o
+libata-$(CONFIG_SATA_PMP)	+= libata-pmp.o
 libata-$(CONFIG_ATA_ACPI)	+= libata-acpi.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index b1eb4e2..739ba3f 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -62,7 +62,6 @@
 	AHCI_MAX_PORTS		= 32,
 	AHCI_MAX_SG		= 168, /* hardware max is 64K */
 	AHCI_DMA_BOUNDARY	= 0xffffffff,
-	AHCI_USE_CLUSTERING	= 1,
 	AHCI_MAX_CMDS		= 32,
 	AHCI_CMD_SZ		= 32,
 	AHCI_CMD_SLOT_SZ	= AHCI_MAX_CMDS * AHCI_CMD_SZ,
@@ -198,7 +197,6 @@
 					  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
 					  ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
 					  ATA_FLAG_IPM,
-	AHCI_LFLAG_COMMON		= ATA_LFLAG_SKIP_D2H_BSY,
 
 	ICH_MAP				= 0x90, /* ICH MAP register */
 };
@@ -245,19 +243,24 @@
 static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
-static void ahci_irq_clear(struct ata_port *ap);
+static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
 static int ahci_port_start(struct ata_port *ap);
 static void ahci_port_stop(struct ata_port *ap);
-static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
 static void ahci_qc_prep(struct ata_queued_cmd *qc);
-static u8 ahci_check_status(struct ata_port *ap);
 static void ahci_freeze(struct ata_port *ap);
 static void ahci_thaw(struct ata_port *ap);
 static void ahci_pmp_attach(struct ata_port *ap);
 static void ahci_pmp_detach(struct ata_port *ap);
+static int ahci_softreset(struct ata_link *link, unsigned int *class,
+			  unsigned long deadline);
+static int ahci_hardreset(struct ata_link *link, unsigned int *class,
+			  unsigned long deadline);
+static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
+				 unsigned long deadline);
+static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
+				unsigned long deadline);
+static void ahci_postreset(struct ata_link *link, unsigned int *class);
 static void ahci_error_handler(struct ata_port *ap);
-static void ahci_vt8251_error_handler(struct ata_port *ap);
-static void ahci_p5wdh_error_handler(struct ata_port *ap);
 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
 static int ahci_port_resume(struct ata_port *ap);
 static void ahci_dev_config(struct ata_device *dev);
@@ -276,129 +279,54 @@
 };
 
 static struct scsi_host_template ahci_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.change_queue_depth	= ata_scsi_change_queue_depth,
+	ATA_NCQ_SHT(DRV_NAME),
 	.can_queue		= AHCI_MAX_CMDS - 1,
-	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= AHCI_MAX_SG,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= AHCI_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
 	.dma_boundary		= AHCI_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
 	.shost_attrs		= ahci_shost_attrs,
 };
 
-static const struct ata_port_operations ahci_ops = {
-	.check_status		= ahci_check_status,
-	.check_altstatus	= ahci_check_status,
-	.dev_select		= ata_noop_dev_select,
-
-	.dev_config		= ahci_dev_config,
-
-	.tf_read		= ahci_tf_read,
+static struct ata_port_operations ahci_ops = {
+	.inherits		= &sata_pmp_port_ops,
 
 	.qc_defer		= sata_pmp_qc_defer_cmd_switch,
 	.qc_prep		= ahci_qc_prep,
 	.qc_issue		= ahci_qc_issue,
-
-	.irq_clear		= ahci_irq_clear,
-
-	.scr_read		= ahci_scr_read,
-	.scr_write		= ahci_scr_write,
+	.qc_fill_rtf		= ahci_qc_fill_rtf,
 
 	.freeze			= ahci_freeze,
 	.thaw			= ahci_thaw,
-
+	.softreset		= ahci_softreset,
+	.hardreset		= ahci_hardreset,
+	.postreset		= ahci_postreset,
+	.pmp_softreset		= ahci_softreset,
 	.error_handler		= ahci_error_handler,
 	.post_internal_cmd	= ahci_post_internal_cmd,
+	.dev_config		= ahci_dev_config,
 
+	.scr_read		= ahci_scr_read,
+	.scr_write		= ahci_scr_write,
 	.pmp_attach		= ahci_pmp_attach,
 	.pmp_detach		= ahci_pmp_detach,
 
-#ifdef CONFIG_PM
-	.port_suspend		= ahci_port_suspend,
-	.port_resume		= ahci_port_resume,
-#endif
 	.enable_pm		= ahci_enable_alpm,
 	.disable_pm		= ahci_disable_alpm,
-
-	.port_start		= ahci_port_start,
-	.port_stop		= ahci_port_stop,
-};
-
-static const struct ata_port_operations ahci_vt8251_ops = {
-	.check_status		= ahci_check_status,
-	.check_altstatus	= ahci_check_status,
-	.dev_select		= ata_noop_dev_select,
-
-	.tf_read		= ahci_tf_read,
-
-	.qc_defer		= sata_pmp_qc_defer_cmd_switch,
-	.qc_prep		= ahci_qc_prep,
-	.qc_issue		= ahci_qc_issue,
-
-	.irq_clear		= ahci_irq_clear,
-
-	.scr_read		= ahci_scr_read,
-	.scr_write		= ahci_scr_write,
-
-	.freeze			= ahci_freeze,
-	.thaw			= ahci_thaw,
-
-	.error_handler		= ahci_vt8251_error_handler,
-	.post_internal_cmd	= ahci_post_internal_cmd,
-
-	.pmp_attach		= ahci_pmp_attach,
-	.pmp_detach		= ahci_pmp_detach,
-
 #ifdef CONFIG_PM
 	.port_suspend		= ahci_port_suspend,
 	.port_resume		= ahci_port_resume,
 #endif
-
 	.port_start		= ahci_port_start,
 	.port_stop		= ahci_port_stop,
 };
 
-static const struct ata_port_operations ahci_p5wdh_ops = {
-	.check_status		= ahci_check_status,
-	.check_altstatus	= ahci_check_status,
-	.dev_select		= ata_noop_dev_select,
+static struct ata_port_operations ahci_vt8251_ops = {
+	.inherits		= &ahci_ops,
+	.hardreset		= ahci_vt8251_hardreset,
+};
 
-	.tf_read		= ahci_tf_read,
-
-	.qc_defer		= sata_pmp_qc_defer_cmd_switch,
-	.qc_prep		= ahci_qc_prep,
-	.qc_issue		= ahci_qc_issue,
-
-	.irq_clear		= ahci_irq_clear,
-
-	.scr_read		= ahci_scr_read,
-	.scr_write		= ahci_scr_write,
-
-	.freeze			= ahci_freeze,
-	.thaw			= ahci_thaw,
-
-	.error_handler		= ahci_p5wdh_error_handler,
-	.post_internal_cmd	= ahci_post_internal_cmd,
-
-	.pmp_attach		= ahci_pmp_attach,
-	.pmp_detach		= ahci_pmp_detach,
-
-#ifdef CONFIG_PM
-	.port_suspend		= ahci_port_suspend,
-	.port_resume		= ahci_port_resume,
-#endif
-
-	.port_start		= ahci_port_start,
-	.port_stop		= ahci_port_stop,
+static struct ata_port_operations ahci_p5wdh_ops = {
+	.inherits		= &ahci_ops,
+	.hardreset		= ahci_p5wdh_hardreset,
 };
 
 #define AHCI_HFLAGS(flags)	.private_data	= (void *)(flags)
@@ -407,7 +335,6 @@
 	/* board_ahci */
 	{
 		.flags		= AHCI_FLAG_COMMON,
-		.link_flags	= AHCI_LFLAG_COMMON,
 		.pio_mask	= 0x1f, /* pio0-4 */
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &ahci_ops,
@@ -416,7 +343,6 @@
 	{
 		AHCI_HFLAGS	(AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
 		.flags		= AHCI_FLAG_COMMON,
-		.link_flags	= AHCI_LFLAG_COMMON | ATA_LFLAG_HRST_TO_RESUME,
 		.pio_mask	= 0x1f, /* pio0-4 */
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &ahci_vt8251_ops,
@@ -425,7 +351,6 @@
 	{
 		AHCI_HFLAGS	(AHCI_HFLAG_IGN_IRQ_IF_ERR),
 		.flags		= AHCI_FLAG_COMMON,
-		.link_flags	= AHCI_LFLAG_COMMON,
 		.pio_mask	= 0x1f, /* pio0-4 */
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &ahci_ops,
@@ -436,7 +361,6 @@
 				 AHCI_HFLAG_32BIT_ONLY |
 				 AHCI_HFLAG_SECT255 | AHCI_HFLAG_NO_PMP),
 		.flags		= AHCI_FLAG_COMMON,
-		.link_flags	= AHCI_LFLAG_COMMON,
 		.pio_mask	= 0x1f, /* pio0-4 */
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &ahci_ops,
@@ -447,7 +371,6 @@
 				 AHCI_HFLAG_MV_PATA),
 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
 				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
-		.link_flags	= AHCI_LFLAG_COMMON,
 		.pio_mask	= 0x1f, /* pio0-4 */
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &ahci_ops,
@@ -457,7 +380,6 @@
 		AHCI_HFLAGS	(AHCI_HFLAG_IGN_SERR_INTERNAL |
 				 AHCI_HFLAG_NO_PMP),
 		.flags		= AHCI_FLAG_COMMON,
-		.link_flags	= AHCI_LFLAG_COMMON,
 		.pio_mask	= 0x1f, /* pio0-4 */
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &ahci_ops,
@@ -1255,13 +1177,14 @@
 
 static int ahci_kick_engine(struct ata_port *ap, int force_restart)
 {
-	void __iomem *port_mmio = ap->ioaddr.cmd_addr;
+	void __iomem *port_mmio = ahci_port_base(ap);
 	struct ahci_host_priv *hpriv = ap->host->private_data;
+	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
 	u32 tmp;
 	int busy, rc;
 
 	/* do we need to kick the port? */
-	busy = ahci_check_status(ap) & (ATA_BUSY | ATA_DRQ);
+	busy = status & (ATA_BUSY | ATA_DRQ);
 	if (!busy && !force_restart)
 		return 0;
 
@@ -1328,10 +1251,21 @@
 	return 0;
 }
 
-static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
-			     int pmp, unsigned long deadline)
+static int ahci_check_ready(struct ata_link *link)
+{
+	void __iomem *port_mmio = ahci_port_base(link->ap);
+	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
+
+	if (!(status & ATA_BUSY))
+		return 1;
+	return 0;
+}
+
+static int ahci_softreset(struct ata_link *link, unsigned int *class,
+			  unsigned long deadline)
 {
 	struct ata_port *ap = link->ap;
+	int pmp = sata_srst_pmp(link);
 	const char *reason = NULL;
 	unsigned long now, msecs;
 	struct ata_taskfile tf;
@@ -1339,12 +1273,6 @@
 
 	DPRINTK("ENTER\n");
 
-	if (ata_link_offline(link)) {
-		DPRINTK("PHY reports no device\n");
-		*class = ATA_DEV_NONE;
-		return 0;
-	}
-
 	/* prepare for SRST (AHCI-1.1 10.4.1) */
 	rc = ahci_kick_engine(ap, 1);
 	if (rc && rc != -EOPNOTSUPP)
@@ -1374,10 +1302,8 @@
 	tf.ctl &= ~ATA_SRST;
 	ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
 
-	/* wait a while before checking status */
-	ata_wait_after_reset(ap, deadline);
-
-	rc = ata_wait_ready(ap, deadline);
+	/* wait for link to become ready */
+	rc = ata_wait_after_reset(link, deadline, ahci_check_ready);
 	/* link occupied, -ENODEV too is an error */
 	if (rc) {
 		reason = "device not ready";
@@ -1393,24 +1319,15 @@
 	return rc;
 }
 
-static int ahci_softreset(struct ata_link *link, unsigned int *class,
-			  unsigned long deadline)
-{
-	int pmp = 0;
-
-	if (link->ap->flags & ATA_FLAG_PMP)
-		pmp = SATA_PMP_CTRL_PORT;
-
-	return ahci_do_softreset(link, class, pmp, deadline);
-}
-
 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
 			  unsigned long deadline)
 {
+	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
 	struct ata_port *ap = link->ap;
 	struct ahci_port_priv *pp = ap->private_data;
 	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
 	struct ata_taskfile tf;
+	bool online;
 	int rc;
 
 	DPRINTK("ENTER\n");
@@ -1422,14 +1339,13 @@
 	tf.command = 0x80;
 	ata_tf_to_fis(&tf, 0, 0, d2h_fis);
 
-	rc = sata_std_hardreset(link, class, deadline);
+	rc = sata_link_hardreset(link, timing, deadline, &online,
+				 ahci_check_ready);
 
 	ahci_start_engine(ap);
 
-	if (rc == 0 && ata_link_online(link))
+	if (online)
 		*class = ahci_dev_classify(ap);
-	if (rc != -EAGAIN && *class == ATA_DEV_UNKNOWN)
-		*class = ATA_DEV_NONE;
 
 	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
 	return rc;
@@ -1439,7 +1355,7 @@
 				 unsigned long deadline)
 {
 	struct ata_port *ap = link->ap;
-	u32 serror;
+	bool online;
 	int rc;
 
 	DPRINTK("ENTER\n");
@@ -1447,11 +1363,7 @@
 	ahci_stop_engine(ap);
 
 	rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
-				 deadline);
-
-	/* vt8251 needs SError cleared for the port to operate */
-	ahci_scr_read(ap, SCR_ERROR, &serror);
-	ahci_scr_write(ap, SCR_ERROR, serror);
+				 deadline, &online, NULL);
 
 	ahci_start_engine(ap);
 
@@ -1460,7 +1372,7 @@
 	/* vt8251 doesn't clear BSY on signature FIS reception,
 	 * request follow-up softreset.
 	 */
-	return rc ?: -EAGAIN;
+	return online ? -EAGAIN : rc;
 }
 
 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
@@ -1470,6 +1382,7 @@
 	struct ahci_port_priv *pp = ap->private_data;
 	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
 	struct ata_taskfile tf;
+	bool online;
 	int rc;
 
 	ahci_stop_engine(ap);
@@ -1480,16 +1393,10 @@
 	ata_tf_to_fis(&tf, 0, 0, d2h_fis);
 
 	rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
-				 deadline);
+				 deadline, &online, NULL);
 
 	ahci_start_engine(ap);
 
-	if (rc || ata_link_offline(link))
-		return rc;
-
-	/* spec mandates ">= 2ms" before checking status */
-	msleep(150);
-
 	/* The pseudo configuration device on SIMG4726 attached to
 	 * ASUS P5W-DH Deluxe doesn't send signature FIS after
 	 * hardreset if no device is attached to the first downstream
@@ -1503,11 +1410,13 @@
 	 * have to be reset again.  For most cases, this should
 	 * suffice while making probing snappish enough.
 	 */
-	rc = ata_wait_ready(ap, jiffies + 2 * HZ);
-	if (rc)
-		ahci_kick_engine(ap, 0);
-
-	return 0;
+	if (online) {
+		rc = ata_wait_after_reset(link, jiffies + 2 * HZ,
+					  ahci_check_ready);
+		if (rc)
+			ahci_kick_engine(ap, 0);
+	}
+	return rc;
 }
 
 static void ahci_postreset(struct ata_link *link, unsigned int *class)
@@ -1530,27 +1439,6 @@
 	}
 }
 
-static int ahci_pmp_softreset(struct ata_link *link, unsigned int *class,
-			      unsigned long deadline)
-{
-	return ahci_do_softreset(link, class, link->pmp, deadline);
-}
-
-static u8 ahci_check_status(struct ata_port *ap)
-{
-	void __iomem *mmio = ap->ioaddr.cmd_addr;
-
-	return readl(mmio + PORT_TFDATA) & 0xFF;
-}
-
-static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
-{
-	struct ahci_port_priv *pp = ap->private_data;
-	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
-
-	ata_tf_from_fis(d2h_fis, tf);
-}
-
 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
 {
 	struct scatterlist *sg;
@@ -1663,27 +1551,27 @@
 		u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
 
 		active_ehi->err_mask |= AC_ERR_HSM;
-		active_ehi->action |= ATA_EH_SOFTRESET;
+		active_ehi->action |= ATA_EH_RESET;
 		ata_ehi_push_desc(active_ehi,
 				  "unknown FIS %08x %08x %08x %08x" ,
 				  unk[0], unk[1], unk[2], unk[3]);
 	}
 
-	if (ap->nr_pmp_links && (irq_stat & PORT_IRQ_BAD_PMP)) {
+	if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
 		active_ehi->err_mask |= AC_ERR_HSM;
-		active_ehi->action |= ATA_EH_SOFTRESET;
+		active_ehi->action |= ATA_EH_RESET;
 		ata_ehi_push_desc(active_ehi, "incorrect PMP");
 	}
 
 	if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
 		host_ehi->err_mask |= AC_ERR_HOST_BUS;
-		host_ehi->action |= ATA_EH_SOFTRESET;
+		host_ehi->action |= ATA_EH_RESET;
 		ata_ehi_push_desc(host_ehi, "host bus error");
 	}
 
 	if (irq_stat & PORT_IRQ_IF_ERR) {
 		host_ehi->err_mask |= AC_ERR_ATA_BUS;
-		host_ehi->action |= ATA_EH_SOFTRESET;
+		host_ehi->action |= ATA_EH_RESET;
 		ata_ehi_push_desc(host_ehi, "interface fatal error");
 	}
 
@@ -1704,7 +1592,7 @@
 
 static void ahci_port_intr(struct ata_port *ap)
 {
-	void __iomem *port_mmio = ap->ioaddr.cmd_addr;
+	void __iomem *port_mmio = ahci_port_base(ap);
 	struct ata_eh_info *ehi = &ap->link.eh_info;
 	struct ahci_port_priv *pp = ap->private_data;
 	struct ahci_host_priv *hpriv = ap->host->private_data;
@@ -1766,21 +1654,16 @@
 	else
 		qc_active = readl(port_mmio + PORT_CMD_ISSUE);
 
-	rc = ata_qc_complete_multiple(ap, qc_active, NULL);
+	rc = ata_qc_complete_multiple(ap, qc_active);
 
 	/* while resetting, invalid completions are expected */
 	if (unlikely(rc < 0 && !resetting)) {
 		ehi->err_mask |= AC_ERR_HSM;
-		ehi->action |= ATA_EH_SOFTRESET;
+		ehi->action |= ATA_EH_RESET;
 		ata_port_freeze(ap);
 	}
 }
 
-static void ahci_irq_clear(struct ata_port *ap)
-{
-	/* TODO */
-}
-
 static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
 {
 	struct ata_host *host = dev_instance;
@@ -1854,6 +1737,15 @@
 	return 0;
 }
 
+static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
+{
+	struct ahci_port_priv *pp = qc->ap->private_data;
+	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
+
+	ata_tf_from_fis(d2h_fis, &qc->result_tf);
+	return true;
+}
+
 static void ahci_freeze(struct ata_port *ap)
 {
 	void __iomem *port_mmio = ahci_port_base(ap);
@@ -1886,37 +1778,7 @@
 		ahci_start_engine(ap);
 	}
 
-	/* perform recovery */
-	sata_pmp_do_eh(ap, ata_std_prereset, ahci_softreset,
-		       ahci_hardreset, ahci_postreset,
-		       sata_pmp_std_prereset, ahci_pmp_softreset,
-		       sata_pmp_std_hardreset, sata_pmp_std_postreset);
-}
-
-static void ahci_vt8251_error_handler(struct ata_port *ap)
-{
-	if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
-		/* restart engine */
-		ahci_stop_engine(ap);
-		ahci_start_engine(ap);
-	}
-
-	/* perform recovery */
-	ata_do_eh(ap, ata_std_prereset, ahci_softreset, ahci_vt8251_hardreset,
-		  ahci_postreset);
-}
-
-static void ahci_p5wdh_error_handler(struct ata_port *ap)
-{
-	if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
-		/* restart engine */
-		ahci_stop_engine(ap);
-		ahci_start_engine(ap);
-	}
-
-	/* perform recovery */
-	ata_do_eh(ap, ata_std_prereset, ahci_softreset, ahci_p5wdh_hardreset,
-		  ahci_postreset);
+	sata_pmp_error_handler(ap);
 }
 
 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
@@ -1961,7 +1823,7 @@
 	ahci_power_up(ap);
 	ahci_start_port(ap);
 
-	if (ap->nr_pmp_links)
+	if (sata_pmp_attached(ap))
 		ahci_pmp_attach(ap);
 	else
 		ahci_pmp_detach(ap);
@@ -2324,7 +2186,6 @@
 
 	for (i = 0; i < host->n_ports; i++) {
 		struct ata_port *ap = host->ports[i];
-		void __iomem *port_mmio = ahci_port_base(ap);
 
 		ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
 		ata_port_pbar_desc(ap, AHCI_PCI_BAR,
@@ -2333,12 +2194,8 @@
 		/* set initial link pm policy */
 		ap->pm_policy = NOT_AVAILABLE;
 
-		/* standard SATA port setup */
-		if (hpriv->port_map & (1 << i))
-			ap->ioaddr.cmd_addr = port_mmio;
-
 		/* disabled/not-implemented port */
-		else
+		if (!(hpriv->port_map & (1 << i)))
 			ap->ops = &ata_dummy_port_ops;
 	}
 
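The ahci.c conversion above is typical of this series: fully
spelled-out const ops tables are replaced by small structures that set
only their overrides and chain to a parent via .inherits.  A minimal
sketch of how such a chain can be flattened once at registration time
(this assumes all method slots precede the ->inherits member in
struct ata_port_operations, and is a sketch of the idea rather than
the exact in-tree helper):

	static void finalize_port_ops_sketch(struct ata_port_operations *ops)
	{
		void **begin = (void **)ops;
		void **end = (void **)&ops->inherits;
		const struct ata_port_operations *cur;

		/* Walk up the chain; fill every slot the child left
		 * NULL with the nearest ancestor's method. */
		for (cur = ops->inherits; cur; cur = cur->inherits) {
			void **inherit = (void **)cur;
			void **pp;

			for (pp = begin; pp < end; pp++, inherit++)
				if (!*pp)
					*pp = *inherit;
		}
		ops->inherits = NULL;	/* chain resolved */
	}

Because the slots are patched in place, the ops tables lose their
const qualifier in this conversion; dispatch afterwards remains a
plain function-pointer call.
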
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
index 2053420..47aeccd 100644
--- a/drivers/ata/ata_generic.c
+++ b/drivers/ata/ata_generic.c
@@ -95,53 +95,13 @@
 }
 
 static struct scsi_host_template generic_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
 };
 
 static struct ata_port_operations generic_port_ops = {
-	.set_mode	= generic_set_mode,
-
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.bmdma_setup 	= ata_bmdma_setup,
-	.bmdma_start 	= ata_bmdma_start,
-	.bmdma_stop	= ata_bmdma_stop,
-	.bmdma_status 	= ata_bmdma_status,
-
-	.data_xfer	= ata_data_xfer,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= ata_bmdma_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.inherits	= &ata_bmdma_port_ops,
 	.cable_detect	= ata_cable_unknown,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
+	.set_mode	= generic_set_mode,
 };
 
 static int all_generic_ide;		/* Set to claim all devices */
@@ -160,7 +120,6 @@
 {
 	u16 command;
 	static const struct ata_port_info info = {
-		.sht = &generic_sht,
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
@@ -191,9 +150,9 @@
 		return -ENODEV;
 
 	if (dev->vendor == PCI_VENDOR_ID_AL)
-	    	ata_pci_clear_simplex(dev);
+		ata_pci_bmdma_clear_simplex(dev);
 
-	return ata_pci_init_one(dev, ppi);
+	return ata_pci_sff_init_one(dev, ppi, &generic_sht, NULL);
 }
 
 static struct pci_device_id ata_generic[] = {
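
The ATA_BMDMA_SHT() initializer used in generic_sht above supplies
exactly the boilerplate this hunk deletes.  Judging from the removed
fields, it expands to roughly the following (a reconstruction for
orientation only; the authoritative definition lives in
<linux/libata.h>):

	#define ATA_BASE_SHT(drv_name)					\
		.module			= THIS_MODULE,			\
		.name			= drv_name,			\
		.ioctl			= ata_scsi_ioctl,		\
		.queuecommand		= ata_scsi_queuecmd,		\
		.can_queue		= ATA_DEF_QUEUE,		\
		.this_id		= ATA_SHT_THIS_ID,		\
		.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,		\
		.emulated		= ATA_SHT_EMULATED,		\
		.use_clustering		= ATA_SHT_USE_CLUSTERING,	\
		.proc_name		= drv_name,			\
		.slave_configure	= ata_scsi_slave_config,	\
		.slave_destroy		= ata_scsi_slave_destroy,	\
		.bios_param		= ata_std_bios_param

	#define ATA_BMDMA_SHT(drv_name)					\
		ATA_BASE_SHT(drv_name),					\
		.sg_tablesize		= LIBATA_MAX_PRD,		\
		.dma_boundary		= ATA_DMA_BOUNDARY

Drivers then override only what differs; ahci_sht earlier in this
merge keeps the ATA_NCQ_SHT() defaults but sets its own can_queue,
sg_tablesize and dma_boundary.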
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index fae8404..b7c38ee 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -100,13 +100,11 @@
 	PIIX_IOCFG		= 0x54, /* IDE I/O configuration register */
 	ICH5_PMR		= 0x90, /* port mapping register */
 	ICH5_PCS		= 0x92,	/* port control and status */
-	PIIX_SCC		= 0x0A, /* sub-class code register */
 	PIIX_SIDPR_BAR		= 5,
 	PIIX_SIDPR_LEN		= 16,
 	PIIX_SIDPR_IDX		= 0,
 	PIIX_SIDPR_DATA		= 4,
 
-	PIIX_FLAG_AHCI		= (1 << 27), /* AHCI possible */
 	PIIX_FLAG_CHECKINTR	= (1 << 28), /* make sure PCI INTx enabled */
 	PIIX_FLAG_SIDPR		= (1 << 29), /* SATA idx/data pair regs */
 
@@ -140,12 +138,11 @@
 	ich_pata_100,		/* ICH up to UDMA 100 */
 	ich5_sata,
 	ich6_sata,
-	ich6_sata_ahci,
-	ich6m_sata_ahci,
-	ich8_sata_ahci,
+	ich6m_sata,
+	ich8_sata,
 	ich8_2port_sata,
-	ich8m_apple_sata_ahci,	/* locks up on second port enable */
-	tolapai_sata_ahci,
+	ich8m_apple_sata,	/* locks up on second port enable */
+	tolapai_sata,
 	piix_pata_vmw,			/* PIIX4 for VMware, spurious DMA_ERR */
 };
 
@@ -162,7 +159,7 @@
 
 static int piix_init_one(struct pci_dev *pdev,
 			 const struct pci_device_id *ent);
-static void piix_pata_error_handler(struct ata_port *ap);
+static int piix_pata_prereset(struct ata_link *link, unsigned long deadline);
 static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev);
 static void piix_set_dmamode(struct ata_port *ap, struct ata_device *adev);
 static void ich_set_dmamode(struct ata_port *ap, struct ata_device *adev);
@@ -170,7 +167,6 @@
 static u8 piix_vmw_bmdma_status(struct ata_port *ap);
 static int piix_sidpr_scr_read(struct ata_port *ap, unsigned int reg, u32 *val);
 static int piix_sidpr_scr_write(struct ata_port *ap, unsigned int reg, u32 val);
-static void piix_sidpr_error_handler(struct ata_port *ap);
 #ifdef CONFIG_PM
 static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
 static int piix_pci_device_resume(struct pci_dev *pdev);
@@ -236,25 +232,27 @@
 	/* 82801FB/FW (ICH6/ICH6W) */
 	{ 0x8086, 0x2651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
 	/* 82801FR/FRW (ICH6R/ICH6RW) */
-	{ 0x8086, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
-	/* 82801FBM ICH6M (ICH6R with only port 0 and 2 implemented) */
-	{ 0x8086, 0x2653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata_ahci },
+	{ 0x8086, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
+	/* 82801FBM ICH6M (ICH6R with only port 0 and 2 implemented).
+	 * Attach iff the controller is in IDE mode. */
+	{ 0x8086, 0x2653, PCI_ANY_ID, PCI_ANY_ID,
+	  PCI_CLASS_STORAGE_IDE << 8, 0xffff00, ich6m_sata },
 	/* 82801GB/GR/GH (ICH7, identical to ICH6) */
-	{ 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
+	{ 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
 	/* 2801GBM/GHM (ICH7M, identical to ICH6M) */
-	{ 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata_ahci },
+	{ 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata },
 	/* Enterprise Southbridge 2 (631xESB/632xESB) */
-	{ 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
+	{ 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
 	/* SATA Controller 1 IDE (ICH8) */
-	{ 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
+	{ 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
 	/* SATA Controller 2 IDE (ICH8) */
 	{ 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
 	/* Mobile SATA Controller IDE (ICH8M) */
-	{ 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
+	{ 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
 	/* Mobile SATA Controller IDE (ICH8M), Apple */
-	{ 0x8086, 0x2828, 0x106b, 0x00a0, 0, 0, ich8m_apple_sata_ahci },
+	{ 0x8086, 0x2828, 0x106b, 0x00a0, 0, 0, ich8m_apple_sata },
 	/* SATA Controller IDE (ICH9) */
-	{ 0x8086, 0x2920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
+	{ 0x8086, 0x2920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
 	/* SATA Controller IDE (ICH9) */
 	{ 0x8086, 0x2921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
 	/* SATA Controller IDE (ICH9) */
@@ -264,15 +262,15 @@
 	/* SATA Controller IDE (ICH9M) */
 	{ 0x8086, 0x292d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
 	/* SATA Controller IDE (ICH9M) */
-	{ 0x8086, 0x292e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
+	{ 0x8086, 0x292e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
 	/* SATA Controller IDE (Tolapai) */
-	{ 0x8086, 0x5028, PCI_ANY_ID, PCI_ANY_ID, 0, 0, tolapai_sata_ahci },
+	{ 0x8086, 0x5028, PCI_ANY_ID, PCI_ANY_ID, 0, 0, tolapai_sata },
 	/* SATA Controller IDE (ICH10) */
-	{ 0x8086, 0x3a00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
+	{ 0x8086, 0x3a00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
 	/* SATA Controller IDE (ICH10) */
 	{ 0x8086, 0x3a06, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
 	/* SATA Controller IDE (ICH10) */
-	{ 0x8086, 0x3a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
+	{ 0x8086, 0x3a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
 	/* SATA Controller IDE (ICH10) */
 	{ 0x8086, 0x3a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
 
@@ -291,170 +289,37 @@
 };
 
 static struct scsi_host_template piix_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
 };
 
-static const struct ata_port_operations piix_pata_ops = {
-	.set_piomode		= piix_set_piomode,
-	.set_dmamode		= piix_set_dmamode,
-	.mode_filter		= ata_pci_default_filter,
-
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
-
-	.bmdma_setup		= ata_bmdma_setup,
-	.bmdma_start		= ata_bmdma_start,
-	.bmdma_stop		= ata_bmdma_stop,
-	.bmdma_status		= ata_bmdma_status,
-	.qc_prep		= ata_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
-	.data_xfer		= ata_data_xfer,
-
-	.freeze			= ata_bmdma_freeze,
-	.thaw			= ata_bmdma_thaw,
-	.error_handler		= piix_pata_error_handler,
-	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
+static struct ata_port_operations piix_pata_ops = {
+	.inherits		= &ata_bmdma_port_ops,
 	.cable_detect		= ata_cable_40wire,
-
-	.irq_clear		= ata_bmdma_irq_clear,
-	.irq_on			= ata_irq_on,
-
-	.port_start		= ata_port_start,
-};
-
-static const struct ata_port_operations ich_pata_ops = {
-	.set_piomode		= piix_set_piomode,
-	.set_dmamode		= ich_set_dmamode,
-	.mode_filter		= ata_pci_default_filter,
-
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
-
-	.bmdma_setup		= ata_bmdma_setup,
-	.bmdma_start		= ata_bmdma_start,
-	.bmdma_stop		= ata_bmdma_stop,
-	.bmdma_status		= ata_bmdma_status,
-	.qc_prep		= ata_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
-	.data_xfer		= ata_data_xfer,
-
-	.freeze			= ata_bmdma_freeze,
-	.thaw			= ata_bmdma_thaw,
-	.error_handler		= piix_pata_error_handler,
-	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
-	.cable_detect		= ich_pata_cable_detect,
-
-	.irq_clear		= ata_bmdma_irq_clear,
-	.irq_on			= ata_irq_on,
-
-	.port_start		= ata_port_start,
-};
-
-static const struct ata_port_operations piix_sata_ops = {
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
-
-	.bmdma_setup		= ata_bmdma_setup,
-	.bmdma_start		= ata_bmdma_start,
-	.bmdma_stop		= ata_bmdma_stop,
-	.bmdma_status		= ata_bmdma_status,
-	.qc_prep		= ata_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
-	.data_xfer		= ata_data_xfer,
-
-	.freeze			= ata_bmdma_freeze,
-	.thaw			= ata_bmdma_thaw,
-	.error_handler		= ata_bmdma_error_handler,
-	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
-
-	.irq_clear		= ata_bmdma_irq_clear,
-	.irq_on			= ata_irq_on,
-
-	.port_start		= ata_port_start,
-};
-
-static const struct ata_port_operations piix_vmw_ops = {
 	.set_piomode		= piix_set_piomode,
 	.set_dmamode		= piix_set_dmamode,
-	.mode_filter		= ata_pci_default_filter,
+	.prereset		= piix_pata_prereset,
+};
 
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
-
-	.bmdma_setup		= ata_bmdma_setup,
-	.bmdma_start		= ata_bmdma_start,
-	.bmdma_stop		= ata_bmdma_stop,
+static struct ata_port_operations piix_vmw_ops = {
+	.inherits		= &piix_pata_ops,
 	.bmdma_status		= piix_vmw_bmdma_status,
-	.qc_prep		= ata_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
-	.data_xfer		= ata_data_xfer,
-
-	.freeze			= ata_bmdma_freeze,
-	.thaw			= ata_bmdma_thaw,
-	.error_handler		= piix_pata_error_handler,
-	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
-	.cable_detect		= ata_cable_40wire,
-
-	.irq_handler		= ata_interrupt,
-	.irq_clear		= ata_bmdma_irq_clear,
-	.irq_on			= ata_irq_on,
-
-	.port_start		= ata_port_start,
 };
 
-static const struct ata_port_operations piix_sidpr_sata_ops = {
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
+static struct ata_port_operations ich_pata_ops = {
+	.inherits		= &piix_pata_ops,
+	.cable_detect		= ich_pata_cable_detect,
+	.set_dmamode		= ich_set_dmamode,
+};
 
-	.bmdma_setup		= ata_bmdma_setup,
-	.bmdma_start		= ata_bmdma_start,
-	.bmdma_stop		= ata_bmdma_stop,
-	.bmdma_status		= ata_bmdma_status,
-	.qc_prep		= ata_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
-	.data_xfer		= ata_data_xfer,
+static struct ata_port_operations piix_sata_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+};
 
+static struct ata_port_operations piix_sidpr_sata_ops = {
+	.inherits		= &piix_sata_ops,
+	.hardreset		= sata_std_hardreset,
 	.scr_read		= piix_sidpr_scr_read,
 	.scr_write		= piix_sidpr_scr_write,
-
-	.freeze			= ata_bmdma_freeze,
-	.thaw			= ata_bmdma_thaw,
-	.error_handler		= piix_sidpr_error_handler,
-	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
-
-	.irq_clear		= ata_bmdma_irq_clear,
-	.irq_on			= ata_irq_on,
-
-	.port_start		= ata_port_start,
 };
 
 static const struct piix_map_db ich5_map_db = {
@@ -553,12 +418,11 @@
 static const struct piix_map_db *piix_map_db_table[] = {
 	[ich5_sata]		= &ich5_map_db,
 	[ich6_sata]		= &ich6_map_db,
-	[ich6_sata_ahci]	= &ich6_map_db,
-	[ich6m_sata_ahci]	= &ich6m_map_db,
-	[ich8_sata_ahci]	= &ich8_map_db,
+	[ich6m_sata]		= &ich6m_map_db,
+	[ich8_sata]		= &ich8_map_db,
 	[ich8_2port_sata]	= &ich8_2port_map_db,
-	[ich8m_apple_sata_ahci]	= &ich8m_apple_map_db,
-	[tolapai_sata_ahci]	= &tolapai_map_db,
+	[ich8m_apple_sata]	= &ich8m_apple_map_db,
+	[tolapai_sata]		= &tolapai_map_db,
 };
 
 static struct ata_port_info piix_port_info[] = {
@@ -624,28 +488,18 @@
 		.port_ops	= &piix_sata_ops,
 	},
 
-	[ich6_sata_ahci] =
+	[ich6m_sata] =
 	{
-		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_AHCI,
+		.flags		= PIIX_SATA_FLAGS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &piix_sata_ops,
 	},
 
-	[ich6m_sata_ahci] =
+	[ich8_sata] =
 	{
-		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_AHCI,
-		.pio_mask	= 0x1f,	/* pio0-4 */
-		.mwdma_mask	= 0x07, /* mwdma0-2 */
-		.udma_mask	= ATA_UDMA6,
-		.port_ops	= &piix_sata_ops,
-	},
-
-	[ich8_sata_ahci] =
-	{
-		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_AHCI |
-				  PIIX_FLAG_SIDPR,
+		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
 		.udma_mask	= ATA_UDMA6,
@@ -654,27 +508,25 @@
 
 	[ich8_2port_sata] =
 	{
-		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_AHCI |
-				  PIIX_FLAG_SIDPR,
+		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &piix_sata_ops,
 	},
 
-	[tolapai_sata_ahci] =
+	[tolapai_sata] =
 	{
-		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_AHCI,
+		.flags		= PIIX_SATA_FLAGS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &piix_sata_ops,
 	},
 
-	[ich8m_apple_sata_ahci] =
+	[ich8m_apple_sata] =
 	{
-		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_AHCI |
-				  PIIX_FLAG_SIDPR,
+		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
 		.udma_mask	= ATA_UDMA6,
@@ -683,7 +535,6 @@
 
 	[piix_pata_vmw] =
 	{
-		.sht		= &piix_sht,
 		.flags		= PIIX_PATA_FLAGS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x06, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
@@ -776,13 +627,7 @@
 
 	if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->port_no]))
 		return -ENOENT;
-	return ata_std_prereset(link, deadline);
-}
-
-static void piix_pata_error_handler(struct ata_port *ap)
-{
-	ata_bmdma_drive_eh(ap, piix_pata_prereset, ata_std_softreset, NULL,
-			   ata_std_postreset);
+	return ata_sff_prereset(link, deadline);
 }
 
 /**
@@ -1168,35 +1013,6 @@
 	return 0;
 }
 
-static int piix_sidpr_hardreset(struct ata_link *link, unsigned int *class,
-				unsigned long deadline)
-{
-	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
-	int rc;
-
-	/* do hardreset */
-	rc = sata_link_hardreset(link, timing, deadline);
-	if (rc) {
-		ata_link_printk(link, KERN_ERR,
-				"COMRESET failed (errno=%d)\n", rc);
-		return rc;
-	}
-
-	/* TODO: phy layer with polling, timeouts, etc. */
-	if (ata_link_offline(link)) {
-		*class = ATA_DEV_NONE;
-		return 0;
-	}
-
-	return -EAGAIN;
-}
-
-static void piix_sidpr_error_handler(struct ata_port *ap)
-{
-	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
-			   piix_sidpr_hardreset, ata_std_postreset);
-}
-
 #ifdef CONFIG_PM
 static int piix_broken_suspend(void)
 {
@@ -1633,6 +1449,16 @@
 	if (rc)
 		return rc;
 
+	/* ICH6R may be driven by either ata_piix or ahci driver
+	 * regardless of BIOS configuration.  Make sure AHCI mode is
+	 * off.
+	 */
+	if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == 0x2652) {
+		int rc = piix_disable_ahci(pdev);
+		if (rc)
+			return rc;
+	}
+
 	/* SATA map init can change port_info, do it before prepping host */
 	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
 	if (!hpriv)
@@ -1642,22 +1468,12 @@
 		hpriv->map = piix_init_sata_map(pdev, port_info,
 					piix_map_db_table[ent->driver_data]);
 
-	rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
+	rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
 	if (rc)
 		return rc;
 	host->private_data = hpriv;
 
 	/* initialize controller */
-	if (port_flags & PIIX_FLAG_AHCI) {
-		u8 tmp;
-		pci_read_config_byte(pdev, PIIX_SCC, &tmp);
-		if (tmp == PIIX_AHCI_DEVICE) {
-			rc = piix_disable_ahci(pdev);
-			if (rc)
-				return rc;
-		}
-	}
-
 	if (port_flags & ATA_FLAG_SATA) {
 		piix_init_pcs(host, piix_map_db_table[ent->driver_data]);
 		piix_init_sidpr(host);
@@ -1686,7 +1502,7 @@
 	}
 
 	pci_set_master(pdev);
-	return ata_pci_activate_sff_host(host, ata_interrupt, &piix_sht);
+	return ata_pci_sff_activate_host(host, ata_sff_interrupt, &piix_sht);
 }
 
 static int __init piix_init(void)
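
The new ICH6M entry in the ata_piix device table above uses PCI class
matching rather than an unconditional device-ID match.  The last two
numeric fields of a pci_device_id entry are class and class_mask, and
the core's match rule amounts to (shown as a sketch, not the PCI
core's actual code):

	/* dev_class is the 24-bit class code: base << 16 | sub << 8 | prog-if.
	 * PCI_CLASS_STORAGE_IDE << 8 is 0x010100 and the 0xffff00 mask
	 * compares base class and sub-class while ignoring the programming
	 * interface, so the entry matches only while the controller
	 * advertises itself as an IDE-class device, i.e. AHCI mode is off. */
	static int id_class_matches(const struct pci_device_id *id, u32 dev_class)
	{
		return (dev_class & id->class_mask) == id->class;
	}

This is what the "Attach iff the controller is in IDE mode" comment
relies on: flip the controller into AHCI mode and the same device ID
no longer matches ata_piix at all.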
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index bf98a56..8c1cfc6 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -77,7 +77,7 @@
 {
 	WARN_ON(!(ap->flags & ATA_FLAG_ACPI_SATA));
 
-	if (!ap->nr_pmp_links) {
+	if (!sata_pmp_attached(ap)) {
 		acpi_integer adr = SATA_ADR(ap->port_no, NO_PORT_MULT);
 
 		ap->link.device->acpi_handle =
@@ -839,7 +839,8 @@
 		 */
 		ata_link_for_each_dev(dev, &ap->link) {
 			ata_acpi_clear_gtf(dev);
-			if (ata_dev_get_GTF(dev, NULL) >= 0)
+			if (ata_dev_enabled(dev) &&
+			    ata_dev_get_GTF(dev, NULL) >= 0)
 				dev->flags |= ATA_DFLAG_ACPI_PENDING;
 		}
 	} else {
@@ -849,7 +850,8 @@
 		 */
 		ata_link_for_each_dev(dev, &ap->link) {
 			ata_acpi_clear_gtf(dev);
-			dev->flags |= ATA_DFLAG_ACPI_PENDING;
+			if (ata_dev_enabled(dev))
+				dev->flags |= ATA_DFLAG_ACPI_PENDING;
 		}
 	}
 }
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index be95fdb..733eb94 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -46,7 +46,6 @@
 #include <linux/init.h>
 #include <linux/list.h>
 #include <linux/mm.h>
-#include <linux/highmem.h>
 #include <linux/spinlock.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
@@ -74,6 +73,19 @@
 const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
 const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };
 
+const struct ata_port_operations ata_base_port_ops = {
+	.prereset		= ata_std_prereset,
+	.postreset		= ata_std_postreset,
+	.error_handler		= ata_std_error_handler,
+};
+
+const struct ata_port_operations sata_port_ops = {
+	.inherits		= &ata_base_port_ops,
+
+	.qc_defer		= ata_std_qc_defer,
+	.hardreset		= sata_std_hardreset,
+};
+
 static unsigned int ata_dev_init_params(struct ata_device *dev,
 					u16 heads, u16 sectors);
 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
@@ -1043,50 +1055,6 @@
 }
 #endif	/* CONFIG_PM */
 
-
-/**
- *	ata_devchk - PATA device presence detection
- *	@ap: ATA channel to examine
- *	@device: Device to examine (starting at zero)
- *
- *	This technique was originally described in
- *	Hale Landis's ATADRVR (www.ata-atapi.com), and
- *	later found its way into the ATA/ATAPI spec.
- *
- *	Write a pattern to the ATA shadow registers,
- *	and if a device is present, it will respond by
- *	correctly storing and echoing back the
- *	ATA shadow register contents.
- *
- *	LOCKING:
- *	caller.
- */
-
-static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
-{
-	struct ata_ioports *ioaddr = &ap->ioaddr;
-	u8 nsect, lbal;
-
-	ap->ops->dev_select(ap, device);
-
-	iowrite8(0x55, ioaddr->nsect_addr);
-	iowrite8(0xaa, ioaddr->lbal_addr);
-
-	iowrite8(0xaa, ioaddr->nsect_addr);
-	iowrite8(0x55, ioaddr->lbal_addr);
-
-	iowrite8(0x55, ioaddr->nsect_addr);
-	iowrite8(0xaa, ioaddr->lbal_addr);
-
-	nsect = ioread8(ioaddr->nsect_addr);
-	lbal = ioread8(ioaddr->lbal_addr);
-
-	if ((nsect == 0x55) && (lbal == 0xaa))
-		return 1;	/* we found a device */
-
-	return 0;		/* nothing found */
-}
-
 /**
  *	ata_dev_classify - determine device type based on ATA-spec signature
  *	@tf: ATA taskfile register set for device to be identified
@@ -1147,75 +1115,6 @@
 }
 
 /**
- *	ata_dev_try_classify - Parse returned ATA device signature
- *	@dev: ATA device to classify (starting at zero)
- *	@present: device seems present
- *	@r_err: Value of error register on completion
- *
- *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
- *	an ATA/ATAPI-defined set of values is placed in the ATA
- *	shadow registers, indicating the results of device detection
- *	and diagnostics.
- *
- *	Select the ATA device, and read the values from the ATA shadow
- *	registers.  Then parse according to the Error register value,
- *	and the spec-defined values examined by ata_dev_classify().
- *
- *	LOCKING:
- *	caller.
- *
- *	RETURNS:
- *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
- */
-unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
-				  u8 *r_err)
-{
-	struct ata_port *ap = dev->link->ap;
-	struct ata_taskfile tf;
-	unsigned int class;
-	u8 err;
-
-	ap->ops->dev_select(ap, dev->devno);
-
-	memset(&tf, 0, sizeof(tf));
-
-	ap->ops->tf_read(ap, &tf);
-	err = tf.feature;
-	if (r_err)
-		*r_err = err;
-
-	/* see if device passed diags: continue and warn later */
-	if (err == 0)
-		/* diagnostic fail : do nothing _YET_ */
-		dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
-	else if (err == 1)
-		/* do nothing */ ;
-	else if ((dev->devno == 0) && (err == 0x81))
-		/* do nothing */ ;
-	else
-		return ATA_DEV_NONE;
-
-	/* determine if device is ATA or ATAPI */
-	class = ata_dev_classify(&tf);
-
-	if (class == ATA_DEV_UNKNOWN) {
-		/* If the device failed diagnostic, it's likely to
-		 * have reported incorrect device signature too.
-		 * Assume ATA device if the device seems present but
-		 * device signature is invalid with diagnostic
-		 * failure.
-		 */
-		if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
-			class = ATA_DEV_ATA;
-		else
-			class = ATA_DEV_NONE;
-	} else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
-		class = ATA_DEV_NONE;
-
-	return class;
-}
-
-/**
  *	ata_id_string - Convert IDENTIFY DEVICE page into string
  *	@id: IDENTIFY DEVICE results we will examine
  *	@s: string into which data is output
@@ -1293,7 +1192,7 @@
 	}
 }
 
-static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
+u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
 {
 	u64 sectors = 0;
 
@@ -1304,10 +1203,10 @@
 	sectors |= (tf->lbam & 0xff) << 8;
 	sectors |= (tf->lbal & 0xff);
 
-	return ++sectors;
+	return sectors;
 }
 
-static u64 ata_tf_to_lba(struct ata_taskfile *tf)
+u64 ata_tf_to_lba(const struct ata_taskfile *tf)
 {
 	u64 sectors = 0;
 
@@ -1316,7 +1215,7 @@
 	sectors |= (tf->lbam & 0xff) << 8;
 	sectors |= (tf->lbal & 0xff);
 
-	return ++sectors;
+	return sectors;
 }
 
 /**
@@ -1361,9 +1260,9 @@
 	}
 
 	if (lba48)
-		*max_sectors = ata_tf_to_lba48(&tf);
+		*max_sectors = ata_tf_to_lba48(&tf) + 1;
 	else
-		*max_sectors = ata_tf_to_lba(&tf);
+		*max_sectors = ata_tf_to_lba(&tf) + 1;
 	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
 		(*max_sectors)--;
 	return 0;
@@ -1523,89 +1422,6 @@
 }
 
 /**
- *	ata_noop_dev_select - Select device 0/1 on ATA bus
- *	@ap: ATA channel to manipulate
- *	@device: ATA device (numbered from zero) to select
- *
- *	This function performs no actual function.
- *
- *	May be used as the dev_select() entry in ata_port_operations.
- *
- *	LOCKING:
- *	caller.
- */
-void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
-{
-}
-
-
-/**
- *	ata_std_dev_select - Select device 0/1 on ATA bus
- *	@ap: ATA channel to manipulate
- *	@device: ATA device (numbered from zero) to select
- *
- *	Use the method defined in the ATA specification to
- *	make either device 0, or device 1, active on the
- *	ATA channel.  Works with both PIO and MMIO.
- *
- *	May be used as the dev_select() entry in ata_port_operations.
- *
- *	LOCKING:
- *	caller.
- */
-
-void ata_std_dev_select(struct ata_port *ap, unsigned int device)
-{
-	u8 tmp;
-
-	if (device == 0)
-		tmp = ATA_DEVICE_OBS;
-	else
-		tmp = ATA_DEVICE_OBS | ATA_DEV1;
-
-	iowrite8(tmp, ap->ioaddr.device_addr);
-	ata_pause(ap);		/* needed; also flushes, for mmio */
-}
-
-/**
- *	ata_dev_select - Select device 0/1 on ATA bus
- *	@ap: ATA channel to manipulate
- *	@device: ATA device (numbered from zero) to select
- *	@wait: non-zero to wait for Status register BSY bit to clear
- *	@can_sleep: non-zero if context allows sleeping
- *
- *	Use the method defined in the ATA specification to
- *	make either device 0, or device 1, active on the
- *	ATA channel.
- *
- *	This is a high-level version of ata_std_dev_select(),
- *	which additionally provides the services of inserting
- *	the proper pauses and status polling, where needed.
- *
- *	LOCKING:
- *	caller.
- */
-
-void ata_dev_select(struct ata_port *ap, unsigned int device,
-			   unsigned int wait, unsigned int can_sleep)
-{
-	if (ata_msg_probe(ap))
-		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
-				"device %u, wait %u\n", device, wait);
-
-	if (wait)
-		ata_wait_idle(ap);
-
-	ap->ops->dev_select(ap, device);
-
-	if (wait) {
-		if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
-			msleep(150);
-		ata_wait_idle(ap);
-	}
-}
-
-/**
  *	ata_dump_id - IDENTIFY DEVICE info debugging output
  *	@id: IDENTIFY DEVICE page to dump
  *
@@ -1732,8 +1548,7 @@
  *	LOCKING:
  *	Inherited from caller.
  */
-static void ata_pio_queue_task(struct ata_port *ap, void *data,
-			       unsigned long delay)
+void ata_pio_queue_task(struct ata_port *ap, void *data, unsigned long delay)
 {
 	ap->port_task_data = data;
 
@@ -2097,7 +1912,6 @@
 	if (ata_msg_ctl(ap))
 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
 
-	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
  retry:
 	ata_tf_init(dev, &tf);
 
@@ -2464,7 +2278,7 @@
 		 * changed notifications and ATAPI ANs.
 		 */
 		if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
-		    (!ap->nr_pmp_links ||
+		    (!sata_pmp_attached(ap) ||
 		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
 			unsigned int err_mask;
 
@@ -2558,9 +2372,6 @@
 		}
 	}
 
-	if (ata_msg_probe(ap))
-		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
-			__func__, ata_chk_status(ap));
 	return 0;
 
 err_out_nosup:
@@ -3321,16 +3132,21 @@
 	if (rc)
 		return rc;
 
-	/* Old CFA may refuse this command, which is just fine */
-	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
-		ign_dev_err = 1;
-
-	/* Some very old devices and some bad newer ones fail any kind of
-	   SET_XFERMODE request but support PIO0-2 timings and no IORDY */
-	if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
-			dev->pio_mode <= XFER_PIO_2)
-		ign_dev_err = 1;
-
+	if (dev->xfer_shift == ATA_SHIFT_PIO) {
+		/* Old CFA may refuse this command, which is just fine */
+		if (ata_id_is_cfa(dev->id))
+			ign_dev_err = 1;
+		/* Catch several broken garbage emulations plus some
+		   pre-ATA devices */
+		if (ata_id_major_version(dev->id) == 0 &&
+					dev->pio_mode <= XFER_PIO_2)
+			ign_dev_err = 1;
+		/* Some very old devices and some bad newer ones fail
+		   any kind of SET_XFERMODE request but support PIO0-2
+		   timings and no IORDY */
+		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
+			ign_dev_err = 1;
+	}
 	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
 	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
 	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
@@ -3474,170 +3290,73 @@
 }
 
 /**
- *	ata_tf_to_host - issue ATA taskfile to host controller
- *	@ap: port to which command is being issued
- *	@tf: ATA taskfile register set
+ *	ata_wait_ready - wait for link to become ready
+ *	@link: link to be waited on
+ *	@deadline: deadline jiffies for the operation
+ *	@check_ready: callback to check link readiness
  *
- *	Issues ATA taskfile register set to ATA host controller,
- *	with proper synchronization with interrupt handler and
- *	other threads.
+ *	Wait for @link to become ready.  @check_ready should return a
+ *	positive number if @link is ready, 0 if it isn't, -ENODEV if
+ *	the link doesn't seem to be occupied, and another errno for
+ *	other error conditions.
+ *
+ *	Transient -ENODEV conditions are allowed for
+ *	ATA_TMOUT_FF_WAIT.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host lock)
- */
-
-static inline void ata_tf_to_host(struct ata_port *ap,
-				  const struct ata_taskfile *tf)
-{
-	ap->ops->tf_load(ap, tf);
-	ap->ops->exec_command(ap, tf);
-}
-
-/**
- *	ata_busy_sleep - sleep until BSY clears, or timeout
- *	@ap: port containing status register to be polled
- *	@tmout_pat: impatience timeout
- *	@tmout: overall timeout
- *
- *	Sleep until ATA Status register bit BSY clears,
- *	or a timeout occurs.
- *
- *	LOCKING:
- *	Kernel thread context (may sleep).
+ *	EH context.
  *
  *	RETURNS:
- *	0 on success, -errno otherwise.
+ *	0 if @link is ready before @deadline; otherwise, -errno.
  */
-int ata_busy_sleep(struct ata_port *ap,
-		   unsigned long tmout_pat, unsigned long tmout)
-{
-	unsigned long timer_start, timeout;
-	u8 status;
-
-	status = ata_busy_wait(ap, ATA_BUSY, 300);
-	timer_start = jiffies;
-	timeout = timer_start + tmout_pat;
-	while (status != 0xff && (status & ATA_BUSY) &&
-	       time_before(jiffies, timeout)) {
-		msleep(50);
-		status = ata_busy_wait(ap, ATA_BUSY, 3);
-	}
-
-	if (status != 0xff && (status & ATA_BUSY))
-		ata_port_printk(ap, KERN_WARNING,
-				"port is slow to respond, please be patient "
-				"(Status 0x%x)\n", status);
-
-	timeout = timer_start + tmout;
-	while (status != 0xff && (status & ATA_BUSY) &&
-	       time_before(jiffies, timeout)) {
-		msleep(50);
-		status = ata_chk_status(ap);
-	}
-
-	if (status == 0xff)
-		return -ENODEV;
-
-	if (status & ATA_BUSY) {
-		ata_port_printk(ap, KERN_ERR, "port failed to respond "
-				"(%lu secs, Status 0x%x)\n",
-				tmout / HZ, status);
-		return -EBUSY;
-	}
-
-	return 0;
-}
-
-/**
- *	ata_wait_after_reset - wait before checking status after reset
- *	@ap: port containing status register to be polled
- *	@deadline: deadline jiffies for the operation
- *
- *	After reset, we need to pause a while before reading status.
- *	Also, certain combination of controller and device report 0xff
- *	for some duration (e.g. until SATA PHY is up and running)
- *	which is interpreted as empty port in ATA world.  This
- *	function also waits for such devices to get out of 0xff
- *	status.
- *
- *	LOCKING:
- *	Kernel thread context (may sleep).
- */
-void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline)
-{
-	unsigned long until = jiffies + ATA_TMOUT_FF_WAIT;
-
-	if (time_before(until, deadline))
-		deadline = until;
-
-	/* Spec mandates ">= 2ms" before checking status.  We wait
-	 * 150ms, because that was the magic delay used for ATAPI
-	 * devices in Hale Landis's ATADRVR, for the period of time
-	 * between when the ATA command register is written, and then
-	 * status is checked.  Because waiting for "a while" before
-	 * checking status is fine, post SRST, we perform this magic
-	 * delay here as well.
-	 *
-	 * Old drivers/ide uses the 2mS rule and then waits for ready.
-	 */
-	msleep(150);
-
-	/* Wait for 0xff to clear.  Some SATA devices take a long time
-	 * to clear 0xff after reset.  For example, HHD424020F7SV00
-	 * iVDR needs >= 800ms while.  Quantum GoVault needs even more
-	 * than that.
-	 *
-	 * Note that some PATA controllers (pata_ali) explode if
-	 * status register is read more than once when there's no
-	 * device attached.
-	 */
-	if (ap->flags & ATA_FLAG_SATA) {
-		while (1) {
-			u8 status = ata_chk_status(ap);
-
-			if (status != 0xff || time_after(jiffies, deadline))
-				return;
-
-			msleep(50);
-		}
-	}
-}
-
-/**
- *	ata_wait_ready - sleep until BSY clears, or timeout
- *	@ap: port containing status register to be polled
- *	@deadline: deadline jiffies for the operation
- *
- *	Sleep until ATA Status register bit BSY clears, or timeout
- *	occurs.
- *
- *	LOCKING:
- *	Kernel thread context (may sleep).
- *
- *	RETURNS:
- *	0 on success, -errno otherwise.
- */
-int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
+int ata_wait_ready(struct ata_link *link, unsigned long deadline,
+		   int (*check_ready)(struct ata_link *link))
 {
 	unsigned long start = jiffies;
+	unsigned long nodev_deadline = start + ATA_TMOUT_FF_WAIT;
 	int warned = 0;
 
-	while (1) {
-		u8 status = ata_chk_status(ap);
-		unsigned long now = jiffies;
+	if (time_after(nodev_deadline, deadline))
+		nodev_deadline = deadline;
 
-		if (!(status & ATA_BUSY))
+	while (1) {
+		unsigned long now = jiffies;
+		int ready, tmp;
+
+		ready = tmp = check_ready(link);
+		if (ready > 0)
 			return 0;
-		if (!ata_link_online(&ap->link) && status == 0xff)
-			return -ENODEV;
+
+		/* -ENODEV could be transient.  Ignore -ENODEV if link
+		 * is online.  Also, some SATA devices take a long
+		 * time to clear 0xff after reset.  For example,
+		 * HHD424020F7SV00 iVDR needs >= 800ms while Quantum
+		 * GoVault needs even more than that.  Wait for
+		 * ATA_TMOUT_FF_WAIT on -ENODEV if link isn't offline.
+		 *
+		 * Note that some PATA controllers (pata_ali) explode
+		 * if status register is read more than once when
+		 * there's no device attached.
+		 */
+		if (ready == -ENODEV) {
+			if (ata_link_online(link))
+				ready = 0;
+			else if ((link->ap->flags & ATA_FLAG_SATA) &&
+				 !ata_link_offline(link) &&
+				 time_before(now, nodev_deadline))
+				ready = 0;
+		}
+
+		if (ready)
+			return ready;
 		if (time_after(now, deadline))
 			return -EBUSY;
 
 		if (!warned && time_after(now, start + 5 * HZ) &&
 		    (deadline - now > 3 * HZ)) {
-			ata_port_printk(ap, KERN_WARNING,
-				"port is slow to respond, please be patient "
-				"(Status 0x%x)\n", status);
+			ata_link_printk(link, KERN_WARNING,
+				"link is slow to respond, please be patient "
+				"(ready=%d)\n", tmp);
 			warned = 1;
 		}
 
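The contract above keeps most check_ready callbacks tiny: return a
positive value once the link is usable, 0 to keep polling, -ENODEV
when the port looks empty.  A minimal SFF-flavoured sketch, where
example_read_status() stands in for whatever status accessor the LLD
actually has (it is not a real helper):

	static int example_check_ready(struct ata_link *link)
	{
		u8 status = example_read_status(link->ap);

		if (status == 0xff)	/* floating bus: looks like no device */
			return -ENODEV;
		if (status & ATA_BUSY)
			return 0;	/* not ready; ata_wait_ready() polls again */
		return 1;		/* ready */
	}

Compare ahci_check_ready() earlier in this merge, which samples the
taskfile snapshot in PORT_TFDATA and only ever returns 1 or 0, since
AHCI reports link presence through the PHY registers instead.
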
@@ -3645,179 +3364,26 @@
 	}
 }
 
-static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
-			      unsigned long deadline)
-{
-	struct ata_ioports *ioaddr = &ap->ioaddr;
-	unsigned int dev0 = devmask & (1 << 0);
-	unsigned int dev1 = devmask & (1 << 1);
-	int rc, ret = 0;
-
-	/* if device 0 was found in ata_devchk, wait for its
-	 * BSY bit to clear
-	 */
-	if (dev0) {
-		rc = ata_wait_ready(ap, deadline);
-		if (rc) {
-			if (rc != -ENODEV)
-				return rc;
-			ret = rc;
-		}
-	}
-
-	/* if device 1 was found in ata_devchk, wait for register
-	 * access briefly, then wait for BSY to clear.
-	 */
-	if (dev1) {
-		int i;
-
-		ap->ops->dev_select(ap, 1);
-
-		/* Wait for register access.  Some ATAPI devices fail
-		 * to set nsect/lbal after reset, so don't waste too
-		 * much time on it.  We're gonna wait for !BSY anyway.
-		 */
-		for (i = 0; i < 2; i++) {
-			u8 nsect, lbal;
-
-			nsect = ioread8(ioaddr->nsect_addr);
-			lbal = ioread8(ioaddr->lbal_addr);
-			if ((nsect == 1) && (lbal == 1))
-				break;
-			msleep(50);	/* give drive a breather */
-		}
-
-		rc = ata_wait_ready(ap, deadline);
-		if (rc) {
-			if (rc != -ENODEV)
-				return rc;
-			ret = rc;
-		}
-	}
-
-	/* is all this really necessary? */
-	ap->ops->dev_select(ap, 0);
-	if (dev1)
-		ap->ops->dev_select(ap, 1);
-	if (dev0)
-		ap->ops->dev_select(ap, 0);
-
-	return ret;
-}
-
-static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
-			     unsigned long deadline)
-{
-	struct ata_ioports *ioaddr = &ap->ioaddr;
-
-	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
-
-	/* software reset.  causes dev0 to be selected */
-	iowrite8(ap->ctl, ioaddr->ctl_addr);
-	udelay(20);	/* FIXME: flush */
-	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
-	udelay(20);	/* FIXME: flush */
-	iowrite8(ap->ctl, ioaddr->ctl_addr);
-
-	/* wait a while before checking status */
-	ata_wait_after_reset(ap, deadline);
-
-	/* Before we perform post reset processing we want to see if
-	 * the bus shows 0xFF because the odd clown forgets the D7
-	 * pulldown resistor.
-	 */
-	if (ata_chk_status(ap) == 0xFF)
-		return -ENODEV;
-
-	return ata_bus_post_reset(ap, devmask, deadline);
-}
-
 /**
- *	ata_bus_reset - reset host port and associated ATA channel
- *	@ap: port to reset
+ *	ata_wait_after_reset - wait for link to become ready after reset
+ *	@link: link to be waited on
+ *	@deadline: deadline jiffies for the operation
+ *	@check_ready: callback to check link readiness
  *
- *	This is typically the first time we actually start issuing
- *	commands to the ATA channel.  We wait for BSY to clear, then
- *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
- *	result.  Determine what devices, if any, are on the channel
- *	by looking at the device 0/1 error register.  Look at the signature
- *	stored in each device's taskfile registers, to determine if
- *	the device is ATA or ATAPI.
+ *	Wait for @link to become ready after reset.
  *
  *	LOCKING:
- *	PCI/etc. bus probe sem.
- *	Obtains host lock.
+ *	EH context.
  *
- *	SIDE EFFECTS:
- *	Sets ATA_FLAG_DISABLED if bus reset fails.
+ *	RETURNS:
+ *	0 if @link is ready before @deadline; otherwise, -errno.
  */
-
-void ata_bus_reset(struct ata_port *ap)
+int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
+			 int (*check_ready)(struct ata_link *link))
 {
-	struct ata_device *device = ap->link.device;
-	struct ata_ioports *ioaddr = &ap->ioaddr;
-	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
-	u8 err;
-	unsigned int dev0, dev1 = 0, devmask = 0;
-	int rc;
+	msleep(ATA_WAIT_AFTER_RESET_MSECS);
 
-	DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
-
-	/* determine if device 0/1 are present */
-	if (ap->flags & ATA_FLAG_SATA_RESET)
-		dev0 = 1;
-	else {
-		dev0 = ata_devchk(ap, 0);
-		if (slave_possible)
-			dev1 = ata_devchk(ap, 1);
-	}
-
-	if (dev0)
-		devmask |= (1 << 0);
-	if (dev1)
-		devmask |= (1 << 1);
-
-	/* select device 0 again */
-	ap->ops->dev_select(ap, 0);
-
-	/* issue bus reset */
-	if (ap->flags & ATA_FLAG_SRST) {
-		rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
-		if (rc && rc != -ENODEV)
-			goto err_out;
-	}
-
-	/*
-	 * determine by signature whether we have ATA or ATAPI devices
-	 */
-	device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
-	if ((slave_possible) && (err != 0x81))
-		device[1].class = ata_dev_try_classify(&device[1], dev1, &err);
-
-	/* is double-select really necessary? */
-	if (device[1].class != ATA_DEV_NONE)
-		ap->ops->dev_select(ap, 1);
-	if (device[0].class != ATA_DEV_NONE)
-		ap->ops->dev_select(ap, 0);
-
-	/* if no devices were detected, disable this port */
-	if ((device[0].class == ATA_DEV_NONE) &&
-	    (device[1].class == ATA_DEV_NONE))
-		goto err_out;
-
-	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
-		/* set up device control for ATA_FLAG_SATA_RESET */
-		iowrite8(ap->ctl, ioaddr->ctl_addr);
-	}
-
-	DPRINTK("EXIT\n");
-	return;
-
-err_out:
-	ata_port_printk(ap, KERN_ERR, "disabling port\n");
-	ata_port_disable(ap);
-
-	DPRINTK("EXIT\n");
+	return ata_wait_ready(link, deadline, check_ready);
 }
 
 /**
@@ -3906,7 +3472,7 @@
 int sata_link_resume(struct ata_link *link, const unsigned long *params,
 		     unsigned long deadline)
 {
-	u32 scontrol;
+	u32 scontrol, serror;
 	int rc;
 
 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
@@ -3922,7 +3488,25 @@
 	 */
 	msleep(200);
 
-	return sata_link_debounce(link, params, deadline);
+	if ((rc = sata_link_debounce(link, params, deadline)))
+		return rc;
+
+	/* Clear SError.  PMP and some host PHYs require this to
+	 * operate and clearing should be done before checking PHY
+	 * online status to avoid a race condition (hotplugging between
+	 * link resume and status check).
+	 */
+	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
+		rc = sata_scr_write(link, SCR_ERROR, serror);
+	if (rc == 0 || rc == -EINVAL) {
+		unsigned long flags;
+
+		spin_lock_irqsave(link->ap->lock, flags);
+		link->eh_info.serror = 0;
+		spin_unlock_irqrestore(link->ap->lock, flags);
+		rc = 0;
+	}
+	return rc;
 }
 
 /**
@@ -3949,17 +3533,6 @@
 	const unsigned long *timing = sata_ehc_deb_timing(ehc);
 	int rc;
 
-	/* handle link resume */
-	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
-	    (link->flags & ATA_LFLAG_HRST_TO_RESUME))
-		ehc->i.action |= ATA_EH_HARDRESET;
-
-	/* Some PMPs don't work with only SRST, force hardreset if PMP
-	 * is supported.
-	 */
-	if (ap->flags & ATA_FLAG_PMP)
-		ehc->i.action |= ATA_EH_HARDRESET;
-
 	/* if we're about to do hardreset, nothing more to do */
 	if (ehc->i.action & ATA_EH_HARDRESET)
 		return 0;
@@ -3973,88 +3546,30 @@
 					"link for reset (errno=%d)\n", rc);
 	}
 
-	/* Wait for !BSY if the controller can wait for the first D2H
-	 * Reg FIS and we don't know that no device is attached.
-	 */
-	if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
-		rc = ata_wait_ready(ap, deadline);
-		if (rc && rc != -ENODEV) {
-			ata_link_printk(link, KERN_WARNING, "device not ready "
-					"(errno=%d), forcing hardreset\n", rc);
-			ehc->i.action |= ATA_EH_HARDRESET;
-		}
-	}
+	/* no point in trying softreset on offline link */
+	if (ata_link_offline(link))
+		ehc->i.action &= ~ATA_EH_SOFTRESET;
 
 	return 0;
 }
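/* A sketch of an LLD prereset wrapping ata_std_prereset() above; the
 * only addition is hypothetical controller setup (my_phy_setup()).
 */
static int my_prereset(struct ata_link *link, unsigned long deadline)
{
	my_phy_setup(link->ap);		/* hypothetical PHY preparation */
	return ata_std_prereset(link, deadline);
}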
 
 /**
- *	ata_std_softreset - reset host port via ATA SRST
- *	@link: ATA link to reset
- *	@classes: resulting classes of attached devices
- *	@deadline: deadline jiffies for the operation
- *
- *	Reset host port using ATA SRST.
- *
- *	LOCKING:
- *	Kernel thread context (may sleep)
- *
- *	RETURNS:
- *	0 on success, -errno otherwise.
- */
-int ata_std_softreset(struct ata_link *link, unsigned int *classes,
-		      unsigned long deadline)
-{
-	struct ata_port *ap = link->ap;
-	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
-	unsigned int devmask = 0;
-	int rc;
-	u8 err;
-
-	DPRINTK("ENTER\n");
-
-	if (ata_link_offline(link)) {
-		classes[0] = ATA_DEV_NONE;
-		goto out;
-	}
-
-	/* determine if device 0/1 are present */
-	if (ata_devchk(ap, 0))
-		devmask |= (1 << 0);
-	if (slave_possible && ata_devchk(ap, 1))
-		devmask |= (1 << 1);
-
-	/* select device 0 again */
-	ap->ops->dev_select(ap, 0);
-
-	/* issue bus reset */
-	DPRINTK("about to softreset, devmask=%x\n", devmask);
-	rc = ata_bus_softreset(ap, devmask, deadline);
-	/* if link is occupied, -ENODEV too is an error */
-	if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
-		ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
-		return rc;
-	}
-
-	/* determine by signature whether we have ATA or ATAPI devices */
-	classes[0] = ata_dev_try_classify(&link->device[0],
-					  devmask & (1 << 0), &err);
-	if (slave_possible && err != 0x81)
-		classes[1] = ata_dev_try_classify(&link->device[1],
-						  devmask & (1 << 1), &err);
-
- out:
-	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
-	return 0;
-}
-
-/**
  *	sata_link_hardreset - reset link via SATA phy reset
  *	@link: link to reset
  *	@timing: timing parameters { interval, duration, timeout } in msec
  *	@deadline: deadline jiffies for the operation
+ *	@online: optional out parameter indicating whether @link is online
+ *	@check_ready: optional callback to check link readiness
  *
  *	SATA phy-reset @link using DET bits of SControl register.
+ *	After hardreset, link readiness is waited upon using
+ *	ata_wait_ready() if @check_ready is specified.  LLDs may omit
+ *	@check_ready and perform the wait themselves after this
+ *	function returns.  Device classification is the LLD's
+ *	responsibility.
+ *
+ *	*@online is set to true iff the reset succeeded and @link is
+ *	online after reset.
  *
  *	LOCKING:
  *	Kernel thread context (may sleep)
@@ -4063,13 +3578,17 @@
  *	0 on success, -errno otherwise.
  */
 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
-			unsigned long deadline)
+			unsigned long deadline,
+			bool *online, int (*check_ready)(struct ata_link *))
 {
 	u32 scontrol;
 	int rc;
 
 	DPRINTK("ENTER\n");
 
+	if (online)
+		*online = false;
+
 	if (sata_set_spd_needed(link)) {
 		/* SATA spec says nothing about how to reconfigure
 		 * spd.  To be on the safe side, turn off phy during
@@ -4103,77 +3622,69 @@
 
 	/* bring link back */
 	rc = sata_link_resume(link, timing, deadline);
+	if (rc)
+		goto out;
+	/* if link is offline nothing more to do */
+	if (ata_link_offline(link))
+		goto out;
+
+	/* Link is online.  From this point, -ENODEV too is an error. */
+	if (online)
+		*online = true;
+
+	if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
+		/* If PMP is supported, we have to do follow-up SRST.
+		 * Some PMPs don't send D2H Reg FIS after hardreset if
+		 * the first port is empty.  Wait only for
+		 * ATA_TMOUT_PMP_SRST_WAIT.
+		 */
+		if (check_ready) {
+			unsigned long pmp_deadline;
+
+			pmp_deadline = jiffies + ATA_TMOUT_PMP_SRST_WAIT;
+			if (time_after(pmp_deadline, deadline))
+				pmp_deadline = deadline;
+			ata_wait_ready(link, pmp_deadline, check_ready);
+		}
+		rc = -EAGAIN;
+		goto out;
+	}
+
+	rc = 0;
+	if (check_ready)
+		rc = ata_wait_ready(link, deadline, check_ready);
  out:
+	if (rc && rc != -EAGAIN)
+		ata_link_printk(link, KERN_ERR,
+				"COMRESET failed (errno=%d)\n", rc);
 	DPRINTK("EXIT, rc=%d\n", rc);
 	return rc;
 }
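/* A sketch of an LLD hardreset built on the extended
 * sata_link_hardreset() above.  my_check_ready() is the hypothetical
 * callback sketched earlier; my_classify() stands in for the LLD's
 * signature-based classification, which the comment above leaves to
 * the LLD.
 */
static int my_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	bool online;
	int rc;

	rc = sata_link_hardreset(link, timing, deadline, &online,
				 my_check_ready);
	if (online)
		*class = my_classify(link);	/* hypothetical classifier */
	return rc;
}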
 
 /**
- *	sata_std_hardreset - reset host port via SATA phy reset
+ *	sata_std_hardreset - COMRESET w/o waiting or classification
  *	@link: link to reset
  *	@class: resulting class of attached device
  *	@deadline: deadline jiffies for the operation
  *
- *	SATA phy-reset host port using DET bits of SControl register,
- *	wait for !BSY and classify the attached device.
+ *	Standard SATA COMRESET w/o waiting or classification.
  *
  *	LOCKING:
  *	Kernel thread context (may sleep)
  *
  *	RETURNS:
- *	0 on success, -errno otherwise.
+ *	0 if link offline, -EAGAIN if link online, -errno on errors.
  */
 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
 		       unsigned long deadline)
 {
-	struct ata_port *ap = link->ap;
 	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
+	bool online;
 	int rc;
 
-	DPRINTK("ENTER\n");
-
 	/* do hardreset */
-	rc = sata_link_hardreset(link, timing, deadline);
-	if (rc) {
-		ata_link_printk(link, KERN_ERR,
-				"COMRESET failed (errno=%d)\n", rc);
-		return rc;
-	}
-
-	/* TODO: phy layer with polling, timeouts, etc. */
-	if (ata_link_offline(link)) {
-		*class = ATA_DEV_NONE;
-		DPRINTK("EXIT, link offline\n");
-		return 0;
-	}
-
-	/* wait a while before checking status */
-	ata_wait_after_reset(ap, deadline);
-
-	/* If PMP is supported, we have to do follow-up SRST.  Note
-	 * that some PMPs don't send D2H Reg FIS after hardreset at
-	 * all if the first port is empty.  Wait for it just for a
-	 * second and request follow-up SRST.
-	 */
-	if (ap->flags & ATA_FLAG_PMP) {
-		ata_wait_ready(ap, jiffies + HZ);
-		return -EAGAIN;
-	}
-
-	rc = ata_wait_ready(ap, deadline);
-	/* link occupied, -ENODEV too is an error */
-	if (rc) {
-		ata_link_printk(link, KERN_ERR,
-				"COMRESET failed (errno=%d)\n", rc);
-		return rc;
-	}
-
-	ap->ops->dev_select(ap, 0);	/* probably unnecessary */
-
-	*class = ata_dev_try_classify(link->device, 1, NULL);
-
-	DPRINTK("EXIT, class=%u\n", *class);
-	return 0;
+	rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
+	return online ? -EAGAIN : rc;
 }
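/* A caller sketch for the new return convention: 0 means the link is
 * offline, -EAGAIN means it came up and needs follow-up (e.g. SRST for
 * classification).  my_softreset() is the hypothetical helper sketched
 * earlier.
 */
static int my_reset_flow(struct ata_link *link, unsigned int *class,
			 unsigned long deadline)
{
	int rc = sata_std_hardreset(link, class, deadline);

	if (rc == -EAGAIN)	/* link online: classify via softreset */
		return my_softreset(link, class, deadline);
	return rc;
}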
 
 /**
@@ -4190,35 +3701,11 @@
  */
 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
 {
-	struct ata_port *ap = link->ap;
-	u32 serror;
-
 	DPRINTK("ENTER\n");
 
 	/* print link status */
 	sata_print_link_status(link);
 
-	/* clear SError */
-	if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
-		sata_scr_write(link, SCR_ERROR, serror);
-	link->eh_info.serror = 0;
-
-	/* is double-select really necessary? */
-	if (classes[0] != ATA_DEV_NONE)
-		ap->ops->dev_select(ap, 1);
-	if (classes[1] != ATA_DEV_NONE)
-		ap->ops->dev_select(ap, 0);
-
-	/* bail out if no device is present */
-	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
-		DPRINTK("EXIT, no device\n");
-		return;
-	}
-
-	/* set up device control */
-	if (ap->ioaddr.ctl_addr)
-		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
-
 	DPRINTK("EXIT\n");
 }
 
@@ -4528,6 +4015,53 @@
 }
 
 /**
+ *	cable_is_40wire		-	40/80/SATA decider
+ *	@ap: port to consider
+ *
+ *	This function encapsulates the policy for speed management
+ *	in one place. At the moment we don't cache the result but
+ *	there is a good case for setting ap->cbl to the result when
+ *	we are called with unknown cables (and figuring out if it
+ *	impacts hotplug at all).
+ *
+ *	Return 1 if the cable appears to be 40 wire.
+ */
+
+static int cable_is_40wire(struct ata_port *ap)
+{
+	struct ata_link *link;
+	struct ata_device *dev;
+
+	/* If the controller thinks we are 40 wire, we are */
+	if (ap->cbl == ATA_CBL_PATA40)
+		return 1;
+	/* If the controller thinks we are 80 wire, we are */
+	if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
+		return 0;
+	/* If the system is known to use a 40 wire short cable (e.g. a
+	   laptop), then we allow 80 wire modes even if the drive isn't
+	   sure */
+	if (ap->cbl == ATA_CBL_PATA40_SHORT)
+		return 0;
+	/* If the controller doesn't know, we scan the drives.
+
+	   Note: we look for all 40 wire detects at this point.  Any
+	   80 wire detect is taken to mean an 80 wire cable, because
+	   - in many setups only the one drive (slave if present)
+	     will give a valid detect
+	   - a drive that is not detect capable should not colour
+	     the choice
+	*/
+	ata_port_for_each_link(link, ap) {
+		ata_link_for_each_dev(dev, link) {
+			if (!ata_is_40wire(dev))
+				return 0;
+		}
+	}
+	return 1;
+}
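/* A sketch of using the helper above from a hypothetical mode filter,
 * mirroring the ata_dev_xfermask() change below: a 40 wire verdict
 * caps transfer modes at UDMA/33.
 */
static unsigned long my_mode_filter(struct ata_device *dev,
				    unsigned long xfer_mask)
{
	if (cable_is_40wire(dev->link->ap))
		xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);	/* drop UDMA > 2 */
	return xfer_mask;
}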
+
+/**
  *	ata_dev_xfermask - Compute supported xfermask of the given device
  *	@dev: Device to compute xfermask for
  *
@@ -4595,10 +4129,7 @@
 	 */
 	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
 		/* UDMA/44 or higher would be available */
-		if ((ap->cbl == ATA_CBL_PATA40) ||
-		    (ata_is_40wire(dev) &&
-		    (ap->cbl == ATA_CBL_PATA_UNK ||
-		     ap->cbl == ATA_CBL_PATA80))) {
+		if (cable_is_40wire(ap)) {
 			ata_dev_printk(dev, KERN_WARNING,
 				 "limited to UDMA/33 due to 40-wire cable\n");
 			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
@@ -4759,112 +4290,6 @@
 }
 
 /**
- *	ata_fill_sg - Fill PCI IDE PRD table
- *	@qc: Metadata associated with taskfile to be transferred
- *
- *	Fill PCI IDE PRD (scatter-gather) table with segments
- *	associated with the current disk command.
- *
- *	LOCKING:
- *	spin_lock_irqsave(host lock)
- *
- */
-static void ata_fill_sg(struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
-	struct scatterlist *sg;
-	unsigned int si, pi;
-
-	pi = 0;
-	for_each_sg(qc->sg, sg, qc->n_elem, si) {
-		u32 addr, offset;
-		u32 sg_len, len;
-
-		/* determine if physical DMA addr spans 64K boundary.
-		 * Note h/w doesn't support 64-bit, so we unconditionally
-		 * truncate dma_addr_t to u32.
-		 */
-		addr = (u32) sg_dma_address(sg);
-		sg_len = sg_dma_len(sg);
-
-		while (sg_len) {
-			offset = addr & 0xffff;
-			len = sg_len;
-			if ((offset + sg_len) > 0x10000)
-				len = 0x10000 - offset;
-
-			ap->prd[pi].addr = cpu_to_le32(addr);
-			ap->prd[pi].flags_len = cpu_to_le32(len & 0xffff);
-			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
-
-			pi++;
-			sg_len -= len;
-			addr += len;
-		}
-	}
-
-	ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
-}
-
-/**
- *	ata_fill_sg_dumb - Fill PCI IDE PRD table
- *	@qc: Metadata associated with taskfile to be transferred
- *
- *	Fill PCI IDE PRD (scatter-gather) table with segments
- *	associated with the current disk command. Perform the fill
- *	so that we avoid writing any length 64K records for
- *	controllers that don't follow the spec.
- *
- *	LOCKING:
- *	spin_lock_irqsave(host lock)
- *
- */
-static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
-	struct scatterlist *sg;
-	unsigned int si, pi;
-
-	pi = 0;
-	for_each_sg(qc->sg, sg, qc->n_elem, si) {
-		u32 addr, offset;
-		u32 sg_len, len, blen;
-
-		/* determine if physical DMA addr spans 64K boundary.
-		 * Note h/w doesn't support 64-bit, so we unconditionally
-		 * truncate dma_addr_t to u32.
-		 */
-		addr = (u32) sg_dma_address(sg);
-		sg_len = sg_dma_len(sg);
-
-		while (sg_len) {
-			offset = addr & 0xffff;
-			len = sg_len;
-			if ((offset + sg_len) > 0x10000)
-				len = 0x10000 - offset;
-
-			blen = len & 0xffff;
-			ap->prd[pi].addr = cpu_to_le32(addr);
-			if (blen == 0) {
-			   /* Some PATA chipsets like the CS5530 can't
-			      cope with 0x0000 meaning 64K as the spec says */
-				ap->prd[pi].flags_len = cpu_to_le32(0x8000);
-				blen = 0x8000;
-				ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000);
-			}
-			ap->prd[pi].flags_len = cpu_to_le32(blen);
-			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
-
-			pi++;
-			sg_len -= len;
-			addr += len;
-		}
-	}
-
-	ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
-}
-
-/**
  *	ata_check_atapi_dma - Check whether ATAPI DMA can be supported
  *	@qc: Metadata associated with taskfile to check
  *
@@ -4924,40 +4349,6 @@
 	return ATA_DEFER_LINK;
 }
 
-/**
- *	ata_qc_prep - Prepare taskfile for submission
- *	@qc: Metadata associated with taskfile to be prepared
- *
- *	Prepare ATA taskfile for submission.
- *
- *	LOCKING:
- *	spin_lock_irqsave(host lock)
- */
-void ata_qc_prep(struct ata_queued_cmd *qc)
-{
-	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
-		return;
-
-	ata_fill_sg(qc);
-}
-
-/**
- *	ata_dumb_qc_prep - Prepare taskfile for submission
- *	@qc: Metadata associated with taskfile to be prepared
- *
- *	Prepare ATA taskfile for submission.
- *
- *	LOCKING:
- *	spin_lock_irqsave(host lock)
- */
-void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
-{
-	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
-		return;
-
-	ata_fill_sg_dumb(qc);
-}
-
 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
 
 /**
@@ -5036,698 +4427,6 @@
 }
 
 /**
- *	ata_data_xfer - Transfer data by PIO
- *	@dev: device to target
- *	@buf: data buffer
- *	@buflen: buffer length
- *	@rw: read/write
- *
- *	Transfer data from/to the device data register by PIO.
- *
- *	LOCKING:
- *	Inherited from caller.
- *
- *	RETURNS:
- *	Bytes consumed.
- */
-unsigned int ata_data_xfer(struct ata_device *dev, unsigned char *buf,
-			   unsigned int buflen, int rw)
-{
-	struct ata_port *ap = dev->link->ap;
-	void __iomem *data_addr = ap->ioaddr.data_addr;
-	unsigned int words = buflen >> 1;
-
-	/* Transfer multiple of 2 bytes */
-	if (rw == READ)
-		ioread16_rep(data_addr, buf, words);
-	else
-		iowrite16_rep(data_addr, buf, words);
-
-	/* Transfer trailing 1 byte, if any. */
-	if (unlikely(buflen & 0x01)) {
-		__le16 align_buf[1] = { 0 };
-		unsigned char *trailing_buf = buf + buflen - 1;
-
-		if (rw == READ) {
-			align_buf[0] = cpu_to_le16(ioread16(data_addr));
-			memcpy(trailing_buf, align_buf, 1);
-		} else {
-			memcpy(align_buf, trailing_buf, 1);
-			iowrite16(le16_to_cpu(align_buf[0]), data_addr);
-		}
-		words++;
-	}
-
-	return words << 1;
-}
-
-/**
- *	ata_data_xfer_noirq - Transfer data by PIO
- *	@dev: device to target
- *	@buf: data buffer
- *	@buflen: buffer length
- *	@rw: read/write
- *
- *	Transfer data from/to the device data register by PIO. Do the
- *	transfer with interrupts disabled.
- *
- *	LOCKING:
- *	Inherited from caller.
- *
- *	RETURNS:
- *	Bytes consumed.
- */
-unsigned int ata_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
-				 unsigned int buflen, int rw)
-{
-	unsigned long flags;
-	unsigned int consumed;
-
-	local_irq_save(flags);
-	consumed = ata_data_xfer(dev, buf, buflen, rw);
-	local_irq_restore(flags);
-
-	return consumed;
-}
-
-
-/**
- *	ata_pio_sector - Transfer a sector of data.
- *	@qc: Command on going
- *
- *	Transfer qc->sect_size bytes of data from/to the ATA device.
- *
- *	LOCKING:
- *	Inherited from caller.
- */
-
-static void ata_pio_sector(struct ata_queued_cmd *qc)
-{
-	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
-	struct ata_port *ap = qc->ap;
-	struct page *page;
-	unsigned int offset;
-	unsigned char *buf;
-
-	if (qc->curbytes == qc->nbytes - qc->sect_size)
-		ap->hsm_task_state = HSM_ST_LAST;
-
-	page = sg_page(qc->cursg);
-	offset = qc->cursg->offset + qc->cursg_ofs;
-
-	/* get the current page and offset */
-	page = nth_page(page, (offset >> PAGE_SHIFT));
-	offset %= PAGE_SIZE;
-
-	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
-
-	if (PageHighMem(page)) {
-		unsigned long flags;
-
-		/* FIXME: use a bounce buffer */
-		local_irq_save(flags);
-		buf = kmap_atomic(page, KM_IRQ0);
-
-		/* do the actual data transfer */
-		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
-
-		kunmap_atomic(buf, KM_IRQ0);
-		local_irq_restore(flags);
-	} else {
-		buf = page_address(page);
-		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
-	}
-
-	qc->curbytes += qc->sect_size;
-	qc->cursg_ofs += qc->sect_size;
-
-	if (qc->cursg_ofs == qc->cursg->length) {
-		qc->cursg = sg_next(qc->cursg);
-		qc->cursg_ofs = 0;
-	}
-}
-
-/**
- *	ata_pio_sectors - Transfer one or many sectors.
- *	@qc: Command on going
- *
- *	Transfer one or many sectors of data from/to the
- *	ATA device for the DRQ request.
- *
- *	LOCKING:
- *	Inherited from caller.
- */
-
-static void ata_pio_sectors(struct ata_queued_cmd *qc)
-{
-	if (is_multi_taskfile(&qc->tf)) {
-		/* READ/WRITE MULTIPLE */
-		unsigned int nsect;
-
-		WARN_ON(qc->dev->multi_count == 0);
-
-		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
-			    qc->dev->multi_count);
-		while (nsect--)
-			ata_pio_sector(qc);
-	} else
-		ata_pio_sector(qc);
-
-	ata_altstatus(qc->ap); /* flush */
-}
-
-/**
- *	atapi_send_cdb - Write CDB bytes to hardware
- *	@ap: Port to which ATAPI device is attached.
- *	@qc: Taskfile currently active
- *
- *	When device has indicated its readiness to accept
- *	a CDB, this function is called.  Send the CDB.
- *
- *	LOCKING:
- *	caller.
- */
-
-static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
-{
-	/* send SCSI cdb */
-	DPRINTK("send cdb\n");
-	WARN_ON(qc->dev->cdb_len < 12);
-
-	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
-	ata_altstatus(ap); /* flush */
-
-	switch (qc->tf.protocol) {
-	case ATAPI_PROT_PIO:
-		ap->hsm_task_state = HSM_ST;
-		break;
-	case ATAPI_PROT_NODATA:
-		ap->hsm_task_state = HSM_ST_LAST;
-		break;
-	case ATAPI_PROT_DMA:
-		ap->hsm_task_state = HSM_ST_LAST;
-		/* initiate bmdma */
-		ap->ops->bmdma_start(qc);
-		break;
-	}
-}
-
-/**
- *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
- *	@qc: Command on going
- *	@bytes: number of bytes
- *
- *	Transfer data from/to the ATAPI device.
- *
- *	LOCKING:
- *	Inherited from caller.
- *
- */
-static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
-{
-	int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
-	struct ata_port *ap = qc->ap;
-	struct ata_device *dev = qc->dev;
-	struct ata_eh_info *ehi = &dev->link->eh_info;
-	struct scatterlist *sg;
-	struct page *page;
-	unsigned char *buf;
-	unsigned int offset, count, consumed;
-
-next_sg:
-	sg = qc->cursg;
-	if (unlikely(!sg)) {
-		ata_ehi_push_desc(ehi, "unexpected or too much trailing data "
-				  "buf=%u cur=%u bytes=%u",
-				  qc->nbytes, qc->curbytes, bytes);
-		return -1;
-	}
-
-	page = sg_page(sg);
-	offset = sg->offset + qc->cursg_ofs;
-
-	/* get the current page and offset */
-	page = nth_page(page, (offset >> PAGE_SHIFT));
-	offset %= PAGE_SIZE;
-
-	/* don't overrun current sg */
-	count = min(sg->length - qc->cursg_ofs, bytes);
-
-	/* don't cross page boundaries */
-	count = min(count, (unsigned int)PAGE_SIZE - offset);
-
-	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
-
-	if (PageHighMem(page)) {
-		unsigned long flags;
-
-		/* FIXME: use bounce buffer */
-		local_irq_save(flags);
-		buf = kmap_atomic(page, KM_IRQ0);
-
-		/* do the actual data transfer */
-		consumed = ap->ops->data_xfer(dev,  buf + offset, count, rw);
-
-		kunmap_atomic(buf, KM_IRQ0);
-		local_irq_restore(flags);
-	} else {
-		buf = page_address(page);
-		consumed = ap->ops->data_xfer(dev,  buf + offset, count, rw);
-	}
-
-	bytes -= min(bytes, consumed);
-	qc->curbytes += count;
-	qc->cursg_ofs += count;
-
-	if (qc->cursg_ofs == sg->length) {
-		qc->cursg = sg_next(qc->cursg);
-		qc->cursg_ofs = 0;
-	}
-
-	/* consumed can be larger than count only for the last transfer */
-	WARN_ON(qc->cursg && count != consumed);
-
-	if (bytes)
-		goto next_sg;
-	return 0;
-}
-
-/**
- *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
- *	@qc: Command on going
- *
- *	Transfer data from/to the ATAPI device.
- *
- *	LOCKING:
- *	Inherited from caller.
- */
-
-static void atapi_pio_bytes(struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
-	struct ata_device *dev = qc->dev;
-	struct ata_eh_info *ehi = &dev->link->eh_info;
-	unsigned int ireason, bc_lo, bc_hi, bytes;
-	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
-
-	/* Abuse qc->result_tf for temp storage of intermediate TF
-	 * here to save some kernel stack usage.
-	 * For normal completion, qc->result_tf is not relevant. For
-	 * error, qc->result_tf is later overwritten by ata_qc_complete().
-	 * So, the correctness of qc->result_tf is not affected.
-	 */
-	ap->ops->tf_read(ap, &qc->result_tf);
-	ireason = qc->result_tf.nsect;
-	bc_lo = qc->result_tf.lbam;
-	bc_hi = qc->result_tf.lbah;
-	bytes = (bc_hi << 8) | bc_lo;
-
-	/* shall be cleared to zero, indicating xfer of data */
-	if (unlikely(ireason & (1 << 0)))
-		goto atapi_check;
-
-	/* make sure transfer direction matches expected */
-	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
-	if (unlikely(do_write != i_write))
-		goto atapi_check;
-
-	if (unlikely(!bytes))
-		goto atapi_check;
-
-	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
-
-	if (unlikely(__atapi_pio_bytes(qc, bytes)))
-		goto err_out;
-	ata_altstatus(ap); /* flush */
-
-	return;
-
- atapi_check:
-	ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)",
-			  ireason, bytes);
- err_out:
-	qc->err_mask |= AC_ERR_HSM;
-	ap->hsm_task_state = HSM_ST_ERR;
-}
-
-/**
- *	ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
- *	@ap: the target ata_port
- *	@qc: qc on going
- *
- *	RETURNS:
- *	1 if ok in workqueue, 0 otherwise.
- */
-
-static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
-{
-	if (qc->tf.flags & ATA_TFLAG_POLLING)
-		return 1;
-
-	if (ap->hsm_task_state == HSM_ST_FIRST) {
-		if (qc->tf.protocol == ATA_PROT_PIO &&
-		    (qc->tf.flags & ATA_TFLAG_WRITE))
-		    return 1;
-
-		if (ata_is_atapi(qc->tf.protocol) &&
-		    !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
-			return 1;
-	}
-
-	return 0;
-}
-
-/**
- *	ata_hsm_qc_complete - finish a qc running on standard HSM
- *	@qc: Command to complete
- *	@in_wq: 1 if called from workqueue, 0 otherwise
- *
- *	Finish @qc which is running on standard HSM.
- *
- *	LOCKING:
- *	If @in_wq is zero, spin_lock_irqsave(host lock).
- *	Otherwise, none on entry and grabs host lock.
- */
-static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
-{
-	struct ata_port *ap = qc->ap;
-	unsigned long flags;
-
-	if (ap->ops->error_handler) {
-		if (in_wq) {
-			spin_lock_irqsave(ap->lock, flags);
-
-			/* EH might have kicked in while host lock is
-			 * released.
-			 */
-			qc = ata_qc_from_tag(ap, qc->tag);
-			if (qc) {
-				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
-					ap->ops->irq_on(ap);
-					ata_qc_complete(qc);
-				} else
-					ata_port_freeze(ap);
-			}
-
-			spin_unlock_irqrestore(ap->lock, flags);
-		} else {
-			if (likely(!(qc->err_mask & AC_ERR_HSM)))
-				ata_qc_complete(qc);
-			else
-				ata_port_freeze(ap);
-		}
-	} else {
-		if (in_wq) {
-			spin_lock_irqsave(ap->lock, flags);
-			ap->ops->irq_on(ap);
-			ata_qc_complete(qc);
-			spin_unlock_irqrestore(ap->lock, flags);
-		} else
-			ata_qc_complete(qc);
-	}
-}
-
-/**
- *	ata_hsm_move - move the HSM to the next state.
- *	@ap: the target ata_port
- *	@qc: qc on going
- *	@status: current device status
- *	@in_wq: 1 if called from workqueue, 0 otherwise
- *
- *	RETURNS:
- *	1 when poll next status needed, 0 otherwise.
- */
-int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
-		 u8 status, int in_wq)
-{
-	unsigned long flags = 0;
-	int poll_next;
-
-	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
-
-	/* Make sure ata_qc_issue_prot() does not throw things
-	 * like DMA polling into the workqueue. Notice that
-	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
-	 */
-	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
-
-fsm_start:
-	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
-		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
-
-	switch (ap->hsm_task_state) {
-	case HSM_ST_FIRST:
-		/* Send first data block or PACKET CDB */
-
-		/* If polling, we will stay in the work queue after
-		 * sending the data. Otherwise, interrupt handler
-		 * takes over after sending the data.
-		 */
-		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
-
-		/* check device status */
-		if (unlikely((status & ATA_DRQ) == 0)) {
-			/* handle BSY=0, DRQ=0 as error */
-			if (likely(status & (ATA_ERR | ATA_DF)))
-				/* device stops HSM for abort/error */
-				qc->err_mask |= AC_ERR_DEV;
-			else
-				/* HSM violation. Let EH handle this */
-				qc->err_mask |= AC_ERR_HSM;
-
-			ap->hsm_task_state = HSM_ST_ERR;
-			goto fsm_start;
-		}
-
-		/* Device should not ask for data transfer (DRQ=1)
-		 * when it finds something wrong.
-		 * We ignore DRQ here and stop the HSM by
-		 * changing hsm_task_state to HSM_ST_ERR and
-		 * let the EH abort the command or reset the device.
-		 */
-		if (unlikely(status & (ATA_ERR | ATA_DF))) {
-			/* Some ATAPI tape drives forget to clear the ERR bit
-			 * when doing the next command (mostly request sense).
-			 * We ignore ERR here to workaround and proceed sending
-			 * the CDB.
-			 */
-			if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
-				ata_port_printk(ap, KERN_WARNING,
-						"DRQ=1 with device error, "
-						"dev_stat 0x%X\n", status);
-				qc->err_mask |= AC_ERR_HSM;
-				ap->hsm_task_state = HSM_ST_ERR;
-				goto fsm_start;
-			}
-		}
-
-		/* Send the CDB (atapi) or the first data block (ata pio out).
-		 * During the state transition, interrupt handler shouldn't
-		 * be invoked before the data transfer is complete and
-		 * hsm_task_state is changed. Hence, the following locking.
-		 */
-		if (in_wq)
-			spin_lock_irqsave(ap->lock, flags);
-
-		if (qc->tf.protocol == ATA_PROT_PIO) {
-			/* PIO data out protocol.
-			 * send first data block.
-			 */
-
-			/* ata_pio_sectors() might change the state
-			 * to HSM_ST_LAST. so, the state is changed here
-			 * before ata_pio_sectors().
-			 */
-			ap->hsm_task_state = HSM_ST;
-			ata_pio_sectors(qc);
-		} else
-			/* send CDB */
-			atapi_send_cdb(ap, qc);
-
-		if (in_wq)
-			spin_unlock_irqrestore(ap->lock, flags);
-
-		/* if polling, ata_pio_task() handles the rest.
-		 * otherwise, interrupt handler takes over from here.
-		 */
-		break;
-
-	case HSM_ST:
-		/* complete command or read/write the data register */
-		if (qc->tf.protocol == ATAPI_PROT_PIO) {
-			/* ATAPI PIO protocol */
-			if ((status & ATA_DRQ) == 0) {
-				/* No more data to transfer or device error.
-				 * Device error will be tagged in HSM_ST_LAST.
-				 */
-				ap->hsm_task_state = HSM_ST_LAST;
-				goto fsm_start;
-			}
-
-			/* Device should not ask for data transfer (DRQ=1)
-			 * when it finds something wrong.
-			 * We ignore DRQ here and stop the HSM by
-			 * changing hsm_task_state to HSM_ST_ERR and
-			 * let the EH abort the command or reset the device.
-			 */
-			if (unlikely(status & (ATA_ERR | ATA_DF))) {
-				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
-						"device error, dev_stat 0x%X\n",
-						status);
-				qc->err_mask |= AC_ERR_HSM;
-				ap->hsm_task_state = HSM_ST_ERR;
-				goto fsm_start;
-			}
-
-			atapi_pio_bytes(qc);
-
-			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
-				/* bad ireason reported by device */
-				goto fsm_start;
-
-		} else {
-			/* ATA PIO protocol */
-			if (unlikely((status & ATA_DRQ) == 0)) {
-				/* handle BSY=0, DRQ=0 as error */
-				if (likely(status & (ATA_ERR | ATA_DF)))
-					/* device stops HSM for abort/error */
-					qc->err_mask |= AC_ERR_DEV;
-				else
-					/* HSM violation. Let EH handle this.
-					 * Phantom devices also trigger this
-					 * condition.  Mark hint.
-					 */
-					qc->err_mask |= AC_ERR_HSM |
-							AC_ERR_NODEV_HINT;
-
-				ap->hsm_task_state = HSM_ST_ERR;
-				goto fsm_start;
-			}
-
-			/* For PIO reads, some devices may ask for
-			 * data transfer (DRQ=1) along with ERR=1.
-			 * We respect DRQ here and transfer one
-			 * block of junk data before changing the
-			 * hsm_task_state to HSM_ST_ERR.
-			 *
-			 * For PIO writes, ERR=1 DRQ=1 doesn't make
-			 * sense since the data block has been
-			 * transferred to the device.
-			 */
-			if (unlikely(status & (ATA_ERR | ATA_DF))) {
-				/* data might be corrupted */
-				qc->err_mask |= AC_ERR_DEV;
-
-				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
-					ata_pio_sectors(qc);
-					status = ata_wait_idle(ap);
-				}
-
-				if (status & (ATA_BUSY | ATA_DRQ))
-					qc->err_mask |= AC_ERR_HSM;
-
-				/* ata_pio_sectors() might change the
-				 * state to HSM_ST_LAST. so, the state
-				 * is changed after ata_pio_sectors().
-				 */
-				ap->hsm_task_state = HSM_ST_ERR;
-				goto fsm_start;
-			}
-
-			ata_pio_sectors(qc);
-
-			if (ap->hsm_task_state == HSM_ST_LAST &&
-			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
-				/* all data read */
-				status = ata_wait_idle(ap);
-				goto fsm_start;
-			}
-		}
-
-		poll_next = 1;
-		break;
-
-	case HSM_ST_LAST:
-		if (unlikely(!ata_ok(status))) {
-			qc->err_mask |= __ac_err_mask(status);
-			ap->hsm_task_state = HSM_ST_ERR;
-			goto fsm_start;
-		}
-
-		/* no more data to transfer */
-		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
-			ap->print_id, qc->dev->devno, status);
-
-		WARN_ON(qc->err_mask);
-
-		ap->hsm_task_state = HSM_ST_IDLE;
-
-		/* complete taskfile transaction */
-		ata_hsm_qc_complete(qc, in_wq);
-
-		poll_next = 0;
-		break;
-
-	case HSM_ST_ERR:
-		/* make sure qc->err_mask is available to
-		 * know what's wrong and recover
-		 */
-		WARN_ON(qc->err_mask == 0);
-
-		ap->hsm_task_state = HSM_ST_IDLE;
-
-		/* complete taskfile transaction */
-		ata_hsm_qc_complete(qc, in_wq);
-
-		poll_next = 0;
-		break;
-	default:
-		poll_next = 0;
-		BUG();
-	}
-
-	return poll_next;
-}
-
-static void ata_pio_task(struct work_struct *work)
-{
-	struct ata_port *ap =
-		container_of(work, struct ata_port, port_task.work);
-	struct ata_queued_cmd *qc = ap->port_task_data;
-	u8 status;
-	int poll_next;
-
-fsm_start:
-	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
-
-	/*
-	 * This is purely heuristic.  This is a fast path.
-	 * Sometimes when we enter, BSY will be cleared in
-	 * a chk-status or two.  If not, the drive is probably seeking
-	 * or something.  Snooze for a couple msecs, then
-	 * chk-status again.  If still busy, queue delayed work.
-	 */
-	status = ata_busy_wait(ap, ATA_BUSY, 5);
-	if (status & ATA_BUSY) {
-		msleep(2);
-		status = ata_busy_wait(ap, ATA_BUSY, 10);
-		if (status & ATA_BUSY) {
-			ata_pio_queue_task(ap, qc, ATA_SHORT_PAUSE);
-			return;
-		}
-	}
-
-	/* move the HSM */
-	poll_next = ata_hsm_move(ap, qc, status, 1);
-
-	/* another command or interrupt handler
-	 * may be running at this point.
-	 */
-	if (poll_next)
-		goto fsm_start;
-}
-
-/**
  *	ata_qc_new - Request an available ATA command, for queueing
  *	@ap: Port associated with device @dev
  *	@dev: Device from whom we request an available command structure
@@ -5850,7 +4549,7 @@
 	struct ata_port *ap = qc->ap;
 
 	qc->result_tf.flags = qc->tf.flags;
-	ap->ops->tf_read(ap, &qc->result_tf);
+	ap->ops->qc_fill_rtf(qc);
 }
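/* A sketch of a taskfile-based ->qc_fill_rtf() for the new hook used
 * above.  The bool return (true when result_tf was filled) and
 * my_tf_read() are assumptions, not confirmed by this diff.
 */
static bool my_qc_fill_rtf(struct ata_queued_cmd *qc)
{
	my_tf_read(qc->ap, &qc->result_tf);	/* hypothetical TF read */
	return true;
}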
 
 static void ata_verify_xfer(struct ata_queued_cmd *qc)
@@ -5960,7 +4659,6 @@
  *	ata_qc_complete_multiple - Complete multiple qcs successfully
  *	@ap: port in question
  *	@qc_active: new qc_active mask
- *	@finish_qc: LLDD callback invoked before completing a qc
  *
  *	Complete in-flight commands.  This function is meant to be
  *	called from a low-level driver's interrupt routine to complete
@@ -5973,8 +4671,7 @@
  *	RETURNS:
  *	Number of completed commands on success, -errno otherwise.
  */
-int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
-			     void (*finish_qc)(struct ata_queued_cmd *))
+int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
 {
 	int nr_done = 0;
 	u32 done_mask;
@@ -5995,8 +4692,6 @@
 			continue;
 
 		if ((qc = ata_qc_from_tag(ap, i))) {
-			if (finish_qc)
-				finish_qc(qc);
 			ata_qc_complete(qc);
 			nr_done++;
 		}
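/* A caller sketch for the simplified signature: with @finish_qc gone,
 * an LLD interrupt path just reports the hardware's active-command
 * mask.  my_read_active_mask() is hypothetical.
 */
static void my_host_intr(struct ata_port *ap)
{
	u32 qc_active = my_read_active_mask(ap);	/* hypothetical */

	ata_qc_complete_multiple(ap, qc_active);
}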
@@ -6055,9 +4750,9 @@
 		if (ata_sg_setup(qc))
 			goto sg_err;
 
-	/* if device is sleeping, schedule softreset and abort the link */
+	/* if device is sleeping, schedule reset and abort the link */
 	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
-		link->eh_info.action |= ATA_EH_SOFTRESET;
+		link->eh_info.action |= ATA_EH_RESET;
 		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
 		ata_link_abort(link);
 		return;
@@ -6077,285 +4772,6 @@
 }
 
 /**
- *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
- *	@qc: command to issue to device
- *
- *	Using various libata functions and hooks, this function
- *	starts an ATA command.  ATA commands are grouped into
- *	classes called "protocols", and issuing each type of protocol
- *	is slightly different.
- *
- *	May be used as the qc_issue() entry in ata_port_operations.
- *
- *	LOCKING:
- *	spin_lock_irqsave(host lock)
- *
- *	RETURNS:
- *	Zero on success, AC_ERR_* mask on failure
- */
-
-unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
-
-	/* Use polling pio if the LLD doesn't handle
-	 * interrupt driven pio and atapi CDB interrupt.
-	 */
-	if (ap->flags & ATA_FLAG_PIO_POLLING) {
-		switch (qc->tf.protocol) {
-		case ATA_PROT_PIO:
-		case ATA_PROT_NODATA:
-		case ATAPI_PROT_PIO:
-		case ATAPI_PROT_NODATA:
-			qc->tf.flags |= ATA_TFLAG_POLLING;
-			break;
-		case ATAPI_PROT_DMA:
-			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
-				/* see ata_dma_blacklisted() */
-				BUG();
-			break;
-		default:
-			break;
-		}
-	}
-
-	/* select the device */
-	ata_dev_select(ap, qc->dev->devno, 1, 0);
-
-	/* start the command */
-	switch (qc->tf.protocol) {
-	case ATA_PROT_NODATA:
-		if (qc->tf.flags & ATA_TFLAG_POLLING)
-			ata_qc_set_polling(qc);
-
-		ata_tf_to_host(ap, &qc->tf);
-		ap->hsm_task_state = HSM_ST_LAST;
-
-		if (qc->tf.flags & ATA_TFLAG_POLLING)
-			ata_pio_queue_task(ap, qc, 0);
-
-		break;
-
-	case ATA_PROT_DMA:
-		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
-
-		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
-		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
-		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
-		ap->hsm_task_state = HSM_ST_LAST;
-		break;
-
-	case ATA_PROT_PIO:
-		if (qc->tf.flags & ATA_TFLAG_POLLING)
-			ata_qc_set_polling(qc);
-
-		ata_tf_to_host(ap, &qc->tf);
-
-		if (qc->tf.flags & ATA_TFLAG_WRITE) {
-			/* PIO data out protocol */
-			ap->hsm_task_state = HSM_ST_FIRST;
-			ata_pio_queue_task(ap, qc, 0);
-
-			/* always send first data block using
-			 * the ata_pio_task() codepath.
-			 */
-		} else {
-			/* PIO data in protocol */
-			ap->hsm_task_state = HSM_ST;
-
-			if (qc->tf.flags & ATA_TFLAG_POLLING)
-				ata_pio_queue_task(ap, qc, 0);
-
-			/* if polling, ata_pio_task() handles the rest.
-			 * otherwise, interrupt handler takes over from here.
-			 */
-		}
-
-		break;
-
-	case ATAPI_PROT_PIO:
-	case ATAPI_PROT_NODATA:
-		if (qc->tf.flags & ATA_TFLAG_POLLING)
-			ata_qc_set_polling(qc);
-
-		ata_tf_to_host(ap, &qc->tf);
-
-		ap->hsm_task_state = HSM_ST_FIRST;
-
-		/* send cdb by polling if no cdb interrupt */
-		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
-		    (qc->tf.flags & ATA_TFLAG_POLLING))
-			ata_pio_queue_task(ap, qc, 0);
-		break;
-
-	case ATAPI_PROT_DMA:
-		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
-
-		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
-		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
-		ap->hsm_task_state = HSM_ST_FIRST;
-
-		/* send cdb by polling if no cdb interrupt */
-		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
-			ata_pio_queue_task(ap, qc, 0);
-		break;
-
-	default:
-		WARN_ON(1);
-		return AC_ERR_SYSTEM;
-	}
-
-	return 0;
-}
-
-/**
- *	ata_host_intr - Handle host interrupt for given (port, task)
- *	@ap: Port on which interrupt arrived (possibly...)
- *	@qc: Taskfile currently active in engine
- *
- *	Handle host interrupt for given queued command.  Currently,
- *	only DMA interrupts are handled.  All other commands are
- *	handled via polling with interrupts disabled (nIEN bit).
- *
- *	LOCKING:
- *	spin_lock_irqsave(host lock)
- *
- *	RETURNS:
- *	One if interrupt was handled, zero if not (shared irq).
- */
-
-inline unsigned int ata_host_intr(struct ata_port *ap,
-				  struct ata_queued_cmd *qc)
-{
-	struct ata_eh_info *ehi = &ap->link.eh_info;
-	u8 status, host_stat = 0;
-
-	VPRINTK("ata%u: protocol %d task_state %d\n",
-		ap->print_id, qc->tf.protocol, ap->hsm_task_state);
-
-	/* Check whether we are expecting interrupt in this state */
-	switch (ap->hsm_task_state) {
-	case HSM_ST_FIRST:
-		/* Some pre-ATAPI-4 devices assert INTRQ
-		 * at this state when ready to receive CDB.
-		 */
-
-		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
-		 * The flag was turned on only for atapi devices.  No
-		 * need to check ata_is_atapi(qc->tf.protocol) again.
-		 */
-		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
-			goto idle_irq;
-		break;
-	case HSM_ST_LAST:
-		if (qc->tf.protocol == ATA_PROT_DMA ||
-		    qc->tf.protocol == ATAPI_PROT_DMA) {
-			/* check status of DMA engine */
-			host_stat = ap->ops->bmdma_status(ap);
-			VPRINTK("ata%u: host_stat 0x%X\n",
-				ap->print_id, host_stat);
-
-			/* if it's not our irq... */
-			if (!(host_stat & ATA_DMA_INTR))
-				goto idle_irq;
-
-			/* before we do anything else, clear DMA-Start bit */
-			ap->ops->bmdma_stop(qc);
-
-			if (unlikely(host_stat & ATA_DMA_ERR)) {
-				/* error when transferring data to/from memory */
-				qc->err_mask |= AC_ERR_HOST_BUS;
-				ap->hsm_task_state = HSM_ST_ERR;
-			}
-		}
-		break;
-	case HSM_ST:
-		break;
-	default:
-		goto idle_irq;
-	}
-
-	/* check altstatus */
-	status = ata_altstatus(ap);
-	if (status & ATA_BUSY)
-		goto idle_irq;
-
-	/* check main status, clearing INTRQ */
-	status = ata_chk_status(ap);
-	if (unlikely(status & ATA_BUSY))
-		goto idle_irq;
-
-	/* ack bmdma irq events */
-	ap->ops->irq_clear(ap);
-
-	ata_hsm_move(ap, qc, status, 0);
-
-	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
-				       qc->tf.protocol == ATAPI_PROT_DMA))
-		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
-
-	return 1;	/* irq handled */
-
-idle_irq:
-	ap->stats.idle_irq++;
-
-#ifdef ATA_IRQ_TRAP
-	if ((ap->stats.idle_irq % 1000) == 0) {
-		ata_chk_status(ap);
-		ap->ops->irq_clear(ap);
-		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
-		return 1;
-	}
-#endif
-	return 0;	/* irq not handled */
-}
-
-/**
- *	ata_interrupt - Default ATA host interrupt handler
- *	@irq: irq line (unused)
- *	@dev_instance: pointer to our ata_host information structure
- *
- *	Default interrupt handler for PCI IDE devices.  Calls
- *	ata_host_intr() for each port that is not disabled.
- *
- *	LOCKING:
- *	Obtains host lock during operation.
- *
- *	RETURNS:
- *	IRQ_NONE or IRQ_HANDLED.
- */
-
-irqreturn_t ata_interrupt(int irq, void *dev_instance)
-{
-	struct ata_host *host = dev_instance;
-	unsigned int i;
-	unsigned int handled = 0;
-	unsigned long flags;
-
-	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
-	spin_lock_irqsave(&host->lock, flags);
-
-	for (i = 0; i < host->n_ports; i++) {
-		struct ata_port *ap;
-
-		ap = host->ports[i];
-		if (ap &&
-		    !(ap->flags & ATA_FLAG_DISABLED)) {
-			struct ata_queued_cmd *qc;
-
-			qc = ata_qc_from_tag(ap, ap->link.active_tag);
-			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
-			    (qc->flags & ATA_QCFLAG_ACTIVE))
-				handled |= ata_host_intr(ap, qc);
-		}
-	}
-
-	spin_unlock_irqrestore(&host->lock, flags);
-
-	return IRQ_RETVAL(handled);
-}
-
-/**
  *	sata_scr_valid - test whether SCRs are accessible
  *	@link: ATA link to test SCR accessibility for
  *
@@ -6513,32 +4929,6 @@
 	return 0;
 }
 
-int ata_flush_cache(struct ata_device *dev)
-{
-	unsigned int err_mask;
-	u8 cmd;
-
-	if (!ata_try_flush_cache(dev))
-		return 0;
-
-	if (dev->flags & ATA_DFLAG_FLUSH_EXT)
-		cmd = ATA_CMD_FLUSH_EXT;
-	else
-		cmd = ATA_CMD_FLUSH;
-
-	/* This is wrong. On a failed flush we get back the LBA of the lost
-	   sector and we should (assuming it wasn't aborted as unknown) issue
-	   a further flush command to continue the writeback until it
-	   does not error */
-	err_mask = ata_do_simple_cmd(dev, cmd);
-	if (err_mask) {
-		ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
-		return -EIO;
-	}
-
-	return 0;
-}
-
 #ifdef CONFIG_PM
 static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
 			       unsigned int action, unsigned int ehi_flags,
@@ -6634,7 +5024,7 @@
  */
 void ata_host_resume(struct ata_host *host)
 {
-	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
+	ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET,
 			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
 	host->dev->power.power_state = PMSG_ON;
 
@@ -6809,7 +5199,9 @@
 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
 #endif
 
+#ifdef CONFIG_ATA_SFF
 	INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
+#endif
 	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
 	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
 	INIT_LIST_HEAD(&ap->eh_done_q);
@@ -6959,8 +5351,6 @@
 
 		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
 			host->ops = pi->port_ops;
-		if (!host->private_data && pi->private_data)
-			host->private_data = pi->private_data;
 	}
 
 	return host;
@@ -6985,6 +5375,56 @@
 }
 
 /**
+ *	ata_finalize_port_ops - finalize ata_port_operations
+ *	@ops: ata_port_operations to finalize
+ *
+ *	An ata_port_operations can inherit from another ops and that
+ *	ops can again inherit from another.  This can go on as many
+ *	times as necessary as long as there is no loop in the
+ *	inheritance chain.
+ *
+ *	Ops tables are finalized when the host is started.  NULL or
+ *	unspecified entries are inherited from the closest ancestor
+ *	which has the method, and the entry is populated with it.
+ *	After finalization, the ops table points directly to all the
+ *	methods; ->inherits is no longer necessary and is cleared.
+ *
+ *	Using ATA_OP_NULL, inheriting ops can force a method to NULL.
+ *
+ *	LOCKING:
+ *	None.
+ */
+static void ata_finalize_port_ops(struct ata_port_operations *ops)
+{
+	static DEFINE_SPINLOCK(lock);
+	const struct ata_port_operations *cur;
+	void **begin = (void **)ops;
+	void **end = (void **)&ops->inherits;
+	void **pp;
+
+	if (!ops || !ops->inherits)
+		return;
+
+	spin_lock(&lock);
+
+	for (cur = ops->inherits; cur; cur = cur->inherits) {
+		void **inherit = (void **)cur;
+
+		for (pp = begin; pp < end; pp++, inherit++)
+			if (!*pp)
+				*pp = *inherit;
+	}
+
+	for (pp = begin; pp < end; pp++)
+		if (IS_ERR(*pp))
+			*pp = NULL;
+
+	ops->inherits = NULL;
+
+	spin_unlock(&lock);
+}
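/* A sketch of the inheritance model this enables: a driver table fills
 * in only what differs and points ->inherits at a base (sata_port_ops
 * is exported further below); ATA_OP_NULL forces a slot to NULL.
 * my_hardreset() is the hypothetical reset sketched earlier.
 */
static struct ata_port_operations my_port_ops = {
	.inherits	= &sata_port_ops,
	.hardreset	= my_hardreset,
	.port_start	= ATA_OP_NULL,	/* explicitly no method */
};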
+
+/**
  *	ata_host_start - start and freeze ports of an ATA host
  *	@host: ATA host to start ports for
  *
@@ -7009,9 +5449,13 @@
 	if (host->flags & ATA_HOST_STARTED)
 		return 0;
 
+	ata_finalize_port_ops(host->ops);
+
 	for (i = 0; i < host->n_ports; i++) {
 		struct ata_port *ap = host->ports[i];
 
+		ata_finalize_port_ops(ap->ops);
+
 		if (!host->ops && !ata_port_is_dummy(ap))
 			host->ops = ap->ops;
 
@@ -7073,7 +5517,7 @@
  */
 /* KILLME - the only user left is ipr */
 void ata_host_init(struct ata_host *host, struct device *dev,
-		   unsigned long flags, const struct ata_port_operations *ops)
+		   unsigned long flags, struct ata_port_operations *ops)
 {
 	spin_lock_init(&host->lock);
 	host->dev = dev;
@@ -7169,9 +5613,8 @@
 			/* kick EH for boot probing */
 			spin_lock_irqsave(ap->lock, flags);
 
-			ehi->probe_mask =
-				(1 << ata_link_max_devices(&ap->link)) - 1;
-			ehi->action |= ATA_EH_SOFTRESET;
+			ehi->probe_mask |= ATA_ALL_DEVICES;
+			ehi->action |= ATA_EH_RESET;
 			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
 
 			ap->pflags &= ~ATA_PFLAG_INITIALIZING;
@@ -7336,33 +5779,6 @@
 	ata_acpi_dissociate(host);
 }
 
-/**
- *	ata_std_ports - initialize ioaddr with standard port offsets.
- *	@ioaddr: IO address structure to be initialized
- *
- *	Utility function which initializes data_addr, error_addr,
- *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
- *	device_addr, status_addr, and command_addr to standard offsets
- *	relative to cmd_addr.
- *
- *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
- */
-
-void ata_std_ports(struct ata_ioports *ioaddr)
-{
-	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
-	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
-	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
-	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
-	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
-	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
-	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
-	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
-	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
-	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
-}
-
-
 #ifdef CONFIG_PCI
 
 /**
@@ -7749,33 +6165,20 @@
 /*
  * Dummy port_ops
  */
-static void ata_dummy_noret(struct ata_port *ap)	{ }
-static int ata_dummy_ret0(struct ata_port *ap)		{ return 0; }
-static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
-
-static u8 ata_dummy_check_status(struct ata_port *ap)
-{
-	return ATA_DRDY;
-}
-
 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
 {
 	return AC_ERR_SYSTEM;
 }
 
-const struct ata_port_operations ata_dummy_port_ops = {
-	.check_status		= ata_dummy_check_status,
-	.check_altstatus	= ata_dummy_check_status,
-	.dev_select		= ata_noop_dev_select,
+static void ata_dummy_error_handler(struct ata_port *ap)
+{
+	/* truly dummy */
+}
+
+struct ata_port_operations ata_dummy_port_ops = {
 	.qc_prep		= ata_noop_qc_prep,
 	.qc_issue		= ata_dummy_qc_issue,
-	.freeze			= ata_dummy_noret,
-	.thaw			= ata_dummy_noret,
-	.error_handler		= ata_dummy_noret,
-	.post_internal_cmd	= ata_dummy_qc_noret,
-	.irq_clear		= ata_dummy_noret,
-	.port_start		= ata_dummy_ret0,
-	.port_stop		= ata_dummy_noret,
+	.error_handler		= ata_dummy_error_handler,
 };
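/* A sketch of the dummy ops' typical use: marking an absent port in a
 * port_info array so probing skips it.  my_port_info is hypothetical.
 */
static const struct ata_port_info *my_ppi[] = {
	&my_port_info,		/* hypothetical real port */
	&ata_dummy_port_info,	/* second port not wired up */
};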
 
 const struct ata_port_info ata_dummy_port_info = {
@@ -7791,10 +6194,11 @@
 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
+EXPORT_SYMBOL_GPL(ata_base_port_ops);
+EXPORT_SYMBOL_GPL(sata_port_ops);
 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
 EXPORT_SYMBOL_GPL(ata_dummy_port_info);
 EXPORT_SYMBOL_GPL(ata_std_bios_param);
-EXPORT_SYMBOL_GPL(ata_std_ports);
 EXPORT_SYMBOL_GPL(ata_host_init);
 EXPORT_SYMBOL_GPL(ata_host_alloc);
 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
@@ -7803,14 +6207,8 @@
 EXPORT_SYMBOL_GPL(ata_host_activate);
 EXPORT_SYMBOL_GPL(ata_host_detach);
 EXPORT_SYMBOL_GPL(ata_sg_init);
-EXPORT_SYMBOL_GPL(ata_hsm_move);
 EXPORT_SYMBOL_GPL(ata_qc_complete);
 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
-EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
-EXPORT_SYMBOL_GPL(ata_tf_load);
-EXPORT_SYMBOL_GPL(ata_tf_read);
-EXPORT_SYMBOL_GPL(ata_noop_dev_select);
-EXPORT_SYMBOL_GPL(ata_std_dev_select);
 EXPORT_SYMBOL_GPL(sata_print_link_status);
 EXPORT_SYMBOL_GPL(atapi_cmd_type);
 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
@@ -7822,37 +6220,17 @@
 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
 EXPORT_SYMBOL_GPL(ata_mode_string);
 EXPORT_SYMBOL_GPL(ata_id_xfermask);
-EXPORT_SYMBOL_GPL(ata_check_status);
-EXPORT_SYMBOL_GPL(ata_altstatus);
-EXPORT_SYMBOL_GPL(ata_exec_command);
 EXPORT_SYMBOL_GPL(ata_port_start);
-EXPORT_SYMBOL_GPL(ata_sff_port_start);
-EXPORT_SYMBOL_GPL(ata_interrupt);
 EXPORT_SYMBOL_GPL(ata_do_set_mode);
-EXPORT_SYMBOL_GPL(ata_data_xfer);
-EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
 EXPORT_SYMBOL_GPL(ata_std_qc_defer);
-EXPORT_SYMBOL_GPL(ata_qc_prep);
-EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
-EXPORT_SYMBOL_GPL(ata_bmdma_setup);
-EXPORT_SYMBOL_GPL(ata_bmdma_start);
-EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
-EXPORT_SYMBOL_GPL(ata_bmdma_status);
-EXPORT_SYMBOL_GPL(ata_bmdma_stop);
-EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
-EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
-EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
-EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
-EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
 EXPORT_SYMBOL_GPL(ata_port_probe);
 EXPORT_SYMBOL_GPL(ata_dev_disable);
 EXPORT_SYMBOL_GPL(sata_set_spd);
+EXPORT_SYMBOL_GPL(ata_wait_after_reset);
 EXPORT_SYMBOL_GPL(sata_link_debounce);
 EXPORT_SYMBOL_GPL(sata_link_resume);
-EXPORT_SYMBOL_GPL(ata_bus_reset);
 EXPORT_SYMBOL_GPL(ata_std_prereset);
-EXPORT_SYMBOL_GPL(ata_std_softreset);
 EXPORT_SYMBOL_GPL(sata_link_hardreset);
 EXPORT_SYMBOL_GPL(sata_std_hardreset);
 EXPORT_SYMBOL_GPL(ata_std_postreset);
@@ -7861,15 +6239,11 @@
 EXPORT_SYMBOL_GPL(ata_port_disable);
 EXPORT_SYMBOL_GPL(ata_ratelimit);
 EXPORT_SYMBOL_GPL(ata_wait_register);
-EXPORT_SYMBOL_GPL(ata_busy_sleep);
-EXPORT_SYMBOL_GPL(ata_wait_after_reset);
-EXPORT_SYMBOL_GPL(ata_wait_ready);
 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
-EXPORT_SYMBOL_GPL(ata_host_intr);
 EXPORT_SYMBOL_GPL(sata_scr_valid);
 EXPORT_SYMBOL_GPL(sata_scr_read);
 EXPORT_SYMBOL_GPL(sata_scr_write);
@@ -7892,11 +6266,6 @@
 
 #ifdef CONFIG_PCI
 EXPORT_SYMBOL_GPL(pci_test_config_bits);
-EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
-EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
-EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
-EXPORT_SYMBOL_GPL(ata_pci_activate_sff_host);
-EXPORT_SYMBOL_GPL(ata_pci_init_one);
 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
 #ifdef CONFIG_PM
 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
@@ -7904,16 +6273,8 @@
 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
 #endif /* CONFIG_PM */
-EXPORT_SYMBOL_GPL(ata_pci_default_filter);
-EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
 #endif /* CONFIG_PCI */
 
-EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch);
-EXPORT_SYMBOL_GPL(sata_pmp_std_prereset);
-EXPORT_SYMBOL_GPL(sata_pmp_std_hardreset);
-EXPORT_SYMBOL_GPL(sata_pmp_std_postreset);
-EXPORT_SYMBOL_GPL(sata_pmp_do_eh);
-
 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
 EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
@@ -7931,8 +6292,7 @@
 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
 EXPORT_SYMBOL_GPL(ata_do_eh);
-EXPORT_SYMBOL_GPL(ata_irq_on);
-EXPORT_SYMBOL_GPL(ata_dev_try_classify);
+EXPORT_SYMBOL_GPL(ata_std_error_handler);
 
 EXPORT_SYMBOL_GPL(ata_cable_40wire);
 EXPORT_SYMBOL_GPL(ata_cable_80wire);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index a583032..d94359a 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -873,9 +873,9 @@
 	if (rc == 0)
 		sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);
 
-	if (!ap->nr_pmp_links || rc) {
+	if (!sata_pmp_attached(ap) || rc) {
 		/* PMP is not attached or SNTF is not available */
-		if (!ap->nr_pmp_links) {
+		if (!sata_pmp_attached(ap)) {
 			/* PMP is not attached.  Check whether ATAPI
 			 * AN is configured.  If so, notify media
 			 * change.
@@ -1079,19 +1079,6 @@
 
 	spin_lock_irqsave(ap->lock, flags);
 
-	/* Reset is represented by combination of actions and EHI
-	 * flags.  Suck in all related bits before clearing eh_info to
-	 * avoid losing requested action.
-	 */
-	if (action & ATA_EH_RESET_MASK) {
-		ehc->i.action |= ehi->action & ATA_EH_RESET_MASK;
-		ehc->i.flags |= ehi->flags & ATA_EHI_RESET_MODIFIER_MASK;
-
-		/* make sure all reset actions are cleared & clear EHI flags */
-		action |= ATA_EH_RESET_MASK;
-		ehi->flags &= ~ATA_EHI_RESET_MODIFIER_MASK;
-	}
-
 	ata_eh_clear_action(link, dev, ehi, action);
 
 	if (!(ehc->i.flags & ATA_EHI_QUIET))
@@ -1117,12 +1104,6 @@
 {
 	struct ata_eh_context *ehc = &link->eh_context;
 
-	/* if reset is complete, clear all reset actions & reset modifier */
-	if (action & ATA_EH_RESET_MASK) {
-		action |= ATA_EH_RESET_MASK;
-		ehc->i.flags &= ~ATA_EHI_RESET_MODIFIER_MASK;
-	}
-
 	ata_eh_clear_action(link, dev, &ehc->i, action);
 }
 
@@ -1329,20 +1310,20 @@
 
 	if (serror & SERR_PERSISTENT) {
 		err_mask |= AC_ERR_ATA_BUS;
-		action |= ATA_EH_HARDRESET;
+		action |= ATA_EH_RESET;
 	}
 	if (serror &
 	    (SERR_DATA_RECOVERED | SERR_COMM_RECOVERED | SERR_DATA)) {
 		err_mask |= AC_ERR_ATA_BUS;
-		action |= ATA_EH_SOFTRESET;
+		action |= ATA_EH_RESET;
 	}
 	if (serror & SERR_PROTOCOL) {
 		err_mask |= AC_ERR_HSM;
-		action |= ATA_EH_SOFTRESET;
+		action |= ATA_EH_RESET;
 	}
 	if (serror & SERR_INTERNAL) {
 		err_mask |= AC_ERR_SYSTEM;
-		action |= ATA_EH_HARDRESET;
+		action |= ATA_EH_RESET;
 	}
 
 	/* Determine whether a hotplug event has occurred.  Both
@@ -1448,7 +1429,7 @@
 
 	if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
 		qc->err_mask |= AC_ERR_HSM;
-		return ATA_EH_SOFTRESET;
+		return ATA_EH_RESET;
 	}
 
 	if (stat & (ATA_ERR | ATA_DF))
@@ -1484,7 +1465,7 @@
 	}
 
 	if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
-		action |= ATA_EH_SOFTRESET;
+		action |= ATA_EH_RESET;
 
 	return action;
 }
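/* A sketch of the unified convention the hunks above move to: callers
 * request a generic ATA_EH_RESET and the method (hard vs soft) is
 * chosen later in ata_eh_reset(), shown further below.  Real callers
 * update eh_info under ap->lock.
 */
static void my_schedule_reset(struct ata_link *link)
{
	link->eh_info.action |= ATA_EH_RESET;	/* not _SOFT/_HARDRESET */
}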
@@ -1685,7 +1666,7 @@
 	if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
 		/* speed down SATA link speed if possible */
 		if (sata_down_spd_limit(link) == 0) {
-			action |= ATA_EH_HARDRESET;
+			action |= ATA_EH_RESET;
 			goto done;
 		}
 
@@ -1705,7 +1686,7 @@
 			dev->spdn_cnt++;
 
 			if (ata_down_xfermask_limit(dev, sel) == 0) {
-				action |= ATA_EH_SOFTRESET;
+				action |= ATA_EH_RESET;
 				goto done;
 			}
 		}
@@ -1719,7 +1700,7 @@
 	    (dev->xfer_shift != ATA_SHIFT_PIO)) {
 		if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
 			dev->spdn_cnt = 0;
-			action |= ATA_EH_SOFTRESET;
+			action |= ATA_EH_RESET;
 			goto done;
 		}
 	}
@@ -1764,9 +1745,9 @@
 		ehc->i.serror |= serror;
 		ata_eh_analyze_serror(link);
 	} else if (rc != -EOPNOTSUPP) {
-		/* SError read failed, force hardreset and probing */
-		ata_ehi_schedule_probe(&ehc->i);
-		ehc->i.action |= ATA_EH_HARDRESET;
+		/* SError read failed, force reset and probing */
+		ehc->i.probe_mask |= ATA_ALL_DEVICES;
+		ehc->i.action |= ATA_EH_RESET;
 		ehc->i.err_mask |= AC_ERR_OTHER;
 	}
 
@@ -1804,6 +1785,11 @@
 		if (qc->flags & ATA_QCFLAG_SENSE_VALID)
 			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
 
+		/* determine whether the command is worth retrying */
+		if (!(qc->err_mask & AC_ERR_INVALID) &&
+		    ((qc->flags & ATA_QCFLAG_IO) || qc->err_mask != AC_ERR_DEV))
+			qc->flags |= ATA_QCFLAG_RETRY;
+
 		/* accumulate error info */
 		ehc->i.dev = qc->dev;
 		all_err_mask |= qc->err_mask;
@@ -1814,7 +1800,7 @@
 	/* enforce default EH actions */
 	if (ap->pflags & ATA_PFLAG_FROZEN ||
 	    all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
-		ehc->i.action |= ATA_EH_SOFTRESET;
+		ehc->i.action |= ATA_EH_RESET;
 	else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
 		 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
 		ehc->i.action |= ATA_EH_REVALIDATE;
@@ -1867,7 +1853,7 @@
 	/* Autopsy of fanout ports can affect host link autopsy.
 	 * Perform host link autopsy last.
 	 */
-	if (ap->nr_pmp_links)
+	if (sata_pmp_attached(ap))
 		ata_eh_link_autopsy(&ap->link);
 }
 
@@ -2066,41 +2052,29 @@
 		classes[dev->devno] = ATA_DEV_UNKNOWN;
 
 	rc = reset(link, classes, deadline);
-	if (rc)
-		return rc;
 
-	/* If any class isn't ATA_DEV_UNKNOWN, consider classification
-	 * is complete and convert all ATA_DEV_UNKNOWN to
-	 * ATA_DEV_NONE.
-	 */
+	/* convert all ATA_DEV_UNKNOWN to ATA_DEV_NONE */
 	ata_link_for_each_dev(dev, link)
-		if (classes[dev->devno] != ATA_DEV_UNKNOWN)
-			break;
+		if (classes[dev->devno] == ATA_DEV_UNKNOWN)
+			classes[dev->devno] = ATA_DEV_NONE;
 
-	if (dev) {
-		ata_link_for_each_dev(dev, link) {
-			if (classes[dev->devno] == ATA_DEV_UNKNOWN)
-				classes[dev->devno] = ATA_DEV_NONE;
-		}
-	}
-
-	return 0;
+	return rc;
 }
 
 static int ata_eh_followup_srst_needed(struct ata_link *link,
 				       int rc, int classify,
 				       const unsigned int *classes)
 {
-	if (link->flags & ATA_LFLAG_NO_SRST)
+	if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
 		return 0;
-	if (rc == -EAGAIN)
-		return 1;
+	if (rc == -EAGAIN) {
+		if (classify)
+			return 1;
+		rc = 0;
+	}
 	if (rc != 0)
 		return 0;
-	if ((link->ap->flags & ATA_FLAG_PMP) && ata_is_host_link(link))
-		return 1;
-	if (classify && !(link->flags & ATA_LFLAG_ASSUME_CLASS) &&
-	    classes[0] == ATA_DEV_UNKNOWN)
+	if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
 		return 1;
 	return 0;
 }
@@ -2118,7 +2092,6 @@
 	int try = 0;
 	struct ata_device *dev;
 	unsigned long deadline, now;
-	unsigned int tmp_action;
 	ata_reset_fn_t reset;
 	unsigned long flags;
 	u32 sstatus;
@@ -2129,7 +2102,7 @@
 	ap->pflags |= ATA_PFLAG_RESETTING;
 	spin_unlock_irqrestore(ap->lock, flags);
 
-	ata_eh_about_to_do(link, NULL, ehc->i.action & ATA_EH_RESET_MASK);
+	ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
 
 	ata_link_for_each_dev(dev, link) {
 		/* If we issue an SRST then an ATA drive (not ATAPI)
@@ -2159,17 +2132,20 @@
 		goto done;
 	}
 
-	/* Determine which reset to use and record in ehc->i.action.
-	 * prereset() may examine and modify it.
-	 */
-	if (softreset && (!hardreset || (!(lflags & ATA_LFLAG_NO_SRST) &&
-					 !sata_set_spd_needed(link) &&
-					 !(ehc->i.action & ATA_EH_HARDRESET))))
-		tmp_action = ATA_EH_SOFTRESET;
-	else
-		tmp_action = ATA_EH_HARDRESET;
-
-	ehc->i.action = (ehc->i.action & ~ATA_EH_RESET_MASK) | tmp_action;
+	/* prefer hardreset */
+	ehc->i.action &= ~ATA_EH_RESET;
+	if (hardreset) {
+		reset = hardreset;
+		ehc->i.action = ATA_EH_HARDRESET;
+	} else if (softreset) {
+		reset = softreset;
+		ehc->i.action = ATA_EH_SOFTRESET;
+	} else {
+		ata_link_printk(link, KERN_ERR, "BUG: no reset method, "
+				"please report to linux-ide@vger.kernel.org\n");
+		dump_stack();
+		return -EINVAL;
+	}
 
 	if (prereset) {
 		rc = prereset(link, jiffies + ATA_EH_PRERESET_TIMEOUT);
@@ -2177,7 +2153,7 @@
 			if (rc == -ENOENT) {
 				ata_link_printk(link, KERN_DEBUG,
 						"port disabled. ignoring.\n");
-				ehc->i.action &= ~ATA_EH_RESET_MASK;
+				ehc->i.action &= ~ATA_EH_RESET;
 
 				ata_link_for_each_dev(dev, link)
 					classes[dev->devno] = ATA_DEV_NONE;
@@ -2190,12 +2166,8 @@
 		}
 	}
 
-	/* prereset() might have modified ehc->i.action */
-	if (ehc->i.action & ATA_EH_HARDRESET)
-		reset = hardreset;
-	else if (ehc->i.action & ATA_EH_SOFTRESET)
-		reset = softreset;
-	else {
+	/* prereset() might have cleared ATA_EH_RESET */
+	if (!(ehc->i.action & ATA_EH_RESET)) {
 		/* prereset told us not to reset, bang classes and return */
 		ata_link_for_each_dev(dev, link)
 			classes[dev->devno] = ATA_DEV_NONE;
@@ -2203,14 +2175,6 @@
 		goto out;
 	}
 
-	/* did prereset() screw up?  if so, fix up to avoid oopsing */
-	if (!reset) {
-		if (softreset)
-			reset = softreset;
-		else
-			reset = hardreset;
-	}
-
  retry:
 	deadline = jiffies + ata_eh_reset_timeouts[try++];
 
@@ -2240,7 +2204,7 @@
 			goto fail;
 		}
 
-		ata_eh_about_to_do(link, NULL, ATA_EH_RESET_MASK);
+		ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
 		rc = ata_do_reset(link, reset, classes, deadline);
 	}
 
@@ -2248,21 +2212,6 @@
 	if (rc && rc != -EAGAIN)
 		goto fail;
 
-	/* was classification successful? */
-	if (classify && classes[0] == ATA_DEV_UNKNOWN &&
-	    !(lflags & ATA_LFLAG_ASSUME_CLASS)) {
-		if (try < max_tries) {
-			ata_link_printk(link, KERN_WARNING,
-					"classification failed\n");
-			rc = -EINVAL;
-			goto fail;
-		}
-
-		ata_link_printk(link, KERN_WARNING,
-				"classfication failed, assuming ATA\n");
-		lflags |= ATA_LFLAG_ASSUME_ATA;
-	}
-
  done:
 	ata_link_for_each_dev(dev, link) {
 		/* After the reset, the device state is PIO 0 and the
@@ -2290,7 +2239,7 @@
 		postreset(link, classes);
 
 	/* reset successful, schedule revalidation */
-	ata_eh_done(link, NULL, ehc->i.action & ATA_EH_RESET_MASK);
+	ata_eh_done(link, NULL, ATA_EH_RESET);
 	ehc->i.action |= ATA_EH_REVALIDATE;
 
 	rc = 0;
@@ -2305,6 +2254,11 @@
 	return rc;
 
  fail:
+	/* if SCR isn't accessible on a fan-out port, PMP needs to be reset */
+	if (!ata_is_host_link(link) &&
+	    sata_scr_read(link, SCR_STATUS, &sstatus))
+		rc = -ERESTART;
+
 	if (rc == -ERESTART || try >= max_tries)
 		goto out;
 
@@ -2515,6 +2469,7 @@
 
 static int ata_eh_skip_recovery(struct ata_link *link)
 {
+	struct ata_port *ap = link->ap;
 	struct ata_eh_context *ehc = &link->eh_context;
 	struct ata_device *dev;
 
@@ -2522,9 +2477,13 @@
 	if (link->flags & ATA_LFLAG_DISABLED)
 		return 1;
 
-	/* thaw frozen port, resume link and recover failed devices */
-	if ((link->ap->pflags & ATA_PFLAG_FROZEN) ||
-	    (ehc->i.flags & ATA_EHI_RESUME_LINK) || ata_link_nr_enabled(link))
+	/* thaw frozen port and recover failed devices */
+	if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
+		return 0;
+
+	/* reset at least once if reset is requested */
+	if ((ehc->i.action & ATA_EH_RESET) &&
+	    !(ehc->i.flags & ATA_EHI_DID_RESET))
 		return 0;
 
 	/* skip if class codes for all vacant slots are ATA_DEV_NONE */
@@ -2548,7 +2507,7 @@
 	ata_eh_detach_dev(dev);
 	ata_dev_init(dev);
 	ehc->did_probe_mask |= (1 << dev->devno);
-	ehc->i.action |= ATA_EH_SOFTRESET;
+	ehc->i.action |= ATA_EH_RESET;
 	ehc->saved_xfer_mode[dev->devno] = 0;
 	ehc->saved_ncq_enabled &= ~(1 << dev->devno);
 
@@ -2592,12 +2551,7 @@
 
 		return 1;
 	} else {
-		/* soft didn't work?  be haaaaard */
-		if (ehc->i.flags & ATA_EHI_DID_RESET)
-			ehc->i.action |= ATA_EH_HARDRESET;
-		else
-			ehc->i.action |= ATA_EH_SOFTRESET;
-
+		ehc->i.action |= ATA_EH_RESET;
 		return 0;
 	}
 }
@@ -2690,7 +2644,7 @@
 			ehc->i.action = 0;
 
 		/* do we need to reset? */
-		if (ehc->i.action & ATA_EH_RESET_MASK)
+		if (ehc->i.action & ATA_EH_RESET)
 			reset = 1;
 
 		ata_link_for_each_dev(dev, link)
@@ -2702,13 +2656,13 @@
 		/* if PMP is attached, this function only deals with
 		 * downstream links, port should stay thawed.
 		 */
-		if (!ap->nr_pmp_links)
+		if (!sata_pmp_attached(ap))
 			ata_eh_freeze_port(ap);
 
 		ata_port_for_each_link(link, ap) {
 			struct ata_eh_context *ehc = &link->eh_context;
 
-			if (!(ehc->i.action & ATA_EH_RESET_MASK))
+			if (!(ehc->i.action & ATA_EH_RESET))
 				continue;
 
 			rc = ata_eh_reset(link, ata_link_nr_vacant(link),
@@ -2721,7 +2675,7 @@
 			}
 		}
 
-		if (!ap->nr_pmp_links)
+		if (!sata_pmp_attached(ap))
 			ata_eh_thaw_port(ap);
 	}
 
@@ -2765,7 +2719,7 @@
 			/* PMP reset requires working host port.
 			 * Can't retry if it's frozen.
 			 */
-			if (ap->nr_pmp_links)
+			if (sata_pmp_attached(ap))
 				goto out;
 			break;
 		}
@@ -2817,18 +2771,11 @@
 			/* FIXME: Once EH migration is complete,
 			 * generate sense data in this function,
 			 * considering both err_mask and tf.
-			 *
-			 * There's no point in retrying invalid
-			 * (detected by libata) and non-IO device
-			 * errors (rejected by device).  Finish them
-			 * immediately.
 			 */
-			if ((qc->err_mask & AC_ERR_INVALID) ||
-			    (!(qc->flags & ATA_QCFLAG_IO) &&
-			     qc->err_mask == AC_ERR_DEV))
-				ata_eh_qc_complete(qc);
-			else
+			if (qc->flags & ATA_QCFLAG_RETRY)
 				ata_eh_qc_retry(qc);
+			else
+				ata_eh_qc_complete(qc);
 		} else {
 			if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
 				ata_eh_qc_complete(qc);
@@ -2848,6 +2795,7 @@
 /**
  *	ata_do_eh - do standard error handling
  *	@ap: host port to handle error for
+ *
  *	@prereset: prereset method (can be NULL)
  *	@softreset: softreset method (can be NULL)
  *	@hardreset: hardreset method (can be NULL)
@@ -2878,6 +2826,27 @@
 	ata_eh_finish(ap);
 }
 
+/**
+ *	ata_std_error_handler - standard error handler
+ *	@ap: host port to handle error for
+ *
+ *	Standard error handler
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ */
+void ata_std_error_handler(struct ata_port *ap)
+{
+	struct ata_port_operations *ops = ap->ops;
+	ata_reset_fn_t hardreset = ops->hardreset;
+
+	/* ignore built-in hardreset if SCR access is not available */
+	if (ata_is_builtin_hardreset(hardreset) && !sata_scr_valid(&ap->link))
+		hardreset = NULL;
+
+	ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
+}
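
With reset selection folded into the single ATA_EH_RESET action, a simple driver no longer needs its own ->error_handler. A minimal sketch (the driver ops below are illustrative, and assume sata_port_ops in this series carries .error_handler = ata_std_error_handler via ata_base_port_ops):

	/* hypothetical SATA driver: inherits ata_std_error_handler; the
	 * built-in sata_std_hardreset is skipped automatically when SCR
	 * registers aren't readable, as implemented above */
	static struct ata_port_operations my_sata_ops = {
		.inherits	= &sata_port_ops,
		.qc_prep	= my_qc_prep,	/* illustrative */
		.qc_issue	= my_qc_issue,	/* illustrative */
	};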
+
 #ifdef CONFIG_PM
 /**
  *	ata_eh_handle_port_suspend - perform port suspend operation
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index d91f509..ff1822a 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -11,6 +11,14 @@
 #include <linux/libata.h>
 #include "libata.h"
 
+const struct ata_port_operations sata_pmp_port_ops = {
+	.inherits		= &sata_port_ops,
+	.pmp_prereset		= ata_std_prereset,
+	.pmp_hardreset		= sata_std_hardreset,
+	.pmp_postreset		= ata_std_postreset,
+	.error_handler		= sata_pmp_error_handler,
+};
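
Because the PMP reset methods now live in ata_port_operations (see sata_pmp_eh_recover() below, which pulls them from ap->ops instead of taking eight callback arguments), a PMP-aware driver can inherit this table and override only what its hardware needs. A hedged sketch with illustrative names:

	static struct ata_port_operations my_pmp_aware_ops = {
		.inherits	= &sata_pmp_port_ops,
		.qc_defer	= sata_pmp_qc_defer_cmd_switch,	/* exported below */
		.pmp_hardreset	= my_pmp_hardreset,		/* illustrative */
	};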
+
 /**
  *	sata_pmp_read - read PMP register
  *	@link: link to read PMP register for
@@ -176,140 +184,6 @@
 }
 
 /**
- *	sata_pmp_std_prereset - prepare PMP link for reset
- *	@link: link to be reset
- *	@deadline: deadline jiffies for the operation
- *
- *	@link is about to be reset.  Initialize it.
- *
- *	LOCKING:
- *	Kernel thread context (may sleep)
- *
- *	RETURNS:
- *	0 on success, -errno otherwise.
- */
-int sata_pmp_std_prereset(struct ata_link *link, unsigned long deadline)
-{
-	struct ata_eh_context *ehc = &link->eh_context;
-	const unsigned long *timing = sata_ehc_deb_timing(ehc);
-	int rc;
-
-	/* force HRST? */
-	if (link->flags & ATA_LFLAG_NO_SRST)
-		ehc->i.action |= ATA_EH_HARDRESET;
-
-	/* handle link resume */
-	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
-	    (link->flags & ATA_LFLAG_HRST_TO_RESUME))
-		ehc->i.action |= ATA_EH_HARDRESET;
-
-	/* if we're about to do hardreset, nothing more to do */
-	if (ehc->i.action & ATA_EH_HARDRESET)
-		return 0;
-
-	/* resume link */
-	rc = sata_link_resume(link, timing, deadline);
-	if (rc) {
-		/* phy resume failed */
-		ata_link_printk(link, KERN_WARNING, "failed to resume link "
-				"for reset (errno=%d)\n", rc);
-		return rc;
-	}
-
-	/* clear SError bits including .X which blocks the port when set */
-	rc = sata_scr_write(link, SCR_ERROR, 0xffffffff);
-	if (rc) {
-		ata_link_printk(link, KERN_ERR,
-				"failed to clear SError (errno=%d)\n", rc);
-		return rc;
-	}
-
-	return 0;
-}
-
-/**
- *	sata_pmp_std_hardreset - standard hardreset method for PMP link
- *	@link: link to be reset
- *	@class: resulting class of attached device
- *	@deadline: deadline jiffies for the operation
- *
- *	Hardreset PMP port @link.  Note that this function doesn't
- *	wait for BSY clearance.  There simply isn't a generic way to
- *	wait the event.  Instead, this function return -EAGAIN thus
- *	telling libata-EH to followup with softreset.
- *
- *	LOCKING:
- *	Kernel thread context (may sleep)
- *
- *	RETURNS:
- *	0 on success, -errno otherwise.
- */
-int sata_pmp_std_hardreset(struct ata_link *link, unsigned int *class,
-			   unsigned long deadline)
-{
-	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
-	u32 tmp;
-	int rc;
-
-	DPRINTK("ENTER\n");
-
-	/* do hardreset */
-	rc = sata_link_hardreset(link, timing, deadline);
-	if (rc) {
-		ata_link_printk(link, KERN_ERR,
-				"COMRESET failed (errno=%d)\n", rc);
-		goto out;
-	}
-
-	/* clear SError bits including .X which blocks the port when set */
-	rc = sata_scr_write(link, SCR_ERROR, 0xffffffff);
-	if (rc) {
-		ata_link_printk(link, KERN_ERR, "failed to clear SError "
-				"during hardreset (errno=%d)\n", rc);
-		goto out;
-	}
-
-	/* if device is present, follow up with srst to wait for !BSY */
-	if (ata_link_online(link))
-		rc = -EAGAIN;
- out:
-	/* if SCR isn't accessible, we need to reset the PMP */
-	if (rc && rc != -EAGAIN && sata_scr_read(link, SCR_STATUS, &tmp))
-		rc = -ERESTART;
-
-	DPRINTK("EXIT, rc=%d\n", rc);
-	return rc;
-}
-
-/**
- *	ata_std_postreset - standard postreset method for PMP link
- *	@link: the target ata_link
- *	@classes: classes of attached devices
- *
- *	This function is invoked after a successful reset.  Note that
- *	the device might have been reset more than once using
- *	different reset methods before postreset is invoked.
- *
- *	LOCKING:
- *	Kernel thread context (may sleep)
- */
-void sata_pmp_std_postreset(struct ata_link *link, unsigned int *class)
-{
-	u32 serror;
-
-	DPRINTK("ENTER\n");
-
-	/* clear SError */
-	if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
-		sata_scr_write(link, SCR_ERROR, serror);
-
-	/* print link status */
-	sata_print_link_status(link);
-
-	DPRINTK("EXIT\n");
-}
-
-/**
  *	sata_pmp_read_gscr - read GSCR block of SATA PMP
  *	@dev: PMP device
  *	@gscr: buffer to read GSCR block into
@@ -444,9 +318,8 @@
 		struct ata_eh_context *ehc = &link->eh_context;
 
 		link->flags = 0;
-		ehc->i.probe_mask |= 1;
-		ehc->i.action |= ATA_EH_SOFTRESET;
-		ehc->i.flags |= ATA_EHI_RESUME_LINK;
+		ehc->i.probe_mask |= ATA_ALL_DEVICES;
+		ehc->i.action |= ATA_EH_RESET;
 	}
 
 	return 0;
@@ -462,9 +335,6 @@
 	if (vendor == 0x1095 && devid == 0x3726) {
 		/* sil3726 quirks */
 		ata_port_for_each_link(link, ap) {
-			/* SError.N need a kick in the ass to get working */
-			link->flags |= ATA_LFLAG_HRST_TO_RESUME;
-
 			/* class code report is unreliable */
 			if (link->pmp < 5)
 				link->flags |= ATA_LFLAG_ASSUME_ATA;
@@ -477,9 +347,6 @@
 	} else if (vendor == 0x1095 && devid == 0x4723) {
 		/* sil4723 quirks */
 		ata_port_for_each_link(link, ap) {
-			/* SError.N need a kick in the ass to get working */
-			link->flags |= ATA_LFLAG_HRST_TO_RESUME;
-
 			/* class code report is unreliable */
 			if (link->pmp < 2)
 				link->flags |= ATA_LFLAG_ASSUME_ATA;
@@ -492,9 +359,6 @@
 	} else if (vendor == 0x1095 && devid == 0x4726) {
 		/* sil4726 quirks */
 		ata_port_for_each_link(link, ap) {
-			/* SError.N need a kick in the ass to get working */
-			link->flags |= ATA_LFLAG_HRST_TO_RESUME;
-
 			/* Class code report is unreliable and SRST
 			 * times out under certain configurations.
 			 * Config device can be at port 0 or 5 and
@@ -522,13 +386,6 @@
 		 * otherwise.  Don't try hard to recover it.
 		 */
 		ap->pmp_link[ap->nr_pmp_links - 1].flags |= ATA_LFLAG_NO_RETRY;
-	} else if (vendor == 0x11ab && devid == 0x4140) {
-		/* Marvell 88SM4140 quirks.  Fan-out ports require PHY
-		 * reset to work; other than that, it behaves very
-		 * nicely.
-		 */
-		ata_port_for_each_link(link, ap)
-			link->flags |= ATA_LFLAG_HRST_TO_RESUME;
 	}
 }
 
@@ -554,7 +411,7 @@
 	int rc;
 
 	/* is it hanging off the right place? */
-	if (!(ap->flags & ATA_FLAG_PMP)) {
+	if (!sata_pmp_supported(ap)) {
 		ata_dev_printk(dev, KERN_ERR,
 			       "host does not support Port Multiplier\n");
 		return -EINVAL;
@@ -840,13 +697,12 @@
  retry:
 	ehc->classes[0] = ATA_DEV_UNKNOWN;
 
-	if (ehc->i.action & ATA_EH_RESET_MASK) {
+	if (ehc->i.action & ATA_EH_RESET) {
 		struct ata_link *tlink;
 
 		ata_eh_freeze_port(ap);
 
 		/* reset */
-		ehc->i.action = ATA_EH_HARDRESET;
 		rc = ata_eh_reset(link, 0, prereset, softreset, hardreset,
 				  postreset);
 		if (rc) {
@@ -858,8 +714,12 @@
 		ata_eh_thaw_port(ap);
 
 		/* PMP is reset, SErrors cannot be trusted, scan all */
-		ata_port_for_each_link(tlink, ap)
-			ata_ehi_schedule_probe(&tlink->eh_context.i);
+		ata_port_for_each_link(tlink, ap) {
+			struct ata_eh_context *ehc = &tlink->eh_context;
+
+			ehc->i.probe_mask |= ATA_ALL_DEVICES;
+			ehc->i.action |= ATA_EH_RESET;
+		}
 	}
 
 	/* If revalidation is requested, revalidate and reconfigure;
@@ -874,7 +734,7 @@
 		tries--;
 
 		if (rc == -ENODEV) {
-			ehc->i.probe_mask |= 1;
+			ehc->i.probe_mask |= ATA_ALL_DEVICES;
 			detach = 1;
 			/* give it just two more chances */
 			tries = min(tries, 2);
@@ -890,11 +750,11 @@
 				reval_failed = 1;
 
 			ata_dev_printk(dev, KERN_WARNING,
-				       "retrying hardreset%s\n",
+				       "retrying reset%s\n",
 				       sleep ? " in 5 secs" : "");
 			if (sleep)
 				ssleep(5);
-			ehc->i.action |= ATA_EH_HARDRESET;
+			ehc->i.action |= ATA_EH_RESET;
 			goto retry;
 		} else {
 			ata_dev_printk(dev, KERN_ERR, "failed to recover PMP "
@@ -938,10 +798,8 @@
 		/* Some PMPs require hardreset sequence to get
 		 * SError.N working.
 		 */
-		if ((link->flags & ATA_LFLAG_HRST_TO_RESUME) &&
-		    (link->eh_context.i.flags & ATA_EHI_RESUME_LINK))
-			sata_link_hardreset(link, sata_deb_timing_normal,
-					    jiffies + ATA_TMOUT_INTERNAL_QUICK);
+		sata_link_hardreset(link, sata_deb_timing_normal,
+				jiffies + ATA_TMOUT_INTERNAL_QUICK, NULL, NULL);
 
 		/* unconditionally clear SError.N */
 		rc = sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
@@ -987,14 +845,6 @@
 /**
  *	sata_pmp_eh_recover - recover PMP-enabled port
  *	@ap: ATA port to recover
- *	@prereset: prereset method (can be NULL)
- *	@softreset: softreset method
- *	@hardreset: hardreset method
- *	@postreset: postreset method (can be NULL)
- *	@pmp_prereset: PMP prereset method (can be NULL)
- *	@pmp_softreset: PMP softreset method (can be NULL)
- *	@pmp_hardreset: PMP hardreset method (can be NULL)
- *	@pmp_postreset: PMP postreset method (can be NULL)
  *
  *	Drive EH recovery operation for PMP enabled port @ap.  This
  *	function recovers host and PMP ports with proper retrials and
@@ -1007,12 +857,9 @@
  *	RETURNS:
  *	0 on success, -errno on failure.
  */
-static int sata_pmp_eh_recover(struct ata_port *ap,
-		ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
-		ata_reset_fn_t hardreset, ata_postreset_fn_t postreset,
-		ata_prereset_fn_t pmp_prereset, ata_reset_fn_t pmp_softreset,
-		ata_reset_fn_t pmp_hardreset, ata_postreset_fn_t pmp_postreset)
+static int sata_pmp_eh_recover(struct ata_port *ap)
 {
+	struct ata_port_operations *ops = ap->ops;
 	int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
 	struct ata_link *pmp_link = &ap->link;
 	struct ata_device *pmp_dev = pmp_link->device;
@@ -1029,9 +876,9 @@
 
  retry:
 	/* PMP attached? */
-	if (!ap->nr_pmp_links) {
-		rc = ata_eh_recover(ap, prereset, softreset, hardreset,
-				    postreset, NULL);
+	if (!sata_pmp_attached(ap)) {
+		rc = ata_eh_recover(ap, ops->prereset, ops->softreset,
+				    ops->hardreset, ops->postreset, NULL);
 		if (rc) {
 			ata_link_for_each_dev(dev, &ap->link)
 				ata_dev_disable(dev);
@@ -1049,8 +896,8 @@
 	}
 
 	/* recover pmp */
-	rc = sata_pmp_eh_recover_pmp(ap, prereset, softreset, hardreset,
-				     postreset);
+	rc = sata_pmp_eh_recover_pmp(ap, ops->prereset, ops->softreset,
+				     ops->hardreset, ops->postreset);
 	if (rc)
 		goto pmp_fail;
 
@@ -1060,8 +907,8 @@
 		goto pmp_fail;
 
 	/* recover links */
-	rc = ata_eh_recover(ap, pmp_prereset, pmp_softreset, pmp_hardreset,
-			    pmp_postreset, &link);
+	rc = ata_eh_recover(ap, ops->pmp_prereset, ops->pmp_softreset,
+			    ops->pmp_hardreset, ops->pmp_postreset, &link);
 	if (rc)
 		goto link_fail;
 
@@ -1124,7 +971,7 @@
 
  link_fail:
 	if (sata_pmp_handle_link_fail(link, link_tries)) {
-		pmp_ehc->i.action |= ATA_EH_HARDRESET;
+		pmp_ehc->i.action |= ATA_EH_RESET;
 		goto retry;
 	}
 
@@ -1136,13 +983,13 @@
 	if (ap->pflags & ATA_PFLAG_UNLOADING)
 		return rc;
 
-	if (!ap->nr_pmp_links)
+	if (!sata_pmp_attached(ap))
 		goto retry;
 
 	if (--pmp_tries) {
 		ata_port_printk(ap, KERN_WARNING,
 				"failed to recover PMP, retrying in 5 secs\n");
-		pmp_ehc->i.action |= ATA_EH_HARDRESET;
+		pmp_ehc->i.action |= ATA_EH_RESET;
 		ssleep(5);
 		goto retry;
 	}
@@ -1157,16 +1004,8 @@
 }
 
 /**
- *	sata_pmp_do_eh - do standard error handling for PMP-enabled host
+ *	sata_pmp_error_handler - do standard error handling for PMP-enabled host
  *	@ap: host port to handle error for
- *	@prereset: prereset method (can be NULL)
- *	@softreset: softreset method
- *	@hardreset: hardreset method
- *	@postreset: postreset method (can be NULL)
- *	@pmp_prereset: PMP prereset method (can be NULL)
- *	@pmp_softreset: PMP softreset method (can be NULL)
- *	@pmp_hardreset: PMP hardreset method (can be NULL)
- *	@pmp_postreset: PMP postreset method (can be NULL)
  *
  *	Perform standard error handling sequence for PMP-enabled host
  *	@ap.
@@ -1174,16 +1013,14 @@
  *	LOCKING:
  *	Kernel thread context (may sleep).
  */
-void sata_pmp_do_eh(struct ata_port *ap,
-		ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
-		ata_reset_fn_t hardreset, ata_postreset_fn_t postreset,
-		ata_prereset_fn_t pmp_prereset, ata_reset_fn_t pmp_softreset,
-		ata_reset_fn_t pmp_hardreset, ata_postreset_fn_t pmp_postreset)
+void sata_pmp_error_handler(struct ata_port *ap)
 {
 	ata_eh_autopsy(ap);
 	ata_eh_report(ap);
-	sata_pmp_eh_recover(ap, prereset, softreset, hardreset, postreset,
-			    pmp_prereset, pmp_softreset, pmp_hardreset,
-			    pmp_postreset);
+	sata_pmp_eh_recover(ap);
 	ata_eh_finish(ap);
 }
+
+EXPORT_SYMBOL_GPL(sata_pmp_port_ops);
+EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch);
+EXPORT_SYMBOL_GPL(sata_pmp_error_handler);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 1579539..f3c69a8 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2332,11 +2332,7 @@
 {
 	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
 
-	cmd->sense_buffer[0] = 0x70;	/* fixed format, current */
-	cmd->sense_buffer[2] = sk;
-	cmd->sense_buffer[7] = 18 - 8;	/* additional sense length */
-	cmd->sense_buffer[12] = asc;
-	cmd->sense_buffer[13] = ascq;
+	scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq);
 }
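
scsi_build_sense_buffer() produces the same fixed-format sense bytes the open-coded version wrote. For reference, the layout it fills in here (a restatement of the removed lines, assuming the first argument selects fixed format):

	buf[0]  = 0x70;		/* fixed format, current error */
	buf[2]  = sk;		/* sense key */
	buf[7]  = 18 - 8;	/* additional sense length */
	buf[12] = asc;		/* additional sense code */
	buf[13] = ascq;		/* additional sense code qualifier */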
 
 /**
@@ -2393,7 +2389,10 @@
 	/* FIXME: is this needed? */
 	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
 
-	ap->ops->tf_read(ap, &qc->tf);
+#ifdef CONFIG_ATA_SFF
+	if (ap->ops->sff_tf_read)
+		ap->ops->sff_tf_read(ap, &qc->tf);
+#endif
 
 	/* fill these in, for the case where they are -not- overwritten */
 	cmd->sense_buffer[0] = 0x70;
@@ -2615,7 +2614,7 @@
 
 static struct ata_device *ata_find_dev(struct ata_port *ap, int devno)
 {
-	if (ap->nr_pmp_links == 0) {
+	if (!sata_pmp_attached(ap)) {
 		if (likely(devno < ata_link_max_devices(&ap->link)))
 			return &ap->link.device[devno];
 	} else {
@@ -2632,7 +2631,7 @@
 	int devno;
 
 	/* skip commands not addressed to targets we simulate */
-	if (ap->nr_pmp_links == 0) {
+	if (!sata_pmp_attached(ap)) {
 		if (unlikely(scsidev->channel || scsidev->lun))
 			return NULL;
 		devno = scsidev->id;
@@ -3490,7 +3489,7 @@
 	if (lun != SCAN_WILD_CARD && lun)
 		return -EINVAL;
 
-	if (ap->nr_pmp_links == 0) {
+	if (!sata_pmp_attached(ap)) {
 		if (channel != SCAN_WILD_CARD && channel)
 			return -EINVAL;
 		devno = id;
@@ -3507,8 +3506,8 @@
 
 		ata_port_for_each_link(link, ap) {
 			struct ata_eh_info *ehi = &link->eh_info;
-			ehi->probe_mask |= (1 << ata_link_max_devices(link)) - 1;
-			ehi->action |= ATA_EH_SOFTRESET;
+			ehi->probe_mask |= ATA_ALL_DEVICES;
+			ehi->action |= ATA_EH_RESET;
 		}
 	} else {
 		struct ata_device *dev = ata_find_dev(ap, devno);
@@ -3516,8 +3515,7 @@
 		if (dev) {
 			struct ata_eh_info *ehi = &dev->link->eh_info;
 			ehi->probe_mask |= 1 << dev->devno;
-			ehi->action |= ATA_EH_SOFTRESET;
-			ehi->flags |= ATA_EHI_RESUME_LINK;
+			ehi->action |= ATA_EH_RESET;
 		} else
 			rc = -EINVAL;
 	}
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 20dc572..1549952 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -35,11 +35,377 @@
 #include <linux/kernel.h>
 #include <linux/pci.h>
 #include <linux/libata.h>
+#include <linux/highmem.h>
 
 #include "libata.h"
 
+const struct ata_port_operations ata_sff_port_ops = {
+	.inherits		= &ata_base_port_ops,
+
+	.qc_prep		= ata_sff_qc_prep,
+	.qc_issue		= ata_sff_qc_issue,
+	.qc_fill_rtf		= ata_sff_qc_fill_rtf,
+
+	.freeze			= ata_sff_freeze,
+	.thaw			= ata_sff_thaw,
+	.prereset		= ata_sff_prereset,
+	.softreset		= ata_sff_softreset,
+	.hardreset		= sata_sff_hardreset,
+	.postreset		= ata_sff_postreset,
+	.error_handler		= ata_sff_error_handler,
+	.post_internal_cmd	= ata_sff_post_internal_cmd,
+
+	.sff_dev_select		= ata_sff_dev_select,
+	.sff_check_status	= ata_sff_check_status,
+	.sff_tf_load		= ata_sff_tf_load,
+	.sff_tf_read		= ata_sff_tf_read,
+	.sff_exec_command	= ata_sff_exec_command,
+	.sff_data_xfer		= ata_sff_data_xfer,
+	.sff_irq_on		= ata_sff_irq_on,
+	.sff_irq_clear		= ata_sff_irq_clear,
+
+	.port_start		= ata_sff_port_start,
+};
+
+const struct ata_port_operations ata_bmdma_port_ops = {
+	.inherits		= &ata_sff_port_ops,
+
+	.mode_filter		= ata_bmdma_mode_filter,
+
+	.bmdma_setup		= ata_bmdma_setup,
+	.bmdma_start		= ata_bmdma_start,
+	.bmdma_stop		= ata_bmdma_stop,
+	.bmdma_status		= ata_bmdma_status,
+};
+
 /**
- *	ata_irq_on - Enable interrupts on a port.
+ *	ata_fill_sg - Fill PCI IDE PRD table
+ *	@qc: Metadata associated with taskfile to be transferred
+ *
+ *	Fill PCI IDE PRD (scatter-gather) table with segments
+ *	associated with the current disk command.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ *
+ */
+static void ata_fill_sg(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct scatterlist *sg;
+	unsigned int si, pi;
+
+	pi = 0;
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+		u32 addr, offset;
+		u32 sg_len, len;
+
+		/* determine if physical DMA addr spans 64K boundary.
+		 * Note h/w doesn't support 64-bit, so we unconditionally
+		 * truncate dma_addr_t to u32.
+		 */
+		addr = (u32) sg_dma_address(sg);
+		sg_len = sg_dma_len(sg);
+
+		while (sg_len) {
+			offset = addr & 0xffff;
+			len = sg_len;
+			if ((offset + sg_len) > 0x10000)
+				len = 0x10000 - offset;
+
+			ap->prd[pi].addr = cpu_to_le32(addr);
+			ap->prd[pi].flags_len = cpu_to_le32(len & 0xffff);
+			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
+
+			pi++;
+			sg_len -= len;
+			addr += len;
+		}
+	}
+
+	ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+}
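
The inner loop exists because a PRD entry may not cross a 64K boundary. A worked example of the split (standalone arithmetic, not from the patch):

	/* segment at 0x0000f800, length 0x1000: offset = 0xf800, so the
	 * first entry gets 0x10000 - 0xf800 = 0x0800 bytes and the second
	 * gets the remaining 0x0800 bytes starting at 0x00010000 */
	u32 addr = 0x0000f800, sg_len = 0x1000;
	while (sg_len) {
		u32 offset = addr & 0xffff;
		u32 len = sg_len;
		if (offset + sg_len > 0x10000)
			len = 0x10000 - offset;
		/* emit PRD entry (addr, len) */
		sg_len -= len;
		addr += len;
	}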
+
+/**
+ *	ata_fill_sg_dumb - Fill PCI IDE PRD table
+ *	@qc: Metadata associated with taskfile to be transferred
+ *
+ *	Fill PCI IDE PRD (scatter-gather) table with segments
+ *	associated with the current disk command.  Perform the fill
+ *	so that we never emit a 64K-length record (encoded as a zero
+ *	length field) for controllers that don't follow the spec.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ *
+ */
+static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct scatterlist *sg;
+	unsigned int si, pi;
+
+	pi = 0;
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+		u32 addr, offset;
+		u32 sg_len, len, blen;
+
+		/* determine if physical DMA addr spans 64K boundary.
+		 * Note h/w doesn't support 64-bit, so we unconditionally
+		 * truncate dma_addr_t to u32.
+		 */
+		addr = (u32) sg_dma_address(sg);
+		sg_len = sg_dma_len(sg);
+
+		while (sg_len) {
+			offset = addr & 0xffff;
+			len = sg_len;
+			if ((offset + sg_len) > 0x10000)
+				len = 0x10000 - offset;
+
+			blen = len & 0xffff;
+			ap->prd[pi].addr = cpu_to_le32(addr);
+			if (blen == 0) {
+			   /* Some PATA chipsets like the CS5530 can't
+			      cope with 0x0000 meaning 64K as the spec says */
+				ap->prd[pi].flags_len = cpu_to_le32(0x8000);
+				blen = 0x8000;
+				ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000);
+			}
+			ap->prd[pi].flags_len = cpu_to_le32(blen);
+			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
+
+			pi++;
+			sg_len -= len;
+			addr += len;
+		}
+	}
+
+	ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+}
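
The difference from ata_fill_sg() is the zero-length guard: the spec encodes a 64K transfer as a length field of 0x0000, which chips like the CS5530 mishandle, so a 64K request at address A is emitted as the pair (A, 0x8000) and (A + 0x8000, 0x8000) instead of a single (A, 0x0000) entry.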
+
+/**
+ *	ata_sff_qc_prep - Prepare taskfile for submission
+ *	@qc: Metadata associated with taskfile to be prepared
+ *
+ *	Prepare ATA taskfile for submission.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+void ata_sff_qc_prep(struct ata_queued_cmd *qc)
+{
+	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+		return;
+
+	ata_fill_sg(qc);
+}
+
+/**
+ *	ata_sff_dumb_qc_prep - Prepare taskfile for submission
+ *	@qc: Metadata associated with taskfile to be prepared
+ *
+ *	Prepare ATA taskfile for submission.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+void ata_sff_dumb_qc_prep(struct ata_queued_cmd *qc)
+{
+	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+		return;
+
+	ata_fill_sg_dumb(qc);
+}
+
+/**
+ *	ata_sff_check_status - Read device status reg & clear interrupt
+ *	@ap: port where the device is
+ *
+ *	Reads the ATA taskfile status register for the currently-selected
+ *	device and returns its value.  This also clears pending interrupts
+ *	from this device.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+u8 ata_sff_check_status(struct ata_port *ap)
+{
+	return ioread8(ap->ioaddr.status_addr);
+}
+
+/**
+ *	ata_sff_altstatus - Read device alternate status reg
+ *	@ap: port where the device is
+ *
+ *	Reads the ATA taskfile alternate status register for the
+ *	currently-selected device and returns its value.
+ *
+ *	Note: may NOT be used as the sff_check_altstatus() entry in
+ *	ata_port_operations.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+u8 ata_sff_altstatus(struct ata_port *ap)
+{
+	if (ap->ops->sff_check_altstatus)
+		return ap->ops->sff_check_altstatus(ap);
+
+	return ioread8(ap->ioaddr.altstatus_addr);
+}
+
+/**
+ *	ata_sff_busy_sleep - sleep until BSY clears, or timeout
+ *	@ap: port containing status register to be polled
+ *	@tmout_pat: impatience timeout
+ *	@tmout: overall timeout
+ *
+ *	Sleep until ATA Status register bit BSY clears,
+ *	or a timeout occurs.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno otherwise.
+ */
+int ata_sff_busy_sleep(struct ata_port *ap,
+		       unsigned long tmout_pat, unsigned long tmout)
+{
+	unsigned long timer_start, timeout;
+	u8 status;
+
+	status = ata_sff_busy_wait(ap, ATA_BUSY, 300);
+	timer_start = jiffies;
+	timeout = timer_start + tmout_pat;
+	while (status != 0xff && (status & ATA_BUSY) &&
+	       time_before(jiffies, timeout)) {
+		msleep(50);
+		status = ata_sff_busy_wait(ap, ATA_BUSY, 3);
+	}
+
+	if (status != 0xff && (status & ATA_BUSY))
+		ata_port_printk(ap, KERN_WARNING,
+				"port is slow to respond, please be patient "
+				"(Status 0x%x)\n", status);
+
+	timeout = timer_start + tmout;
+	while (status != 0xff && (status & ATA_BUSY) &&
+	       time_before(jiffies, timeout)) {
+		msleep(50);
+		status = ap->ops->sff_check_status(ap);
+	}
+
+	if (status == 0xff)
+		return -ENODEV;
+
+	if (status & ATA_BUSY) {
+		ata_port_printk(ap, KERN_ERR, "port failed to respond "
+				"(%lu secs, Status 0x%x)\n",
+				tmout / HZ, status);
+		return -EBUSY;
+	}
+
+	return 0;
+}
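
Typical usage gives a short impatience window before the hard deadline. A hedged sketch (the jiffies-based timeout constants are assumed from this era of libata):

	/* warn after the quick timeout, give up after the long one */
	rc = ata_sff_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
	if (rc)
		ata_port_printk(ap, KERN_ERR,
				"device not ready (errno=%d)\n", rc);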
+
+static int ata_sff_check_ready(struct ata_link *link)
+{
+	u8 status = link->ap->ops->sff_check_status(link->ap);
+
+	if (!(status & ATA_BUSY))
+		return 1;
+	if (status == 0xff)
+		return -ENODEV;
+	return 0;
+}
+
+/**
+ *	ata_sff_wait_ready - sleep until BSY clears, or timeout
+ *	@link: SFF link to wait ready status for
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Sleep until ATA Status register bit BSY clears, or a timeout
+ *	occurs.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno otherwise.
+ */
+int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline)
+{
+	return ata_wait_ready(link, deadline, ata_sff_check_ready);
+}
+
+/**
+ *	ata_sff_dev_select - Select device 0/1 on ATA bus
+ *	@ap: ATA channel to manipulate
+ *	@device: ATA device (numbered from zero) to select
+ *
+ *	Use the method defined in the ATA specification to
+ *	make either device 0, or device 1, active on the
+ *	ATA channel.  Works with both PIO and MMIO.
+ *
+ *	May be used as the sff_dev_select() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	caller.
+ */
+void ata_sff_dev_select(struct ata_port *ap, unsigned int device)
+{
+	u8 tmp;
+
+	if (device == 0)
+		tmp = ATA_DEVICE_OBS;
+	else
+		tmp = ATA_DEVICE_OBS | ATA_DEV1;
+
+	iowrite8(tmp, ap->ioaddr.device_addr);
+	ata_sff_pause(ap);	/* needed; also flushes, for mmio */
+}
+
+/**
+ *	ata_dev_select - Select device 0/1 on ATA bus
+ *	@ap: ATA channel to manipulate
+ *	@device: ATA device (numbered from zero) to select
+ *	@wait: non-zero to wait for Status register BSY bit to clear
+ *	@can_sleep: non-zero if context allows sleeping
+ *
+ *	Use the method defined in the ATA specification to
+ *	make either device 0, or device 1, active on the
+ *	ATA channel.
+ *
+ *	This is a high-level version of ata_sff_dev_select(), which
+ *	additionally provides the services of inserting the proper
+ *	pauses and status polling, where needed.
+ *
+ *	LOCKING:
+ *	caller.
+ */
+void ata_dev_select(struct ata_port *ap, unsigned int device,
+			   unsigned int wait, unsigned int can_sleep)
+{
+	if (ata_msg_probe(ap))
+		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
+				"device %u, wait %u\n", device, wait);
+
+	if (wait)
+		ata_wait_idle(ap);
+
+	ap->ops->sff_dev_select(ap, device);
+
+	if (wait) {
+		if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
+			msleep(150);
+		ata_wait_idle(ap);
+	}
+}
+
+/**
+ *	ata_sff_irq_on - Enable interrupts on a port.
  *	@ap: Port on which interrupts are enabled.
  *
  *	Enable interrupts on a legacy IDE device using MMIO or PIO,
@@ -48,7 +414,7 @@
  *	LOCKING:
  *	Inherited from caller.
  */
-u8 ata_irq_on(struct ata_port *ap)
+u8 ata_sff_irq_on(struct ata_port *ap)
 {
 	struct ata_ioports *ioaddr = &ap->ioaddr;
 	u8 tmp;
@@ -60,13 +426,34 @@
 		iowrite8(ap->ctl, ioaddr->ctl_addr);
 	tmp = ata_wait_idle(ap);
 
-	ap->ops->irq_clear(ap);
+	ap->ops->sff_irq_clear(ap);
 
 	return tmp;
 }
 
 /**
- *	ata_tf_load - send taskfile registers to host controller
+ *	ata_sff_irq_clear - Clear PCI IDE BMDMA interrupt.
+ *	@ap: Port associated with this ATA transaction.
+ *
+ *	Clear interrupt and error flags in DMA status register.
+ *
+ *	May be used as the sff_irq_clear() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+void ata_sff_irq_clear(struct ata_port *ap)
+{
+	void __iomem *mmio = ap->ioaddr.bmdma_addr;
+
+	if (!mmio)
+		return;
+
+	iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
+}
+
+/**
+ *	ata_sff_tf_load - send taskfile registers to host controller
  *	@ap: Port to which output is sent
  *	@tf: ATA taskfile register set
  *
@@ -75,8 +462,7 @@
  *	LOCKING:
  *	Inherited from caller.
  */
-
-void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
+void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
 {
 	struct ata_ioports *ioaddr = &ap->ioaddr;
 	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
@@ -126,26 +512,7 @@
 }
 
 /**
- *	ata_exec_command - issue ATA command to host controller
- *	@ap: port to which command is being issued
- *	@tf: ATA taskfile register set
- *
- *	Issues ATA command, with proper synchronization with interrupt
- *	handler / other threads.
- *
- *	LOCKING:
- *	spin_lock_irqsave(host lock)
- */
-void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
-{
-	DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
-
-	iowrite8(tf->command, ap->ioaddr.command_addr);
-	ata_pause(ap);
-}
-
-/**
- *	ata_tf_read - input device's ATA taskfile shadow registers
+ *	ata_sff_tf_read - input device's ATA taskfile shadow registers
  *	@ap: Port from which input is read
  *	@tf: ATA taskfile register set for storing input
  *
@@ -157,11 +524,11 @@
  *	LOCKING:
  *	Inherited from caller.
  */
-void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
 {
 	struct ata_ioports *ioaddr = &ap->ioaddr;
 
-	tf->command = ata_check_status(ap);
+	tf->command = ata_sff_check_status(ap);
 	tf->feature = ioread8(ioaddr->error_addr);
 	tf->nsect = ioread8(ioaddr->nsect_addr);
 	tf->lbal = ioread8(ioaddr->lbal_addr);
@@ -185,40 +552,1578 @@
 }
 
 /**
- *	ata_check_status - Read device status reg & clear interrupt
- *	@ap: port where the device is
+ *	ata_sff_exec_command - issue ATA command to host controller
+ *	@ap: port to which command is being issued
+ *	@tf: ATA taskfile register set
  *
- *	Reads ATA taskfile status register for currently-selected device
- *	and return its value. This also clears pending interrupts
- *      from this device
+ *	Issues ATA command, with proper synchronization with interrupt
+ *	handler / other threads.
  *
  *	LOCKING:
- *	Inherited from caller.
+ *	spin_lock_irqsave(host lock)
  */
-u8 ata_check_status(struct ata_port *ap)
+void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
 {
-	return ioread8(ap->ioaddr.status_addr);
+	DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
+
+	iowrite8(tf->command, ap->ioaddr.command_addr);
+	ata_sff_pause(ap);
 }
 
 /**
- *	ata_altstatus - Read device alternate status reg
- *	@ap: port where the device is
+ *	ata_tf_to_host - issue ATA taskfile to host controller
+ *	@ap: port to which command is being issued
+ *	@tf: ATA taskfile register set
  *
- *	Reads ATA taskfile alternate status register for
- *	currently-selected device and return its value.
+ *	Issues ATA taskfile register set to ATA host controller,
+ *	with proper synchronization with interrupt handler and
+ *	other threads.
  *
- *	Note: may NOT be used as the check_altstatus() entry in
- *	ata_port_operations.
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+static inline void ata_tf_to_host(struct ata_port *ap,
+				  const struct ata_taskfile *tf)
+{
+	ap->ops->sff_tf_load(ap, tf);
+	ap->ops->sff_exec_command(ap, tf);
+}
+
+/**
+ *	ata_sff_data_xfer - Transfer data by PIO
+ *	@dev: device to target
+ *	@buf: data buffer
+ *	@buflen: buffer length
+ *	@rw: read/write
+ *
+ *	Transfer data from/to the device data register by PIO.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ *
+ *	RETURNS:
+ *	Bytes consumed.
+ */
+unsigned int ata_sff_data_xfer(struct ata_device *dev, unsigned char *buf,
+			       unsigned int buflen, int rw)
+{
+	struct ata_port *ap = dev->link->ap;
+	void __iomem *data_addr = ap->ioaddr.data_addr;
+	unsigned int words = buflen >> 1;
+
+	/* Transfer multiple of 2 bytes */
+	if (rw == READ)
+		ioread16_rep(data_addr, buf, words);
+	else
+		iowrite16_rep(data_addr, buf, words);
+
+	/* Transfer trailing 1 byte, if any. */
+	if (unlikely(buflen & 0x01)) {
+		__le16 align_buf[1] = { 0 };
+		unsigned char *trailing_buf = buf + buflen - 1;
+
+		if (rw == READ) {
+			align_buf[0] = cpu_to_le16(ioread16(data_addr));
+			memcpy(trailing_buf, align_buf, 1);
+		} else {
+			memcpy(align_buf, trailing_buf, 1);
+			iowrite16(le16_to_cpu(align_buf[0]), data_addr);
+		}
+		words++;
+	}
+
+	return words << 1;
+}
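
A controller with a 32-bit data port can provide its own ->sff_data_xfer and still reuse this helper for the tail. A hypothetical sketch (the function name and the 32-bit capability are assumptions, not part of this patch):

	static unsigned int my_data_xfer32(struct ata_device *dev,
					   unsigned char *buf,
					   unsigned int buflen, int rw)
	{
		void __iomem *data_addr = dev->link->ap->ioaddr.data_addr;
		unsigned int dwords = buflen >> 2;

		if (rw == READ)
			ioread32_rep(data_addr, buf, dwords);
		else
			iowrite32_rep(data_addr, buf, dwords);

		/* hand any trailing 1-3 bytes to the 16-bit helper */
		if (buflen & 3)
			return (dwords << 2) +
			       ata_sff_data_xfer(dev, buf + (dwords << 2),
						 buflen & 3, rw);
		return buflen;
	}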
+
+/**
+ *	ata_sff_data_xfer_noirq - Transfer data by PIO
+ *	@dev: device to target
+ *	@buf: data buffer
+ *	@buflen: buffer length
+ *	@rw: read/write
+ *
+ *	Transfer data from/to the device data register by PIO. Do the
+ *	transfer with interrupts disabled.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ *
+ *	RETURNS:
+ *	Bytes consumed.
+ */
+unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
+				     unsigned int buflen, int rw)
+{
+	unsigned long flags;
+	unsigned int consumed;
+
+	local_irq_save(flags);
+	consumed = ata_sff_data_xfer(dev, buf, buflen, rw);
+	local_irq_restore(flags);
+
+	return consumed;
+}
+
+/**
+ *	ata_pio_sector - Transfer a sector of data.
+ *	@qc: Command in progress
+ *
+ *	Transfer qc->sect_size bytes of data from/to the ATA device.
  *
  *	LOCKING:
  *	Inherited from caller.
  */
-u8 ata_altstatus(struct ata_port *ap)
+static void ata_pio_sector(struct ata_queued_cmd *qc)
 {
-	if (ap->ops->check_altstatus)
-		return ap->ops->check_altstatus(ap);
+	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
+	struct ata_port *ap = qc->ap;
+	struct page *page;
+	unsigned int offset;
+	unsigned char *buf;
 
-	return ioread8(ap->ioaddr.altstatus_addr);
+	if (qc->curbytes == qc->nbytes - qc->sect_size)
+		ap->hsm_task_state = HSM_ST_LAST;
+
+	page = sg_page(qc->cursg);
+	offset = qc->cursg->offset + qc->cursg_ofs;
+
+	/* get the current page and offset */
+	page = nth_page(page, (offset >> PAGE_SHIFT));
+	offset %= PAGE_SIZE;
+
+	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
+
+	if (PageHighMem(page)) {
+		unsigned long flags;
+
+		/* FIXME: use a bounce buffer */
+		local_irq_save(flags);
+		buf = kmap_atomic(page, KM_IRQ0);
+
+		/* do the actual data transfer */
+		ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
+				       do_write);
+
+		kunmap_atomic(buf, KM_IRQ0);
+		local_irq_restore(flags);
+	} else {
+		buf = page_address(page);
+		ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
+				       do_write);
+	}
+
+	qc->curbytes += qc->sect_size;
+	qc->cursg_ofs += qc->sect_size;
+
+	if (qc->cursg_ofs == qc->cursg->length) {
+		qc->cursg = sg_next(qc->cursg);
+		qc->cursg_ofs = 0;
+	}
+}
+
+/**
+ *	ata_pio_sectors - Transfer one or many sectors.
+ *	@qc: Command in progress
+ *
+ *	Transfer one or many sectors of data from/to the
+ *	ATA device for the DRQ request.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+static void ata_pio_sectors(struct ata_queued_cmd *qc)
+{
+	if (is_multi_taskfile(&qc->tf)) {
+		/* READ/WRITE MULTIPLE */
+		unsigned int nsect;
+
+		WARN_ON(qc->dev->multi_count == 0);
+
+		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
+			    qc->dev->multi_count);
+		while (nsect--)
+			ata_pio_sector(qc);
+	} else
+		ata_pio_sector(qc);
+
+	ata_sff_altstatus(qc->ap); /* flush */
+}
+
+/**
+ *	atapi_send_cdb - Write CDB bytes to hardware
+ *	@ap: Port to which ATAPI device is attached.
+ *	@qc: Taskfile currently active
+ *
+ *	When the device has indicated its readiness to accept
+ *	a CDB, this function is called to send the CDB.
+ *
+ *	LOCKING:
+ *	caller.
+ */
+static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
+{
+	/* send SCSI cdb */
+	DPRINTK("send cdb\n");
+	WARN_ON(qc->dev->cdb_len < 12);
+
+	ap->ops->sff_data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
+	ata_sff_altstatus(ap); /* flush */
+
+	switch (qc->tf.protocol) {
+	case ATAPI_PROT_PIO:
+		ap->hsm_task_state = HSM_ST;
+		break;
+	case ATAPI_PROT_NODATA:
+		ap->hsm_task_state = HSM_ST_LAST;
+		break;
+	case ATAPI_PROT_DMA:
+		ap->hsm_task_state = HSM_ST_LAST;
+		/* initiate bmdma */
+		ap->ops->bmdma_start(qc);
+		break;
+	}
+}
+
+/**
+ *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
+ *	@qc: Command in progress
+ *	@bytes: number of bytes
+ *
+ *	Transfer data from/to the ATAPI device.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ *
+ */
+static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
+{
+	int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
+	struct ata_port *ap = qc->ap;
+	struct ata_device *dev = qc->dev;
+	struct ata_eh_info *ehi = &dev->link->eh_info;
+	struct scatterlist *sg;
+	struct page *page;
+	unsigned char *buf;
+	unsigned int offset, count, consumed;
+
+next_sg:
+	sg = qc->cursg;
+	if (unlikely(!sg)) {
+		ata_ehi_push_desc(ehi, "unexpected or too much trailing data "
+				  "buf=%u cur=%u bytes=%u",
+				  qc->nbytes, qc->curbytes, bytes);
+		return -1;
+	}
+
+	page = sg_page(sg);
+	offset = sg->offset + qc->cursg_ofs;
+
+	/* get the current page and offset */
+	page = nth_page(page, (offset >> PAGE_SHIFT));
+	offset %= PAGE_SIZE;
+
+	/* don't overrun current sg */
+	count = min(sg->length - qc->cursg_ofs, bytes);
+
+	/* don't cross page boundaries */
+	count = min(count, (unsigned int)PAGE_SIZE - offset);
+
+	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
+
+	if (PageHighMem(page)) {
+		unsigned long flags;
+
+		/* FIXME: use bounce buffer */
+		local_irq_save(flags);
+		buf = kmap_atomic(page, KM_IRQ0);
+
+		/* do the actual data transfer */
+		consumed = ap->ops->sff_data_xfer(dev,  buf + offset, count, rw);
+
+		kunmap_atomic(buf, KM_IRQ0);
+		local_irq_restore(flags);
+	} else {
+		buf = page_address(page);
+		consumed = ap->ops->sff_data_xfer(dev,  buf + offset, count, rw);
+	}
+
+	bytes -= min(bytes, consumed);
+	qc->curbytes += count;
+	qc->cursg_ofs += count;
+
+	if (qc->cursg_ofs == sg->length) {
+		qc->cursg = sg_next(qc->cursg);
+		qc->cursg_ofs = 0;
+	}
+
+	/* consumed can be larger than count only for the last transfer */
+	WARN_ON(qc->cursg && count != consumed);
+
+	if (bytes)
+		goto next_sg;
+	return 0;
+}
+
+/**
+ *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
+ *	@qc: Command in progress
+ *
+ *	Transfer data from/to the ATAPI device.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+static void atapi_pio_bytes(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_device *dev = qc->dev;
+	struct ata_eh_info *ehi = &dev->link->eh_info;
+	unsigned int ireason, bc_lo, bc_hi, bytes;
+	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
+
+	/* Abuse qc->result_tf for temp storage of intermediate TF
+	 * here to save some kernel stack usage.
+	 * For normal completion, qc->result_tf is not relevant. For
+	 * error, qc->result_tf is later overwritten by ata_qc_complete().
+	 * So, the correctness of qc->result_tf is not affected.
+	 */
+	ap->ops->sff_tf_read(ap, &qc->result_tf);
+	ireason = qc->result_tf.nsect;
+	bc_lo = qc->result_tf.lbam;
+	bc_hi = qc->result_tf.lbah;
+	bytes = (bc_hi << 8) | bc_lo;
+
+	/* shall be cleared to zero, indicating xfer of data */
+	if (unlikely(ireason & (1 << 0)))
+		goto atapi_check;
+
+	/* make sure transfer direction matches expected */
+	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
+	if (unlikely(do_write != i_write))
+		goto atapi_check;
+
+	if (unlikely(!bytes))
+		goto atapi_check;
+
+	VPRINTK("ata%u: transferring %d bytes\n", ap->print_id, bytes);
+
+	if (unlikely(__atapi_pio_bytes(qc, bytes)))
+		goto err_out;
+	ata_sff_altstatus(ap); /* flush */
+
+	return;
+
+ atapi_check:
+	ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)",
+			  ireason, bytes);
+ err_out:
+	qc->err_mask |= AC_ERR_HSM;
+	ap->hsm_task_state = HSM_ST_ERR;
+}
+
+/**
+ *	ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
+ *	@ap: the target ata_port
+ *	@qc: qc in progress
+ *
+ *	RETURNS:
+ *	1 if ok in workqueue, 0 otherwise.
+ */
+static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
+{
+	if (qc->tf.flags & ATA_TFLAG_POLLING)
+		return 1;
+
+	if (ap->hsm_task_state == HSM_ST_FIRST) {
+		if (qc->tf.protocol == ATA_PROT_PIO &&
+		    (qc->tf.flags & ATA_TFLAG_WRITE))
+			return 1;
+
+		if (ata_is_atapi(qc->tf.protocol) &&
+		    !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
+			return 1;
+	}
+
+	return 0;
+}
+
+/**
+ *	ata_hsm_qc_complete - finish a qc running on standard HSM
+ *	@qc: Command to complete
+ *	@in_wq: 1 if called from workqueue, 0 otherwise
+ *
+ *	Finish @qc which is running on standard HSM.
+ *
+ *	LOCKING:
+ *	If @in_wq is zero, spin_lock_irqsave(host lock).
+ *	Otherwise, none on entry and grabs host lock.
+ */
+static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
+{
+	struct ata_port *ap = qc->ap;
+	unsigned long flags;
+
+	if (ap->ops->error_handler) {
+		if (in_wq) {
+			spin_lock_irqsave(ap->lock, flags);
+
+			/* EH might have kicked in while host lock is
+			 * released.
+			 */
+			qc = ata_qc_from_tag(ap, qc->tag);
+			if (qc) {
+				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
+					ap->ops->sff_irq_on(ap);
+					ata_qc_complete(qc);
+				} else
+					ata_port_freeze(ap);
+			}
+
+			spin_unlock_irqrestore(ap->lock, flags);
+		} else {
+			if (likely(!(qc->err_mask & AC_ERR_HSM)))
+				ata_qc_complete(qc);
+			else
+				ata_port_freeze(ap);
+		}
+	} else {
+		if (in_wq) {
+			spin_lock_irqsave(ap->lock, flags);
+			ap->ops->sff_irq_on(ap);
+			ata_qc_complete(qc);
+			spin_unlock_irqrestore(ap->lock, flags);
+		} else
+			ata_qc_complete(qc);
+	}
+}
+
+/**
+ *	ata_sff_hsm_move - move the HSM to the next state.
+ *	@ap: the target ata_port
+ *	@qc: qc in progress
+ *	@status: current device status
+ *	@in_wq: 1 if called from workqueue, 0 otherwise
+ *
+ *	RETURNS:
+ *	1 when poll next status needed, 0 otherwise.
+ */
+int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
+		     u8 status, int in_wq)
+{
+	unsigned long flags = 0;
+	int poll_next;
+
+	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
+
+	/* Make sure ata_sff_qc_issue() does not throw things
+	 * like DMA polling into the workqueue. Notice that
+	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
+	 */
+	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
+
+fsm_start:
+	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
+		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
+
+	switch (ap->hsm_task_state) {
+	case HSM_ST_FIRST:
+		/* Send first data block or PACKET CDB */
+
+		/* If polling, we will stay in the work queue after
+		 * sending the data. Otherwise, interrupt handler
+		 * takes over after sending the data.
+		 */
+		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
+
+		/* check device status */
+		if (unlikely((status & ATA_DRQ) == 0)) {
+			/* handle BSY=0, DRQ=0 as error */
+			if (likely(status & (ATA_ERR | ATA_DF)))
+				/* device stops HSM for abort/error */
+				qc->err_mask |= AC_ERR_DEV;
+			else
+				/* HSM violation. Let EH handle this */
+				qc->err_mask |= AC_ERR_HSM;
+
+			ap->hsm_task_state = HSM_ST_ERR;
+			goto fsm_start;
+		}
+
+		/* Device should not ask for data transfer (DRQ=1)
+		 * when it finds something wrong.
+		 * We ignore DRQ here and stop the HSM by
+		 * changing hsm_task_state to HSM_ST_ERR and
+		 * let the EH abort the command or reset the device.
+		 */
+		if (unlikely(status & (ATA_ERR | ATA_DF))) {
+			/* Some ATAPI tape drives forget to clear the ERR bit
+			 * when doing the next command (mostly request sense).
+			 * We ignore ERR here to work around this and proceed
+			 * with sending the CDB.
+			 */
+			if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
+				ata_port_printk(ap, KERN_WARNING,
+						"DRQ=1 with device error, "
+						"dev_stat 0x%X\n", status);
+				qc->err_mask |= AC_ERR_HSM;
+				ap->hsm_task_state = HSM_ST_ERR;
+				goto fsm_start;
+			}
+		}
+
+		/* Send the CDB (atapi) or the first data block (ata pio out).
+		 * During the state transition, interrupt handler shouldn't
+		 * be invoked before the data transfer is complete and
+		 * hsm_task_state is changed. Hence, the following locking.
+		 */
+		if (in_wq)
+			spin_lock_irqsave(ap->lock, flags);
+
+		if (qc->tf.protocol == ATA_PROT_PIO) {
+			/* PIO data out protocol.
+			 * send first data block.
+			 */
+
+			/* ata_pio_sectors() might change the state
+			 * to HSM_ST_LAST. so, the state is changed here
+			 * before ata_pio_sectors().
+			 */
+			ap->hsm_task_state = HSM_ST;
+			ata_pio_sectors(qc);
+		} else
+			/* send CDB */
+			atapi_send_cdb(ap, qc);
+
+		if (in_wq)
+			spin_unlock_irqrestore(ap->lock, flags);
+
+		/* if polling, ata_pio_task() handles the rest.
+		 * otherwise, interrupt handler takes over from here.
+		 */
+		break;
+
+	case HSM_ST:
+		/* complete command or read/write the data register */
+		if (qc->tf.protocol == ATAPI_PROT_PIO) {
+			/* ATAPI PIO protocol */
+			if ((status & ATA_DRQ) == 0) {
+				/* No more data to transfer or device error.
+				 * Device error will be tagged in HSM_ST_LAST.
+				 */
+				ap->hsm_task_state = HSM_ST_LAST;
+				goto fsm_start;
+			}
+
+			/* Device should not ask for data transfer (DRQ=1)
+			 * when it finds something wrong.
+			 * We ignore DRQ here and stop the HSM by
+			 * changing hsm_task_state to HSM_ST_ERR and
+			 * let the EH abort the command or reset the device.
+			 */
+			if (unlikely(status & (ATA_ERR | ATA_DF))) {
+				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
+						"device error, dev_stat 0x%X\n",
+						status);
+				qc->err_mask |= AC_ERR_HSM;
+				ap->hsm_task_state = HSM_ST_ERR;
+				goto fsm_start;
+			}
+
+			atapi_pio_bytes(qc);
+
+			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
+				/* bad ireason reported by device */
+				goto fsm_start;
+
+		} else {
+			/* ATA PIO protocol */
+			if (unlikely((status & ATA_DRQ) == 0)) {
+				/* handle BSY=0, DRQ=0 as error */
+				if (likely(status & (ATA_ERR | ATA_DF)))
+					/* device stops HSM for abort/error */
+					qc->err_mask |= AC_ERR_DEV;
+				else
+					/* HSM violation. Let EH handle this.
+					 * Phantom devices also trigger this
+					 * condition.  Mark hint.
+					 */
+					qc->err_mask |= AC_ERR_HSM |
+							AC_ERR_NODEV_HINT;
+
+				ap->hsm_task_state = HSM_ST_ERR;
+				goto fsm_start;
+			}
+
+			/* For PIO reads, some devices may ask for
+			 * data transfer (DRQ=1) along with ERR=1.
+			 * We respect DRQ here and transfer one
+			 * block of junk data before changing the
+			 * hsm_task_state to HSM_ST_ERR.
+			 *
+			 * For PIO writes, ERR=1 DRQ=1 doesn't make
+			 * sense since the data block has been
+			 * transferred to the device.
+			 */
+			if (unlikely(status & (ATA_ERR | ATA_DF))) {
+				/* data might be corrupted */
+				qc->err_mask |= AC_ERR_DEV;
+
+				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
+					ata_pio_sectors(qc);
+					status = ata_wait_idle(ap);
+				}
+
+				if (status & (ATA_BUSY | ATA_DRQ))
+					qc->err_mask |= AC_ERR_HSM;
+
+				/* ata_pio_sectors() might change the
+				 * state to HSM_ST_LAST. so, the state
+				 * is changed after ata_pio_sectors().
+				 */
+				ap->hsm_task_state = HSM_ST_ERR;
+				goto fsm_start;
+			}
+
+			ata_pio_sectors(qc);
+
+			if (ap->hsm_task_state == HSM_ST_LAST &&
+			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
+				/* all data read */
+				status = ata_wait_idle(ap);
+				goto fsm_start;
+			}
+		}
+
+		poll_next = 1;
+		break;
+
+	case HSM_ST_LAST:
+		if (unlikely(!ata_ok(status))) {
+			qc->err_mask |= __ac_err_mask(status);
+			ap->hsm_task_state = HSM_ST_ERR;
+			goto fsm_start;
+		}
+
+		/* no more data to transfer */
+		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
+			ap->print_id, qc->dev->devno, status);
+
+		WARN_ON(qc->err_mask);
+
+		ap->hsm_task_state = HSM_ST_IDLE;
+
+		/* complete taskfile transaction */
+		ata_hsm_qc_complete(qc, in_wq);
+
+		poll_next = 0;
+		break;
+
+	case HSM_ST_ERR:
+		/* make sure qc->err_mask is available to
+		 * know what's wrong and recover
+		 */
+		WARN_ON(qc->err_mask == 0);
+
+		ap->hsm_task_state = HSM_ST_IDLE;
+
+		/* complete taskfile transaction */
+		ata_hsm_qc_complete(qc, in_wq);
+
+		poll_next = 0;
+		break;
+	default:
+		poll_next = 0;
+		BUG();
+	}
+
+	return poll_next;
+}
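
Both the interrupt path and the polling path funnel into this state machine; the return value tells a polling caller whether another status check is needed. Condensed calling convention (illustrative; the real polling loop with busy-snoozing is ata_pio_task() below):

	/* interrupt context: one HSM step per IRQ, in_wq = 0 */
	status = ap->ops->sff_check_status(ap);	/* also clears INTRQ */
	ata_sff_hsm_move(ap, qc, status, 0);

	/* workqueue context: in_wq = 1, loop while poll_next is set */
	do {
		status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
	} while (ata_sff_hsm_move(ap, qc, status, 1));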
+
+void ata_pio_task(struct work_struct *work)
+{
+	struct ata_port *ap =
+		container_of(work, struct ata_port, port_task.work);
+	struct ata_queued_cmd *qc = ap->port_task_data;
+	u8 status;
+	int poll_next;
+
+fsm_start:
+	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
+
+	/*
+	 * This is purely heuristic.  This is a fast path.
+	 * Sometimes when we enter, BSY will be cleared in
+	 * a chk-status or two.  If not, the drive is probably seeking
+	 * or something.  Snooze for a couple msecs, then
+	 * chk-status again.  If still busy, queue delayed work.
+	 */
+	status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
+	if (status & ATA_BUSY) {
+		msleep(2);
+		status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
+		if (status & ATA_BUSY) {
+			ata_pio_queue_task(ap, qc, ATA_SHORT_PAUSE);
+			return;
+		}
+	}
+
+	/* move the HSM */
+	poll_next = ata_sff_hsm_move(ap, qc, status, 1);
+
+	/* another command or interrupt handler
+	 * may be running at this point.
+	 */
+	if (poll_next)
+		goto fsm_start;
+}
+
+/**
+ *	ata_sff_qc_issue - issue taskfile to device in proto-dependent manner
+ *	@qc: command to issue to device
+ *
+ *	Using various libata functions and hooks, this function
+ *	starts an ATA command.  ATA commands are grouped into
+ *	classes called "protocols", and issuing each type of protocol
+ *	is slightly different.
+ *
+ *	May be used as the qc_issue() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ *
+ *	RETURNS:
+ *	Zero on success, AC_ERR_* mask on failure
+ */
+unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+
+	/* Use polling pio if the LLD doesn't handle
+	 * interrupt driven pio and atapi CDB interrupt.
+	 */
+	if (ap->flags & ATA_FLAG_PIO_POLLING) {
+		switch (qc->tf.protocol) {
+		case ATA_PROT_PIO:
+		case ATA_PROT_NODATA:
+		case ATAPI_PROT_PIO:
+		case ATAPI_PROT_NODATA:
+			qc->tf.flags |= ATA_TFLAG_POLLING;
+			break;
+		case ATAPI_PROT_DMA:
+			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
+				/* see ata_dma_blacklisted() */
+				BUG();
+			break;
+		default:
+			break;
+		}
+	}
+
+	/* select the device */
+	ata_dev_select(ap, qc->dev->devno, 1, 0);
+
+	/* start the command */
+	switch (qc->tf.protocol) {
+	case ATA_PROT_NODATA:
+		if (qc->tf.flags & ATA_TFLAG_POLLING)
+			ata_qc_set_polling(qc);
+
+		ata_tf_to_host(ap, &qc->tf);
+		ap->hsm_task_state = HSM_ST_LAST;
+
+		if (qc->tf.flags & ATA_TFLAG_POLLING)
+			ata_pio_queue_task(ap, qc, 0);
+
+		break;
+
+	case ATA_PROT_DMA:
+		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
+
+		ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
+		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
+		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
+		ap->hsm_task_state = HSM_ST_LAST;
+		break;
+
+	case ATA_PROT_PIO:
+		if (qc->tf.flags & ATA_TFLAG_POLLING)
+			ata_qc_set_polling(qc);
+
+		ata_tf_to_host(ap, &qc->tf);
+
+		if (qc->tf.flags & ATA_TFLAG_WRITE) {
+			/* PIO data out protocol */
+			ap->hsm_task_state = HSM_ST_FIRST;
+			ata_pio_queue_task(ap, qc, 0);
+
+			/* always send first data block using
+			 * the ata_pio_task() codepath.
+			 */
+		} else {
+			/* PIO data in protocol */
+			ap->hsm_task_state = HSM_ST;
+
+			if (qc->tf.flags & ATA_TFLAG_POLLING)
+				ata_pio_queue_task(ap, qc, 0);
+
+			/* if polling, ata_pio_task() handles the rest.
+			 * otherwise, interrupt handler takes over from here.
+			 */
+		}
+
+		break;
+
+	case ATAPI_PROT_PIO:
+	case ATAPI_PROT_NODATA:
+		if (qc->tf.flags & ATA_TFLAG_POLLING)
+			ata_qc_set_polling(qc);
+
+		ata_tf_to_host(ap, &qc->tf);
+
+		ap->hsm_task_state = HSM_ST_FIRST;
+
+		/* send cdb by polling if no cdb interrupt */
+		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
+		    (qc->tf.flags & ATA_TFLAG_POLLING))
+			ata_pio_queue_task(ap, qc, 0);
+		break;
+
+	case ATAPI_PROT_DMA:
+		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
+
+		ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
+		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
+		ap->hsm_task_state = HSM_ST_FIRST;
+
+		/* send cdb by polling if no cdb interrupt */
+		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
+			ata_pio_queue_task(ap, qc, 0);
+		break;
+
+	default:
+		WARN_ON(1);
+		return AC_ERR_SYSTEM;
+	}
+
+	return 0;
+}
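+
+/* As the kernel-doc above notes, this routine can serve directly as the
+ * qc_issue() entry in a driver's ata_port_operations.  A minimal sketch
+ * (the driver name is hypothetical; ops that inherit from
+ * ata_sff_port_ops or ata_bmdma_port_ops pick this entry up
+ * automatically):
+ *
+ *	static struct ata_port_operations mydrv_port_ops = {
+ *		.inherits	= &ata_sff_port_ops,
+ *		.qc_issue	= ata_sff_qc_issue,
+ *	};
+ */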
+
+/**
+ *	ata_sff_qc_fill_rtf - fill result TF using ->sff_tf_read
+ *	@qc: qc to fill result TF for
+ *
+ *	@qc is finished and result TF needs to be filled.  Fill it
+ *	using ->sff_tf_read.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ *
+ *	RETURNS:
+ *	true indicating that result TF is successfully filled.
+ */
+bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc)
+{
+	qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf);
+	return true;
+}
+
+/**
+ *	ata_sff_host_intr - Handle host interrupt for given (port, task)
+ *	@ap: Port on which interrupt arrived (possibly...)
+ *	@qc: Taskfile currently active in engine
+ *
+ *	Handle host interrupt for given queued command.  Currently,
+ *	only DMA interrupts are handled.  All other commands are
+ *	handled via polling with interrupts disabled (nIEN bit).
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ *
+ *	RETURNS:
+ *	One if interrupt was handled, zero if not (shared irq).
+ */
+inline unsigned int ata_sff_host_intr(struct ata_port *ap,
+				      struct ata_queued_cmd *qc)
+{
+	struct ata_eh_info *ehi = &ap->link.eh_info;
+	u8 status, host_stat = 0;
+
+	VPRINTK("ata%u: protocol %d task_state %d\n",
+		ap->print_id, qc->tf.protocol, ap->hsm_task_state);
+
+	/* Check whether we are expecting an interrupt in this state */
+	switch (ap->hsm_task_state) {
+	case HSM_ST_FIRST:
+		/* Some pre-ATAPI-4 devices assert INTRQ
+		 * in this state when ready to receive the CDB.
+		 */
+
+		/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
+		 * The flag is set only for ATAPI devices, so there's no
+		 * need to check ata_is_atapi(qc->tf.protocol) again.
+		 */
+		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
+			goto idle_irq;
+		break;
+	case HSM_ST_LAST:
+		if (qc->tf.protocol == ATA_PROT_DMA ||
+		    qc->tf.protocol == ATAPI_PROT_DMA) {
+			/* check status of DMA engine */
+			host_stat = ap->ops->bmdma_status(ap);
+			VPRINTK("ata%u: host_stat 0x%X\n",
+				ap->print_id, host_stat);
+
+			/* if it's not our irq... */
+			if (!(host_stat & ATA_DMA_INTR))
+				goto idle_irq;
+
+			/* before we do anything else, clear DMA-Start bit */
+			ap->ops->bmdma_stop(qc);
+
+			if (unlikely(host_stat & ATA_DMA_ERR)) {
+				/* error when transferring data to/from memory */
+				qc->err_mask |= AC_ERR_HOST_BUS;
+				ap->hsm_task_state = HSM_ST_ERR;
+			}
+		}
+		break;
+	case HSM_ST:
+		break;
+	default:
+		goto idle_irq;
+	}
+
+	/* check altstatus */
+	status = ata_sff_altstatus(ap);
+	if (status & ATA_BUSY)
+		goto idle_irq;
+
+	/* check main status, clearing INTRQ */
+	status = ap->ops->sff_check_status(ap);
+	if (unlikely(status & ATA_BUSY))
+		goto idle_irq;
+
+	/* ack bmdma irq events */
+	ap->ops->sff_irq_clear(ap);
+
+	ata_sff_hsm_move(ap, qc, status, 0);
+
+	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
+				       qc->tf.protocol == ATAPI_PROT_DMA))
+		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
+
+	return 1;	/* irq handled */
+
+idle_irq:
+	ap->stats.idle_irq++;
+
+#ifdef ATA_IRQ_TRAP
+	if ((ap->stats.idle_irq % 1000) == 0) {
+		ap->ops->sff_check_status(ap);
+		ap->ops->sff_irq_clear(ap);
+		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
+		return 1;
+	}
+#endif
+	return 0;	/* irq not handled */
+}
+
+/**
+ *	ata_sff_interrupt - Default ATA host interrupt handler
+ *	@irq: irq line (unused)
+ *	@dev_instance: pointer to our ata_host information structure
+ *
+ *	Default interrupt handler for PCI IDE devices.  Calls
+ *	ata_sff_host_intr() for each port that is not disabled.
+ *
+ *	LOCKING:
+ *	Obtains host lock during operation.
+ *
+ *	RETURNS:
+ *	IRQ_NONE or IRQ_HANDLED.
+ */
+irqreturn_t ata_sff_interrupt(int irq, void *dev_instance)
+{
+	struct ata_host *host = dev_instance;
+	unsigned int i;
+	unsigned int handled = 0;
+	unsigned long flags;
+
+	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
+	spin_lock_irqsave(&host->lock, flags);
+
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap;
+
+		ap = host->ports[i];
+		if (ap &&
+		    !(ap->flags & ATA_FLAG_DISABLED)) {
+			struct ata_queued_cmd *qc;
+
+			qc = ata_qc_from_tag(ap, ap->link.active_tag);
+			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
+			    (qc->flags & ATA_QCFLAG_ACTIVE))
+				handled |= ata_sff_host_intr(ap, qc);
+		}
+	}
+
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	return IRQ_RETVAL(handled);
+}
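+
+/* This handler is what ata_pci_sff_init_one() further down passes to
+ * ata_pci_sff_activate_host() when requesting the IRQ.  A driver doing
+ * its own activation would do much the same (sketch; my_sht is the
+ * driver's scsi_host_template):
+ *
+ *	rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, &my_sht);
+ */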
+
+/**
+ *	ata_sff_freeze - Freeze SFF controller port
+ *	@ap: port to freeze
+ *
+ *	Freeze BMDMA controller port.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+void ata_sff_freeze(struct ata_port *ap)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+
+	ap->ctl |= ATA_NIEN;
+	ap->last_ctl = ap->ctl;
+
+	if (ioaddr->ctl_addr)
+		iowrite8(ap->ctl, ioaddr->ctl_addr);
+
+	/* Under certain circumstances, some controllers raise IRQ on
+	 * ATA_NIEN manipulation.  Also, many controllers fail to mask
+	 * previously pending IRQ on ATA_NIEN assertion.  Clear it.
+	 */
+	ap->ops->sff_check_status(ap);
+
+	ap->ops->sff_irq_clear(ap);
+}
+
+/**
+ *	ata_sff_thaw - Thaw SFF controller port
+ *	@ap: port to thaw
+ *
+ *	Thaw SFF controller port.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+void ata_sff_thaw(struct ata_port *ap)
+{
+	/* clear & re-enable interrupts */
+	ap->ops->sff_check_status(ap);
+	ap->ops->sff_irq_clear(ap);
+	ap->ops->sff_irq_on(ap);
+}
+
+/**
+ *	ata_sff_prereset - prepare SFF link for reset
+ *	@link: SFF link to be reset
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	SFF link @link is about to be reset.  Initialize it.  It first
+ *	calls ata_std_prereset() and then waits for !BSY if the port is
+ *	being softreset.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ *
+ *	RETURNS:
+ *	0 on success, -errno otherwise.
+ */
+int ata_sff_prereset(struct ata_link *link, unsigned long deadline)
+{
+	struct ata_eh_context *ehc = &link->eh_context;
+	int rc;
+
+	rc = ata_std_prereset(link, deadline);
+	if (rc)
+		return rc;
+
+	/* if we're about to do hardreset, nothing more to do */
+	if (ehc->i.action & ATA_EH_HARDRESET)
+		return 0;
+
+	/* wait for !BSY if we don't know that no device is attached */
+	if (!ata_link_offline(link)) {
+		rc = ata_sff_wait_ready(link, deadline);
+		if (rc && rc != -ENODEV) {
+			ata_link_printk(link, KERN_WARNING, "device not ready "
+					"(errno=%d), forcing hardreset\n", rc);
+			ehc->i.action |= ATA_EH_HARDRESET;
+		}
+	}
+
+	return 0;
+}
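+
+/* ata_sff_prereset() also makes a convenient tail call for a driver's
+ * own prereset hook: perform the LLD-specific enable checks first, then
+ * defer to it, as the pata_acpi and pata_amd conversions below do.
+ * Sketch with a hypothetical enable check:
+ *
+ *	static int mydrv_pre_reset(struct ata_link *link,
+ *				   unsigned long deadline)
+ *	{
+ *		if (!mydrv_port_enabled(link->ap))
+ *			return -ENOENT;
+ *		return ata_sff_prereset(link, deadline);
+ *	}
+ */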
+
+/**
+ *	ata_devchk - PATA device presence detection
+ *	@ap: ATA channel to examine
+ *	@device: Device to examine (starting at zero)
+ *
+ *	This technique was originally described in
+ *	Hale Landis's ATADRVR (www.ata-atapi.com), and
+ *	later found its way into the ATA/ATAPI spec.
+ *
+ *	Write a pattern to the ATA shadow registers,
+ *	and if a device is present, it will respond by
+ *	correctly storing and echoing back the
+ *	ATA shadow register contents.
+ *
+ *	LOCKING:
+ *	caller.
+ */
+static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	u8 nsect, lbal;
+
+	ap->ops->sff_dev_select(ap, device);
+
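+	/* Write alternating 0x55/0xaa test patterns; a device that is
+	 * really there latches the last pair in its shadow registers.
+	 */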
+	iowrite8(0x55, ioaddr->nsect_addr);
+	iowrite8(0xaa, ioaddr->lbal_addr);
+
+	iowrite8(0xaa, ioaddr->nsect_addr);
+	iowrite8(0x55, ioaddr->lbal_addr);
+
+	iowrite8(0x55, ioaddr->nsect_addr);
+	iowrite8(0xaa, ioaddr->lbal_addr);
+
+	nsect = ioread8(ioaddr->nsect_addr);
+	lbal = ioread8(ioaddr->lbal_addr);
+
+	if ((nsect == 0x55) && (lbal == 0xaa))
+		return 1;	/* we found a device */
+
+	return 0;		/* nothing found */
+}
+
+/**
+ *	ata_sff_dev_classify - Parse returned ATA device signature
+ *	@dev: ATA device to classify (starting at zero)
+ *	@present: device seems present
+ *	@r_err: Value of error register on completion
+ *
+ *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
+ *	an ATA/ATAPI-defined set of values is placed in the ATA
+ *	shadow registers, indicating the results of device detection
+ *	and diagnostics.
+ *
+ *	Select the ATA device, and read the values from the ATA shadow
+ *	registers.  Then parse according to the Error register value,
+ *	and the spec-defined values examined by ata_dev_classify().
+ *
+ *	LOCKING:
+ *	caller.
+ *
+ *	RETURNS:
+ *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
+ */
+unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
+				  u8 *r_err)
+{
+	struct ata_port *ap = dev->link->ap;
+	struct ata_taskfile tf;
+	unsigned int class;
+	u8 err;
+
+	ap->ops->sff_dev_select(ap, dev->devno);
+
+	memset(&tf, 0, sizeof(tf));
+
+	ap->ops->sff_tf_read(ap, &tf);
+	err = tf.feature;
+	if (r_err)
+		*r_err = err;
+
+	/* see if device passed diags: continue and warn later */
+	if (err == 0)
+		/* diagnostic fail : do nothing _YET_ */
+		dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
+	else if (err == 1)
+		/* do nothing */ ;
+	else if ((dev->devno == 0) && (err == 0x81))
+		/* do nothing */ ;
+	else
+		return ATA_DEV_NONE;
+
+	/* determine if device is ATA or ATAPI */
+	class = ata_dev_classify(&tf);
+
+	if (class == ATA_DEV_UNKNOWN) {
+		/* If the device failed diagnostic, it's likely to
+		 * have reported incorrect device signature too.
+		 * Assume ATA device if the device seems present but
+		 * device signature is invalid with diagnostic
+		 * failure.
+		 */
+		if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
+			class = ATA_DEV_ATA;
+		else
+			class = ATA_DEV_NONE;
+	} else if ((class == ATA_DEV_ATA) &&
+		   (ap->ops->sff_check_status(ap) == 0))
+		class = ATA_DEV_NONE;
+
+	return class;
+}
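+
+/* Typical post-reset usage, exactly as ata_sff_softreset() below does
+ * it: classify device 0 first, and probe the slave only if the master's
+ * error value doesn't already report a device 1 diagnostic failure
+ * (0x81):
+ *
+ *	classes[0] = ata_sff_dev_classify(&link->device[0],
+ *					  devmask & (1 << 0), &err);
+ *	if (slave_possible && err != 0x81)
+ *		classes[1] = ata_sff_dev_classify(&link->device[1],
+ *						  devmask & (1 << 1), &err);
+ */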
+
+/**
+ *	ata_sff_wait_after_reset - wait for devices to become ready after reset
+ *	@link: SFF link which is just reset
+ *	@devmask: mask of present devices
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Wait for devices attached to SFF @link to become ready after
+ *	reset.  A preceding 150ms wait avoids accessing the TF status
+ *	register too early.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -ENODEV if some or all of devices in @devmask
+ *	don't seem to exist.  -errno on other errors.
+ */
+int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask,
+			     unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	unsigned int dev0 = devmask & (1 << 0);
+	unsigned int dev1 = devmask & (1 << 1);
+	int rc, ret = 0;
+
+	msleep(ATA_WAIT_AFTER_RESET_MSECS);
+
+	/* always check readiness of the master device */
+	rc = ata_sff_wait_ready(link, deadline);
+	/* -ENODEV means the odd clown forgot the D7 pulldown resistor
+	 * and TF status reads 0xff; bail out on that too.
+	 */
+	if (rc)
+		return rc;
+
+	/* if device 1 was found in ata_devchk, wait for register
+	 * access briefly, then wait for BSY to clear.
+	 */
+	if (dev1) {
+		int i;
+
+		ap->ops->sff_dev_select(ap, 1);
+
+		/* Wait for register access.  Some ATAPI devices fail
+		 * to set nsect/lbal after reset, so don't waste too
+		 * much time on it.  We're going to wait for !BSY anyway.
+		 */
+		for (i = 0; i < 2; i++) {
+			u8 nsect, lbal;
+
+			nsect = ioread8(ioaddr->nsect_addr);
+			lbal = ioread8(ioaddr->lbal_addr);
+			if ((nsect == 1) && (lbal == 1))
+				break;
+			msleep(50);	/* give drive a breather */
+		}
+
+		rc = ata_sff_wait_ready(link, deadline);
+		if (rc) {
+			if (rc != -ENODEV)
+				return rc;
+			ret = rc;
+		}
+	}
+
+	/* is all this really necessary? */
+	ap->ops->sff_dev_select(ap, 0);
+	if (dev1)
+		ap->ops->sff_dev_select(ap, 1);
+	if (dev0)
+		ap->ops->sff_dev_select(ap, 0);
+
+	return ret;
+}
+
+static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
+			     unsigned long deadline)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+
+	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
+
+	/* software reset.  causes dev0 to be selected */
+	iowrite8(ap->ctl, ioaddr->ctl_addr);
+	udelay(20);	/* FIXME: flush */
+	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
+	udelay(20);	/* FIXME: flush */
+	iowrite8(ap->ctl, ioaddr->ctl_addr);
+
+	/* wait for the port to become ready */
+	return ata_sff_wait_after_reset(&ap->link, devmask, deadline);
+}
+
+/**
+ *	ata_sff_softreset - reset host port via ATA SRST
+ *	@link: ATA link to reset
+ *	@classes: resulting classes of attached devices
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Reset host port using ATA SRST.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ *
+ *	RETURNS:
+ *	0 on success, -errno otherwise.
+ */
+int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
+		      unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
+	unsigned int devmask = 0;
+	int rc;
+	u8 err;
+
+	DPRINTK("ENTER\n");
+
+	/* determine if device 0/1 are present */
+	if (ata_devchk(ap, 0))
+		devmask |= (1 << 0);
+	if (slave_possible && ata_devchk(ap, 1))
+		devmask |= (1 << 1);
+
+	/* select device 0 again */
+	ap->ops->sff_dev_select(ap, 0);
+
+	/* issue bus reset */
+	DPRINTK("about to softreset, devmask=%x\n", devmask);
+	rc = ata_bus_softreset(ap, devmask, deadline);
+	/* if link is occupied, -ENODEV too is an error */
+	if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
+		ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
+		return rc;
+	}
+
+	/* determine by signature whether we have ATA or ATAPI devices */
+	classes[0] = ata_sff_dev_classify(&link->device[0],
+					  devmask & (1 << 0), &err);
+	if (slave_possible && err != 0x81)
+		classes[1] = ata_sff_dev_classify(&link->device[1],
+						  devmask & (1 << 1), &err);
+
+	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
+	return 0;
+}
+
+/**
+ *	sata_sff_hardreset - reset host port via SATA phy reset
+ *	@link: link to reset
+ *	@class: resulting class of attached device
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	SATA phy-reset host port using DET bits of SControl register,
+ *	wait for !BSY and classify the attached device.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ *
+ *	RETURNS:
+ *	0 on success, -errno otherwise.
+ */
+int sata_sff_hardreset(struct ata_link *link, unsigned int *class,
+		       unsigned long deadline)
+{
+	struct ata_eh_context *ehc = &link->eh_context;
+	const unsigned long *timing = sata_ehc_deb_timing(ehc);
+	bool online;
+	int rc;
+
+	rc = sata_link_hardreset(link, timing, deadline, &online,
+				 ata_sff_check_ready);
+	if (online)
+		*class = ata_sff_dev_classify(link->device, 1, NULL);
+
+	DPRINTK("EXIT, class=%u\n", *class);
+	return rc;
+}
+
+/**
+ *	ata_sff_postreset - SFF postreset callback
+ *	@link: the target SFF ata_link
+ *	@classes: classes of attached devices
+ *
+ *	This function is invoked after a successful reset.  It first
+ *	calls ata_std_postreset() and performs SFF specific postreset
+ *	processing.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ */
+void ata_sff_postreset(struct ata_link *link, unsigned int *classes)
+{
+	struct ata_port *ap = link->ap;
+
+	ata_std_postreset(link, classes);
+
+	/* is double-select really necessary? */
+	if (classes[0] != ATA_DEV_NONE)
+		ap->ops->sff_dev_select(ap, 1);
+	if (classes[1] != ATA_DEV_NONE)
+		ap->ops->sff_dev_select(ap, 0);
+
+	/* bail out if no device is present */
+	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
+		DPRINTK("EXIT, no device\n");
+		return;
+	}
+
+	/* set up device control */
+	if (ap->ioaddr.ctl_addr)
+		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
+}
+
+/**
+ *	ata_sff_error_handler - Stock error handler for BMDMA controller
+ *	@ap: port to handle error for
+ *
+ *	Stock error handler for SFF controller.  It can handle both
+ *	PATA and SATA controllers.  Many controllers should be able to
+ *	use this EH as-is or with some added handling before and
+ *	after.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ */
+void ata_sff_error_handler(struct ata_port *ap)
+{
+	ata_reset_fn_t softreset = ap->ops->softreset;
+	ata_reset_fn_t hardreset = ap->ops->hardreset;
+	struct ata_queued_cmd *qc;
+	unsigned long flags;
+	int thaw = 0;
+
+	qc = __ata_qc_from_tag(ap, ap->link.active_tag);
+	if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
+		qc = NULL;
+
+	/* reset PIO HSM and stop DMA engine */
+	spin_lock_irqsave(ap->lock, flags);
+
+	ap->hsm_task_state = HSM_ST_IDLE;
+
+	if (ap->ioaddr.bmdma_addr &&
+	    qc && (qc->tf.protocol == ATA_PROT_DMA ||
+		   qc->tf.protocol == ATAPI_PROT_DMA)) {
+		u8 host_stat;
+
+		host_stat = ap->ops->bmdma_status(ap);
+
+		/* BMDMA controllers indicate host bus error by
+		 * setting DMA_ERR bit and timing out.  As it wasn't
+		 * really a timeout event, adjust error mask and
+		 * cancel frozen state.
+		 */
+		if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) {
+			qc->err_mask = AC_ERR_HOST_BUS;
+			thaw = 1;
+		}
+
+		ap->ops->bmdma_stop(qc);
+	}
+
+	ata_sff_altstatus(ap);
+	ap->ops->sff_check_status(ap);
+	ap->ops->sff_irq_clear(ap);
+
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	if (thaw)
+		ata_eh_thaw_port(ap);
+
+	/* PIO and DMA engines have been stopped, perform recovery */
+
+	/* Ignore ata_sff_softreset if ctl isn't accessible and
+	 * built-in hardresets if SCR access isn't available.
+	 */
+	if (softreset == ata_sff_softreset && !ap->ioaddr.ctl_addr)
+		softreset = NULL;
+	if (ata_is_builtin_hardreset(hardreset) && !sata_scr_valid(&ap->link))
+		hardreset = NULL;
+
+	ata_do_eh(ap, ap->ops->prereset, softreset, hardreset,
+		  ap->ops->postreset);
+}
+
+/**
+ *	ata_sff_post_internal_cmd - Stock post_internal_cmd for SFF controller
+ *	@qc: internal command to clean up
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ */
+void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+	if (qc->ap->ioaddr.bmdma_addr)
+		ata_bmdma_stop(qc);
+}
+
+/**
+ *	ata_sff_port_start - Set port up for DMA.
+ *	@ap: Port to initialize
+ *
+ *	Called just after data structures for each port are
+ *	initialized.  Allocates space for PRD table if the device
+ *	is DMA capable SFF.
+ *
+ *	May be used as the port_start() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+int ata_sff_port_start(struct ata_port *ap)
+{
+	if (ap->ioaddr.bmdma_addr)
+		return ata_port_start(ap);
+	return 0;
+}
+
+/**
+ *	ata_sff_std_ports - initialize ioaddr with standard port offsets.
+ *	@ioaddr: IO address structure to be initialized
+ *
+ *	Utility function which initializes data_addr, error_addr,
+ *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
+ *	device_addr, status_addr, and command_addr to standard offsets
+ *	relative to cmd_addr.
+ *
+ *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
+ */
+void ata_sff_std_ports(struct ata_ioports *ioaddr)
+{
+	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
+	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
+	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
+	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
+	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
+	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
+	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
+	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
+	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
+	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
+}
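+
+/* Sketch of the usual call site, mirroring ata_pci_sff_init_host()
+ * below: map the command block, derive the control address, then let
+ * this helper fill in the per-register offsets (iomap/base are
+ * placeholders here):
+ *
+ *	ap->ioaddr.cmd_addr = iomap[base];
+ *	ap->ioaddr.altstatus_addr =
+ *	ap->ioaddr.ctl_addr = (void __iomem *)
+ *		((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS);
+ *	ata_sff_std_ports(&ap->ioaddr);
+ */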
+
+unsigned long ata_bmdma_mode_filter(struct ata_device *adev,
+				    unsigned long xfer_mask)
+{
+	/* Filter out DMA modes if the device has been configured by
+	   the BIOS as PIO only */
+
+	if (adev->link->ap->ioaddr.bmdma_addr == NULL)
+		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
+	return xfer_mask;
 }
 
 /**
@@ -246,7 +2151,7 @@
 	iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
 
 	/* issue r/w command */
-	ap->ops->exec_command(ap, &qc->tf);
+	ap->ops->sff_exec_command(ap, &qc->tf);
 }
 
 /**
@@ -282,43 +2187,6 @@
 }
 
 /**
- *	ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
- *	@ap: Port associated with this ATA transaction.
- *
- *	Clear interrupt and error flags in DMA status register.
- *
- *	May be used as the irq_clear() entry in ata_port_operations.
- *
- *	LOCKING:
- *	spin_lock_irqsave(host lock)
- */
-void ata_bmdma_irq_clear(struct ata_port *ap)
-{
-	void __iomem *mmio = ap->ioaddr.bmdma_addr;
-
-	if (!mmio)
-		return;
-
-	iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
-}
-
-/**
- *	ata_bmdma_status - Read PCI IDE BMDMA status
- *	@ap: Port associated with this ATA transaction.
- *
- *	Read and return BMDMA status register.
- *
- *	May be used as the bmdma_status() entry in ata_port_operations.
- *
- *	LOCKING:
- *	spin_lock_irqsave(host lock)
- */
-u8 ata_bmdma_status(struct ata_port *ap)
-{
-	return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
-}
-
-/**
  *	ata_bmdma_stop - Stop PCI IDE BMDMA transfer
  *	@qc: Command we are ending DMA for
  *
@@ -339,197 +2207,145 @@
 		 mmio + ATA_DMA_CMD);
 
 	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
-	ata_altstatus(ap);        /* dummy read */
+	ata_sff_altstatus(ap);        /* dummy read */
 }
 
 /**
- *	ata_bmdma_freeze - Freeze BMDMA controller port
- *	@ap: port to freeze
+ *	ata_bmdma_status - Read PCI IDE BMDMA status
+ *	@ap: Port associated with this ATA transaction.
  *
- *	Freeze BMDMA controller port.
+ *	Read and return BMDMA status register.
+ *
+ *	May be used as the bmdma_status() entry in ata_port_operations.
  *
  *	LOCKING:
- *	Inherited from caller.
+ *	spin_lock_irqsave(host lock)
  */
-void ata_bmdma_freeze(struct ata_port *ap)
+u8 ata_bmdma_status(struct ata_port *ap)
 {
+	return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+}
+
+/**
+ *	ata_bus_reset - reset host port and associated ATA channel
+ *	@ap: port to reset
+ *
+ *	This is typically the first time we actually start issuing
+ *	commands to the ATA channel.  We wait for BSY to clear, then
+ *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
+ *	result.  Determine what devices, if any, are on the channel
+ *	by looking at the device 0/1 error register.  Look at the signature
+ *	stored in each device's taskfile registers, to determine if
+ *	the device is ATA or ATAPI.
+ *
+ *	LOCKING:
+ *	PCI/etc. bus probe sem.
+ *	Obtains host lock.
+ *
+ *	SIDE EFFECTS:
+ *	Sets ATA_FLAG_DISABLED if bus reset fails.
+ *
+ *	DEPRECATED:
+ *	This function is only for drivers which still use old EH and
+ *	will be removed soon.
+ */
+void ata_bus_reset(struct ata_port *ap)
+{
+	struct ata_device *device = ap->link.device;
 	struct ata_ioports *ioaddr = &ap->ioaddr;
+	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
+	u8 err;
+	unsigned int dev0, dev1 = 0, devmask = 0;
+	int rc;
 
-	ap->ctl |= ATA_NIEN;
-	ap->last_ctl = ap->ctl;
+	DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
 
-	if (ioaddr->ctl_addr)
-		iowrite8(ap->ctl, ioaddr->ctl_addr);
-
-	/* Under certain circumstances, some controllers raise IRQ on
-	 * ATA_NIEN manipulation.  Also, many controllers fail to mask
-	 * previously pending IRQ on ATA_NIEN assertion.  Clear it.
-	 */
-	ata_chk_status(ap);
-
-	ap->ops->irq_clear(ap);
-}
-
-/**
- *	ata_bmdma_thaw - Thaw BMDMA controller port
- *	@ap: port to thaw
- *
- *	Thaw BMDMA controller port.
- *
- *	LOCKING:
- *	Inherited from caller.
- */
-void ata_bmdma_thaw(struct ata_port *ap)
-{
-	/* clear & re-enable interrupts */
-	ata_chk_status(ap);
-	ap->ops->irq_clear(ap);
-	ap->ops->irq_on(ap);
-}
-
-/**
- *	ata_bmdma_drive_eh - Perform EH with given methods for BMDMA controller
- *	@ap: port to handle error for
- *	@prereset: prereset method (can be NULL)
- *	@softreset: softreset method (can be NULL)
- *	@hardreset: hardreset method (can be NULL)
- *	@postreset: postreset method (can be NULL)
- *
- *	Handle error for ATA BMDMA controller.  It can handle both
- *	PATA and SATA controllers.  Many controllers should be able to
- *	use this EH as-is or with some added handling before and
- *	after.
- *
- *	This function is intended to be used for constructing
- *	->error_handler callback by low level drivers.
- *
- *	LOCKING:
- *	Kernel thread context (may sleep)
- */
-void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
-			ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
-			ata_postreset_fn_t postreset)
-{
-	struct ata_queued_cmd *qc;
-	unsigned long flags;
-	int thaw = 0;
-
-	qc = __ata_qc_from_tag(ap, ap->link.active_tag);
-	if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
-		qc = NULL;
-
-	/* reset PIO HSM and stop DMA engine */
-	spin_lock_irqsave(ap->lock, flags);
-
-	ap->hsm_task_state = HSM_ST_IDLE;
-
-	if (qc && (qc->tf.protocol == ATA_PROT_DMA ||
-		   qc->tf.protocol == ATAPI_PROT_DMA)) {
-		u8 host_stat;
-
-		host_stat = ap->ops->bmdma_status(ap);
-
-		/* BMDMA controllers indicate host bus error by
-		 * setting DMA_ERR bit and timing out.  As it wasn't
-		 * really a timeout event, adjust error mask and
-		 * cancel frozen state.
-		 */
-		if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) {
-			qc->err_mask = AC_ERR_HOST_BUS;
-			thaw = 1;
-		}
-
-		ap->ops->bmdma_stop(qc);
+	/* determine if device 0/1 are present */
+	if (ap->flags & ATA_FLAG_SATA_RESET)
+		dev0 = 1;
+	else {
+		dev0 = ata_devchk(ap, 0);
+		if (slave_possible)
+			dev1 = ata_devchk(ap, 1);
 	}
 
-	ata_altstatus(ap);
-	ata_chk_status(ap);
-	ap->ops->irq_clear(ap);
+	if (dev0)
+		devmask |= (1 << 0);
+	if (dev1)
+		devmask |= (1 << 1);
 
-	spin_unlock_irqrestore(ap->lock, flags);
+	/* select device 0 again */
+	ap->ops->sff_dev_select(ap, 0);
 
-	if (thaw)
-		ata_eh_thaw_port(ap);
+	/* issue bus reset */
+	if (ap->flags & ATA_FLAG_SRST) {
+		rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
+		if (rc && rc != -ENODEV)
+			goto err_out;
+	}
 
-	/* PIO and DMA engines have been stopped, perform recovery */
-	ata_do_eh(ap, prereset, softreset, hardreset, postreset);
-}
+	/*
+	 * determine by signature whether we have ATA or ATAPI devices
+	 */
+	device[0].class = ata_sff_dev_classify(&device[0], dev0, &err);
+	if ((slave_possible) && (err != 0x81))
+		device[1].class = ata_sff_dev_classify(&device[1], dev1, &err);
 
-/**
- *	ata_bmdma_error_handler - Stock error handler for BMDMA controller
- *	@ap: port to handle error for
- *
- *	Stock error handler for BMDMA controller.
- *
- *	LOCKING:
- *	Kernel thread context (may sleep)
- */
-void ata_bmdma_error_handler(struct ata_port *ap)
-{
-	ata_reset_fn_t softreset = NULL, hardreset = NULL;
+	/* is double-select really necessary? */
+	if (device[1].class != ATA_DEV_NONE)
+		ap->ops->sff_dev_select(ap, 1);
+	if (device[0].class != ATA_DEV_NONE)
+		ap->ops->sff_dev_select(ap, 0);
 
-	if (ap->ioaddr.ctl_addr)
-		softreset = ata_std_softreset;
-	if (sata_scr_valid(&ap->link))
-		hardreset = sata_std_hardreset;
+	/* if no devices were detected, disable this port */
+	if ((device[0].class == ATA_DEV_NONE) &&
+	    (device[1].class == ATA_DEV_NONE))
+		goto err_out;
 
-	ata_bmdma_drive_eh(ap, ata_std_prereset, softreset, hardreset,
-			   ata_std_postreset);
-}
+	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
+		/* set up device control for ATA_FLAG_SATA_RESET */
+		iowrite8(ap->ctl, ioaddr->ctl_addr);
+	}
 
-/**
- *	ata_bmdma_post_internal_cmd - Stock post_internal_cmd for
- *				      BMDMA controller
- *	@qc: internal command to clean up
- *
- *	LOCKING:
- *	Kernel thread context (may sleep)
- */
-void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
-{
-	if (qc->ap->ioaddr.bmdma_addr)
-		ata_bmdma_stop(qc);
-}
+	DPRINTK("EXIT\n");
+	return;
 
-/**
- *	ata_sff_port_start - Set port up for dma.
- *	@ap: Port to initialize
- *
- *	Called just after data structures for each port are
- *	initialized.  Allocates space for PRD table if the device
- *	is DMA capable SFF.
- *
- *	May be used as the port_start() entry in ata_port_operations.
- *
- *	LOCKING:
- *	Inherited from caller.
- */
+err_out:
+	ata_port_printk(ap, KERN_ERR, "disabling port\n");
+	ata_port_disable(ap);
 
-int ata_sff_port_start(struct ata_port *ap)
-{
-	if (ap->ioaddr.bmdma_addr)
-		return ata_port_start(ap);
-	return 0;
+	DPRINTK("EXIT\n");
 }
 
 #ifdef CONFIG_PCI
 
-static int ata_resources_present(struct pci_dev *pdev, int port)
+/**
+ *	ata_pci_bmdma_clear_simplex -	attempt to kick device out of simplex
+ *	@pdev: PCI device
+ *
+ *	Some PCI ATA devices report simplex mode but in fact can be told to
+ *	enter non-simplex mode.  This implements the necessary logic to
+ *	perform the task on such devices. Calling it on other devices will
+ *	have -undefined- behaviour.
+ */
+int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
 {
-	int i;
+	unsigned long bmdma = pci_resource_start(pdev, 4);
+	u8 simplex;
 
-	/* Check the PCI resources for this channel are enabled */
-	port = port * 2;
-	for (i = 0; i < 2; i ++) {
-		if (pci_resource_start(pdev, port + i) == 0 ||
-		    pci_resource_len(pdev, port + i) == 0)
-			return 0;
-	}
-	return 1;
+	if (bmdma == 0)
+		return -ENOENT;
+
+	simplex = inb(bmdma + 0x02);
+	outb(simplex & 0x60, bmdma + 0x02);
+	simplex = inb(bmdma + 0x02);
+	if (simplex & 0x80)
+		return -EOPNOTSUPP;
+	return 0;
 }
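+
+/* Chipset init code calls this once, before the host is registered,
+ * when it knows the simplex report is bogus; ali_init_chipset() later
+ * in this patch simply does:
+ *
+ *	ata_pci_bmdma_clear_simplex(pdev);
+ */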
 
 /**
- *	ata_pci_init_bmdma - acquire PCI BMDMA resources and init ATA host
+ *	ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host
  *	@host: target ATA host
  *
  *	Acquire PCI BMDMA resources and initialize @host accordingly.
@@ -540,7 +2356,7 @@
  *	RETURNS:
  *	0 on success, -errno otherwise.
  */
-int ata_pci_init_bmdma(struct ata_host *host)
+int ata_pci_bmdma_init(struct ata_host *host)
 {
 	struct device *gdev = host->dev;
 	struct pci_dev *pdev = to_pci_dev(gdev);
@@ -585,8 +2401,22 @@
 	return 0;
 }
 
+static int ata_resources_present(struct pci_dev *pdev, int port)
+{
+	int i;
+
+	/* Check the PCI resources for this channel are enabled */
+	port = port * 2;
+	for (i = 0; i < 2; i++) {
+		if (pci_resource_start(pdev, port + i) == 0 ||
+		    pci_resource_len(pdev, port + i) == 0)
+			return 0;
+	}
+	return 1;
+}
+
 /**
- *	ata_pci_init_sff_host - acquire native PCI ATA resources and init host
+ *	ata_pci_sff_init_host - acquire native PCI ATA resources and init host
  *	@host: target ATA host
  *
  *	Acquire native PCI ATA resources for @host and initialize the
@@ -604,7 +2434,7 @@
  *	0 if at least one port is initialized, -ENODEV if no port is
  *	available.
  */
-int ata_pci_init_sff_host(struct ata_host *host)
+int ata_pci_sff_init_host(struct ata_host *host)
 {
 	struct device *gdev = host->dev;
 	struct pci_dev *pdev = to_pci_dev(gdev);
@@ -646,7 +2476,7 @@
 		ap->ioaddr.altstatus_addr =
 		ap->ioaddr.ctl_addr = (void __iomem *)
 			((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS);
-		ata_std_ports(&ap->ioaddr);
+		ata_sff_std_ports(&ap->ioaddr);
 
 		ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
 			(unsigned long long)pci_resource_start(pdev, base),
@@ -664,7 +2494,7 @@
 }
 
 /**
- *	ata_pci_prepare_sff_host - helper to prepare native PCI ATA host
+ *	ata_pci_sff_prepare_host - helper to prepare native PCI ATA host
  *	@pdev: target PCI device
  *	@ppi: array of port_info, must be enough for two ports
  *	@r_host: out argument for the initialized ATA host
@@ -678,7 +2508,7 @@
  *	RETURNS:
  *	0 on success, -errno otherwise.
  */
-int ata_pci_prepare_sff_host(struct pci_dev *pdev,
+int ata_pci_sff_prepare_host(struct pci_dev *pdev,
 			     const struct ata_port_info * const * ppi,
 			     struct ata_host **r_host)
 {
@@ -696,12 +2526,12 @@
 		goto err_out;
 	}
 
-	rc = ata_pci_init_sff_host(host);
+	rc = ata_pci_sff_init_host(host);
 	if (rc)
 		goto err_out;
 
 	/* init DMA related stuff */
-	rc = ata_pci_init_bmdma(host);
+	rc = ata_pci_bmdma_init(host);
 	if (rc)
 		goto err_bmdma;
 
@@ -722,7 +2552,7 @@
 }
 
 /**
- *	ata_pci_activate_sff_host - start SFF host, request IRQ and register it
+ *	ata_pci_sff_activate_host - start SFF host, request IRQ and register it
  *	@host: target SFF ATA host
  *	@irq_handler: irq_handler used when requesting IRQ(s)
  *	@sht: scsi_host_template to use when registering the host
@@ -737,7 +2567,7 @@
  *	RETURNS:
  *	0 on success, -errno otherwise.
  */
-int ata_pci_activate_sff_host(struct ata_host *host,
+int ata_pci_sff_activate_host(struct ata_host *host,
 			      irq_handler_t irq_handler,
 			      struct scsi_host_template *sht)
 {
@@ -815,9 +2645,11 @@
 }
 
 /**
- *	ata_pci_init_one - Initialize/register PCI IDE host controller
+ *	ata_pci_sff_init_one - Initialize/register PCI IDE host controller
  *	@pdev: Controller to be initialized
  *	@ppi: array of port_info, must be enough for two ports
+ *	@sht: scsi_host_template to use when registering the host
+ *	@host_priv: host private_data
  *
  *	This is a helper function which can be called from a driver's
  *	xxx_init_one() probe function if the hardware uses traditional
@@ -837,8 +2669,9 @@
  *	RETURNS:
 *	Zero on success, negative errno-based value on error.
  */
-int ata_pci_init_one(struct pci_dev *pdev,
-		     const struct ata_port_info * const * ppi)
+int ata_pci_sff_init_one(struct pci_dev *pdev,
+			 const struct ata_port_info * const * ppi,
+			 struct scsi_host_template *sht, void *host_priv)
 {
 	struct device *dev = &pdev->dev;
 	const struct ata_port_info *pi = NULL;
@@ -869,13 +2702,13 @@
 		goto out;
 
 	/* prepare and activate SFF host */
-	rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
+	rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
 	if (rc)
 		goto out;
+	host->private_data = host_priv;
 
 	pci_set_master(pdev);
-	rc = ata_pci_activate_sff_host(host, pi->port_ops->irq_handler,
-				       pi->sht);
+	rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
  out:
 	if (rc == 0)
 		devres_remove_group(&pdev->dev, NULL);
@@ -885,41 +2718,52 @@
 	return rc;
 }
 
-/**
- *	ata_pci_clear_simplex	-	attempt to kick device out of simplex
- *	@pdev: PCI device
- *
- *	Some PCI ATA devices report simplex mode but in fact can be told to
- *	enter non simplex mode. This implements the necessary logic to
- *	perform the task on such devices. Calling it on other devices will
- *	have -undefined- behaviour.
- */
-
-int ata_pci_clear_simplex(struct pci_dev *pdev)
-{
-	unsigned long bmdma = pci_resource_start(pdev, 4);
-	u8 simplex;
-
-	if (bmdma == 0)
-		return -ENOENT;
-
-	simplex = inb(bmdma + 0x02);
-	outb(simplex & 0x60, bmdma + 0x02);
-	simplex = inb(bmdma + 0x02);
-	if (simplex & 0x80)
-		return -EOPNOTSUPP;
-	return 0;
-}
-
-unsigned long ata_pci_default_filter(struct ata_device *adev, unsigned long xfer_mask)
-{
-	/* Filter out DMA modes if the device has been configured by
-	   the BIOS as PIO only */
-
-	if (adev->link->ap->ioaddr.bmdma_addr == NULL)
-		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
-	return xfer_mask;
-}
-
 #endif /* CONFIG_PCI */
 
+EXPORT_SYMBOL_GPL(ata_sff_port_ops);
+EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);
+EXPORT_SYMBOL_GPL(ata_sff_qc_prep);
+EXPORT_SYMBOL_GPL(ata_sff_dumb_qc_prep);
+EXPORT_SYMBOL_GPL(ata_sff_dev_select);
+EXPORT_SYMBOL_GPL(ata_sff_check_status);
+EXPORT_SYMBOL_GPL(ata_sff_altstatus);
+EXPORT_SYMBOL_GPL(ata_sff_busy_sleep);
+EXPORT_SYMBOL_GPL(ata_sff_wait_ready);
+EXPORT_SYMBOL_GPL(ata_sff_tf_load);
+EXPORT_SYMBOL_GPL(ata_sff_tf_read);
+EXPORT_SYMBOL_GPL(ata_sff_exec_command);
+EXPORT_SYMBOL_GPL(ata_sff_data_xfer);
+EXPORT_SYMBOL_GPL(ata_sff_data_xfer_noirq);
+EXPORT_SYMBOL_GPL(ata_sff_irq_on);
+EXPORT_SYMBOL_GPL(ata_sff_irq_clear);
+EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
+EXPORT_SYMBOL_GPL(ata_sff_qc_issue);
+EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf);
+EXPORT_SYMBOL_GPL(ata_sff_host_intr);
+EXPORT_SYMBOL_GPL(ata_sff_interrupt);
+EXPORT_SYMBOL_GPL(ata_sff_freeze);
+EXPORT_SYMBOL_GPL(ata_sff_thaw);
+EXPORT_SYMBOL_GPL(ata_sff_prereset);
+EXPORT_SYMBOL_GPL(ata_sff_dev_classify);
+EXPORT_SYMBOL_GPL(ata_sff_wait_after_reset);
+EXPORT_SYMBOL_GPL(ata_sff_softreset);
+EXPORT_SYMBOL_GPL(sata_sff_hardreset);
+EXPORT_SYMBOL_GPL(ata_sff_postreset);
+EXPORT_SYMBOL_GPL(ata_sff_error_handler);
+EXPORT_SYMBOL_GPL(ata_sff_post_internal_cmd);
+EXPORT_SYMBOL_GPL(ata_sff_port_start);
+EXPORT_SYMBOL_GPL(ata_sff_std_ports);
+EXPORT_SYMBOL_GPL(ata_bmdma_mode_filter);
+EXPORT_SYMBOL_GPL(ata_bmdma_setup);
+EXPORT_SYMBOL_GPL(ata_bmdma_start);
+EXPORT_SYMBOL_GPL(ata_bmdma_stop);
+EXPORT_SYMBOL_GPL(ata_bmdma_status);
+EXPORT_SYMBOL_GPL(ata_bus_reset);
+#ifdef CONFIG_PCI
+EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
+EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);
+EXPORT_SYMBOL_GPL(ata_pci_sff_init_host);
+EXPORT_SYMBOL_GPL(ata_pci_sff_prepare_host);
+EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host);
+EXPORT_SYMBOL_GPL(ata_pci_sff_init_one);
+#endif /* CONFIG_PCI */
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index aa884f7..4aeeabb 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -38,6 +38,17 @@
 	void			(*done)(struct scsi_cmnd *);
 };
 
+static inline int ata_is_builtin_hardreset(ata_reset_fn_t reset)
+{
+	if (reset == sata_std_hardreset)
+		return 1;
+#ifdef CONFIG_ATA_SFF
+	if (reset == sata_sff_hardreset)
+		return 1;
+#endif
+	return 0;
+}
+
 /* libata-core.c */
 enum {
 	/* flags for ata_dev_read_id() */
@@ -61,12 +72,16 @@
 extern int libata_noacpi;
 extern int libata_allow_tpm;
 extern void ata_force_cbl(struct ata_port *ap);
+extern u64 ata_tf_to_lba(const struct ata_taskfile *tf);
+extern u64 ata_tf_to_lba48(const struct ata_taskfile *tf);
 extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev);
 extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
 			   u64 block, u32 n_block, unsigned int tf_flags,
 			   unsigned int tag);
 extern u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev);
 extern void ata_dev_disable(struct ata_device *dev);
+extern void ata_pio_queue_task(struct ata_port *ap, void *data,
+			       unsigned long delay);
 extern void ata_port_flush_task(struct ata_port *ap);
 extern unsigned ata_exec_internal(struct ata_device *dev,
 				  struct ata_taskfile *tf, const u8 *cdb,
@@ -77,6 +92,8 @@
 				     int dma_dir, struct scatterlist *sg,
 				     unsigned int n_elem, unsigned long timeout);
 extern unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd);
+extern int ata_wait_ready(struct ata_link *link, unsigned long deadline,
+			  int (*check_ready)(struct ata_link *link));
 extern int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
 			   unsigned int flags, u16 *id);
 extern int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags);
@@ -91,10 +108,7 @@
 extern void ata_qc_issue(struct ata_queued_cmd *qc);
 extern void __ata_qc_complete(struct ata_queued_cmd *qc);
 extern int ata_check_atapi_dma(struct ata_queued_cmd *qc);
-extern void ata_dev_select(struct ata_port *ap, unsigned int device,
-                           unsigned int wait, unsigned int can_sleep);
 extern void swap_buf_le16(u16 *buf, unsigned int buf_words);
-extern int ata_flush_cache(struct ata_device *dev);
 extern void ata_dev_init(struct ata_device *dev);
 extern void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp);
 extern int sata_link_init_spd(struct ata_link *link);
@@ -165,11 +179,6 @@
 extern void ata_scsi_dev_rescan(struct work_struct *work);
 extern int ata_bus_probe(struct ata_port *ap);
 
-/* libata-pmp.c */
-extern int sata_pmp_scr_read(struct ata_link *link, int reg, u32 *val);
-extern int sata_pmp_scr_write(struct ata_link *link, int reg, u32 val);
-extern int sata_pmp_attach(struct ata_device *dev);
-
 /* libata-eh.c */
 extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
 extern void ata_scsi_error(struct Scsi_Host *host);
@@ -193,8 +202,34 @@
 			  struct ata_link **r_failed_disk);
 extern void ata_eh_finish(struct ata_port *ap);
 
-/* libata-sff.c */
-extern u8 ata_irq_on(struct ata_port *ap);
+/* libata-pmp.c */
+#ifdef CONFIG_SATA_PMP
+extern int sata_pmp_scr_read(struct ata_link *link, int reg, u32 *val);
+extern int sata_pmp_scr_write(struct ata_link *link, int reg, u32 val);
+extern int sata_pmp_attach(struct ata_device *dev);
+#else /* CONFIG_SATA_PMP */
+static inline int sata_pmp_scr_read(struct ata_link *link, int reg, u32 *val)
+{
+	return -EINVAL;
+}
 
+static inline int sata_pmp_scr_write(struct ata_link *link, int reg, u32 val)
+{
+	return -EINVAL;
+}
+
+static inline int sata_pmp_attach(struct ata_device *dev)
+{
+	return -EINVAL;
+}
+#endif /* CONFIG_SATA_PMP */
+
+/* libata-sff.c */
+#ifdef CONFIG_ATA_SFF
+extern void ata_dev_select(struct ata_port *ap, unsigned int device,
+                           unsigned int wait, unsigned int can_sleep);
+extern u8 ata_irq_on(struct ata_port *ap);
+extern void ata_pio_task(struct work_struct *work);
+#endif /* CONFIG_ATA_SFF */
 
 #endif /* __LIBATA_H__ */
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
index bdc3b9d..c5f91e6 100644
--- a/drivers/ata/pata_acpi.c
+++ b/drivers/ata/pata_acpi.c
@@ -47,7 +47,7 @@
 	if (ap->acpi_handle == NULL || ata_acpi_gtm(ap, &acpi->gtm) < 0)
 		return -ENODEV;
 
-	return ata_std_prereset(link, deadline);
+	return ata_sff_prereset(link, deadline);
 }
 
 /**
@@ -68,20 +68,6 @@
 }
 
 /**
- *	pacpi_error_handler - Setup and error handler
- *	@ap: Port to handle
- *
- *	LOCKING:
- *	None (inherited from caller).
- */
-
-static void pacpi_error_handler(struct ata_port *ap)
-{
-	ata_bmdma_drive_eh(ap, pacpi_pre_reset, ata_std_softreset, NULL,
-			   ata_std_postreset);
-}
-
-/**
  *	pacpi_discover_modes	-	filter non ACPI modes
  *	@adev: ATA device
  *	@mask: proposed modes
@@ -120,7 +106,7 @@
 static unsigned long pacpi_mode_filter(struct ata_device *adev, unsigned long mask)
 {
 	struct pata_acpi *acpi = adev->link->ap->private_data;
-	return ata_pci_default_filter(adev, mask & acpi->mask[adev->devno]);
+	return ata_bmdma_mode_filter(adev, mask & acpi->mask[adev->devno]);
 }
 
 /**
@@ -176,7 +162,7 @@
 }
 
 /**
- *	pacpi_qc_issue_prot	-	command issue
+ *	pacpi_qc_issue	-	command issue
  *	@qc: command pending
  *
 *	Called when the libata layer is about to issue a command.  We wrap
 *	this and step the timing information as necessary.
  */
 
-static unsigned int pacpi_qc_issue_prot(struct ata_queued_cmd *qc)
+static unsigned int pacpi_qc_issue(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct ata_device *adev = qc->dev;
 	struct pata_acpi *acpi = ap->private_data;
 
 	if (acpi->gtm.flags & 0x10)
-		return ata_qc_issue_prot(qc);
+		return ata_sff_qc_issue(qc);
 
 	if (adev != acpi->last) {
 		pacpi_set_piomode(ap, adev);
@@ -199,7 +185,7 @@
 			pacpi_set_dmamode(ap, adev);
 		acpi->last = adev;
 	}
-	return ata_qc_issue_prot(qc);
+	return ata_sff_qc_issue(qc);
 }
 
 /**
@@ -232,57 +218,17 @@
 }
 
 static struct scsi_host_template pacpi_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	/* Use standard CHS mapping rules */
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
 };
 
-static const struct ata_port_operations pacpi_ops = {
+static struct ata_port_operations pacpi_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	.qc_issue		= pacpi_qc_issue,
+	.cable_detect		= pacpi_cable_detect,
+	.mode_filter		= pacpi_mode_filter,
 	.set_piomode		= pacpi_set_piomode,
 	.set_dmamode		= pacpi_set_dmamode,
-	.mode_filter		= pacpi_mode_filter,
-
-	/* Task file is PCI ATA format, use helpers */
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
-
-	.freeze			= ata_bmdma_freeze,
-	.thaw			= ata_bmdma_thaw,
-	.error_handler		= pacpi_error_handler,
-	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
-	.cable_detect		= pacpi_cable_detect,
-
-	/* BMDMA handling is PCI ATA format, use helpers */
-	.bmdma_setup		= ata_bmdma_setup,
-	.bmdma_start		= ata_bmdma_start,
-	.bmdma_stop		= ata_bmdma_stop,
-	.bmdma_status		= ata_bmdma_status,
-	.qc_prep		= ata_qc_prep,
-	.qc_issue		= pacpi_qc_issue_prot,
-	.data_xfer		= ata_data_xfer,
-
-	/* Timeout handling */
-	.irq_handler		= ata_interrupt,
-	.irq_clear		= ata_bmdma_irq_clear,
-	.irq_on			= ata_irq_on,
-
-	/* Generic PATA PCI ATA helpers */
+	.prereset		= pacpi_pre_reset,
 	.port_start		= pacpi_port_start,
 };
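+
+/* With .inherits in place only the hooks pata_acpi actually overrides
+ * are listed; the remaining operations (taskfile access, BMDMA, EH and
+ * IRQ handling) are filled in from ata_bmdma_port_ops when libata
+ * finalizes the ops table.
+ */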
 
@@ -304,7 +250,6 @@
 static int pacpi_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	static const struct ata_port_info info = {
-		.sht		= &pacpi_sht,
 		.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
 
 		.pio_mask	= 0x1f,
@@ -314,7 +259,7 @@
 		.port_ops	= &pacpi_ops,
 	};
 	const struct ata_port_info *ppi[] = { &info, NULL };
-	return ata_pci_init_one(pdev, ppi);
+	return ata_pci_sff_init_one(pdev, ppi, &pacpi_sht, NULL);
 }
 
 static const struct pci_device_id pacpi_pci_tbl[] = {
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index 511a830..fcabe46 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -121,7 +121,7 @@
 	ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num));
 	if (strstr(model_num, "WDC"))
 		return mask &= ~ATA_MASK_UDMA;
-	return ata_pci_default_filter(adev, mask);
+	return ata_bmdma_mode_filter(adev, mask);
 }
 
 /**
@@ -339,21 +339,7 @@
 }
 
 static struct scsi_host_template ali_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
 };
 
 /*
@@ -361,29 +347,15 @@
  */
 
 static struct ata_port_operations ali_early_port_ops = {
-	.set_piomode	= ali_set_piomode,
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= ata_bmdma_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.inherits	= &ata_sff_port_ops,
 	.cable_detect	= ata_cable_40wire,
+	.set_piomode	= ali_set_piomode,
+};
 
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
+static const struct ata_port_operations ali_dma_base_ops = {
+	.inherits	= &ata_bmdma_port_ops,
+	.set_piomode	= ali_set_piomode,
+	.set_dmamode	= ali_set_dmamode,
 };
 
 /*
@@ -391,115 +363,31 @@
  *	detect
  */
 static struct ata_port_operations ali_20_port_ops = {
-	.set_piomode	= ali_set_piomode,
-	.set_dmamode	= ali_set_dmamode,
-	.mode_filter	= ali_20_filter,
-
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_atapi_dma = ali_check_atapi_dma,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-	.dev_config	= ali_lock_sectors,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= ata_bmdma_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.inherits	= &ali_dma_base_ops,
 	.cable_detect	= ata_cable_40wire,
-
-	.bmdma_setup 	= ata_bmdma_setup,
-	.bmdma_start 	= ata_bmdma_start,
-	.bmdma_stop	= ata_bmdma_stop,
-	.bmdma_status 	= ata_bmdma_status,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
+	.mode_filter	= ali_20_filter,
+	.check_atapi_dma = ali_check_atapi_dma,
+	.dev_config	= ali_lock_sectors,
 };
 
 /*
  *	Port operations for DMA capable ALi with cable detect
  */
 static struct ata_port_operations ali_c2_port_ops = {
-	.set_piomode	= ali_set_piomode,
-	.set_dmamode	= ali_set_dmamode,
-	.mode_filter	= ata_pci_default_filter,
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
+	.inherits	= &ali_dma_base_ops,
 	.check_atapi_dma = ali_check_atapi_dma,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-	.dev_config	= ali_lock_sectors,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= ata_bmdma_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
 	.cable_detect	= ali_c2_cable_detect,
-
-	.bmdma_setup 	= ata_bmdma_setup,
-	.bmdma_start 	= ata_bmdma_start,
-	.bmdma_stop	= ata_bmdma_stop,
-	.bmdma_status 	= ata_bmdma_status,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
+	.dev_config	= ali_lock_sectors,
 };
 
 /*
  *	Port operations for DMA capable ALi with cable detect and LBA48
  */
 static struct ata_port_operations ali_c5_port_ops = {
-	.set_piomode	= ali_set_piomode,
-	.set_dmamode	= ali_set_dmamode,
-	.mode_filter	= ata_pci_default_filter,
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
+	.inherits	= &ali_dma_base_ops,
 	.check_atapi_dma = ali_check_atapi_dma,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
 	.dev_config	= ali_warn_atapi_dma,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= ata_bmdma_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
 	.cable_detect	= ali_c2_cable_detect,
-
-	.bmdma_setup 	= ata_bmdma_setup,
-	.bmdma_start 	= ata_bmdma_start,
-	.bmdma_stop	= ata_bmdma_stop,
-	.bmdma_status 	= ata_bmdma_status,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
 };
 
 
@@ -561,7 +449,7 @@
 	}
 	pci_dev_put(isa_bridge);
 	pci_dev_put(north);
-	ata_pci_clear_simplex(pdev);
+	ata_pci_bmdma_clear_simplex(pdev);
 }
 /**
  *	ali_init_one		-	discovery callback
@@ -575,14 +463,12 @@
 static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	static const struct ata_port_info info_early = {
-		.sht = &ali_sht,
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.port_ops = &ali_early_port_ops
 	};
 	/* Revision 0x20 added DMA */
 	static const struct ata_port_info info_20 = {
-		.sht = &ali_sht,
 		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
@@ -590,7 +476,6 @@
 	};
 	/* Revision 0x20 with support logic added UDMA */
 	static const struct ata_port_info info_20_udma = {
-		.sht = &ali_sht,
 		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
@@ -599,7 +484,6 @@
 	};
 	/* Revision 0xC2 adds UDMA66 */
 	static const struct ata_port_info info_c2 = {
-		.sht = &ali_sht,
 		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
@@ -608,7 +492,6 @@
 	};
 	/* Revision 0xC3 is UDMA66 for now */
 	static const struct ata_port_info info_c3 = {
-		.sht = &ali_sht,
 		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
@@ -617,7 +500,6 @@
 	};
 	/* Revision 0xC4 is UDMA100 */
 	static const struct ata_port_info info_c4 = {
-		.sht = &ali_sht,
 		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
@@ -626,7 +508,6 @@
 	};
 	/* Revision 0xC5 is UDMA133 with LBA48 DMA */
 	static const struct ata_port_info info_c5 = {
-		.sht = &ali_sht,
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
@@ -637,6 +518,11 @@
 	const struct ata_port_info *ppi[] = { NULL, NULL };
 	u8 tmp;
 	struct pci_dev *isa_bridge;
+	int rc;
+
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
 
 	/*
 	 * The chipset revision selects the driver operations and
@@ -666,14 +552,21 @@
 	        	ppi[0] = &info_20_udma;
 		pci_dev_put(isa_bridge);
 	}
-	return ata_pci_init_one(pdev, ppi);
+	return ata_pci_sff_init_one(pdev, ppi, &ali_sht, NULL);
 }
 
 #ifdef CONFIG_PM
 static int ali_reinit_one(struct pci_dev *pdev)
 {
+	struct ata_host *host = dev_get_drvdata(&pdev->dev);
+	int rc;
+
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc)
+		return rc;
 	ali_init_chipset(pdev);
-	return ata_pci_device_resume(pdev);
+	ata_host_resume(host);
+	return 0;
 }
 #endif
 
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index 4b8d9b5..26665c3 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -56,7 +56,9 @@
 	u8 t;
 
 	T = 1000000000 / amd_clock;
-	UT = T / min_t(int, max_t(int, clock, 1), 2);
+	UT = T;
+	if (clock >= 2)
+		UT = T / 2;
 
 	if (ata_timing_compute(adev, speed, &at, T, UT) < 0) {
 		dev_printk(KERN_ERR, &pdev->dev, "unknown mode %d.\n", speed);
@@ -141,13 +143,7 @@
 	if (!pci_test_config_bits(pdev, &amd_enable_bits[ap->port_no]))
 		return -ENOENT;
 
-	return ata_std_prereset(link, deadline);
-}
-
-static void amd_error_handler(struct ata_port *ap)
-{
-	ata_bmdma_drive_eh(ap, amd_pre_reset, ata_std_softreset, NULL,
-			   ata_std_postreset);
+	return ata_sff_prereset(link, deadline);
 }
 
 static int amd_cable_detect(struct ata_port *ap)
@@ -297,14 +293,7 @@
 	if (!pci_test_config_bits(pdev, &nv_enable_bits[ap->port_no]))
 		return -ENOENT;
 
-	return ata_std_prereset(link, deadline);
-}
-
-static void nv_error_handler(struct ata_port *ap)
-{
-	ata_bmdma_drive_eh(ap, nv_pre_reset,
-			       ata_std_softreset, NULL,
-			       ata_std_postreset);
+	return ata_sff_prereset(link, deadline);
 }
 
 /**
@@ -353,228 +342,66 @@
 }
 
 static struct scsi_host_template amd_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static const struct ata_port_operations amd_base_port_ops = {
+	.inherits	= &ata_bmdma_port_ops,
+	.prereset	= amd_pre_reset,
 };
 
 static struct ata_port_operations amd33_port_ops = {
+	.inherits	= &amd_base_port_ops,
+	.cable_detect	= ata_cable_40wire,
 	.set_piomode	= amd33_set_piomode,
 	.set_dmamode	= amd33_set_dmamode,
-	.mode_filter	= ata_pci_default_filter,
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= amd_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-	.cable_detect	= ata_cable_40wire,
-
-	.bmdma_setup 	= ata_bmdma_setup,
-	.bmdma_start 	= ata_bmdma_start,
-	.bmdma_stop	= ata_bmdma_stop,
-	.bmdma_status 	= ata_bmdma_status,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
 };
 
 static struct ata_port_operations amd66_port_ops = {
+	.inherits	= &amd_base_port_ops,
+	.cable_detect	= ata_cable_unknown,
 	.set_piomode	= amd66_set_piomode,
 	.set_dmamode	= amd66_set_dmamode,
-	.mode_filter	= ata_pci_default_filter,
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= amd_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-	.cable_detect	= ata_cable_unknown,
-
-	.bmdma_setup 	= ata_bmdma_setup,
-	.bmdma_start 	= ata_bmdma_start,
-	.bmdma_stop	= ata_bmdma_stop,
-	.bmdma_status 	= ata_bmdma_status,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
 };
 
 static struct ata_port_operations amd100_port_ops = {
+	.inherits	= &amd_base_port_ops,
+	.cable_detect	= ata_cable_unknown,
 	.set_piomode	= amd100_set_piomode,
 	.set_dmamode	= amd100_set_dmamode,
-	.mode_filter	= ata_pci_default_filter,
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= amd_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-	.cable_detect	= ata_cable_unknown,
-
-	.bmdma_setup 	= ata_bmdma_setup,
-	.bmdma_start 	= ata_bmdma_start,
-	.bmdma_stop	= ata_bmdma_stop,
-	.bmdma_status 	= ata_bmdma_status,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
 };
 
 static struct ata_port_operations amd133_port_ops = {
+	.inherits	= &amd_base_port_ops,
+	.cable_detect	= amd_cable_detect,
 	.set_piomode	= amd133_set_piomode,
 	.set_dmamode	= amd133_set_dmamode,
-	.mode_filter	= ata_pci_default_filter,
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
+};
 
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= amd_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-	.cable_detect	= amd_cable_detect,
-
-	.bmdma_setup 	= ata_bmdma_setup,
-	.bmdma_start 	= ata_bmdma_start,
-	.bmdma_stop	= ata_bmdma_stop,
-	.bmdma_status 	= ata_bmdma_status,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
+static const struct ata_port_operations nv_base_port_ops = {
+	.inherits	= &ata_bmdma_port_ops,
+	.cable_detect	= ata_cable_ignore,
+	.mode_filter	= nv_mode_filter,
+	.prereset	= nv_pre_reset,
+	.host_stop	= nv_host_stop,
 };
 
 static struct ata_port_operations nv100_port_ops = {
+	.inherits	= &nv_base_port_ops,
 	.set_piomode	= nv100_set_piomode,
 	.set_dmamode	= nv100_set_dmamode,
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= nv_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-	.cable_detect	= ata_cable_ignore,
-	.mode_filter	= nv_mode_filter,
-
-	.bmdma_setup 	= ata_bmdma_setup,
-	.bmdma_start 	= ata_bmdma_start,
-	.bmdma_stop	= ata_bmdma_stop,
-	.bmdma_status 	= ata_bmdma_status,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
-	.host_stop	= nv_host_stop,
 };
 
 static struct ata_port_operations nv133_port_ops = {
+	.inherits	= &nv_base_port_ops,
 	.set_piomode	= nv133_set_piomode,
 	.set_dmamode	= nv133_set_dmamode,
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= nv_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-	.cable_detect	= ata_cable_ignore,
-	.mode_filter	= nv_mode_filter,
-
-	.bmdma_setup 	= ata_bmdma_setup,
-	.bmdma_start 	= ata_bmdma_start,
-	.bmdma_stop	= ata_bmdma_stop,
-	.bmdma_status 	= ata_bmdma_status,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
-	.host_stop	= nv_host_stop,
 };
 
 static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	static const struct ata_port_info info[10] = {
 		{	/* 0: AMD 7401 */
-			.sht = &amd_sht,
 			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,	/* No SWDMA */
@@ -582,7 +409,6 @@
 			.port_ops = &amd33_port_ops
 		},
 		{	/* 1: Early AMD7409 - no swdma */
-			.sht = &amd_sht,
 			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
@@ -590,7 +416,6 @@
 			.port_ops = &amd66_port_ops
 		},
 		{	/* 2: AMD 7409, no swdma errata */
-			.sht = &amd_sht,
 			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
@@ -598,7 +423,6 @@
 			.port_ops = &amd66_port_ops
 		},
 		{	/* 3: AMD 7411 */
-			.sht = &amd_sht,
 			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
@@ -606,7 +430,6 @@
 			.port_ops = &amd100_port_ops
 		},
 		{	/* 4: AMD 7441 */
-			.sht = &amd_sht,
 			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
@@ -614,7 +437,6 @@
 			.port_ops = &amd100_port_ops
 		},
 		{	/* 5: AMD 8111*/
-			.sht = &amd_sht,
 			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
@@ -622,7 +444,6 @@
 			.port_ops = &amd133_port_ops
 		},
 		{	/* 6: AMD 8111 UDMA 100 (Serenade) */
-			.sht = &amd_sht,
 			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
@@ -630,7 +451,6 @@
 			.port_ops = &amd133_port_ops
 		},
 		{	/* 7: Nvidia Nforce */
-			.sht = &amd_sht,
 			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
@@ -638,7 +458,6 @@
 			.port_ops = &nv100_port_ops
 		},
 		{	/* 8: Nvidia Nforce2 and later */
-			.sht = &amd_sht,
 			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
@@ -646,7 +465,6 @@
 			.port_ops = &nv133_port_ops
 		},
 		{	/* 9: AMD CS5536 (Geode companion) */
-			.sht = &amd_sht,
 			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
@@ -654,15 +472,20 @@
 			.port_ops = &amd100_port_ops
 		}
 	};
-	struct ata_port_info pi;
-	const struct ata_port_info *ppi[] = { &pi, NULL };
+	const struct ata_port_info *ppi[] = { NULL, NULL };
 	static int printed_version;
 	int type = id->driver_data;
+	void *hpriv = NULL;
 	u8 fifo;
+	int rc;
 
 	if (!printed_version++)
 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
 
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
 	pci_read_config_byte(pdev, 0x41, &fifo);
 
 	/* Check for AMD7409 without swdma errata and if found adjust type */
@@ -677,10 +500,10 @@
 	/*
 	 * Okay, type is determined now.  Apply type-specific workarounds.
 	 */
-	pi = info[type];
+	ppi[0] = &info[type];
 
 	if (type < 3)
-		ata_pci_clear_simplex(pdev);
+		ata_pci_bmdma_clear_simplex(pdev);
 
 	/* Check for AMD7411 */
 	if (type == 3)
@@ -696,16 +519,23 @@
 		u32 udma;
 
 		pci_read_config_dword(pdev, 0x60, &udma);
-		pi.private_data = (void *)(unsigned long)udma;
+		hpriv = (void *)(unsigned long)udma;
 	}
 
 	/* And fire it up */
-	return ata_pci_init_one(pdev, ppi);
+	return ata_pci_sff_init_one(pdev, ppi, &amd_sht, hpriv);
 }
 
 #ifdef CONFIG_PM
 static int amd_reinit_one(struct pci_dev *pdev)
 {
+	struct ata_host *host = dev_get_drvdata(&pdev->dev);
+	int rc;
+
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc)
+		return rc;
+
 	if (pdev->vendor == PCI_VENDOR_ID_AMD) {
 		u8 fifo;
 		pci_read_config_byte(pdev, 0x41, &fifo);
@@ -716,9 +546,11 @@
 			pci_write_config_byte(pdev, 0x41, fifo | 0xF0);
 		if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7409 ||
 		    pdev->device == PCI_DEVICE_ID_AMD_COBRA_7401)
-		    	ata_pci_clear_simplex(pdev);
+			ata_pci_bmdma_clear_simplex(pdev);
 	}
-	return ata_pci_device_resume(pdev);
+
+	ata_host_resume(host);
+	return 0;
 }
 #endif
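
All of the ops tables above collapse into the same shape: a driver-local base inherits the library defaults through .inherits, and each chip variant inherits the base and lists only its overrides; libata resolves the chain when the host is started, so none of the boilerplate hooks need restating. Schematically, with hypothetical foo_* names:

static const struct ata_port_operations foo_base_port_ops = {
	.inherits	= &ata_bmdma_port_ops,	/* library BMDMA defaults */
	.prereset	= foo_pre_reset,	/* shared by every variant */
};

static struct ata_port_operations foo33_port_ops = {
	.inherits	= &foo_base_port_ops,
	.cable_detect	= ata_cable_40wire,
	.set_piomode	= foo33_set_piomode,	/* only the differences */
	.set_dmamode	= foo33_set_dmamode,
};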
 
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
index d421831..0f513bc 100644
--- a/drivers/ata/pata_artop.c
+++ b/drivers/ata/pata_artop.c
@@ -52,22 +52,7 @@
 	if (!pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
 		return -ENOENT;
 
-	return ata_std_prereset(link, deadline);
-}
-
-/**
- *	artop6210_error_handler - Probe specified port on PATA host controller
- *	@ap: Port to probe
- *
- *	LOCKING:
- *	None (inherited from caller).
- */
-
-static void artop6210_error_handler(struct ata_port *ap)
-{
-	ata_bmdma_drive_eh(ap, artop6210_pre_reset,
-				    ata_std_softreset, NULL,
-				    ata_std_postreset);
+	return ata_sff_prereset(link, deadline);
 }
 
 /**
@@ -93,7 +78,7 @@
 	if ((pdev->device & 1) && !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
 		return -ENOENT;
 
-	return ata_std_prereset(link, deadline);
+	return ata_sff_prereset(link, deadline);
 }
 
 /**
@@ -114,21 +99,6 @@
 }
 
 /**
- *	artop6260_error_handler - Probe specified port on PATA host controller
- *	@ap: Port to probe
- *
- *	LOCKING:
- *	None (inherited from caller).
- */
-
-static void artop6260_error_handler(struct ata_port *ap)
-{
-	ata_bmdma_drive_eh(ap, artop6260_pre_reset,
-				    ata_std_softreset, NULL,
-				    ata_std_postreset);
-}
-
-/**
  *	artop6210_load_piomode - Load a set of PATA PIO timings
  *	@ap: Port whose timings we are configuring
  *	@adev: Device
@@ -314,85 +284,23 @@
 }
 
 static struct scsi_host_template artop_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
 };
 
-static const struct ata_port_operations artop6210_ops = {
+static struct ata_port_operations artop6210_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	.cable_detect		= ata_cable_40wire,
 	.set_piomode		= artop6210_set_piomode,
 	.set_dmamode		= artop6210_set_dmamode,
-	.mode_filter		= ata_pci_default_filter,
-
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
-
-	.freeze			= ata_bmdma_freeze,
-	.thaw			= ata_bmdma_thaw,
-	.error_handler		= artop6210_error_handler,
-	.post_internal_cmd 	= ata_bmdma_post_internal_cmd,
-	.cable_detect		= ata_cable_40wire,
-
-	.bmdma_setup		= ata_bmdma_setup,
-	.bmdma_start		= ata_bmdma_start,
-	.bmdma_stop		= ata_bmdma_stop,
-	.bmdma_status		= ata_bmdma_status,
-	.qc_prep		= ata_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
-
-	.data_xfer		= ata_data_xfer,
-
-	.irq_handler		= ata_interrupt,
-	.irq_clear		= ata_bmdma_irq_clear,
-	.irq_on			= ata_irq_on,
-
-	.port_start		= ata_sff_port_start,
+	.prereset		= artop6210_pre_reset,
 };
 
-static const struct ata_port_operations artop6260_ops = {
+static struct ata_port_operations artop6260_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	.cable_detect		= artop6260_cable_detect,
 	.set_piomode		= artop6260_set_piomode,
 	.set_dmamode		= artop6260_set_dmamode,
-
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
-
-	.freeze			= ata_bmdma_freeze,
-	.thaw			= ata_bmdma_thaw,
-	.error_handler		= artop6260_error_handler,
-	.post_internal_cmd 	= ata_bmdma_post_internal_cmd,
-	.cable_detect		= artop6260_cable_detect,
-
-	.bmdma_setup		= ata_bmdma_setup,
-	.bmdma_start		= ata_bmdma_start,
-	.bmdma_stop		= ata_bmdma_stop,
-	.bmdma_status		= ata_bmdma_status,
-	.qc_prep		= ata_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
-	.data_xfer		= ata_data_xfer,
-
-	.irq_handler		= ata_interrupt,
-	.irq_clear		= ata_bmdma_irq_clear,
-	.irq_on			= ata_irq_on,
-
-	.port_start		= ata_sff_port_start,
+	.prereset		= artop6260_pre_reset,
 };
 
 
@@ -414,7 +322,6 @@
 {
 	static int printed_version;
 	static const struct ata_port_info info_6210 = {
-		.sht		= &artop_sht,
 		.flags		= ATA_FLAG_SLAVE_POSS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
@@ -422,7 +329,6 @@
 		.port_ops	= &artop6210_ops,
 	};
 	static const struct ata_port_info info_626x = {
-		.sht		= &artop_sht,
 		.flags		= ATA_FLAG_SLAVE_POSS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
@@ -430,7 +336,6 @@
 		.port_ops	= &artop6260_ops,
 	};
 	static const struct ata_port_info info_628x = {
-		.sht		= &artop_sht,
 		.flags		= ATA_FLAG_SLAVE_POSS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
@@ -438,7 +343,6 @@
 		.port_ops	= &artop6260_ops,
 	};
 	static const struct ata_port_info info_628x_fast = {
-		.sht		= &artop_sht,
 		.flags		= ATA_FLAG_SLAVE_POSS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
@@ -446,11 +350,16 @@
 		.port_ops	= &artop6260_ops,
 	};
 	const struct ata_port_info *ppi[] = { NULL, NULL };
+	int rc;
 
 	if (!printed_version++)
 		dev_printk(KERN_DEBUG, &pdev->dev,
 			   "version " DRV_VERSION "\n");
 
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
 	if (id->driver_data == 0) {	/* 6210 variant */
 		ppi[0] = &info_6210;
 		ppi[1] = &ata_dummy_port_info;
@@ -491,7 +400,7 @@
 
 	BUG_ON(ppi[0] == NULL);
 
-	return ata_pci_init_one(pdev, ppi);
+	return ata_pci_sff_init_one(pdev, ppi, &artop_sht, NULL);
 }
 
 static const struct pci_device_id artop_pci_tbl[] = {
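
As the 6210 branch above shows, the ppi[] array drives per-port setup: a NULL second slot means "same port_info as port 0", while ata_dummy_port_info registers that channel as a dummy that will not be probed. A sketch of the selection (the predicate is hypothetical):

	const struct ata_port_info *ppi[] = { NULL, NULL };

	ppi[0] = &foo_info;
	if (second_channel_unusable)		/* hypothetical predicate */
		ppi[1] = &ata_dummy_port_info;	/* port 1 becomes a dummy */
	/* leaving ppi[1] NULL would clone port 0's info instead */

	return ata_pci_sff_init_one(pdev, ppi, &foo_sht, NULL);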
diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c
index db057b1..3e8651d 100644
--- a/drivers/ata/pata_at32.c
+++ b/drivers/ata/pata_at32.c
@@ -166,52 +166,14 @@
 	}
 }
 
-static void pata_at32_irq_clear(struct ata_port *ap)
-{
-	/* No DMA controller yet */
-}
-
 static struct scsi_host_template at32_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_PIO_SHT(DRV_NAME),
 };
 
 static struct ata_port_operations at32_port_ops = {
-	.set_piomode		= pata_at32_set_piomode,
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.exec_command		= ata_exec_command,
-	.check_status		= ata_check_status,
-	.dev_select		= ata_std_dev_select,
-
-	.freeze			= ata_bmdma_freeze,
-	.thaw			= ata_bmdma_thaw,
-	.error_handler		= ata_bmdma_error_handler,
-	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
+	.inherits		= &ata_sff_port_ops,
 	.cable_detect		= ata_cable_40wire,
-
-	.qc_prep		= ata_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
-
-	.data_xfer		= ata_data_xfer,
-
-	.irq_clear		= pata_at32_irq_clear,
-	.irq_on			= ata_irq_on,
-
-	.port_start		= ata_sff_port_start,
+	.set_piomode		= pata_at32_set_piomode,
 };
 
 static int __init pata_at32_init_one(struct device *dev,
@@ -261,7 +223,7 @@
 	host->private_data = info;
 
 	/* Register ATA device and return */
-	return ata_host_activate(host, info->irq, ata_interrupt,
+	return ata_host_activate(host, info->irq, ata_sff_interrupt,
 				 IRQF_SHARED | IRQF_TRIGGER_RISING,
 				 &at32_sht);
 }
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index 408bdc1..78738fb 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -45,12 +45,7 @@
 	if (!pci_test_config_bits(pdev, &atiixp_enable_bits[ap->port_no]))
 		return -ENOENT;
 
-	return ata_std_prereset(link, deadline);
-}
-
-static void atiixp_error_handler(struct ata_port *ap)
-{
-	ata_bmdma_drive_eh(ap, atiixp_pre_reset, ata_std_softreset, NULL,   ata_std_postreset);
+	return ata_sff_prereset(link, deadline);
 }
 
 static int atiixp_cable_detect(struct ata_port *ap)
@@ -221,60 +216,26 @@
 }
 
 static struct scsi_host_template atiixp_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
+	ATA_BMDMA_SHT(DRV_NAME),
 	.sg_tablesize		= LIBATA_DUMB_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
 };
 
 static struct ata_port_operations atiixp_port_ops = {
-	.set_piomode	= atiixp_set_piomode,
-	.set_dmamode	= atiixp_set_dmamode,
-	.mode_filter	= ata_pci_default_filter,
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
+	.inherits	= &ata_bmdma_port_ops,
 
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= atiixp_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-	.cable_detect	= atiixp_cable_detect,
-
-	.bmdma_setup 	= ata_bmdma_setup,
+	.qc_prep 	= ata_sff_dumb_qc_prep,
 	.bmdma_start 	= atiixp_bmdma_start,
 	.bmdma_stop	= atiixp_bmdma_stop,
-	.bmdma_status 	= ata_bmdma_status,
 
-	.qc_prep 	= ata_dumb_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
+	.cable_detect	= atiixp_cable_detect,
+	.set_piomode	= atiixp_set_piomode,
+	.set_dmamode	= atiixp_set_dmamode,
+	.prereset	= atiixp_pre_reset,
 };
 
 static int atiixp_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 {
 	static const struct ata_port_info info = {
-		.sht = &atiixp_sht,
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x06,	/* No MWDMA0 support */
@@ -282,7 +243,7 @@
 		.port_ops = &atiixp_port_ops
 	};
 	const struct ata_port_info *ppi[] = { &info, NULL };
-	return ata_pci_init_one(dev, ppi);
+	return ata_pci_sff_init_one(dev, ppi, &atiixp_sht, NULL);
 }
 
 static const struct pci_device_id atiixp[] = {
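
The scsi_host_template boilerplate shrinks to one of the new initializer macros: ATA_BASE_SHT, ATA_PIO_SHT for PIO-only drivers, or ATA_BMDMA_SHT for BMDMA drivers. Since each expands to designated initializers, a driver can still override an individual field after the macro, as atiixp does for its DMA engine that needs the dumb PRD limit:

static struct scsi_host_template foo_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
	/* a later designated initializer overrides the macro's default */
	.sg_tablesize	= LIBATA_DUMB_MAX_PRD,
};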
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
index 7f87f10..0a5ad98 100644
--- a/drivers/ata/pata_bf54x.c
+++ b/drivers/ata/pata_bf54x.c
@@ -674,7 +674,7 @@
  *	@ap: Port to which output is sent
  *	@tf: ATA taskfile register set
  *
- *	Note: Original code is ata_tf_load().
+ *	Note: Original code is ata_sff_tf_load().
  */
 
 static void bfin_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
@@ -745,7 +745,7 @@
  *	@ap: Port from which input is read
  *	@tf: ATA taskfile register set for storing input
  *
- *	Note: Original code is ata_tf_read().
+ *	Note: Original code is ata_sff_tf_read().
  */
 
 static void bfin_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
@@ -775,7 +775,7 @@
  *	@ap: port to which command is being issued
  *	@tf: ATA taskfile register set
  *
- *	Note: Original code is ata_exec_command().
+ *	Note: Original code is ata_sff_exec_command().
  */
 
 static void bfin_exec_command(struct ata_port *ap,
@@ -785,7 +785,7 @@
 	dev_dbg(ap->dev, "ata%u: cmd 0x%X\n", ap->print_id, tf->command);
 
 	write_atapi_register(base, ATA_REG_CMD, tf->command);
-	ata_pause(ap);
+	ata_sff_pause(ap);
 }
 
 /**
@@ -800,14 +800,14 @@
 }
 
 /**
- *	bfin_std_dev_select - Select device 0/1 on ATA bus
+ *	bfin_dev_select - Select device 0/1 on ATA bus
  *	@ap: ATA channel to manipulate
  *	@device: ATA device (numbered from zero) to select
  *
- *	Note: Original code is ata_std_dev_select().
+ *	Note: Original code is ata_sff_dev_select().
  */
 
-static void bfin_std_dev_select(struct ata_port *ap, unsigned int device)
+static void bfin_dev_select(struct ata_port *ap, unsigned int device)
 {
 	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
 	u8 tmp;
@@ -818,7 +818,7 @@
 		tmp = ATA_DEVICE_OBS | ATA_DEV1;
 
 	write_atapi_register(base, ATA_REG_DEVICE, tmp);
-	ata_pause(ap);
+	ata_sff_pause(ap);
 }
 
 /**
@@ -977,7 +977,7 @@
 	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
 	u8 nsect, lbal;
 
-	bfin_std_dev_select(ap, device);
+	bfin_dev_select(ap, device);
 
 	write_atapi_register(base, ATA_REG_NSECT, 0x55);
 	write_atapi_register(base, ATA_REG_LBAL, 0xaa);
@@ -1014,7 +1014,7 @@
 	 * BSY bit to clear
 	 */
 	if (dev0)
-		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
+		ata_sff_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
 
 	/* if device 1 was found in ata_devchk, wait for
 	 * register access, then wait for BSY to clear
@@ -1023,7 +1023,7 @@
 	while (dev1) {
 		u8 nsect, lbal;
 
-		bfin_std_dev_select(ap, 1);
+		bfin_dev_select(ap, 1);
 		nsect = read_atapi_register(base, ATA_REG_NSECT);
 		lbal = read_atapi_register(base, ATA_REG_LBAL);
 		if ((nsect == 1) && (lbal == 1))
@@ -1035,14 +1035,14 @@
 		msleep(50);	/* give drive a breather */
 	}
 	if (dev1)
-		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
+		ata_sff_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
 
 	/* is all this really necessary? */
-	bfin_std_dev_select(ap, 0);
+	bfin_dev_select(ap, 0);
 	if (dev1)
-		bfin_std_dev_select(ap, 1);
+		bfin_dev_select(ap, 1);
 	if (dev0)
-		bfin_std_dev_select(ap, 0);
+		bfin_dev_select(ap, 0);
 }
 
 /**
@@ -1088,26 +1088,21 @@
 }
 
 /**
- *	bfin_std_softreset - reset host port via ATA SRST
+ *	bfin_softreset - reset host port via ATA SRST
  *	@ap: port to reset
  *	@classes: resulting classes of attached devices
  *
- *	Note: Original code is ata_std_softreset().
+ *	Note: Original code is ata_sff_softreset().
  */
 
-static int bfin_std_softreset(struct ata_link *link, unsigned int *classes,
-		unsigned long deadline)
+static int bfin_softreset(struct ata_link *link, unsigned int *classes,
+			  unsigned long deadline)
 {
 	struct ata_port *ap = link->ap;
 	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
 	unsigned int devmask = 0, err_mask;
 	u8 err;
 
-	if (ata_link_offline(link)) {
-		classes[0] = ATA_DEV_NONE;
-		goto out;
-	}
-
 	/* determine if device 0/1 are present */
 	if (bfin_devchk(ap, 0))
 		devmask |= (1 << 0);
@@ -1115,7 +1110,7 @@
 		devmask |= (1 << 1);
 
 	/* select device 0 again */
-	bfin_std_dev_select(ap, 0);
+	bfin_dev_select(ap, 0);
 
 	/* issue bus reset */
 	err_mask = bfin_bus_softreset(ap, devmask);
@@ -1126,13 +1121,12 @@
 	}
 
 	/* determine by signature whether we have ATA or ATAPI devices */
-	classes[0] = ata_dev_try_classify(&ap->link.device[0],
+	classes[0] = ata_sff_dev_classify(&ap->link.device[0],
 				devmask & (1 << 0), &err);
 	if (slave_possible && err != 0x81)
-		classes[1] = ata_dev_try_classify(&ap->link.device[1],
+		classes[1] = ata_sff_dev_classify(&ap->link.device[1],
 					devmask & (1 << 1), &err);
 
- out:
 	return 0;
 }
 
@@ -1167,7 +1161,7 @@
  *	@buflen: buffer length
  *	@write_data: read/write
  *
- *	Note: Original code is ata_data_xfer().
+ *	Note: Original code is ata_sff_data_xfer().
  */
 
 static unsigned int bfin_data_xfer(struct ata_device *dev, unsigned char *buf,
@@ -1206,7 +1200,7 @@
  *	bfin_irq_clear - Clear ATAPI interrupt.
  *	@ap: Port associated with this ATA transaction.
  *
- *	Note: Original code is ata_bmdma_irq_clear().
+ *	Note: Original code is ata_sff_irq_clear().
  */
 
 static void bfin_irq_clear(struct ata_port *ap)
@@ -1223,7 +1217,7 @@
  *	bfin_irq_on - Enable interrupts on a port.
  *	@ap: Port on which interrupts are enabled.
  *
- *	Note: Original code is ata_irq_on().
+ *	Note: Original code is ata_sff_irq_on().
  */
 
 static unsigned char bfin_irq_on(struct ata_port *ap)
@@ -1244,13 +1238,13 @@
 }
 
 /**
- *	bfin_bmdma_freeze - Freeze DMA controller port
+ *	bfin_freeze - Freeze DMA controller port
  *	@ap: port to freeze
  *
- *	Note: Original code is ata_bmdma_freeze().
+ *	Note: Original code is ata_sff_freeze().
  */
 
-static void bfin_bmdma_freeze(struct ata_port *ap)
+static void bfin_freeze(struct ata_port *ap)
 {
 	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
 
@@ -1264,19 +1258,19 @@
 	 * ATA_NIEN manipulation.  Also, many controllers fail to mask
 	 * previously pending IRQ on ATA_NIEN assertion.  Clear it.
 	 */
-	ata_chk_status(ap);
+	ap->ops->sff_check_status(ap);
 
 	bfin_irq_clear(ap);
 }
 
 /**
- *	bfin_bmdma_thaw - Thaw DMA controller port
+ *	bfin_thaw - Thaw DMA controller port
  *	@ap: port to thaw
  *
- *	Note: Original code is ata_bmdma_thaw().
+ *	Note: Original code is ata_sff_thaw().
  */
 
-void bfin_bmdma_thaw(struct ata_port *ap)
+void bfin_thaw(struct ata_port *ap)
 {
 	bfin_check_status(ap);
 	bfin_irq_clear(ap);
@@ -1284,14 +1278,14 @@
 }
 
 /**
- *	bfin_std_postreset - standard postreset callback
+ *	bfin_postreset - standard postreset callback
  *	@ap: the target ata_port
  *	@classes: classes of attached devices
  *
- *	Note: Original code is ata_std_postreset().
+ *	Note: Original code is ata_sff_postreset().
  */
 
-static void bfin_std_postreset(struct ata_link *link, unsigned int *classes)
+static void bfin_postreset(struct ata_link *link, unsigned int *classes)
 {
 	struct ata_port *ap = link->ap;
 	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
@@ -1301,9 +1295,9 @@
 
 	/* is double-select really necessary? */
 	if (classes[0] != ATA_DEV_NONE)
-		bfin_std_dev_select(ap, 1);
+		bfin_dev_select(ap, 1);
 	if (classes[1] != ATA_DEV_NONE)
-		bfin_std_dev_select(ap, 0);
+		bfin_dev_select(ap, 0);
 
 	/* bail out if no device is present */
 	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
@@ -1314,17 +1308,6 @@
 	write_atapi_register(base, ATA_REG_CTRL, ap->ctl);
 }
 
-/**
- *	bfin_error_handler - Stock error handler for DMA controller
- *	@ap: port to handle error for
- */
-
-static void bfin_error_handler(struct ata_port *ap)
-{
-	ata_bmdma_drive_eh(ap, ata_std_prereset, bfin_std_softreset, NULL,
-			   bfin_std_postreset);
-}
-
 static void bfin_port_stop(struct ata_port *ap)
 {
 	dev_dbg(ap->dev, "in atapi port stop\n");
@@ -1357,51 +1340,40 @@
 }
 
 static struct scsi_host_template bfin_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
+	ATA_BASE_SHT(DRV_NAME),
 	.sg_tablesize		= SG_NONE,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
 };
 
 static const struct ata_port_operations bfin_pata_ops = {
+	.inherits		= &ata_sff_port_ops,
+
 	.set_piomode		= bfin_set_piomode,
 	.set_dmamode		= bfin_set_dmamode,
 
-	.tf_load		= bfin_tf_load,
-	.tf_read		= bfin_tf_read,
-	.exec_command		= bfin_exec_command,
-	.check_status		= bfin_check_status,
-	.check_altstatus	= bfin_check_altstatus,
-	.dev_select		= bfin_std_dev_select,
+	.sff_tf_load		= bfin_tf_load,
+	.sff_tf_read		= bfin_tf_read,
+	.sff_exec_command	= bfin_exec_command,
+	.sff_check_status	= bfin_check_status,
+	.sff_check_altstatus	= bfin_check_altstatus,
+	.sff_dev_select		= bfin_dev_select,
 
 	.bmdma_setup		= bfin_bmdma_setup,
 	.bmdma_start		= bfin_bmdma_start,
 	.bmdma_stop		= bfin_bmdma_stop,
 	.bmdma_status		= bfin_bmdma_status,
-	.data_xfer		= bfin_data_xfer,
+	.sff_data_xfer		= bfin_data_xfer,
 
 	.qc_prep		= ata_noop_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
 
-	.freeze			= bfin_bmdma_freeze,
-	.thaw			= bfin_bmdma_thaw,
-	.error_handler		= bfin_error_handler,
+	.freeze			= bfin_freeze,
+	.thaw			= bfin_thaw,
+	.softreset		= bfin_softreset,
+	.postreset		= bfin_postreset,
 	.post_internal_cmd	= bfin_bmdma_stop,
 
-	.irq_handler		= ata_interrupt,
-	.irq_clear		= bfin_irq_clear,
-	.irq_on			= bfin_irq_on,
+	.sff_irq_clear		= bfin_irq_clear,
+	.sff_irq_on		= bfin_irq_on,
 
 	.port_start		= bfin_port_start,
 	.port_stop		= bfin_port_stop,
@@ -1409,7 +1381,6 @@
 
 static struct ata_port_info bfin_port_info[] = {
 	{
-		.sht		= &bfin_sht,
 		.flags		= ATA_FLAG_SLAVE_POSS
 				| ATA_FLAG_MMIO
 				| ATA_FLAG_NO_LEGACY,
@@ -1536,7 +1507,7 @@
 	}
 
 	if (ata_host_activate(host, platform_get_irq(pdev, 0),
-		ata_interrupt, IRQF_SHARED, &bfin_sht) != 0) {
+		ata_sff_interrupt, IRQF_SHARED, &bfin_sht) != 0) {
 		peripheral_free_list(atapi_io_port);
 		dev_err(&pdev->dev, "Failed to attach ATAPI device\n");
 		return -ENODEV;
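
The bfin conversion shows the other half of the rename: taskfile-level methods move into the sff_ namespace and are reached through the ops table (ap->ops->sff_check_status(ap)) instead of global helpers, so only drivers with a non-standard register file implement them. A trimmed sketch for such a controller, foo_* being placeholders:

static const struct ata_port_operations foo_pata_ops = {
	.inherits		= &ata_sff_port_ops,

	/* SFF register access routed through driver methods */
	.sff_tf_load		= foo_tf_load,
	.sff_tf_read		= foo_tf_read,
	.sff_check_status	= foo_check_status,
	.sff_data_xfer		= foo_data_xfer,

	.cable_detect		= ata_cable_40wire,
};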
diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c
index 43d198f..2de30b9 100644
--- a/drivers/ata/pata_cmd640.c
+++ b/drivers/ata/pata_cmd640.c
@@ -107,8 +107,8 @@
 		pci_write_config_byte(pdev, arttim + 1, (t.active << 4) | t.recover);
 	} else {
 		/* Save the shared timings for channel, they will be loaded
-		   by qc_issue_prot. Reloading the setup time is expensive
-		   so we keep a merged one loaded */
+		   by qc_issue. Reloading the setup time is expensive so we
+		   keep a merged one loaded */
 		pci_read_config_byte(pdev, ARTIM23, &reg);
 		reg &= 0x3F;
 		reg |= t.setup;
@@ -119,14 +119,14 @@
 
 
 /**
- *	cmd640_qc_issue_prot	-	command preparation hook
+ *	cmd640_qc_issue	-	command issue hook
  *	@qc: Command to be issued
  *
 *	Channel 1 has shared timings. We must reprogram the
 *	timing clock on each switch between drives 2 and 3.
  */
 
-static unsigned int cmd640_qc_issue_prot(struct ata_queued_cmd *qc)
+static unsigned int cmd640_qc_issue(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct ata_device *adev = qc->dev;
@@ -137,7 +137,7 @@
 		pci_write_config_byte(pdev, DRWTIM23, timing->reg58[adev->devno]);
 		timing->last = adev->devno;
 	}
-	return ata_qc_issue_prot(qc);
+	return ata_sff_qc_issue(qc);
 }
 
 /**
@@ -166,53 +166,16 @@
 }
 
 static struct scsi_host_template cmd640_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
 };
 
 static struct ata_port_operations cmd640_port_ops = {
-	.set_piomode	= cmd640_set_piomode,
-	.mode_filter	= ata_pci_default_filter,
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= ata_bmdma_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.inherits	= &ata_bmdma_port_ops,
+	/* In theory xfer_noirq is not needed once we kill the prefetcher */
+	.sff_data_xfer	= ata_sff_data_xfer_noirq,
+	.qc_issue	= cmd640_qc_issue,
 	.cable_detect	= ata_cable_40wire,
-
-	.bmdma_setup 	= ata_bmdma_setup,
-	.bmdma_start 	= ata_bmdma_start,
-	.bmdma_stop	= ata_bmdma_stop,
-	.bmdma_status 	= ata_bmdma_status,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= cmd640_qc_issue_prot,
-
-	/* In theory this is not needed once we kill the prefetcher */
-	.data_xfer	= ata_data_xfer_noirq,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
+	.set_piomode	= cmd640_set_piomode,
 	.port_start	= cmd640_port_start,
 };
 
@@ -248,26 +211,36 @@
 static int cmd640_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	static const struct ata_port_info info = {
-		.sht = &cmd640_sht,
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.port_ops = &cmd640_port_ops
 	};
 	const struct ata_port_info *ppi[] = { &info, NULL };
+	int rc;
+
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
 
 	cmd640_hardware_init(pdev);
-	return ata_pci_init_one(pdev, ppi);
+
+	return ata_pci_sff_init_one(pdev, ppi, &cmd640_sht, NULL);
 }
 
+#ifdef CONFIG_PM
 static int cmd640_reinit_one(struct pci_dev *pdev)
 {
+	struct ata_host *host = dev_get_drvdata(&pdev->dev);
+	int rc;
+
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc)
+		return rc;
 	cmd640_hardware_init(pdev);
-#ifdef CONFIG_PM
-	return ata_pci_device_resume(pdev);
-#else
+	ata_host_resume(host);
 	return 0;
-#endif
 }
+#endif
 
 static const struct pci_device_id cmd640[] = {
 	{ PCI_VDEVICE(CMD, 0x640), 0 },
@@ -281,8 +254,8 @@
 	.remove		= ata_pci_remove_one,
 #ifdef CONFIG_PM
 	.suspend	= ata_pci_device_suspend,
-#endif
 	.resume		= cmd640_reinit_one,
+#endif
 };
 
 static int __init cmd640_init(void)
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
index 7acbbd9..ddd09b7 100644
--- a/drivers/ata/pata_cmd64x.c
+++ b/drivers/ata/pata_cmd64x.c
@@ -266,120 +266,30 @@
 }
 
 static struct scsi_host_template cmd64x_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static const struct ata_port_operations cmd64x_base_ops = {
+	.inherits	= &ata_bmdma_port_ops,
+	.set_piomode	= cmd64x_set_piomode,
+	.set_dmamode	= cmd64x_set_dmamode,
 };
 
 static struct ata_port_operations cmd64x_port_ops = {
-	.set_piomode	= cmd64x_set_piomode,
-	.set_dmamode	= cmd64x_set_dmamode,
-	.mode_filter	= ata_pci_default_filter,
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= ata_bmdma_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.inherits	= &cmd64x_base_ops,
 	.cable_detect	= ata_cable_40wire,
-
-	.bmdma_setup 	= ata_bmdma_setup,
-	.bmdma_start 	= ata_bmdma_start,
-	.bmdma_stop	= ata_bmdma_stop,
-	.bmdma_status 	= ata_bmdma_status,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_port_start,
 };
 
 static struct ata_port_operations cmd646r1_port_ops = {
-	.set_piomode	= cmd64x_set_piomode,
-	.set_dmamode	= cmd64x_set_dmamode,
-	.mode_filter	= ata_pci_default_filter,
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= ata_bmdma_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-	.cable_detect	= ata_cable_40wire,
-
-	.bmdma_setup 	= ata_bmdma_setup,
-	.bmdma_start 	= ata_bmdma_start,
+	.inherits	= &cmd64x_base_ops,
 	.bmdma_stop	= cmd646r1_bmdma_stop,
-	.bmdma_status 	= ata_bmdma_status,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_port_start,
+	.cable_detect	= ata_cable_40wire,
 };
 
 static struct ata_port_operations cmd648_port_ops = {
-	.set_piomode	= cmd64x_set_piomode,
-	.set_dmamode	= cmd64x_set_dmamode,
-	.mode_filter	= ata_pci_default_filter,
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= ata_bmdma_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-	.cable_detect	= cmd648_cable_detect,
-
-	.bmdma_setup 	= ata_bmdma_setup,
-	.bmdma_start 	= ata_bmdma_start,
+	.inherits	= &cmd64x_base_ops,
 	.bmdma_stop	= cmd648_bmdma_stop,
-	.bmdma_status 	= ata_bmdma_status,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_port_start,
+	.cable_detect	= cmd648_cable_detect,
 };
 
 static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -388,21 +298,18 @@
 
 	static const struct ata_port_info cmd_info[6] = {
 		{	/* CMD 643 - no UDMA */
-			.sht = &cmd64x_sht,
 			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
 			.port_ops = &cmd64x_port_ops
 		},
 		{	/* CMD 646 with broken UDMA */
-			.sht = &cmd64x_sht,
 			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
 			.port_ops = &cmd64x_port_ops
 		},
 		{	/* CMD 646 with working UDMA */
-			.sht = &cmd64x_sht,
 			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
@@ -410,14 +317,12 @@
 			.port_ops = &cmd64x_port_ops
 		},
 		{	/* CMD 646 rev 1  */
-			.sht = &cmd64x_sht,
 			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
 			.port_ops = &cmd646r1_port_ops
 		},
 		{	/* CMD 648 */
-			.sht = &cmd64x_sht,
 			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
@@ -425,7 +330,6 @@
 			.port_ops = &cmd648_port_ops
 		},
 		{	/* CMD 649 */
-			.sht = &cmd64x_sht,
 			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
@@ -435,12 +339,17 @@
 	};
 	const struct ata_port_info *ppi[] = { &cmd_info[id->driver_data], NULL };
 	u8 mrdmode;
+	int rc;
+
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
 
 	pci_read_config_dword(pdev, PCI_CLASS_REVISION, &class_rev);
 	class_rev &= 0xFF;
 
 	if (id->driver_data == 0)	/* 643 */
-		ata_pci_clear_simplex(pdev);
+		ata_pci_bmdma_clear_simplex(pdev);
 
 	if (pdev->device == PCI_DEVICE_ID_CMD_646) {
 		/* Does UDMA work ? */
@@ -464,13 +373,20 @@
 	pci_write_config_byte(pdev, UDIDETCR0, 0xF0);
 #endif
 
-	return ata_pci_init_one(pdev, ppi);
+	return ata_pci_sff_init_one(pdev, ppi, &cmd64x_sht, NULL);
 }
 
 #ifdef CONFIG_PM
 static int cmd64x_reinit_one(struct pci_dev *pdev)
 {
+	struct ata_host *host = dev_get_drvdata(&pdev->dev);
 	u8 mrdmode;
+	int rc;
+
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc)
+		return rc;
+
 	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64);
 	pci_read_config_byte(pdev, MRDMODE, &mrdmode);
 	mrdmode &= ~ 0x30;	/* IRQ set up */
@@ -479,7 +395,8 @@
 #ifdef CONFIG_PPC
 	pci_write_config_byte(pdev, UDIDETCR0, 0xF0);
 #endif
-	return ata_pci_device_resume(pdev);
+	ata_host_resume(host);
+	return 0;
 }
 #endif
 
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
index 7ed279b..1186bcd 100644
--- a/drivers/ata/pata_cs5520.c
+++ b/drivers/ata/pata_cs5520.c
@@ -140,51 +140,16 @@
 }
 
 static struct scsi_host_template cs5520_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
+	ATA_BMDMA_SHT(DRV_NAME),
 	.sg_tablesize		= LIBATA_DUMB_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
 };
 
 static struct ata_port_operations cs5520_port_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	.qc_prep		= ata_sff_dumb_qc_prep,
+	.cable_detect		= ata_cable_40wire,
 	.set_piomode		= cs5520_set_piomode,
 	.set_dmamode		= cs5520_set_dmamode,
-
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
-
-	.freeze			= ata_bmdma_freeze,
-	.thaw			= ata_bmdma_thaw,
-	.error_handler		= ata_bmdma_error_handler,
-	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
-	.cable_detect		= ata_cable_40wire,
-
-	.bmdma_setup		= ata_bmdma_setup,
-	.bmdma_start		= ata_bmdma_start,
-	.bmdma_stop		= ata_bmdma_stop,
-	.bmdma_status		= ata_bmdma_status,
-	.qc_prep		= ata_dumb_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
-	.data_xfer		= ata_data_xfer,
-
-	.irq_clear		= ata_bmdma_irq_clear,
-	.irq_on			= ata_irq_on,
-
-	.port_start		= ata_sff_port_start,
 };
 
 static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -203,6 +168,10 @@
 	struct ata_ioports *ioaddr;
 	int i, rc;
 
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
 	/* IDE port enable bits */
 	pci_read_config_byte(pdev, 0x60, &pcicfg);
 
@@ -258,7 +227,7 @@
 	ioaddr->ctl_addr = iomap[1];
 	ioaddr->altstatus_addr = iomap[1];
 	ioaddr->bmdma_addr = iomap[4];
-	ata_std_ports(ioaddr);
+	ata_sff_std_ports(ioaddr);
 
 	ata_port_desc(host->ports[0],
 		      "cmd 0x%x ctl 0x%x", cmd_port[0], ctl_port[0]);
@@ -269,7 +238,7 @@
 	ioaddr->ctl_addr = iomap[3];
 	ioaddr->altstatus_addr = iomap[3];
 	ioaddr->bmdma_addr = iomap[4] + 8;
-	ata_std_ports(ioaddr);
+	ata_sff_std_ports(ioaddr);
 
 	ata_port_desc(host->ports[1],
 		      "cmd 0x%x ctl 0x%x", cmd_port[1], ctl_port[1]);
@@ -289,7 +258,7 @@
 			continue;
 
 		rc = devm_request_irq(&pdev->dev, irq[ap->port_no],
-				      ata_interrupt, 0, DRV_NAME, host);
+				      ata_sff_interrupt, 0, DRV_NAME, host);
 		if (rc)
 			return rc;
 
@@ -310,11 +279,20 @@
 
 static int cs5520_reinit_one(struct pci_dev *pdev)
 {
+	struct ata_host *host = dev_get_drvdata(&pdev->dev);
 	u8 pcicfg;
+	int rc;
+
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc)
+		return rc;
+
 	pci_read_config_byte(pdev, 0x60, &pcicfg);
 	if ((pcicfg & 0x40) == 0)
 		pci_write_config_byte(pdev, 0x60, pcicfg | 0x40);
-	return ata_pci_device_resume(pdev);
+
+	ata_host_resume(host);
+	return 0;
 }
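
cs5520 keeps its hand-rolled host setup because its channels sit behind separate BARs; only the helper name changes. ata_sff_std_ports() (formerly ata_std_ports()) derives the remaining taskfile register addresses from cmd_addr, so the driver fills in just the base pointers. Roughly, per port:

	struct ata_ioports *ioaddr = &host->ports[0]->ioaddr;

	ioaddr->cmd_addr	= iomap[0];	/* taskfile base */
	ioaddr->ctl_addr	= iomap[1];
	ioaddr->altstatus_addr	= iomap[1];
	ata_sff_std_ports(ioaddr);	/* data/error/.../command from cmd_addr */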
 
 /**
diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
index e1818fd..744beeb 100644
--- a/drivers/ata/pata_cs5530.c
+++ b/drivers/ata/pata_cs5530.c
@@ -133,7 +133,7 @@
 }
 
 /**
- *	cs5530_qc_issue_prot	-	command issue
+ *	cs5530_qc_issue		-	command issue
  *	@qc: command pending
  *
  *	Called when the libata layer is about to issue a command. We wrap
@@ -142,7 +142,7 @@
  *	one MWDMA/UDMA bit.
  */
 
-static unsigned int cs5530_qc_issue_prot(struct ata_queued_cmd *qc)
+static unsigned int cs5530_qc_issue(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct ata_device *adev = qc->dev;
@@ -157,59 +157,23 @@
 		    	cs5530_set_dmamode(ap, adev);
 	}
 
-	return ata_qc_issue_prot(qc);
+	return ata_sff_qc_issue(qc);
 }
 
 static struct scsi_host_template cs5530_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_DUMB_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
+	.sg_tablesize	= LIBATA_DUMB_MAX_PRD,
 };
 
 static struct ata_port_operations cs5530_port_ops = {
+	.inherits	= &ata_bmdma_port_ops,
+
+	.qc_prep 	= ata_sff_dumb_qc_prep,
+	.qc_issue	= cs5530_qc_issue,
+
+	.cable_detect	= ata_cable_40wire,
 	.set_piomode	= cs5530_set_piomode,
 	.set_dmamode	= cs5530_set_dmamode,
-	.mode_filter	= ata_pci_default_filter,
-
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.bmdma_setup 	= ata_bmdma_setup,
-	.bmdma_start 	= ata_bmdma_start,
-	.bmdma_stop	= ata_bmdma_stop,
-	.bmdma_status 	= ata_bmdma_status,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= ata_bmdma_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-	.cable_detect	= ata_cable_40wire,
-
-	.qc_prep 	= ata_dumb_qc_prep,
-	.qc_issue	= cs5530_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
 };
 
 static const struct dmi_system_id palmax_dmi_table[] = {
@@ -334,7 +298,6 @@
 static int cs5530_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	static const struct ata_port_info info = {
-		.sht = &cs5530_sht,
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
@@ -343,12 +306,16 @@
 	};
 	/* The docking connector doesn't do UDMA, and apparently not MWDMA either */
 	static const struct ata_port_info info_palmax_secondary = {
-		.sht = &cs5530_sht,
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.port_ops = &cs5530_port_ops
 	};
 	const struct ata_port_info *ppi[] = { &info, NULL };
+	int rc;
+
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
 
 	/* Chip initialisation */
 	if (cs5530_init_chip())
@@ -358,16 +325,25 @@
 		ppi[1] = &info_palmax_secondary;
 
 	/* Now kick off ATA set up */
-	return ata_pci_init_one(pdev, ppi);
+	return ata_pci_sff_init_one(pdev, ppi, &cs5530_sht, NULL);
 }
 
 #ifdef CONFIG_PM
 static int cs5530_reinit_one(struct pci_dev *pdev)
 {
+	struct ata_host *host = dev_get_drvdata(&pdev->dev);
+	int rc;
+
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc)
+		return rc;
+
 	/* If we fail on resume we are doomed */
 	if (cs5530_init_chip())
-		BUG();
-	return ata_pci_device_resume(pdev);
+		return -EIO;
+
+	ata_host_resume(host);
+	return 0;
 }
 #endif /* CONFIG_PM */
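
cmd640 and cs5530 both use the renamed issue hook the same way: wrap the issue path, reprogram shared timing hardware when the active device changes, then delegate to ata_sff_qc_issue() (formerly ata_qc_issue_prot()). In outline, with hypothetical foo_* helpers:

static unsigned int foo_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* shared timing registers: reload on a device switch */
	if (foo_last_device(ap) != qc->dev->devno)
		foo_reload_timings(ap, qc->dev);

	return ata_sff_qc_issue(qc);	/* normal SFF issue path */
}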
 
diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
index 0132453..f1b6556 100644
--- a/drivers/ata/pata_cs5535.c
+++ b/drivers/ata/pata_cs5535.c
@@ -158,55 +158,14 @@
 }
 
 static struct scsi_host_template cs5535_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
 };
 
 static struct ata_port_operations cs5535_port_ops = {
+	.inherits	= &ata_bmdma_port_ops,
+	.cable_detect	= cs5535_cable_detect,
 	.set_piomode	= cs5535_set_piomode,
 	.set_dmamode	= cs5535_set_dmamode,
-	.mode_filter	= ata_pci_default_filter,
-
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= ata_bmdma_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-	.cable_detect	= cs5535_cable_detect,
-
-	.bmdma_setup 	= ata_bmdma_setup,
-	.bmdma_start 	= ata_bmdma_start,
-	.bmdma_stop	= ata_bmdma_stop,
-	.bmdma_status 	= ata_bmdma_status,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
 };
 
 /**
@@ -222,7 +181,6 @@
 static int cs5535_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 {
 	static const struct ata_port_info info = {
-		.sht = &cs5535_sht,
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
@@ -241,7 +199,7 @@
 	rdmsr(ATAC_CH0D1_PIO, timings, dummy);
 	if (CS5535_BAD_PIO(timings))
 		wrmsr(ATAC_CH0D1_PIO, 0xF7F4F7F4UL, 0);
-	return ata_pci_init_one(dev, ppi);
+	return ata_pci_sff_init_one(dev, ppi, &cs5535_sht, NULL);
 }
 
 static const struct pci_device_id cs5535[] = {
diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
index 1c4ff9b..73f8332 100644
--- a/drivers/ata/pata_cs5536.c
+++ b/drivers/ata/pata_cs5536.c
@@ -221,55 +221,14 @@
 }
 
 static struct scsi_host_template cs5536_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
 };
 
 static struct ata_port_operations cs5536_port_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	.cable_detect		= cs5536_cable_detect,
 	.set_piomode		= cs5536_set_piomode,
 	.set_dmamode		= cs5536_set_dmamode,
-	.mode_filter		= ata_pci_default_filter,
-
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
-
-	.freeze			= ata_bmdma_freeze,
-	.thaw			= ata_bmdma_thaw,
-	.error_handler		= ata_bmdma_error_handler,
-	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
-	.cable_detect		= cs5536_cable_detect,
-
-	.bmdma_setup		= ata_bmdma_setup,
-	.bmdma_start		= ata_bmdma_start,
-	.bmdma_stop		= ata_bmdma_stop,
-	.bmdma_status		= ata_bmdma_status,
-
-	.qc_prep		= ata_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
-
-	.data_xfer		= ata_data_xfer,
-
-	.irq_handler		= ata_interrupt,
-	.irq_clear		= ata_bmdma_irq_clear,
-	.irq_on			= ata_irq_on,
-
-	.port_start		= ata_port_start,
 };
 
 /**
@@ -282,7 +241,6 @@
 static int cs5536_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 {
 	static const struct ata_port_info info = {
-		.sht = &cs5536_sht,
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
@@ -303,7 +261,7 @@
 		return -ENODEV;
 	}
 
-	return ata_pci_init_one(dev, ppi);
+	return ata_pci_sff_init_one(dev, ppi, &cs5536_sht, NULL);
 }
 
 static const struct pci_device_id cs5536[] = {
diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
index fc5f9c4..a9c3218 100644
--- a/drivers/ata/pata_cypress.c
+++ b/drivers/ata/pata_cypress.c
@@ -110,61 +110,19 @@
 }
 
 static struct scsi_host_template cy82c693_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
 };
 
 static struct ata_port_operations cy82c693_port_ops = {
+	.inherits	= &ata_bmdma_port_ops,
+	.cable_detect	= ata_cable_40wire,
 	.set_piomode	= cy82c693_set_piomode,
 	.set_dmamode	= cy82c693_set_dmamode,
-	.mode_filter	= ata_pci_default_filter,
-
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= ata_bmdma_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-	.cable_detect	= ata_cable_40wire,
-
-	.bmdma_setup 	= ata_bmdma_setup,
-	.bmdma_start 	= ata_bmdma_start,
-	.bmdma_stop	= ata_bmdma_stop,
-	.bmdma_status 	= ata_bmdma_status,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
 };
 
 static int cy82c693_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	static const struct ata_port_info info = {
-		.sht = &cy82c693_sht,
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
@@ -178,7 +136,7 @@
 	if (PCI_FUNC(pdev->devfn) != 1)
 		return -ENODEV;
 
-	return ata_pci_init_one(pdev, ppi);
+	return ata_pci_sff_init_one(pdev, ppi, &cy82c693_sht, NULL);
 }
 
 static const struct pci_device_id cy82c693[] = {
diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
index dc33220..9fba829 100644
--- a/drivers/ata/pata_efar.c
+++ b/drivers/ata/pata_efar.c
@@ -45,20 +45,7 @@
 	if (!pci_test_config_bits(pdev, &efar_enable_bits[ap->port_no]))
 		return -ENOENT;
 
-	return ata_std_prereset(link, deadline);
-}
-
-/**
- *	efar_probe_reset - Probe specified port on PATA host controller
- *	@ap: Port to probe
- *
- *	LOCKING:
- *	None (inherited from caller).
- */
-
-static void efar_error_handler(struct ata_port *ap)
-{
-	ata_bmdma_drive_eh(ap, efar_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
+	return ata_sff_prereset(link, deadline);
 }
 
 /**
@@ -233,53 +220,15 @@
 }
 
 static struct scsi_host_template efar_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
 };
 
-static const struct ata_port_operations efar_ops = {
+static struct ata_port_operations efar_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	.cable_detect		= efar_cable_detect,
 	.set_piomode		= efar_set_piomode,
 	.set_dmamode		= efar_set_dmamode,
-	.mode_filter		= ata_pci_default_filter,
-
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
-
-	.freeze			= ata_bmdma_freeze,
-	.thaw			= ata_bmdma_thaw,
-	.error_handler		= efar_error_handler,
-	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
-	.cable_detect		= efar_cable_detect,
-
-	.bmdma_setup		= ata_bmdma_setup,
-	.bmdma_start		= ata_bmdma_start,
-	.bmdma_stop		= ata_bmdma_stop,
-	.bmdma_status		= ata_bmdma_status,
-	.qc_prep		= ata_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
-	.data_xfer		= ata_data_xfer,
-
-	.irq_handler		= ata_interrupt,
-	.irq_clear		= ata_bmdma_irq_clear,
-	.irq_on			= ata_irq_on,
-
-	.port_start		= ata_sff_port_start,
+	.prereset		= efar_pre_reset,
 };
 
 
@@ -301,7 +250,6 @@
 {
 	static int printed_version;
 	static const struct ata_port_info info = {
-		.sht		= &efar_sht,
 		.flags		= ATA_FLAG_SLAVE_POSS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma1-2 */
@@ -314,7 +262,7 @@
 		dev_printk(KERN_DEBUG, &pdev->dev,
 			   "version " DRV_VERSION "\n");
 
-	return ata_pci_init_one(pdev, ppi);
+	return ata_pci_sff_init_one(pdev, ppi, &efar_sht, NULL);
 }
 
 static const struct pci_device_id efar_pci_tbl[] = {
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index a742efa..f2b83ea 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -184,7 +184,7 @@
 		if (hpt_dma_blacklisted(adev, "UDMA4", bad_ata66_4))
 			mask &= ~(0xF0 << ATA_SHIFT_UDMA);
 	}
-	return ata_pci_default_filter(adev, mask);
+	return ata_bmdma_mode_filter(adev, mask);
 }
 
 /**
@@ -290,21 +290,7 @@
 }
 
 static struct scsi_host_template hpt36x_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
 };
 
 /*
@@ -312,37 +298,11 @@
  */
 
 static struct ata_port_operations hpt366_port_ops = {
+	.inherits	= &ata_bmdma_port_ops,
+	.cable_detect	= hpt36x_cable_detect,
+	.mode_filter	= hpt366_filter,
 	.set_piomode	= hpt366_set_piomode,
 	.set_dmamode	= hpt366_set_dmamode,
-	.mode_filter	= hpt366_filter,
-
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= ata_bmdma_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-	.cable_detect	= hpt36x_cable_detect,
-
-	.bmdma_setup 	= ata_bmdma_setup,
-	.bmdma_start 	= ata_bmdma_start,
-	.bmdma_stop	= ata_bmdma_stop,
-	.bmdma_status 	= ata_bmdma_status,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
 };
 
 /**
@@ -390,18 +350,22 @@
 static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 {
 	static const struct ata_port_info info_hpt366 = {
-		.sht = &hpt36x_sht,
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
 		.udma_mask = ATA_UDMA4,
 		.port_ops = &hpt366_port_ops
 	};
-	struct ata_port_info info = info_hpt366;
-	const struct ata_port_info *ppi[] = { &info, NULL };
+	const struct ata_port_info *ppi[] = { &info_hpt366, NULL };
 
+	void *hpriv = NULL;
 	u32 class_rev;
 	u32 reg1;
+	int rc;
+
+	rc = pcim_enable_device(dev);
+	if (rc)
+		return rc;
 
 	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev);
 	class_rev &= 0xFF;
@@ -419,24 +383,31 @@
 	/* info_hpt366 is safe against re-entry so we can scribble on it */
 	switch((reg1 & 0x700) >> 8) {
 		case 5:
-			info.private_data = &hpt366_40;
+			hpriv = &hpt366_40;
 			break;
 		case 9:
-			info.private_data = &hpt366_25;
+			hpriv = &hpt366_25;
 			break;
 		default:
-			info.private_data = &hpt366_33;
+			hpriv = &hpt366_33;
 			break;
 	}
 	/* Now kick off ATA set up */
-	return ata_pci_init_one(dev, ppi);
+	return ata_pci_sff_init_one(dev, ppi, &hpt36x_sht, hpriv);
 }
 
 #ifdef CONFIG_PM
 static int hpt36x_reinit_one(struct pci_dev *dev)
 {
+	struct ata_host *host = dev_get_drvdata(&dev->dev);
+	int rc;
+
+	rc = ata_pci_device_do_resume(dev);
+	if (rc)
+		return rc;
 	hpt36x_init_chipset(dev);
-	return ata_pci_device_resume(dev);
+	ata_host_resume(host);
+	return 0;
 }
 #endif
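
The suspend/resume path is reshaped the same way in every driver that must re-program its chipset on resume: ata_pci_device_resume() is split so the chip can be reinitialized after the PCI device is awake but before libata restarts the ports. A sketch of the pattern, with foo_init_chipset() standing in for whatever setup a given driver repeats:

	#ifdef CONFIG_PM
	static int foo_reinit_one(struct pci_dev *pdev)
	{
		struct ata_host *host = dev_get_drvdata(&pdev->dev);
		int rc;

		rc = ata_pci_device_do_resume(pdev);	/* wake the PCI device only */
		if (rc)
			return rc;
		foo_init_chipset(pdev);			/* redo chip setup while ports are idle */
		ata_host_resume(host);			/* then let libata resume the ports */
		return 0;
	}
	#endif
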
 
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index 9a10878..4216399 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -283,7 +283,7 @@
 		if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5))
 			mask &= ~(0xE0 << ATA_SHIFT_UDMA);
 	}
-	return ata_pci_default_filter(adev, mask);
+	return ata_bmdma_mode_filter(adev, mask);
 }
 
 /**
@@ -299,7 +299,7 @@
 		if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5))
 			mask &= ~(0xE0 << ATA_SHIFT_UDMA);
 	}
-	return ata_pci_default_filter(adev, mask);
+	return ata_bmdma_mode_filter(adev, mask);
 }
 
 /**
@@ -338,22 +338,10 @@
 	pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
 	udelay(100);
 
-	return ata_std_prereset(link, deadline);
+	return ata_sff_prereset(link, deadline);
 }
 
-/**
- *	hpt37x_error_handler	-	reset the hpt374
- *	@ap: ATA port to reset
- *
- *	Perform probe for HPT37x, except for HPT374 channel 2
- */
-
-static void hpt37x_error_handler(struct ata_port *ap)
-{
-	ata_bmdma_drive_eh(ap, hpt37x_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
-}
-
-static int hpt374_pre_reset(struct ata_link *link, unsigned long deadline)
+static int hpt374_fn1_pre_reset(struct ata_link *link, unsigned long deadline)
 {
 	static const struct pci_bits hpt37x_enable_bits[] = {
 		{ 0x50, 1, 0x04, 0x04 },
@@ -386,26 +374,7 @@
 	pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
 	udelay(100);
 
-	return ata_std_prereset(link, deadline);
-}
-
-/**
- *	hpt374_error_handler	-	reset the hpt374
- *	@classes:
- *
- *	The 374 cable detect is a little different due to the extra
- *	channels. The function 0 channels work like usual but function 1
- *	is special
- */
-
-static void hpt374_error_handler(struct ata_port *ap)
-{
-	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
-
-	if (!(PCI_FUNC(pdev->devfn) & 1))
-		hpt37x_error_handler(ap);
-	else
-		ata_bmdma_drive_eh(ap, hpt374_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
+	return ata_sff_prereset(link, deadline);
 }
 
 /**
@@ -619,21 +588,7 @@
 
 
 static struct scsi_host_template hpt37x_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
 };
 
 /*
@@ -641,36 +596,15 @@
  */
 
 static struct ata_port_operations hpt370_port_ops = {
-	.set_piomode	= hpt370_set_piomode,
-	.set_dmamode	= hpt370_set_dmamode,
-	.mode_filter	= hpt370_filter,
+	.inherits	= &ata_bmdma_port_ops,
 
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= hpt37x_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-
-	.bmdma_setup 	= ata_bmdma_setup,
 	.bmdma_start 	= hpt370_bmdma_start,
 	.bmdma_stop	= hpt370_bmdma_stop,
-	.bmdma_status 	= ata_bmdma_status,
 
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
+	.mode_filter	= hpt370_filter,
+	.set_piomode	= hpt370_set_piomode,
+	.set_dmamode	= hpt370_set_dmamode,
+	.prereset	= hpt37x_pre_reset,
 };
 
 /*
@@ -678,36 +612,8 @@
  */
 
 static struct ata_port_operations hpt370a_port_ops = {
-	.set_piomode	= hpt370_set_piomode,
-	.set_dmamode	= hpt370_set_dmamode,
+	.inherits	= &hpt370_port_ops,
 	.mode_filter	= hpt370a_filter,
-
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= hpt37x_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-
-	.bmdma_setup 	= ata_bmdma_setup,
-	.bmdma_start 	= hpt370_bmdma_start,
-	.bmdma_stop	= hpt370_bmdma_stop,
-	.bmdma_status 	= ata_bmdma_status,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
 };
 
 /*
@@ -716,74 +622,23 @@
  */
 
 static struct ata_port_operations hpt372_port_ops = {
+	.inherits	= &ata_bmdma_port_ops,
+
+	.bmdma_stop	= hpt37x_bmdma_stop,
+
 	.set_piomode	= hpt372_set_piomode,
 	.set_dmamode	= hpt372_set_dmamode,
-	.mode_filter	= ata_pci_default_filter,
-
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= hpt37x_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-
-	.bmdma_setup 	= ata_bmdma_setup,
-	.bmdma_start 	= ata_bmdma_start,
-	.bmdma_stop	= hpt37x_bmdma_stop,
-	.bmdma_status 	= ata_bmdma_status,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
+	.prereset	= hpt37x_pre_reset,
 };
 
 /*
  *	Configuration for HPT374. Mode setting works like 372 and friends
- *	but we have a different cable detection procedure.
+ *	but we have a different cable detection procedure for function 1.
  */
 
-static struct ata_port_operations hpt374_port_ops = {
-	.set_piomode	= hpt372_set_piomode,
-	.set_dmamode	= hpt372_set_dmamode,
-	.mode_filter	= ata_pci_default_filter,
-
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= hpt374_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-
-	.bmdma_setup 	= ata_bmdma_setup,
-	.bmdma_start 	= ata_bmdma_start,
-	.bmdma_stop	= hpt37x_bmdma_stop,
-	.bmdma_status 	= ata_bmdma_status,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
+static struct ata_port_operations hpt374_fn1_port_ops = {
+	.inherits	= &hpt372_port_ops,
+	.prereset	= hpt374_fn1_pre_reset,
 };
 
 /**
@@ -897,7 +752,6 @@
 {
 	/* HPT370 - UDMA100 */
 	static const struct ata_port_info info_hpt370 = {
-		.sht = &hpt37x_sht,
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
@@ -906,7 +760,6 @@
 	};
 	/* HPT370A - UDMA100 */
 	static const struct ata_port_info info_hpt370a = {
-		.sht = &hpt37x_sht,
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
@@ -915,7 +768,6 @@
 	};
 	/* HPT370 - UDMA100 */
 	static const struct ata_port_info info_hpt370_33 = {
-		.sht = &hpt37x_sht,
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
@@ -924,7 +776,6 @@
 	};
 	/* HPT370A - UDMA100 */
 	static const struct ata_port_info info_hpt370a_33 = {
-		.sht = &hpt37x_sht,
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
@@ -933,28 +784,31 @@
 	};
 	/* HPT371, 372 and friends - UDMA133 */
 	static const struct ata_port_info info_hpt372 = {
-		.sht = &hpt37x_sht,
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
 		.udma_mask = ATA_UDMA6,
 		.port_ops = &hpt372_port_ops
 	};
-	/* HPT374 - UDMA100 */
-	static const struct ata_port_info info_hpt374 = {
-		.sht = &hpt37x_sht,
+	/* HPT374 - UDMA100, function 1 uses different prereset method */
+	static const struct ata_port_info info_hpt374_fn0 = {
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
 		.udma_mask = ATA_UDMA5,
-		.port_ops = &hpt374_port_ops
+		.port_ops = &hpt372_port_ops
+	};
+	static const struct ata_port_info info_hpt374_fn1 = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = 0x1f,
+		.mwdma_mask = 0x07,
+		.udma_mask = ATA_UDMA5,
+		.port_ops = &hpt374_fn1_port_ops
 	};
 
 	static const int MHz[4] = { 33, 40, 50, 66 };
-	const struct ata_port_info *port;
 	void *private_data = NULL;
-	struct ata_port_info port_info;
-	const struct ata_port_info *ppi[] = { &port_info, NULL };
+	const struct ata_port_info *ppi[] = { NULL, NULL };
 
 	u8 irqmask;
 	u32 class_rev;
@@ -966,6 +820,11 @@
 
 	const struct hpt_chip *chip_table;
 	int clock_slot;
+	int rc;
+
+	rc = pcim_enable_device(dev);
+	if (rc)
+		return rc;
 
 	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev);
 	class_rev &= 0xFF;
@@ -981,17 +840,17 @@
 
 		switch(class_rev) {
 			case 3:
-				port = &info_hpt370;
+				ppi[0] = &info_hpt370;
 				chip_table = &hpt370;
 				prefer_dpll = 0;
 				break;
 			case 4:
-				port = &info_hpt370a;
+				ppi[0] = &info_hpt370a;
 				chip_table = &hpt370a;
 				prefer_dpll = 0;
 				break;
 			case 5:
-				port = &info_hpt372;
+				ppi[0] = &info_hpt372;
 				chip_table = &hpt372;
 				break;
 			default:
@@ -1004,21 +863,21 @@
 				/* 372N if rev >= 2*/
 				if (class_rev >= 2)
 					return -ENODEV;
-				port = &info_hpt372;
+				ppi[0] = &info_hpt372;
 				chip_table = &hpt372a;
 				break;
 			case PCI_DEVICE_ID_TTI_HPT302:
 				/* 302N if rev > 1 */
 				if (class_rev > 1)
 					return -ENODEV;
-				port = &info_hpt372;
+				ppi[0] = &info_hpt372;
 				/* Check this */
 				chip_table = &hpt302;
 				break;
 			case PCI_DEVICE_ID_TTI_HPT371:
 				if (class_rev > 1)
 					return -ENODEV;
-				port = &info_hpt372;
+				ppi[0] = &info_hpt372;
 				chip_table = &hpt371;
 				/* Single channel device, master is not present
 				   but the BIOS (or us for non x86) must mark it
@@ -1029,7 +888,10 @@
 				break;
 			case PCI_DEVICE_ID_TTI_HPT374:
 				chip_table = &hpt374;
-				port = &info_hpt374;
+				if (!(PCI_FUNC(dev->devfn) & 1))
+					*ppi = &info_hpt374_fn0;
+				else
+					*ppi = &info_hpt374_fn1;
 				break;
 			default:
 				printk(KERN_ERR "pata_hpt37x: PCI table is bogus please report (%d).\n", dev->device);
@@ -1108,7 +970,7 @@
 		int dpll, adjust;
 
 		/* Compute DPLL */
-		dpll = (port->udma_mask & 0xC0) ? 3 : 2;
+		dpll = (ppi[0]->udma_mask & 0xC0) ? 3 : 2;
 
 		f_low = (MHz[clock_slot] * 48) / MHz[dpll];
 		f_high = f_low + 2;
@@ -1148,19 +1010,16 @@
 		 *	about lack of UDMA133 support on lower clocks
  		 */
 
-		if (clock_slot < 2 && port == &info_hpt370)
-			port = &info_hpt370_33;
-		if (clock_slot < 2 && port == &info_hpt370a)
-			port = &info_hpt370a_33;
+		if (clock_slot < 2 && ppi[0] == &info_hpt370)
+			ppi[0] = &info_hpt370_33;
+		if (clock_slot < 2 && ppi[0] == &info_hpt370a)
+			ppi[0] = &info_hpt370a_33;
 		printk(KERN_INFO "pata_hpt37x: %s using %dMHz bus clock.\n",
 		       chip_table->name, MHz[clock_slot]);
 	}
 
 	/* Now kick off ATA set up */
-	port_info = *port;
-	port_info.private_data = private_data;
-
-	return ata_pci_init_one(dev, ppi);
+	return ata_pci_sff_init_one(dev, ppi, &hpt37x_sht, private_data);
 }
 
 static const struct pci_device_id hpt37x[] = {
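
Three more idioms show up in the hpt37x conversion. First, ops tables stack: hpt370a_port_ops and hpt374_fn1_port_ops inherit a sibling table rather than ata_bmdma_port_ops and override a single hook. Second, because ata_port_info can now stay const, the probe stops copying and scribbling on a local struct; it selects one of several const tables into ppi[0] and passes per-host data through the hpriv argument. Third, probes that touch the device before calling ata_pci_sff_init_one() now enable it up front with the managed pcim_enable_device(), which is harmless if the init helper enables it again later. Illustrative shape only, with hypothetical foo_*/info_fn* names:

	static struct ata_port_operations foo_port_ops = {
		.inherits	= &ata_bmdma_port_ops,
		.prereset	= foo_pre_reset,
	};

	/* variant: everything from foo_port_ops except the prereset hook */
	static struct ata_port_operations foo_fn1_port_ops = {
		.inherits	= &foo_port_ops,
		.prereset	= foo_fn1_pre_reset,
	};

	/* in the probe: pick a const port_info instead of patching a copy */
	ppi[0] = (PCI_FUNC(dev->devfn) & 1) ? &info_fn1 : &info_fn0;
	return ata_pci_sff_init_one(dev, ppi, &foo_sht, private_data);
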
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
index 9f1c084..d5c9fd7 100644
--- a/drivers/ata/pata_hpt3x2n.c
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -148,7 +148,7 @@
  *	Reset the hardware and state machine,
  */
 
-static int hpt3xn_pre_reset(struct ata_link *link, unsigned long deadline)
+static int hpt3x2n_pre_reset(struct ata_link *link, unsigned long deadline)
 {
 	struct ata_port *ap = link->ap;
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
@@ -156,19 +156,7 @@
 	pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
 	udelay(100);
 
-	return ata_std_prereset(link, deadline);
-}
-
-/**
- *	hpt3x2n_error_handler	-	probe the hpt3x2n bus
- *	@ap: ATA port to reset
- *
- *	Perform the probe reset handling for the 3x2N
- */
-
-static void hpt3x2n_error_handler(struct ata_port *ap)
-{
-	ata_bmdma_drive_eh(ap, hpt3xn_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
+	return ata_sff_prereset(link, deadline);
 }
 
 /**
@@ -320,7 +308,7 @@
 	return 0;
 }
 
-static unsigned int hpt3x2n_qc_issue_prot(struct ata_queued_cmd *qc)
+static unsigned int hpt3x2n_qc_issue(struct ata_queued_cmd *qc)
 {
 	struct ata_taskfile *tf = &qc->tf;
 	struct ata_port *ap = qc->ap;
@@ -335,25 +323,11 @@
 				hpt3x2n_set_clock(ap, 0x23);
 		}
 	}
-	return ata_qc_issue_prot(qc);
+	return ata_sff_qc_issue(qc);
 }
 
 static struct scsi_host_template hpt3x2n_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
 };
 
 /*
@@ -361,37 +335,15 @@
  */
 
 static struct ata_port_operations hpt3x2n_port_ops = {
+	.inherits	= &ata_bmdma_port_ops,
+
+	.bmdma_stop	= hpt3x2n_bmdma_stop,
+	.qc_issue	= hpt3x2n_qc_issue,
+
+	.cable_detect	= hpt3x2n_cable_detect,
 	.set_piomode	= hpt3x2n_set_piomode,
 	.set_dmamode	= hpt3x2n_set_dmamode,
-	.mode_filter	= ata_pci_default_filter,
-
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= hpt3x2n_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-	.cable_detect	= hpt3x2n_cable_detect,
-
-	.bmdma_setup 	= ata_bmdma_setup,
-	.bmdma_start 	= ata_bmdma_start,
-	.bmdma_stop	= hpt3x2n_bmdma_stop,
-	.bmdma_status 	= ata_bmdma_status,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= hpt3x2n_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
+	.prereset	= hpt3x2n_pre_reset,
 };
 
 /**
@@ -488,15 +440,13 @@
 {
 	/* HPT372N and friends - UDMA133 */
 	static const struct ata_port_info info = {
-		.sht = &hpt3x2n_sht,
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
 		.udma_mask = ATA_UDMA6,
 		.port_ops = &hpt3x2n_port_ops
 	};
-	struct ata_port_info port = info;
-	const struct ata_port_info *ppi[] = { &port, NULL };
+	const struct ata_port_info *ppi[] = { &info, NULL };
 
 	u8 irqmask;
 	u32 class_rev;
@@ -505,6 +455,12 @@
 	unsigned int f_low, f_high;
 	int adjust;
 	unsigned long iobase = pci_resource_start(dev, 4);
+	void *hpriv = NULL;
+	int rc;
+
+	rc = pcim_enable_device(dev);
+	if (rc)
+		return rc;
 
 	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev);
 	class_rev &= 0xFF;
@@ -586,9 +542,8 @@
 	       pci_mhz);
 	/* Set our private data up. We only need a few flags so we use
 	   it directly */
-	port.private_data = NULL;
 	if (pci_mhz > 60) {
-		port.private_data = (void *)PCI66;
+		hpriv = (void *)PCI66;
 		/*
 		 * On  HPT371N, if ATA clock is 66 MHz we must set bit 2 in
 		 * the MISC. register to stretch the UltraDMA Tss timing.
@@ -599,7 +554,7 @@
 	}
 
 	/* Now kick off ATA set up */
-	return ata_pci_init_one(dev, ppi);
+	return ata_pci_sff_init_one(dev, ppi, &hpt3x2n_sht, hpriv);
 }
 
 static const struct pci_device_id hpt3x2n[] = {
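
Running through all of these conversions is a mechanical rename: the taskfile helpers shared by SFF-style controllers gain an ata_sff_/sff_ prefix to mark them as SFF-specific rather than core libata. The substitutions seen in this series:

	ata_std_prereset         -> ata_sff_prereset
	ata_std_postreset        -> ata_sff_postreset
	ata_qc_issue_prot        -> ata_sff_qc_issue
	ata_interrupt            -> ata_sff_interrupt
	ata_altstatus            -> ata_sff_altstatus
	ata_std_ports            -> ata_sff_std_ports
	ata_data_xfer[_noirq]    -> ata_sff_data_xfer[_noirq]   (ops slot: .sff_data_xfer)
	ata_std_dev_select       -> ata_sff_dev_select          (ops slot: .sff_dev_select)
	->exec_command           -> ->sff_exec_command
	ata_pci_default_filter   -> ata_bmdma_mode_filter
	ata_pci_init_one         -> ata_pci_sff_init_one        (takes sht and hpriv)

Driver-local wrappers are renamed to match (hpt3x2n_qc_issue_prot becomes hpt3x2n_qc_issue, and so on); they keep their job and only change which helper they chain to, returning ata_sff_qc_issue(qc) where they used to return ata_qc_issue_prot(qc).
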
diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
index cb8bdb6..f11a320 100644
--- a/drivers/ata/pata_hpt3x3.c
+++ b/drivers/ata/pata_hpt3x3.c
@@ -102,58 +102,17 @@
 }
 
 static struct scsi_host_template hpt3x3_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
 };
 
 static struct ata_port_operations hpt3x3_port_ops = {
+	.inherits	= &ata_bmdma_port_ops,
+	.check_atapi_dma= hpt3x3_atapi_dma,
+	.cable_detect	= ata_cable_40wire,
 	.set_piomode	= hpt3x3_set_piomode,
 #if defined(CONFIG_PATA_HPT3X3_DMA)
 	.set_dmamode	= hpt3x3_set_dmamode,
 #endif
-	.mode_filter	= ata_pci_default_filter,
-
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= ata_bmdma_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-	.cable_detect	= ata_cable_40wire,
-
-	.bmdma_setup 	= ata_bmdma_setup,
-	.bmdma_start 	= ata_bmdma_start,
-	.bmdma_stop	= ata_bmdma_stop,
-	.bmdma_status 	= ata_bmdma_status,
-	.check_atapi_dma= hpt3x3_atapi_dma,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
 };
 
 /**
@@ -189,7 +148,6 @@
 {
 	static int printed_version;
 	static const struct ata_port_info info = {
-		.sht = &hpt3x3_sht,
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 #if defined(CONFIG_PATA_HPT3X3_DMA)
@@ -244,15 +202,15 @@
 		ioaddr->altstatus_addr =
 		ioaddr->ctl_addr = base + offset_ctl[i];
 		ioaddr->scr_addr = NULL;
-		ata_std_ports(ioaddr);
+		ata_sff_std_ports(ioaddr);
 		ioaddr->bmdma_addr = base + 8 * i;
 
 		ata_port_pbar_desc(ap, 4, -1, "ioport");
 		ata_port_pbar_desc(ap, 4, offset_cmd[i], "cmd");
 	}
 	pci_set_master(pdev);
-	return ata_host_activate(host, pdev->irq, ata_interrupt, IRQF_SHARED,
-				 &hpt3x3_sht);
+	return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+				 IRQF_SHARED, &hpt3x3_sht);
 }
 
 #ifdef CONFIG_PM
diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
index f97068b..1713843 100644
--- a/drivers/ata/pata_icside.c
+++ b/drivers/ata/pata_icside.c
@@ -250,7 +250,7 @@
 	set_dma_mode(state->dma, write ? DMA_MODE_WRITE : DMA_MODE_READ);
 
 	/* issue r/w command */
-	ap->ops->exec_command(ap, &qc->tf);
+	ap->ops->sff_exec_command(ap, &qc->tf);
 }
 
 static void pata_icside_bmdma_start(struct ata_queued_cmd *qc)
@@ -270,7 +270,7 @@
 	disable_dma(state->dma);
 
 	/* see ata_bmdma_stop */
-	ata_altstatus(ap);
+	ata_sff_altstatus(ap);
 }
 
 static u8 pata_icside_bmdma_status(struct ata_port *ap)
@@ -305,35 +305,18 @@
 
 
 static struct scsi_host_template pata_icside_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
+	ATA_BASE_SHT(DRV_NAME),
 	.sg_tablesize		= PATA_ICSIDE_MAX_SG,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
 	.dma_boundary		= ~0, /* no dma boundaries */
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
 };
 
-/* wish this was exported from libata-core */
-static void ata_dummy_noret(struct ata_port *port)
-{
-}
-
 static void pata_icside_postreset(struct ata_link *link, unsigned int *classes)
 {
 	struct ata_port *ap = link->ap;
 	struct pata_icside_state *state = ap->host->private_data;
 
 	if (classes[0] != ATA_DEV_NONE || classes[1] != ATA_DEV_NONE)
-		return ata_std_postreset(link, classes);
+		return ata_sff_postreset(link, classes);
 
 	state->port[ap->port_no].disabled = 1;
 
@@ -349,42 +332,20 @@
 	}
 }
 
-static void pata_icside_error_handler(struct ata_port *ap)
-{
-	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, NULL,
-			   pata_icside_postreset);
-}
-
 static struct ata_port_operations pata_icside_port_ops = {
-	.set_dmamode		= pata_icside_set_dmamode,
-
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.exec_command		= ata_exec_command,
-	.check_status		= ata_check_status,
-	.dev_select		= ata_std_dev_select,
-
-	.cable_detect		= ata_cable_40wire,
-
-	.bmdma_setup		= pata_icside_bmdma_setup,
-	.bmdma_start		= pata_icside_bmdma_start,
-
-	.data_xfer		= ata_data_xfer_noirq,
-
+	.inherits		= &ata_sff_port_ops,
 	/* no need to build any PRD tables for DMA */
 	.qc_prep		= ata_noop_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
-
-	.freeze			= ata_bmdma_freeze,
-	.thaw			= ata_bmdma_thaw,
-	.error_handler		= pata_icside_error_handler,
-	.post_internal_cmd	= pata_icside_bmdma_stop,
-
-	.irq_clear		= ata_dummy_noret,
-	.irq_on			= ata_irq_on,
-
+	.sff_data_xfer		= ata_sff_data_xfer_noirq,
+	.bmdma_setup		= pata_icside_bmdma_setup,
+	.bmdma_start		= pata_icside_bmdma_start,
 	.bmdma_stop		= pata_icside_bmdma_stop,
 	.bmdma_status		= pata_icside_bmdma_status,
+
+	.cable_detect		= ata_cable_40wire,
+	.set_dmamode		= pata_icside_set_dmamode,
+	.postreset		= pata_icside_postreset,
+	.post_internal_cmd	= pata_icside_bmdma_stop,
 };
 
 static void __devinit
@@ -520,7 +481,7 @@
 		pata_icside_setup_ioaddr(ap, info->base, info, info->port[i]);
 	}
 
-	return ata_host_activate(host, ec->irq, ata_interrupt, 0,
+	return ata_host_activate(host, ec->irq, ata_sff_interrupt, 0,
 				 &pata_icside_sht);
 }
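
Non-PCI drivers (icside here, plus isapnp, ixp4xx, mpc52xx and the VLB legacy probes below) convert the same way without the PCI helper: fill in the ioaddr block, call the renamed ata_sff_std_ports(), and register through ata_host_activate() with ata_sff_interrupt. PIO-only drivers start from ATA_PIO_SHT() and ata_sff_port_ops instead of the BMDMA variants. A sketch under those assumptions — foo_* is hypothetical and the allocation steps are elided:

	static struct scsi_host_template foo_sht = {
		ATA_PIO_SHT(DRV_NAME),			/* PIO-only host template */
	};

	static struct ata_port_operations foo_port_ops = {
		.inherits	= &ata_sff_port_ops,	/* SFF without BMDMA methods */
		.cable_detect	= ata_cable_40wire,
	};

	static int foo_probe(struct platform_device *pdev)
	{
		/* ata_host_alloc() and resource mapping elided */
		ap->ioaddr.cmd_addr = base;
		ap->ioaddr.altstatus_addr = base + 0x06;
		ap->ioaddr.ctl_addr = base + 0x06;
		ata_sff_std_ports(&ap->ioaddr);		/* was ata_std_ports() */

		return ata_host_activate(host, irq, ata_sff_interrupt, 0, &foo_sht);
	}

Stock no-op stubs like icside's ata_dummy_noret() can simply be deleted: the base table already provides sane defaults for hooks a driver no longer needs to mention.
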
 
diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
index 4320e79..6a111ba 100644
--- a/drivers/ata/pata_isapnp.c
+++ b/drivers/ata/pata_isapnp.c
@@ -20,45 +20,12 @@
 #define DRV_VERSION "0.2.2"
 
 static struct scsi_host_template isapnp_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_PIO_SHT(DRV_NAME),
 };
 
 static struct ata_port_operations isapnp_port_ops = {
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= ata_bmdma_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.inherits	= &ata_sff_port_ops,
 	.cable_detect	= ata_cable_40wire,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer,
-
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
 };
 
 /**
@@ -83,7 +50,7 @@
 
 	if (pnp_irq_valid(idev, 0)) {
 		irq = pnp_irq(idev, 0);
-		handler = ata_interrupt;
+		handler = ata_sff_interrupt;
 	}
 
 	/* allocate host */
@@ -111,7 +78,7 @@
 		ap->ioaddr.ctl_addr = ctl_addr;
 	}
 
-	ata_std_ports(&ap->ioaddr);
+	ata_sff_std_ports(&ap->ioaddr);
 
 	ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
 		      (unsigned long long)pnp_port_start(idev, 0),
diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
index e0c2cc2..c113d7c 100644
--- a/drivers/ata/pata_it8213.c
+++ b/drivers/ata/pata_it8213.c
@@ -40,20 +40,7 @@
 	if (!pci_test_config_bits(pdev, &it8213_enable_bits[ap->port_no]))
 		return -ENOENT;
 
-	return ata_std_prereset(link, deadline);
-}
-
-/**
- *	it8213_error_handler - Probe specified port on PATA host controller
- *	@ap: Port to probe
- *
- *	LOCKING:
- *	None (inherited from caller).
- */
-
-static void it8213_error_handler(struct ata_port *ap)
-{
-	ata_bmdma_drive_eh(ap, it8213_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
+	return ata_sff_prereset(link, deadline);
 }
 
 /**
@@ -243,53 +230,16 @@
 }
 
 static struct scsi_host_template it8213_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.max_sectors		= ATA_MAX_SECTORS,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
 };
 
-static const struct ata_port_operations it8213_ops = {
+
+static struct ata_port_operations it8213_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	.cable_detect		= it8213_cable_detect,
 	.set_piomode		= it8213_set_piomode,
 	.set_dmamode		= it8213_set_dmamode,
-	.mode_filter		= ata_pci_default_filter,
-
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
-
-	.freeze			= ata_bmdma_freeze,
-	.thaw			= ata_bmdma_thaw,
-	.error_handler		= it8213_error_handler,
-	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
-	.cable_detect		= it8213_cable_detect,
-
-	.bmdma_setup		= ata_bmdma_setup,
-	.bmdma_start		= ata_bmdma_start,
-	.bmdma_stop		= ata_bmdma_stop,
-	.bmdma_status		= ata_bmdma_status,
-	.qc_prep		= ata_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
-	.data_xfer		= ata_data_xfer,
-
-	.irq_handler		= ata_interrupt,
-	.irq_clear		= ata_bmdma_irq_clear,
-	.irq_on			= ata_irq_on,
-
-	.port_start		= ata_sff_port_start,
+	.prereset		= it8213_pre_reset,
 };
 
 
@@ -311,7 +261,6 @@
 {
 	static int printed_version;
 	static const struct ata_port_info info = {
-		.sht		= &it8213_sht,
 		.flags		= ATA_FLAG_SLAVE_POSS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
@@ -325,7 +274,7 @@
 		dev_printk(KERN_DEBUG, &pdev->dev,
 			   "version " DRV_VERSION "\n");
 
-	return ata_pci_init_one(pdev, ppi);
+	return ata_pci_sff_init_one(pdev, ppi, &it8213_sht, NULL);
 }
 
 static const struct pci_device_id it8213_pci_tbl[] = {
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index 257951d..e108169 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -395,11 +395,11 @@
 		it821x_program(ap, adev, itdev->pio[adev->devno]);
 		itdev->last_device = device;
 	}
-	ata_std_dev_select(ap, device);
+	ata_sff_dev_select(ap, device);
 }
 
 /**
- *	it821x_smart_qc_issue_prot	-	wrap qc issue prot
+ *	it821x_smart_qc_issue		-	wrap qc issue prot
  *	@qc: command
  *
  *	Wrap the command issue sequence for the IT821x. We need to
@@ -407,7 +407,7 @@
  *	usual happenings kick off
  */
 
-static unsigned int it821x_smart_qc_issue_prot(struct ata_queued_cmd *qc)
+static unsigned int it821x_smart_qc_issue(struct ata_queued_cmd *qc)
 {
 	switch(qc->tf.command)
 	{
@@ -427,14 +427,14 @@
 		case ATA_CMD_ID_ATA:
 		/* Arguably should just no-op this one */
 		case ATA_CMD_SET_FEATURES:
-			return ata_qc_issue_prot(qc);
+			return ata_sff_qc_issue(qc);
 	}
 	printk(KERN_DEBUG "it821x: can't process command 0x%02X\n", qc->tf.command);
 	return AC_ERR_DEV;
 }
 
 /**
- *	it821x_passthru_qc_issue_prot	-	wrap qc issue prot
+ *	it821x_passthru_qc_issue	-	wrap qc issue prot
  *	@qc: command
  *
  *	Wrap the command issue sequence for the IT821x. We need to
@@ -442,10 +442,10 @@
  *	usual happenings kick off
  */
 
-static unsigned int it821x_passthru_qc_issue_prot(struct ata_queued_cmd *qc)
+static unsigned int it821x_passthru_qc_issue(struct ata_queued_cmd *qc)
 {
 	it821x_passthru_dev_select(qc->ap, qc->dev->devno);
-	return ata_qc_issue_prot(qc);
+	return ata_sff_qc_issue(qc);
 }
 
 /**
@@ -632,89 +632,34 @@
 }
 
 static struct scsi_host_template it821x_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
 };
 
 static struct ata_port_operations it821x_smart_port_ops = {
-	.set_mode	= it821x_smart_set_mode,
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.mode_filter	= ata_pci_default_filter,
+	.inherits	= &ata_bmdma_port_ops,
 
-	.check_status 	= ata_check_status,
 	.check_atapi_dma= it821x_check_atapi_dma,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-	.dev_config	= it821x_dev_config,
+	.qc_issue	= it821x_smart_qc_issue,
 
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= ata_bmdma_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
 	.cable_detect	= it821x_ident_hack,
-
-	.bmdma_setup 	= ata_bmdma_setup,
-	.bmdma_start 	= ata_bmdma_start,
-	.bmdma_stop	= ata_bmdma_stop,
-	.bmdma_status 	= ata_bmdma_status,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= it821x_smart_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
+	.set_mode	= it821x_smart_set_mode,
+	.dev_config	= it821x_dev_config,
 
 	.port_start	= it821x_port_start,
 };
 
 static struct ata_port_operations it821x_passthru_port_ops = {
-	.set_piomode	= it821x_passthru_set_piomode,
-	.set_dmamode	= it821x_passthru_set_dmamode,
-	.mode_filter	= ata_pci_default_filter,
+	.inherits	= &ata_bmdma_port_ops,
 
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
 	.check_atapi_dma= it821x_check_atapi_dma,
-	.dev_select 	= it821x_passthru_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= ata_bmdma_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-	.cable_detect	= ata_cable_unknown,
-
-	.bmdma_setup 	= ata_bmdma_setup,
+	.sff_dev_select	= it821x_passthru_dev_select,
 	.bmdma_start 	= it821x_passthru_bmdma_start,
 	.bmdma_stop	= it821x_passthru_bmdma_stop,
-	.bmdma_status 	= ata_bmdma_status,
+	.qc_issue	= it821x_passthru_qc_issue,
 
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= it821x_passthru_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer,
-
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_handler	= ata_interrupt,
-	.irq_on		= ata_irq_on,
+	.cable_detect	= ata_cable_unknown,
+	.set_piomode	= it821x_passthru_set_piomode,
+	.set_dmamode	= it821x_passthru_set_dmamode,
 
 	.port_start	= it821x_port_start,
 };
@@ -742,14 +687,12 @@
 	u8 conf;
 
 	static const struct ata_port_info info_smart = {
-		.sht = &it821x_sht,
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
 		.port_ops = &it821x_smart_port_ops
 	};
 	static const struct ata_port_info info_passthru = {
-		.sht = &it821x_sht,
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
@@ -759,6 +702,11 @@
 
 	const struct ata_port_info *ppi[] = { NULL, NULL };
 	static char *mode[2] = { "pass through", "smart" };
+	int rc;
+
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
 
 	/* Force the card into bypass mode if so requested */
 	if (it8212_noraid) {
@@ -774,16 +722,23 @@
 	else
 		ppi[0] = &info_smart;
 
-	return ata_pci_init_one(pdev, ppi);
+	return ata_pci_sff_init_one(pdev, ppi, &it821x_sht, NULL);
 }
 
 #ifdef CONFIG_PM
 static int it821x_reinit_one(struct pci_dev *pdev)
 {
+	struct ata_host *host = dev_get_drvdata(&pdev->dev);
+	int rc;
+
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc)
+		return rc;
 	/* Resume - turn raid back off if need be */
 	if (it8212_noraid)
 		it821x_disable_raid(pdev);
-	return ata_pci_device_resume(pdev);
+	ata_host_resume(host);
+	return rc;
 }
 #endif
 
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index 030878f..8a175f2 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -88,48 +88,14 @@
 }
 
 static struct scsi_host_template ixp4xx_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_PIO_SHT(DRV_NAME),
 };
 
 static struct ata_port_operations ixp4xx_port_ops = {
-	.set_mode		= ixp4xx_set_mode,
-	.mode_filter		= ata_pci_default_filter,
-
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.exec_command		= ata_exec_command,
-	.check_status 		= ata_check_status,
-	.dev_select 		= ata_std_dev_select,
-
-	.freeze			= ata_bmdma_freeze,
-	.thaw			= ata_bmdma_thaw,
-	.error_handler		= ata_bmdma_error_handler,
-	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
-
-	.qc_prep 		= ata_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
-	.data_xfer		= ixp4xx_mmio_data_xfer,
+	.inherits		= &ata_sff_port_ops,
+	.sff_data_xfer		= ixp4xx_mmio_data_xfer,
 	.cable_detect		= ata_cable_40wire,
-
-	.irq_handler		= ata_interrupt,
-	.irq_clear		= ata_bmdma_irq_clear,
-	.irq_on			= ata_irq_on,
-
-	.port_start		= ata_port_start,
+	.set_mode		= ixp4xx_set_mode,
 };
 
 static void ixp4xx_setup_port(struct ata_port *ap,
@@ -144,7 +110,7 @@
 	ioaddr->altstatus_addr	= data->cs1 + 0x06;
 	ioaddr->ctl_addr	= data->cs1 + 0x06;
 
-	ata_std_ports(ioaddr);
+	ata_sff_std_ports(ioaddr);
 
 #ifndef __ARMEB__
 
@@ -220,7 +186,7 @@
 	dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
 
 	/* activate host */
-	return ata_host_activate(host, irq, ata_interrupt, 0, &ixp4xx_sht);
+	return ata_host_activate(host, irq, ata_sff_interrupt, 0, &ixp4xx_sht);
 }
 
 static __devexit int ixp4xx_pata_remove(struct platform_device *dev)
diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
index 00bbbbd..73b7596 100644
--- a/drivers/ata/pata_jmicron.c
+++ b/drivers/ata/pata_jmicron.c
@@ -102,73 +102,18 @@
 		ap->cbl = ATA_CBL_SATA;
 		break;
 	}
-	return ata_std_prereset(link, deadline);
-}
-
-/**
- *	jmicron_error_handler - Setup and error handler
- *	@ap: Port to handle
- *
- *	LOCKING:
- *	None (inherited from caller).
- */
-
-static void jmicron_error_handler(struct ata_port *ap)
-{
-	ata_bmdma_drive_eh(ap, jmicron_pre_reset, ata_std_softreset, NULL,
-			   ata_std_postreset);
+	return ata_sff_prereset(link, deadline);
 }
 
 /* No PIO or DMA methods needed for this device */
 
 static struct scsi_host_template jmicron_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	/* Use standard CHS mapping rules */
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
 };
 
-static const struct ata_port_operations jmicron_ops = {
-	/* Task file is PCI ATA format, use helpers */
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
-
-	.freeze			= ata_bmdma_freeze,
-	.thaw			= ata_bmdma_thaw,
-	.error_handler		= jmicron_error_handler,
-	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
-
-	/* BMDMA handling is PCI ATA format, use helpers */
-	.bmdma_setup		= ata_bmdma_setup,
-	.bmdma_start		= ata_bmdma_start,
-	.bmdma_stop		= ata_bmdma_stop,
-	.bmdma_status		= ata_bmdma_status,
-	.qc_prep		= ata_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
-	.data_xfer		= ata_data_xfer,
-
-	/* IRQ-related hooks */
-	.irq_handler		= ata_interrupt,
-	.irq_clear		= ata_bmdma_irq_clear,
-	.irq_on			= ata_irq_on,
-
-	/* Generic PATA PCI ATA helpers */
-	.port_start		= ata_port_start,
+static struct ata_port_operations jmicron_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	.prereset		= jmicron_pre_reset,
 };
 
 
@@ -189,7 +134,6 @@
 static int jmicron_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	static const struct ata_port_info info = {
-		.sht		= &jmicron_sht,
 		.flags	= ATA_FLAG_SLAVE_POSS,
 
 		.pio_mask	= 0x1f,
@@ -200,7 +144,7 @@
 	};
 	const struct ata_port_info *ppi[] = { &info, NULL };
 
-	return ata_pci_init_one(pdev, ppi);
+	return ata_pci_sff_init_one(pdev, ppi, &jmicron_sht, NULL);
 }
 
 static const struct pci_device_id jmicron_pci_tbl[] = {
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index 50fe08e..7af4b29 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -208,21 +208,12 @@
 }
 
 static struct scsi_host_template legacy_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_PIO_SHT(DRV_NAME),
+};
+
+static const struct ata_port_operations legacy_base_port_ops = {
+	.inherits	= &ata_sff_port_ops,
+	.cable_detect	= ata_cable_40wire,
 };
 
 /*
@@ -234,55 +225,14 @@
  */
 
 static struct ata_port_operations simple_port_ops = {
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= ata_bmdma_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-	.cable_detect	= ata_cable_40wire,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer_noirq,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
+	.inherits	= &legacy_base_port_ops,
+	.sff_data_xfer	= ata_sff_data_xfer_noirq,
 };
 
 static struct ata_port_operations legacy_port_ops = {
+	.inherits	= &legacy_base_port_ops,
+	.sff_data_xfer	= ata_sff_data_xfer_noirq,
 	.set_mode	= legacy_set_mode,
-
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-	.cable_detect	= ata_cable_40wire,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= ata_bmdma_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer_noirq,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
 };
 
 /*
@@ -367,36 +317,15 @@
 		}
 		local_irq_restore(flags);
 	} else
-		buflen = ata_data_xfer_noirq(dev, buf, buflen, rw);
+		buflen = ata_sff_data_xfer_noirq(dev, buf, buflen, rw);
 
 	return buflen;
 }
 
 static struct ata_port_operations pdc20230_port_ops = {
+	.inherits	= &legacy_base_port_ops,
 	.set_piomode	= pdc20230_set_piomode,
-
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= ata_bmdma_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-	.cable_detect	= ata_cable_40wire,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= pdc_data_xfer_vlb,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
+	.sff_data_xfer	= pdc_data_xfer_vlb,
 };
 
 /*
@@ -427,30 +356,8 @@
 }
 
 static struct ata_port_operations ht6560a_port_ops = {
+	.inherits	= &legacy_base_port_ops,
 	.set_piomode	= ht6560a_set_piomode,
-
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= ata_bmdma_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-	.cable_detect	= ata_cable_40wire,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer,	/* Check vlb/noirq */
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
 };
 
 /*
@@ -492,30 +399,8 @@
 }
 
 static struct ata_port_operations ht6560b_port_ops = {
+	.inherits	= &legacy_base_port_ops,
 	.set_piomode	= ht6560b_set_piomode,
-
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= ata_bmdma_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-	.cable_detect	= ata_cable_40wire,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer,    /* FIXME: Check 32bit and noirq */
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
 };
 
 /*
@@ -613,30 +498,8 @@
 
 
 static struct ata_port_operations opti82c611a_port_ops = {
+	.inherits	= &legacy_base_port_ops,
 	.set_piomode	= opti82c611a_set_piomode,
-
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= ata_bmdma_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-	.cable_detect	= ata_cable_40wire,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
 };
 
 /*
@@ -716,7 +579,7 @@
 }
 
 /**
- *	opt82c465mv_qc_issue_prot	-	command issue
+ *	opt82c465mv_qc_issue		-	command issue
  *	@qc: command pending
  *
  *	Called when the libata layer is about to issue a command. We wrap
@@ -730,7 +593,7 @@
  *	FIXME: dual channel needs ->serialize support
  */
 
-static unsigned int opti82c46x_qc_issue_prot(struct ata_queued_cmd *qc)
+static unsigned int opti82c46x_qc_issue(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct ata_device *adev = qc->dev;
@@ -741,34 +604,13 @@
 	    && ap->host->private_data != NULL)
 		opti82c46x_set_piomode(ap, adev);
 
-	return ata_qc_issue_prot(qc);
+	return ata_sff_qc_issue(qc);
 }
 
 static struct ata_port_operations opti82c46x_port_ops = {
+	.inherits	= &legacy_base_port_ops,
 	.set_piomode	= opti82c46x_set_piomode,
-
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= ata_bmdma_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-	.cable_detect	= ata_cable_40wire,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= opti82c46x_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
+	.qc_issue	= opti82c46x_qc_issue,
 };
 
 static void qdi6500_set_piomode(struct ata_port *ap, struct ata_device *adev)
@@ -802,7 +644,7 @@
  *	@irq: interrupt line
  *
  *	In dual channel mode the 6580 has one clock per channel and we have
- *	to software clockswitch in qc_issue_prot.
+ *	to software clockswitch in qc_issue.
  */
 
 static void qdi6580dp_set_piomode(struct ata_port *ap, struct ata_device *adev)
@@ -868,14 +710,14 @@
 }
 
 /**
- *	qdi_qc_issue_prot	-	command issue
+ *	qdi_qc_issue		-	command issue
  *	@qc: command pending
  *
  *	Called when the libata layer is about to issue a command. We wrap
  *	this interface so that we can load the correct ATA timings.
  */
 
-static unsigned int qdi_qc_issue_prot(struct ata_queued_cmd *qc)
+static unsigned int qdi_qc_issue(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct ata_device *adev = qc->dev;
@@ -888,7 +730,7 @@
 							2 * ap->port_no);
 		}
 	}
-	return ata_qc_issue_prot(qc);
+	return ata_sff_qc_issue(qc);
 }
 
 static unsigned int vlb32_data_xfer(struct ata_device *adev, unsigned char *buf,
@@ -917,7 +759,7 @@
 		}
 		return (buflen + 3) & ~3;
 	} else
-		return ata_data_xfer(adev, buf, buflen, rw);
+		return ata_sff_data_xfer(adev, buf, buflen, rw);
 }
 
 static int qdi_port(struct platform_device *dev,
@@ -930,84 +772,22 @@
 }
 
 static struct ata_port_operations qdi6500_port_ops = {
+	.inherits	= &legacy_base_port_ops,
 	.set_piomode	= qdi6500_set_piomode,
-
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= ata_bmdma_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-	.cable_detect	= ata_cable_40wire,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= qdi_qc_issue_prot,
-
-	.data_xfer	= vlb32_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
+	.qc_issue	= qdi_qc_issue,
+	.sff_data_xfer	= vlb32_data_xfer,
 };
 
 static struct ata_port_operations qdi6580_port_ops = {
+	.inherits	= &legacy_base_port_ops,
 	.set_piomode	= qdi6580_set_piomode,
-
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= ata_bmdma_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-	.cable_detect	= ata_cable_40wire,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= vlb32_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
+	.sff_data_xfer	= vlb32_data_xfer,
 };
 
 static struct ata_port_operations qdi6580dp_port_ops = {
+	.inherits	= &legacy_base_port_ops,
 	.set_piomode	= qdi6580dp_set_piomode,
-
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= ata_bmdma_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-	.cable_detect	= ata_cable_40wire,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= qdi_qc_issue_prot,
-
-	.data_xfer	= vlb32_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
+	.sff_data_xfer	= vlb32_data_xfer,
 };
 
 static DEFINE_SPINLOCK(winbond_lock);
@@ -1076,29 +856,9 @@
 }
 
 static struct ata_port_operations winbond_port_ops = {
+	.inherits	= &legacy_base_port_ops,
 	.set_piomode	= winbond_set_piomode,
-
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= ata_bmdma_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-	.cable_detect	= ata_cable_40wire,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= vlb32_data_xfer,
-
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
+	.sff_data_xfer	= vlb32_data_xfer,
 };
 
 static struct legacy_controller controllers[] = {
@@ -1256,13 +1016,13 @@
 	ap->ioaddr.cmd_addr = io_addr;
 	ap->ioaddr.altstatus_addr = ctrl_addr;
 	ap->ioaddr.ctl_addr = ctrl_addr;
-	ata_std_ports(&ap->ioaddr);
+	ata_sff_std_ports(&ap->ioaddr);
 	ap->host->private_data = ld;
 
 	ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", io, io + 0x0206);
 
-	ret = ata_host_activate(host, probe->irq, ata_interrupt, 0,
-								&legacy_sht);
+	ret = ata_host_activate(host, probe->irq, ata_sff_interrupt, 0,
+				&legacy_sht);
 	if (ret)
 		goto fail;
 	ld->platform_dev = pdev;
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
index a81f25d..24a011b 100644
--- a/drivers/ata/pata_marvell.c
+++ b/drivers/ata/pata_marvell.c
@@ -55,7 +55,7 @@
 	    (!(devices & 0x10)))	/* PATA enable ? */
 		return -ENOENT;
 
-	return ata_std_prereset(link, deadline);
+	return ata_sff_prereset(link, deadline);
 }
 
 static int marvell_cable_detect(struct ata_port *ap)
@@ -75,71 +75,16 @@
 	return 0;	/* Our BUG macro needs the right markup */
 }
 
-/**
- *	marvell_error_handler - Setup and error handler
- *	@ap: Port to handle
- *
- *	LOCKING:
- *	None (inherited from caller).
- */
-
-static void marvell_error_handler(struct ata_port *ap)
-{
-	ata_bmdma_drive_eh(ap, marvell_pre_reset, ata_std_softreset, NULL,
-			   ata_std_postreset);
-}
-
 /* No PIO or DMA methods needed for this device */
 
 static struct scsi_host_template marvell_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	/* Use standard CHS mapping rules */
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
 };
 
-static const struct ata_port_operations marvell_ops = {
-	/* Task file is PCI ATA format, use helpers */
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
-
-	.freeze			= ata_bmdma_freeze,
-	.thaw			= ata_bmdma_thaw,
-	.error_handler		= marvell_error_handler,
-	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
+static struct ata_port_operations marvell_ops = {
+	.inherits		= &ata_bmdma_port_ops,
 	.cable_detect		= marvell_cable_detect,
-
-	/* BMDMA handling is PCI ATA format, use helpers */
-	.bmdma_setup		= ata_bmdma_setup,
-	.bmdma_start		= ata_bmdma_start,
-	.bmdma_stop		= ata_bmdma_stop,
-	.bmdma_status		= ata_bmdma_status,
-	.qc_prep		= ata_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
-	.data_xfer		= ata_data_xfer,
-
-	/* Timeout handling */
-	.irq_handler		= ata_interrupt,
-	.irq_clear		= ata_bmdma_irq_clear,
-	.irq_on			= ata_irq_on,
-
-	/* Generic PATA PCI ATA helpers */
-	.port_start		= ata_sff_port_start,
+	.prereset		= marvell_pre_reset,
 };
 
 
@@ -160,7 +105,6 @@
 static int marvell_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	static const struct ata_port_info info = {
-		.sht		= &marvell_sht,
 		.flags		= ATA_FLAG_SLAVE_POSS,
 
 		.pio_mask	= 0x1f,
@@ -170,7 +114,6 @@
 		.port_ops	= &marvell_ops,
 	};
 	static const struct ata_port_info info_sata = {
-		.sht		= &marvell_sht,
 		/* Slave possible as its magically mapped not real */
 		.flags		= ATA_FLAG_SLAVE_POSS,
 
@@ -185,7 +128,7 @@
 	if (pdev->device == 0x6101)
 		ppi[1] = &ata_dummy_port_info;
 
-	return ata_pci_init_one(pdev, ppi);
+	return ata_pci_sff_init_one(pdev, ppi, &marvell_sht, NULL);
 }
 
 static const struct pci_device_id marvell_pci_tbl[] = {
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index 5413ebf..bc79df6 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -252,53 +252,19 @@
 	if (device != priv->csel)
 		mpc52xx_ata_apply_timings(priv, device);
 
-	ata_std_dev_select(ap,device);
+	ata_sff_dev_select(ap,device);
 }
 
-static void
-mpc52xx_ata_error_handler(struct ata_port *ap)
-{
-	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, NULL,
-			ata_std_postreset);
-}
-
-
-
 static struct scsi_host_template mpc52xx_ata_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.max_sectors		= ATA_MAX_SECTORS,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.bios_param		= ata_std_bios_param,
+	ATA_PIO_SHT(DRV_NAME),
 };
 
 static struct ata_port_operations mpc52xx_ata_port_ops = {
-	.set_piomode		= mpc52xx_ata_set_piomode,
-	.dev_select		= mpc52xx_ata_dev_select,
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= ata_exec_command,
-	.freeze			= ata_bmdma_freeze,
-	.thaw			= ata_bmdma_thaw,
-	.error_handler		= mpc52xx_ata_error_handler,
+	.inherits		= &ata_sff_port_ops,
+	.sff_dev_select		= mpc52xx_ata_dev_select,
 	.cable_detect		= ata_cable_40wire,
-	.qc_prep		= ata_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
-	.data_xfer		= ata_data_xfer,
-	.irq_clear		= ata_bmdma_irq_clear,
-	.irq_on			= ata_irq_on,
-	.port_start		= ata_port_start,
+	.set_piomode		= mpc52xx_ata_set_piomode,
+	.post_internal_cmd	= ATA_OP_NULL,
 };
 
 static int __devinit
@@ -339,7 +305,7 @@
 	ata_port_desc(ap, "ata_regs 0x%lx", raw_ata_regs);
 
 	/* activate host */
-	return ata_host_activate(host, priv->ata_irq, ata_interrupt, 0,
+	return ata_host_activate(host, priv->ata_irq, ata_sff_interrupt, 0,
 				 &mpc52xx_ata_sht);
 }
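
The mpc52xx ops table also shows how an inherited hook is suppressed rather than overridden: ATA_OP_NULL pins the slot to "no operation" so inheritance cannot fill it from the parent. A sketch under that assumption:

/* PIO-only controller: keep the SFF defaults but explicitly drop the
 * BMDMA post-internal-command cleanup it would otherwise inherit. */
static struct ata_port_operations example_pio_ops = {
	.inherits		= &ata_sff_port_ops,
	.post_internal_cmd	= ATA_OP_NULL,	/* no-op, not inherited */
};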
 
diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
index c0d9e0c..7d7e3fd 100644
--- a/drivers/ata/pata_mpiix.c
+++ b/drivers/ata/pata_mpiix.c
@@ -55,21 +55,7 @@
 	if (!pci_test_config_bits(pdev, &mpiix_enable_bits))
 		return -ENOENT;
 
-	return ata_std_prereset(link, deadline);
-}
-
-/**
- *	mpiix_error_handler		-	probe reset
- *	@ap: ATA port
- *
- *	Perform the ATA probe and bus reset sequence plus specific handling
- *	for this hardware. The MPIIX has the enable bits in a different place
- *	to PIIX4 and friends. As a pure PIO device it has no cable detect
- */
-
-static void mpiix_error_handler(struct ata_port *ap)
-{
-	ata_bmdma_drive_eh(ap, mpiix_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
+	return ata_sff_prereset(link, deadline);
 }
 
 /**
@@ -83,8 +69,8 @@
  *
  *	This would get very ugly because we can only program timing for one
  *	device at a time, the other gets PIO0. Fortunately libata calls
- *	our qc_issue_prot command before a command is issued so we can
- *	flip the timings back and forth to reduce the pain.
+ *	our qc_issue command before a command is issued so we can flip the
+ *	timings back and forth to reduce the pain.
  */
 
 static void mpiix_set_piomode(struct ata_port *ap, struct ata_device *adev)
@@ -124,7 +110,7 @@
 }
 
 /**
- *	mpiix_qc_issue_prot	-	command issue
+ *	mpiix_qc_issue		-	command issue
  *	@qc: command pending
  *
  *	Called when the libata layer is about to issue a command. We wrap
@@ -134,7 +120,7 @@
  *	be made PIO0.
  */
 
-static unsigned int mpiix_qc_issue_prot(struct ata_queued_cmd *qc)
+static unsigned int mpiix_qc_issue(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct ata_device *adev = qc->dev;
@@ -147,50 +133,19 @@
 	if (adev->pio_mode && adev != ap->private_data)
 		mpiix_set_piomode(ap, adev);
 
-	return ata_qc_issue_prot(qc);
+	return ata_sff_qc_issue(qc);
 }
 
 static struct scsi_host_template mpiix_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_PIO_SHT(DRV_NAME),
 };
 
 static struct ata_port_operations mpiix_port_ops = {
-	.set_piomode	= mpiix_set_piomode,
-
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= mpiix_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.inherits	= &ata_sff_port_ops,
+	.qc_issue	= mpiix_qc_issue,
 	.cable_detect	= ata_cable_40wire,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= mpiix_qc_issue_prot,
-	.data_xfer	= ata_data_xfer,
-
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
+	.set_piomode	= mpiix_set_piomode,
+	.prereset	= mpiix_pre_reset,
 };
 
 static int mpiix_init_one(struct pci_dev *dev, const struct pci_device_id *id)
@@ -252,10 +207,10 @@
 	ap->ioaddr.altstatus_addr = ctl_addr;
 
 	/* Let libata fill in the port details */
-	ata_std_ports(&ap->ioaddr);
+	ata_sff_std_ports(&ap->ioaddr);
 
 	/* activate host */
-	return ata_host_activate(host, irq, ata_interrupt, IRQF_SHARED,
+	return ata_host_activate(host, irq, ata_sff_interrupt, IRQF_SHARED,
 				 &mpiix_sht);
 }
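
The mpiix qc_issue wrapper is a recurring shape in this series (ns87410, oldpiix, qdi, radisys and sc1200 follow it): reload per-device timings when the target device has changed, then delegate to the stock SFF issue path. A sketch, with example_set_piomode() standing in for the driver's timing routine:

/* Assumed driver-specific timing routine. */
static void example_set_piomode(struct ata_port *ap, struct ata_device *adev);

static unsigned int example_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *adev = qc->dev;

	/* Only one device's timings fit in the chip; flip them if the
	 * command targets the other device. */
	if (adev->pio_mode && adev != ap->private_data)
		example_set_piomode(ap, adev);

	return ata_sff_qc_issue(qc);	/* normal SFF command issue */
}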
 
diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
index 25c922a..d9719c8 100644
--- a/drivers/ata/pata_netcell.c
+++ b/drivers/ata/pata_netcell.c
@@ -21,54 +21,12 @@
 /* No PIO or DMA methods needed for this device */
 
 static struct scsi_host_template netcell_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	/* Use standard CHS mapping rules */
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
 };
 
-static const struct ata_port_operations netcell_ops = {
-	/* Task file is PCI ATA format, use helpers */
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
-
-	.freeze			= ata_bmdma_freeze,
-	.thaw			= ata_bmdma_thaw,
-	.error_handler		= ata_bmdma_error_handler,
-	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
+static struct ata_port_operations netcell_ops = {
+	.inherits	= &ata_bmdma_port_ops,
 	.cable_detect		= ata_cable_80wire,
-
-	/* BMDMA handling is PCI ATA format, use helpers */
-	.bmdma_setup		= ata_bmdma_setup,
-	.bmdma_start		= ata_bmdma_start,
-	.bmdma_stop		= ata_bmdma_stop,
-	.bmdma_status		= ata_bmdma_status,
-	.qc_prep		= ata_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
-	.data_xfer		= ata_data_xfer,
-
-	/* IRQ-related hooks */
-	.irq_handler		= ata_interrupt,
-	.irq_clear		= ata_bmdma_irq_clear,
-	.irq_on			= ata_irq_on,
-
-	/* Generic PATA PCI ATA helpers */
-	.port_start		= ata_sff_port_start,
 };
 
 
@@ -90,7 +48,6 @@
 {
 	static int printed_version;
 	static const struct ata_port_info info = {
-		.sht		= &netcell_sht,
 		.flags		= ATA_FLAG_SLAVE_POSS,
 		/* Actually we don't really care about these as the
 		   firmware deals with it */
@@ -100,16 +57,21 @@
 		.port_ops	= &netcell_ops,
 	};
 	const struct ata_port_info *port_info[] = { &info, NULL };
+	int rc;
 
 	if (!printed_version++)
 		dev_printk(KERN_DEBUG, &pdev->dev,
 			   "version " DRV_VERSION "\n");
 
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
 	/* Any chip specific setup/optimisation/messages here */
-	ata_pci_clear_simplex(pdev);
+	ata_pci_bmdma_clear_simplex(pdev);
 
 	/* And let the library code do the work */
-	return ata_pci_init_one(pdev, port_info);
+	return ata_pci_sff_init_one(pdev, port_info, &netcell_sht, NULL);
 }
 
 static const struct pci_device_id netcell_pci_tbl[] = {
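
netcell also gains an explicit pcim_enable_device() call: the driver pokes the chip (clearing simplex) before handing over to ata_pci_sff_init_one(), so the managed enable must happen first. A sketch of the updated probe flow, reusing the illustrative example_sht and example_ops names from the first sketch:

static const struct ata_port_info example_info = {
	.flags		= ATA_FLAG_SLAVE_POSS,
	.pio_mask	= 0x1f,
	.port_ops	= &example_ops,
};

static int example_init_one(struct pci_dev *pdev,
			    const struct pci_device_id *id)
{
	const struct ata_port_info *ppi[] = { &example_info, NULL };
	int rc;

	rc = pcim_enable_device(pdev);	/* managed enable, auto-released */
	if (rc)
		return rc;

	/* chip-specific setup that touches the device goes here */

	return ata_pci_sff_init_one(pdev, ppi, &example_sht, NULL);
}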
diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
index 15dd649..565e67c 100644
--- a/drivers/ata/pata_ninja32.c
+++ b/drivers/ata/pata_ninja32.c
@@ -73,60 +73,20 @@
 	struct ata_device *adev = &ap->link.device[device];
 	if (ap->private_data != adev) {
 		iowrite8(0xd6, ap->ioaddr.bmdma_addr + 0x1f);
-		ata_std_dev_select(ap, device);
+		ata_sff_dev_select(ap, device);
 		ninja32_set_piomode(ap, adev);
 	}
 }
 
 static struct scsi_host_template ninja32_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
 };
 
 static struct ata_port_operations ninja32_port_ops = {
-	.set_piomode	= ninja32_set_piomode,
-	.mode_filter	= ata_pci_default_filter,
-
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ninja32_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= ata_bmdma_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.inherits	= &ata_bmdma_port_ops,
+	.sff_dev_select = ninja32_dev_select,
 	.cable_detect	= ata_cable_40wire,
-
-	.bmdma_setup 	= ata_bmdma_setup,
-	.bmdma_start 	= ata_bmdma_start,
-	.bmdma_stop	= ata_bmdma_stop,
-	.bmdma_status 	= ata_bmdma_status,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
+	.set_piomode	= ninja32_set_piomode,
 };
 
 static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id)
@@ -172,7 +132,7 @@
 	ap->ioaddr.ctl_addr = base + 0x1E;
 	ap->ioaddr.altstatus_addr = base + 0x1E;
 	ap->ioaddr.bmdma_addr = base;
-	ata_std_ports(&ap->ioaddr);
+	ata_sff_std_ports(&ap->ioaddr);
 
 	iowrite8(0x05, base + 0x01);	/* Enable interrupt lines */
 	iowrite8(0xBE, base + 0x02);	/* Burst, ?? setup */
@@ -182,7 +142,7 @@
 	iowrite8(0xa4, base + 0x1c);	/* Unknown */
 	iowrite8(0x83, base + 0x1d);	/* BMDMA control: WAIT0 */
 	/* FIXME: Should we disable them at remove ? */
-	return ata_host_activate(host, dev->irq, ata_interrupt,
+	return ata_host_activate(host, dev->irq, ata_sff_interrupt,
 				 IRQF_SHARED, &ninja32_sht);
 }
 
diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
index 9fe66fd..76d2455 100644
--- a/drivers/ata/pata_ns87410.c
+++ b/drivers/ata/pata_ns87410.c
@@ -50,21 +50,7 @@
 	if (!pci_test_config_bits(pdev, &ns87410_enable_bits[ap->port_no]))
 		return -ENOENT;
 
-	return ata_std_prereset(link, deadline);
-}
-
-/**
- *	ns87410_error_handler		-	probe reset
- *	@ap: ATA port
- *
- *	Perform the ATA probe and bus reset sequence plus specific handling
- *	for this hardware. The MPIIX has the enable bits in a different place
- *	to PIIX4 and friends. As a pure PIO device it has no cable detect
- */
-
-static void ns87410_error_handler(struct ata_port *ap)
-{
-	ata_bmdma_drive_eh(ap, ns87410_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
+	return ata_sff_prereset(link, deadline);
 }
 
 /**
@@ -119,7 +105,7 @@
 }
 
 /**
- *	ns87410_qc_issue_prot	-	command issue
+ *	ns87410_qc_issue	-	command issue
  *	@qc: command pending
  *
  *	Called when the libata layer is about to issue a command. We wrap
@@ -127,7 +113,7 @@
  *	necessary.
  */
 
-static unsigned int ns87410_qc_issue_prot(struct ata_queued_cmd *qc)
+static unsigned int ns87410_qc_issue(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct ata_device *adev = qc->dev;
@@ -140,64 +126,30 @@
 	if (adev->pio_mode && adev != ap->private_data)
 		ns87410_set_piomode(ap, adev);
 
-	return ata_qc_issue_prot(qc);
+	return ata_sff_qc_issue(qc);
 }
 
 static struct scsi_host_template ns87410_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_PIO_SHT(DRV_NAME),
 };
 
 static struct ata_port_operations ns87410_port_ops = {
-	.set_piomode	= ns87410_set_piomode,
-
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= ns87410_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.inherits	= &ata_sff_port_ops,
+	.qc_issue	= ns87410_qc_issue,
 	.cable_detect	= ata_cable_40wire,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ns87410_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
+	.set_piomode	= ns87410_set_piomode,
+	.prereset	= ns87410_pre_reset,
 };
 
 static int ns87410_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 {
 	static const struct ata_port_info info = {
-		.sht = &ns87410_sht,
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x0F,
 		.port_ops = &ns87410_port_ops
 	};
 	const struct ata_port_info *ppi[] = { &info, NULL };
-	return ata_pci_init_one(dev, ppi);
+	return ata_pci_sff_init_one(dev, ppi, &ns87410_sht, NULL);
 }
 
 static const struct pci_device_id ns87410[] = {
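
Each *_error_handler wrapper removed in these diffs becomes a bare .prereset hook: the port-enabled test survives, while the softreset/postreset chain now comes from the inherited ops. A sketch of the pattern, with a hypothetical enable-bits table:

/* Hypothetical config-space enable bits, one entry per port. */
static const struct pci_bits example_enable_bits[] = {
	{ 0x41, 1, 0x80, 0x80 },
	{ 0x43, 1, 0x80, 0x80 },
};

static int example_pre_reset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

	if (!pci_test_config_bits(pdev, &example_enable_bits[ap->port_no]))
		return -ENOENT;		/* port disabled in hardware */

	return ata_sff_prereset(link, deadline);
}

Wired up as .prereset = example_pre_reset in the ops table; no per-driver error_handler is needed any more.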
diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
index d0e2e50..ae92b00 100644
--- a/drivers/ata/pata_ns87415.c
+++ b/drivers/ata/pata_ns87415.c
@@ -138,7 +138,7 @@
 		dmactl |= ATA_DMA_WR;
 	iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
 	/* issue r/w command */
-	ap->ops->exec_command(ap, &qc->tf);
+	ap->ops->sff_exec_command(ap, &qc->tf);
 }
 
 /**
@@ -172,14 +172,14 @@
 }
 
 /**
- *	ns87415_bmdma_irq_clear		-	Clear interrupt
+ *	ns87415_irq_clear		-	Clear interrupt
  *	@ap: Channel to clear
  *
 *	Erratum: Due to a chip bug, registers 02 and 0A bits 1 and 2 (the
  *	error bits) are reset by writing to register 00 or 08.
  */
 
-static void ns87415_bmdma_irq_clear(struct ata_port *ap)
+static void ns87415_irq_clear(struct ata_port *ap)
 {
 	void __iomem *mmio = ap->ioaddr.bmdma_addr;
 
@@ -297,90 +297,32 @@
 {
 	return ns87560_read_buggy(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
 }
-
-static const struct ata_port_operations ns87560_pata_ops = {
-	.set_piomode		= ns87415_set_piomode,
-	.mode_filter		= ata_pci_default_filter,
-
-	.tf_load		= ata_tf_load,
-	.tf_read		= ns87560_tf_read,
-	.check_status		= ns87560_check_status,
-	.check_atapi_dma	= ns87415_check_atapi_dma,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
-
-	.freeze			= ata_bmdma_freeze,
-	.thaw			= ata_bmdma_thaw,
-	.error_handler		= ata_bmdma_error_handler,
-	.post_internal_cmd 	= ata_bmdma_post_internal_cmd,
-	.cable_detect		= ata_cable_40wire,
-
-	.bmdma_setup		= ns87415_bmdma_setup,
-	.bmdma_start		= ns87415_bmdma_start,
-	.bmdma_stop		= ns87415_bmdma_stop,
-	.bmdma_status		= ns87560_bmdma_status,
-	.qc_prep		= ata_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
-	.data_xfer		= ata_data_xfer,
-
-	.irq_handler		= ata_interrupt,
-	.irq_clear		= ns87415_bmdma_irq_clear,
-	.irq_on			= ata_irq_on,
-
-	.port_start		= ata_sff_port_start,
-};
-
 #endif		/* 87560 SuperIO Support */
 
+static struct ata_port_operations ns87415_pata_ops = {
+	.inherits		= &ata_bmdma_port_ops,
 
-static const struct ata_port_operations ns87415_pata_ops = {
-	.set_piomode		= ns87415_set_piomode,
-	.mode_filter		= ata_pci_default_filter,
-
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
 	.check_atapi_dma	= ns87415_check_atapi_dma,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
-
-	.freeze			= ata_bmdma_freeze,
-	.thaw			= ata_bmdma_thaw,
-	.error_handler		= ata_bmdma_error_handler,
-	.post_internal_cmd 	= ata_bmdma_post_internal_cmd,
-	.cable_detect		= ata_cable_40wire,
-
 	.bmdma_setup		= ns87415_bmdma_setup,
 	.bmdma_start		= ns87415_bmdma_start,
 	.bmdma_stop		= ns87415_bmdma_stop,
-	.bmdma_status		= ata_bmdma_status,
-	.qc_prep		= ata_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
-	.data_xfer		= ata_data_xfer,
+	.sff_irq_clear		= ns87415_irq_clear,
 
-	.irq_handler		= ata_interrupt,
-	.irq_clear		= ns87415_bmdma_irq_clear,
-	.irq_on			= ata_irq_on,
-
-	.port_start		= ata_sff_port_start,
+	.cable_detect		= ata_cable_40wire,
+	.set_piomode		= ns87415_set_piomode,
 };
 
+#if defined(CONFIG_SUPERIO)
+static struct ata_port_operations ns87560_pata_ops = {
+	.inherits		= &ns87415_pata_ops,
+	.sff_tf_read		= ns87560_tf_read,
+	.sff_check_status	= ns87560_check_status,
+	.bmdma_status		= ns87560_bmdma_status,
+};
+#endif
+
 static struct scsi_host_template ns87415_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
 };
 
 
@@ -403,16 +345,15 @@
 {
 	static int printed_version;
 	static const struct ata_port_info info = {
-		.sht		= &ns87415_sht,
 		.flags		= ATA_FLAG_SLAVE_POSS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
 		.port_ops	= &ns87415_pata_ops,
 	};
 	const struct ata_port_info *ppi[] = { &info, NULL };
+	int rc;
 #if defined(CONFIG_SUPERIO)
 	static const struct ata_port_info info87560 = {
-		.sht		= &ns87415_sht,
 		.flags		= ATA_FLAG_SLAVE_POSS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
@@ -425,11 +366,16 @@
 	if (!printed_version++)
 		dev_printk(KERN_DEBUG, &pdev->dev,
 			   "version " DRV_VERSION "\n");
+
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
 	/* Select 512 byte sectors */
 	pci_write_config_byte(pdev, 0x55, 0xEE);
 	/* Select PIO0 8bit clocking */
 	pci_write_config_byte(pdev, 0x54, 0xB7);
-	return ata_pci_init_one(pdev, ppi);
+	return ata_pci_sff_init_one(pdev, ppi, &ns87415_sht, NULL);
 }
 
 static const struct pci_device_id ns87415_pci_tbl[] = {
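
The ns87415/ns87560 pair shows that .inherits chains through driver-defined tables too: a variant inherits the base driver's ops and overrides only the accessors its hardware quirk requires. An illustrative sketch:

/* Assumed status accessor carrying the variant's workaround. */
static u8 example_check_status(struct ata_port *ap);

static struct ata_port_operations example_base_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.cable_detect		= ata_cable_40wire,
};

/* SuperIO-style variant: chain from the base table, replace only the
 * accessor that needs special handling. */
static struct ata_port_operations example_variant_ops = {
	.inherits		= &example_base_ops,
	.sff_check_status	= example_check_status,
};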
diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
index 44da09a..e678af3 100644
--- a/drivers/ata/pata_oldpiix.c
+++ b/drivers/ata/pata_oldpiix.c
@@ -47,21 +47,7 @@
 	if (!pci_test_config_bits(pdev, &oldpiix_enable_bits[ap->port_no]))
 		return -ENOENT;
 
-	return ata_std_prereset(link, deadline);
-}
-
-/**
- *	oldpiix_pata_error_handler - Probe specified port on PATA host controller
- *	@ap: Port to probe
- *	@classes:
- *
- *	LOCKING:
- *	None (inherited from caller).
- */
-
-static void oldpiix_pata_error_handler(struct ata_port *ap)
-{
-	ata_bmdma_drive_eh(ap, oldpiix_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
+	return ata_sff_prereset(link, deadline);
 }
 
 /**
@@ -195,7 +181,7 @@
 }
 
 /**
- *	oldpiix_qc_issue_prot	-	command issue
+ *	oldpiix_qc_issue	-	command issue
  *	@qc: command pending
  *
  *	Called when the libata layer is about to issue a command. We wrap
@@ -205,7 +191,7 @@
  *	be made PIO0.
  */
 
-static unsigned int oldpiix_qc_issue_prot(struct ata_queued_cmd *qc)
+static unsigned int oldpiix_qc_issue(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct ata_device *adev = qc->dev;
@@ -215,58 +201,21 @@
 		if (adev->dma_mode)
 			oldpiix_set_dmamode(ap, adev);
 	}
-	return ata_qc_issue_prot(qc);
+	return ata_sff_qc_issue(qc);
 }
 
 
 static struct scsi_host_template oldpiix_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
 };
 
-static const struct ata_port_operations oldpiix_pata_ops = {
+static struct ata_port_operations oldpiix_pata_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	.qc_issue		= oldpiix_qc_issue,
+	.cable_detect		= ata_cable_40wire,
 	.set_piomode		= oldpiix_set_piomode,
 	.set_dmamode		= oldpiix_set_dmamode,
-	.mode_filter		= ata_pci_default_filter,
-
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
-
-	.freeze			= ata_bmdma_freeze,
-	.thaw			= ata_bmdma_thaw,
-	.error_handler		= oldpiix_pata_error_handler,
-	.post_internal_cmd 	= ata_bmdma_post_internal_cmd,
-	.cable_detect		= ata_cable_40wire,
-
-	.bmdma_setup		= ata_bmdma_setup,
-	.bmdma_start		= ata_bmdma_start,
-	.bmdma_stop		= ata_bmdma_stop,
-	.bmdma_status		= ata_bmdma_status,
-	.qc_prep		= ata_qc_prep,
-	.qc_issue		= oldpiix_qc_issue_prot,
-	.data_xfer		= ata_data_xfer,
-
-	.irq_handler		= ata_interrupt,
-	.irq_clear		= ata_bmdma_irq_clear,
-	.irq_on			= ata_irq_on,
-
-	.port_start		= ata_sff_port_start,
+	.prereset		= oldpiix_pre_reset,
 };
 
 
@@ -289,7 +238,6 @@
 {
 	static int printed_version;
 	static const struct ata_port_info info = {
-		.sht		= &oldpiix_sht,
 		.flags		= ATA_FLAG_SLAVE_POSS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma1-2 */
@@ -301,7 +249,7 @@
 		dev_printk(KERN_DEBUG, &pdev->dev,
 			   "version " DRV_VERSION "\n");
 
-	return ata_pci_init_one(pdev, ppi);
+	return ata_pci_sff_init_one(pdev, ppi, &oldpiix_sht, NULL);
 }
 
 static const struct pci_device_id oldpiix_pci_tbl[] = {
diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
index 8f79447..fb2cf66 100644
--- a/drivers/ata/pata_opti.c
+++ b/drivers/ata/pata_opti.c
@@ -64,22 +64,7 @@
 	if (!pci_test_config_bits(pdev, &opti_enable_bits[ap->port_no]))
 		return -ENOENT;
 
-	return ata_std_prereset(link, deadline);
-}
-
-/**
- *	opti_probe_reset		-	probe reset
- *	@ap: ATA port
- *
- *	Perform the ATA probe and bus reset sequence plus specific handling
- *	for this hardware. The Opti needs little handling - we have no UDMA66
- *	capability that needs cable detection. All we must do is check the port
- *	is enabled.
- */
-
-static void opti_error_handler(struct ata_port *ap)
-{
-	ata_bmdma_drive_eh(ap, opti_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
+	return ata_sff_prereset(link, deadline);
 }
 
 /**
@@ -165,58 +150,19 @@
 }
 
 static struct scsi_host_template opti_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_PIO_SHT(DRV_NAME),
 };
 
 static struct ata_port_operations opti_port_ops = {
-	.set_piomode	= opti_set_piomode,
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= opti_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.inherits	= &ata_sff_port_ops,
 	.cable_detect	= ata_cable_40wire,
-
-	.bmdma_setup 	= ata_bmdma_setup,
-	.bmdma_start 	= ata_bmdma_start,
-	.bmdma_stop	= ata_bmdma_stop,
-	.bmdma_status 	= ata_bmdma_status,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
+	.set_piomode	= opti_set_piomode,
+	.prereset	= opti_pre_reset,
 };
 
 static int opti_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 {
 	static const struct ata_port_info info = {
-		.sht = &opti_sht,
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.port_ops = &opti_port_ops
@@ -227,7 +173,7 @@
 	if (!printed_version++)
 		dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n");
 
-	return ata_pci_init_one(dev, ppi);
+	return ata_pci_sff_init_one(dev, ppi, &opti_sht, NULL);
 }
 
 static const struct pci_device_id opti[] = {
diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
index f9b485a..4cd7444 100644
--- a/drivers/ata/pata_optidma.c
+++ b/drivers/ata/pata_optidma.c
@@ -64,22 +64,7 @@
 	if (ap->port_no && !pci_test_config_bits(pdev, &optidma_enable_bits))
 		return -ENOENT;
 
-	return ata_std_prereset(link, deadline);
-}
-
-/**
- *	optidma_probe_reset		-	probe reset
- *	@ap: ATA port
- *
- *	Perform the ATA probe and bus reset sequence plus specific handling
- *	for this hardware. The Opti needs little handling - we have no UDMA66
- *	capability that needs cable detection. All we must do is check the port
- *	is enabled.
- */
-
-static void optidma_error_handler(struct ata_port *ap)
-{
-	ata_bmdma_drive_eh(ap, optidma_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
+	return ata_sff_prereset(link, deadline);
 }
 
 /**
@@ -350,89 +335,22 @@
 }
 
 static struct scsi_host_template optidma_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
 };
 
 static struct ata_port_operations optidma_port_ops = {
+	.inherits	= &ata_bmdma_port_ops,
+	.cable_detect	= ata_cable_40wire,
 	.set_piomode	= optidma_set_pio_mode,
 	.set_dmamode	= optidma_set_dma_mode,
-
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-	.error_handler	= optidma_error_handler,
 	.set_mode	= optidma_set_mode,
-	.cable_detect	= ata_cable_40wire,
-
-	.bmdma_setup 	= ata_bmdma_setup,
-	.bmdma_start 	= ata_bmdma_start,
-	.bmdma_stop	= ata_bmdma_stop,
-	.bmdma_status 	= ata_bmdma_status,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
+	.prereset	= optidma_pre_reset,
 };
 
 static struct ata_port_operations optiplus_port_ops = {
+	.inherits	= &optidma_port_ops,
 	.set_piomode	= optiplus_set_pio_mode,
 	.set_dmamode	= optiplus_set_dma_mode,
-
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-	.error_handler	= optidma_error_handler,
-	.set_mode	= optidma_set_mode,
-	.cable_detect	= ata_cable_40wire,
-
-	.bmdma_setup 	= ata_bmdma_setup,
-	.bmdma_start 	= ata_bmdma_start,
-	.bmdma_stop	= ata_bmdma_stop,
-	.bmdma_status 	= ata_bmdma_status,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
 };
 
 /**
@@ -481,14 +399,12 @@
 static int optidma_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 {
 	static const struct ata_port_info info_82c700 = {
-		.sht = &optidma_sht,
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
 		.port_ops = &optidma_port_ops
 	};
 	static const struct ata_port_info info_82c700_udma = {
-		.sht = &optidma_sht,
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
@@ -497,10 +413,15 @@
 	};
 	const struct ata_port_info *ppi[] = { &info_82c700, NULL };
 	static int printed_version;
+	int rc;
 
 	if (!printed_version++)
 		dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n");
 
+	rc = pcim_enable_device(dev);
+	if (rc)
+		return rc;
+
 	/* Fixed location chipset magic */
 	inw(0x1F1);
 	inw(0x1F1);
@@ -509,7 +430,7 @@
 	if (optiplus_with_udma(dev))
 		ppi[0] = &info_82c700_udma;
 
-	return ata_pci_init_one(dev, ppi);
+	return ata_pci_sff_init_one(dev, ppi, &optidma_sht, NULL);
 }
 
 static const struct pci_device_id optidma[] = {
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index 3e7f6a9..3d39f9d 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -128,71 +128,21 @@
 
 
 static struct scsi_host_template pcmcia_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_PIO_SHT(DRV_NAME),
 };
 
 static struct ata_port_operations pcmcia_port_ops = {
-	.set_mode	= pcmcia_set_mode,
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= ata_bmdma_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.inherits	= &ata_sff_port_ops,
+	.sff_data_xfer	= ata_sff_data_xfer_noirq,
 	.cable_detect	= ata_cable_40wire,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer_noirq,
-
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
+	.set_mode	= pcmcia_set_mode,
 };
 
 static struct ata_port_operations pcmcia_8bit_port_ops = {
-	.set_mode	= pcmcia_set_mode_8bit,
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= ata_bmdma_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.inherits	= &ata_sff_port_ops,
+	.sff_data_xfer	= ata_data_xfer_8bit,
 	.cable_detect	= ata_cable_40wire,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer_8bit,
-
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
+	.set_mode	= pcmcia_set_mode_8bit,
 };
 
 #define CS_CHECK(fn, ret) \
@@ -373,13 +323,13 @@
 		ap->ioaddr.cmd_addr = io_addr + 0x10 * p;
 		ap->ioaddr.altstatus_addr = ctl_addr + 0x10 * p;
 		ap->ioaddr.ctl_addr = ctl_addr + 0x10 * p;
-		ata_std_ports(&ap->ioaddr);
+		ata_sff_std_ports(&ap->ioaddr);
 
 		ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", io_base, ctl_base);
 	}
 
 	/* activate */
-	ret = ata_host_activate(host, pdev->irq.AssignedIRQ, ata_interrupt,
+	ret = ata_host_activate(host, pdev->irq.AssignedIRQ, ata_sff_interrupt,
 				IRQF_SHARED, &pcmcia_sht);
 	if (ret)
 		goto failed;
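
pata_pcmcia previously carried two nearly identical ops tables just to select different data-transfer routines; with inheritance each shrinks to the hooks that actually differ. A sketch, assuming the renamed sff_data_xfer slot:

static struct ata_port_operations example_pcmcia_ops = {
	.inherits	= &ata_sff_port_ops,
	.sff_data_xfer	= ata_sff_data_xfer_noirq,	/* PIO with IRQs masked */
	.cable_detect	= ata_cable_40wire,
};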
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index 511c89b..0e1c2c1 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -63,7 +63,7 @@
 };
 
 static int pdc2027x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
-static void pdc2027x_error_handler(struct ata_port *ap);
+static int pdc2027x_prereset(struct ata_link *link, unsigned long deadline);
 static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev);
 static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev);
 static int pdc2027x_check_atapi_dma(struct ata_queued_cmd *qc);
@@ -129,84 +129,22 @@
 };
 
 static struct scsi_host_template pdc2027x_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
 };
 
 static struct ata_port_operations pdc2027x_pata100_ops = {
-	.mode_filter		= ata_pci_default_filter,
-
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
-
+	.inherits		= &ata_bmdma_port_ops,
 	.check_atapi_dma	= pdc2027x_check_atapi_dma,
-	.bmdma_setup		= ata_bmdma_setup,
-	.bmdma_start		= ata_bmdma_start,
-	.bmdma_stop		= ata_bmdma_stop,
-	.bmdma_status		= ata_bmdma_status,
-	.qc_prep		= ata_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
-	.data_xfer		= ata_data_xfer,
-
-	.freeze			= ata_bmdma_freeze,
-	.thaw			= ata_bmdma_thaw,
-	.error_handler		= pdc2027x_error_handler,
-	.post_internal_cmd 	= ata_bmdma_post_internal_cmd,
 	.cable_detect		= pdc2027x_cable_detect,
-
-	.irq_clear		= ata_bmdma_irq_clear,
-	.irq_on			= ata_irq_on,
-
-	.port_start		= ata_sff_port_start,
+	.prereset		= pdc2027x_prereset,
 };
 
 static struct ata_port_operations pdc2027x_pata133_ops = {
+	.inherits		= &pdc2027x_pata100_ops,
+	.mode_filter		= pdc2027x_mode_filter,
 	.set_piomode		= pdc2027x_set_piomode,
 	.set_dmamode		= pdc2027x_set_dmamode,
 	.set_mode		= pdc2027x_set_mode,
-	.mode_filter		= pdc2027x_mode_filter,
-
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
-
-	.check_atapi_dma	= pdc2027x_check_atapi_dma,
-	.bmdma_setup		= ata_bmdma_setup,
-	.bmdma_start		= ata_bmdma_start,
-	.bmdma_stop		= ata_bmdma_stop,
-	.bmdma_status		= ata_bmdma_status,
-	.qc_prep		= ata_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
-	.data_xfer		= ata_data_xfer,
-
-	.freeze			= ata_bmdma_freeze,
-	.thaw			= ata_bmdma_thaw,
-	.error_handler		= pdc2027x_error_handler,
-	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
-	.cable_detect		= pdc2027x_cable_detect,
-
-	.irq_clear		= ata_bmdma_irq_clear,
-	.irq_on			= ata_irq_on,
-
-	.port_start		= ata_sff_port_start,
 };
 
 static struct ata_port_info pdc2027x_port_info[] = {
@@ -310,22 +248,7 @@
 	/* Check whether port enabled */
 	if (!pdc2027x_port_enabled(link->ap))
 		return -ENOENT;
-	return ata_std_prereset(link, deadline);
-}
-
-/**
- *	pdc2027x_error_handler - Perform reset on PATA port and classify
- *	@ap: Port to reset
- *
- *	Reset PATA phy and classify attached devices.
- *
- *	LOCKING:
- *	None (inherited from caller).
- */
-
-static void pdc2027x_error_handler(struct ata_port *ap)
-{
-	ata_bmdma_drive_eh(ap, pdc2027x_prereset, ata_std_softreset, NULL, ata_std_postreset);
+	return ata_sff_prereset(link, deadline);
 }
 
 /**
@@ -342,7 +265,7 @@
 	struct ata_device *pair = ata_dev_pair(adev);
 
 	if (adev->class != ATA_DEV_ATA || adev->devno == 0 || pair == NULL)
-		return ata_pci_default_filter(adev, mask);
+		return ata_bmdma_mode_filter(adev, mask);
 
 	/* Check for slave of a Maxtor at UDMA6 */
 	ata_id_c_string(pair->id, model_num, ATA_ID_PROD,
@@ -351,7 +274,7 @@
 	if (strstr(model_num, "Maxtor") == NULL && pair->dma_mode == XFER_UDMA_6)
 		mask &= ~ (1 << (6 + ATA_SHIFT_UDMA));
 
-	return ata_pci_default_filter(adev, mask);
+	return ata_bmdma_mode_filter(adev, mask);
 }
 
 /**
@@ -836,8 +759,8 @@
 		return -EIO;
 
 	pci_set_master(pdev);
-	return ata_host_activate(host, pdev->irq, ata_interrupt, IRQF_SHARED,
-				 &pdc2027x_sht);
+	return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+				 IRQF_SHARED, &pdc2027x_sht);
 }
 
 /**
diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
index 3ed8667..d267306 100644
--- a/drivers/ata/pata_pdc202xx_old.c
+++ b/drivers/ata/pata_pdc202xx_old.c
@@ -262,94 +262,34 @@
 }
 
 static struct scsi_host_template pdc202xx_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
 };
 
 static struct ata_port_operations pdc2024x_port_ops = {
-	.set_piomode	= pdc202xx_set_piomode,
-	.set_dmamode	= pdc202xx_set_dmamode,
-	.mode_filter	= ata_pci_default_filter,
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
+	.inherits		= &ata_bmdma_port_ops,
 
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= ata_bmdma_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-	.cable_detect	= ata_cable_40wire,
-
-	.bmdma_setup 	= ata_bmdma_setup,
-	.bmdma_start 	= ata_bmdma_start,
-	.bmdma_stop	= ata_bmdma_stop,
-	.bmdma_status 	= ata_bmdma_status,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-	.data_xfer	= ata_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
+	.cable_detect		= ata_cable_40wire,
+	.set_piomode		= pdc202xx_set_piomode,
+	.set_dmamode		= pdc202xx_set_dmamode,
 };
 
 static struct ata_port_operations pdc2026x_port_ops = {
-	.set_piomode	= pdc202xx_set_piomode,
-	.set_dmamode	= pdc202xx_set_dmamode,
-	.mode_filter	= ata_pci_default_filter,
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-	.dev_config	= pdc2026x_dev_config,
+	.inherits		= &pdc2024x_port_ops,
 
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= ata_bmdma_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-	.cable_detect	= pdc2026x_cable_detect,
+	.check_atapi_dma	= pdc2026x_check_atapi_dma,
+	.bmdma_start		= pdc2026x_bmdma_start,
+	.bmdma_stop		= pdc2026x_bmdma_stop,
 
-	.check_atapi_dma= pdc2026x_check_atapi_dma,
-	.bmdma_setup 	= ata_bmdma_setup,
-	.bmdma_start 	= pdc2026x_bmdma_start,
-	.bmdma_stop	= pdc2026x_bmdma_stop,
-	.bmdma_status 	= ata_bmdma_status,
+	.cable_detect		= pdc2026x_cable_detect,
+	.dev_config		= pdc2026x_dev_config,
 
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-	.data_xfer	= ata_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= pdc2026x_port_start,
+	.port_start		= pdc2026x_port_start,
 };
 
 static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 {
 	static const struct ata_port_info info[3] = {
 		{
-			.sht = &pdc202xx_sht,
 			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
@@ -357,7 +297,6 @@
 			.port_ops = &pdc2024x_port_ops
 		},
 		{
-			.sht = &pdc202xx_sht,
 			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
@@ -365,7 +304,6 @@
 			.port_ops = &pdc2026x_port_ops
 		},
 		{
-			.sht = &pdc202xx_sht,
 			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
@@ -386,7 +324,7 @@
 				return -ENODEV;
 		}
 	}
-	return ata_pci_init_one(dev, ppi);
+	return ata_pci_sff_init_one(dev, ppi, &pdc202xx_sht, NULL);
 }
 
 static const struct pci_device_id pdc202xx[] = {
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index aad7adc..6527c56 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -46,50 +46,16 @@
 	return 0;
 }
 
-static int ata_dummy_ret0(struct ata_port *ap)	{ return 0; }
-
 static struct scsi_host_template pata_platform_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_PIO_SHT(DRV_NAME),
 };
 
 static struct ata_port_operations pata_platform_port_ops = {
-	.set_mode		= pata_platform_set_mode,
-
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
-
-	.freeze			= ata_bmdma_freeze,
-	.thaw			= ata_bmdma_thaw,
-	.error_handler		= ata_bmdma_error_handler,
-	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
+	.inherits		= &ata_sff_port_ops,
+	.sff_data_xfer		= ata_sff_data_xfer_noirq,
 	.cable_detect		= ata_cable_unknown,
-
-	.qc_prep		= ata_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
-
-	.data_xfer		= ata_data_xfer_noirq,
-
-	.irq_clear		= ata_bmdma_irq_clear,
-	.irq_on			= ata_irq_on,
-
-	.port_start		= ata_dummy_ret0,
+	.set_mode		= pata_platform_set_mode,
+	.port_start		= ATA_OP_NULL,
 };
 
 static void pata_platform_setup_port(struct ata_ioports *ioaddr,
@@ -210,7 +176,7 @@
 		      (unsigned long long)ctl_res->start);
 
 	/* activate */
-	return ata_host_activate(host, irq, irq ? ata_interrupt : NULL,
+	return ata_host_activate(host, irq, irq ? ata_sff_interrupt : NULL,
 				 irq_flags, &pata_platform_sht);
 }
 EXPORT_SYMBOL_GPL(__pata_platform_probe);
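
pata_platform illustrates the non-PCI activation path: with the handler renamed to ata_sff_interrupt, a host without an interrupt line passes a NULL handler and libata falls back to polling. A sketch of that call, using the illustrative example_sht:

static int example_activate(struct ata_host *host, int irq,
			    unsigned long irq_flags)
{
	/* irq == 0 means no interrupt line: NULL handler selects polling. */
	return ata_host_activate(host, irq,
				 irq ? ata_sff_interrupt : NULL,
				 irq_flags, &example_sht);
}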
diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
index 9f308ed..bf45cf0 100644
--- a/drivers/ata/pata_qdi.c
+++ b/drivers/ata/pata_qdi.c
@@ -102,14 +102,14 @@
 }
 
 /**
- *	qdi_qc_issue_prot	-	command issue
+ *	qdi_qc_issue		-	command issue
  *	@qc: command pending
  *
  *	Called when the libata layer is about to issue a command. We wrap
  *	this interface so that we can load the correct ATA timings.
  */
 
-static unsigned int qdi_qc_issue_prot(struct ata_queued_cmd *qc)
+static unsigned int qdi_qc_issue(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct ata_device *adev = qc->dev;
@@ -121,7 +121,7 @@
 			outb(qdi->clock[adev->devno], qdi->timing);
 		}
 	}
-	return ata_qc_issue_prot(qc);
+	return ata_sff_qc_issue(qc);
 }
 
 static unsigned int qdi_data_xfer(struct ata_device *dev, unsigned char *buf,
@@ -148,79 +148,26 @@
 			buflen += 4 - slop;
 		}
 	} else
-		buflen = ata_data_xfer(dev, buf, buflen, rw);
+		buflen = ata_sff_data_xfer(dev, buf, buflen, rw);
 
 	return buflen;
 }
 
 static struct scsi_host_template qdi_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_PIO_SHT(DRV_NAME),
 };
 
 static struct ata_port_operations qdi6500_port_ops = {
-	.set_piomode	= qdi6500_set_piomode,
-
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= ata_bmdma_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.inherits	= &ata_sff_port_ops,
+	.qc_issue	= qdi_qc_issue,
+	.sff_data_xfer	= qdi_data_xfer,
 	.cable_detect	= ata_cable_40wire,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= qdi_qc_issue_prot,
-
-	.data_xfer	= qdi_data_xfer,
-
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
+	.set_piomode	= qdi6500_set_piomode,
 };
 
 static struct ata_port_operations qdi6580_port_ops = {
+	.inherits	= &qdi6500_port_ops,
 	.set_piomode	= qdi6580_set_piomode,
-
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= ata_bmdma_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-	.cable_detect	= ata_cable_40wire,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= qdi_qc_issue_prot,
-
-	.data_xfer	= qdi_data_xfer,
-
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
 };
 
 /**
@@ -276,7 +223,7 @@
 	ap->ioaddr.cmd_addr = io_addr;
 	ap->ioaddr.altstatus_addr = ctl_addr;
 	ap->ioaddr.ctl_addr = ctl_addr;
-	ata_std_ports(&ap->ioaddr);
+	ata_sff_std_ports(&ap->ioaddr);
 
 	ata_port_desc(ap, "cmd %lx ctl %lx", io, ctl);
 
@@ -292,7 +239,7 @@
 	printk(KERN_INFO DRV_NAME": qd%d at 0x%lx.\n", type, io);
 
 	/* activate */
-	ret = ata_host_activate(host, irq, ata_interrupt, 0, &qdi_sht);
+	ret = ata_host_activate(host, irq, ata_sff_interrupt, 0, &qdi_sht);
 	if (ret)
 		goto fail;
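
For legacy probes such as qdi, the driver fills in only the command and control block bases; the renamed ata_sff_std_ports() then derives the individual taskfile register addresses from cmd_addr. Sketch:

static void example_setup_port(struct ata_port *ap, void __iomem *io_addr,
			       void __iomem *ctl_addr)
{
	ap->ioaddr.cmd_addr = io_addr;		/* taskfile block base */
	ap->ioaddr.altstatus_addr = ctl_addr;
	ap->ioaddr.ctl_addr = ctl_addr;

	/* Fill data/error/nsect/lbal/... as fixed offsets from cmd_addr. */
	ata_sff_std_ports(&ap->ioaddr);
}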
 
diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
index 8109b08..1c0d9fa 100644
--- a/drivers/ata/pata_radisys.c
+++ b/drivers/ata/pata_radisys.c
@@ -156,7 +156,7 @@
 }
 
 /**
- *	radisys_qc_issue_prot	-	command issue
+ *	radisys_qc_issue	-	command issue
  *	@qc: command pending
  *
  *	Called when the libata layer is about to issue a command. We wrap
@@ -166,7 +166,7 @@
  *	be made PIO0.
  */
 
-static unsigned int radisys_qc_issue_prot(struct ata_queued_cmd *qc)
+static unsigned int radisys_qc_issue(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct ata_device *adev = qc->dev;
@@ -180,58 +180,20 @@
 				radisys_set_piomode(ap, adev);
 		}
 	}
-	return ata_qc_issue_prot(qc);
+	return ata_sff_qc_issue(qc);
 }
 
 
 static struct scsi_host_template radisys_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
 };
 
-static const struct ata_port_operations radisys_pata_ops = {
+static struct ata_port_operations radisys_pata_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	.qc_issue		= radisys_qc_issue,
+	.cable_detect		= ata_cable_unknown,
 	.set_piomode		= radisys_set_piomode,
 	.set_dmamode		= radisys_set_dmamode,
-	.mode_filter		= ata_pci_default_filter,
-
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
-
-	.freeze			= ata_bmdma_freeze,
-	.thaw			= ata_bmdma_thaw,
-	.error_handler		= ata_bmdma_error_handler,
-	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
-	.cable_detect		= ata_cable_unknown,
-
-	.bmdma_setup		= ata_bmdma_setup,
-	.bmdma_start		= ata_bmdma_start,
-	.bmdma_stop		= ata_bmdma_stop,
-	.bmdma_status		= ata_bmdma_status,
-	.qc_prep		= ata_qc_prep,
-	.qc_issue		= radisys_qc_issue_prot,
-	.data_xfer		= ata_data_xfer,
-
-	.irq_handler		= ata_interrupt,
-	.irq_clear		= ata_bmdma_irq_clear,
-	.irq_on			= ata_irq_on,
-
-	.port_start		= ata_sff_port_start,
 };
 
 
@@ -254,7 +216,6 @@
 {
 	static int printed_version;
 	static const struct ata_port_info info = {
-		.sht		= &radisys_sht,
 		.flags		= ATA_FLAG_SLAVE_POSS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma1-2 */
@@ -267,7 +228,7 @@
 		dev_printk(KERN_DEBUG, &pdev->dev,
 			   "version " DRV_VERSION "\n");
 
-	return ata_pci_init_one(pdev, ppi);
+	return ata_pci_sff_init_one(pdev, ppi, &radisys_sht, NULL);
 }
 
 static const struct pci_device_id radisys_pci_tbl[] = {
diff --git a/drivers/ata/pata_rb500_cf.c b/drivers/ata/pata_rb500_cf.c
index 4ce9b03f..800ae46 100644
--- a/drivers/ata/pata_rb500_cf.c
+++ b/drivers/ata/pata_rb500_cf.c
@@ -57,7 +57,7 @@
 	struct ata_host *ah = ap->host;
 	struct rb500_cf_info *info = ah->private_data;
 
-	ata_altstatus(ap);
+	ata_sff_altstatus(ap);
 	ndelay(RB500_CF_IO_DELAY);
 
 	set_irq_type(info->irq, IRQ_TYPE_LEVEL_HIGH);
@@ -109,7 +109,7 @@
 	if (gpio_get_value(info->gpio_line)) {
 		set_irq_type(info->irq, IRQ_TYPE_LEVEL_LOW);
 		if (!info->frozen)
-			ata_interrupt(info->irq, dev_instance);
+			ata_sff_interrupt(info->irq, dev_instance);
 	} else {
 		set_irq_type(info->irq, IRQ_TYPE_LEVEL_HIGH);
 	}
@@ -117,58 +117,18 @@
 	return IRQ_HANDLED;
 }
 
-static void rb500_pata_irq_clear(struct ata_port *ap)
-{
-}
-
-static int rb500_pata_port_start(struct ata_port *ap)
-{
-	return 0;
-}
-
 static struct ata_port_operations rb500_pata_port_ops = {
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-
-	.exec_command		= rb500_pata_exec_command,
-	.check_status 		= ata_check_status,
-	.dev_select 		= ata_std_dev_select,
-
-	.data_xfer		= rb500_pata_data_xfer,
-
-	.qc_prep 		= ata_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
-
+	.inherits		= &ata_sff_port_ops,
+	.sff_exec_command	= rb500_pata_exec_command,
+	.sff_data_xfer		= rb500_pata_data_xfer,
 	.freeze			= rb500_pata_freeze,
 	.thaw			= rb500_pata_thaw,
-	.error_handler		= ata_bmdma_error_handler,
-
-	.irq_handler		= rb500_pata_irq_handler,
-	.irq_clear		= rb500_pata_irq_clear,
-	.irq_on			= ata_irq_on,
-
-	.port_start		= rb500_pata_port_start,
 };
 
 /* ------------------------------------------------------------------------ */
 
 static struct scsi_host_template rb500_pata_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
-	.proc_name		= DRV_NAME,
-
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	ATA_PIO_SHT(DRV_NAME),
 };
 
 /* ------------------------------------------------------------------------ */
@@ -188,7 +148,7 @@
 	ap->ioaddr.ctl_addr	= info->iobase + RB500_CF_REG_CTRL;
 	ap->ioaddr.altstatus_addr = info->iobase + RB500_CF_REG_CTRL;
 
-	ata_std_ports(&ap->ioaddr);
+	ata_sff_std_ports(&ap->ioaddr);
 
 	ap->ioaddr.data_addr	= info->iobase + RB500_CF_REG_DATA;
 }
diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
index ba8a31c..7dfd1f3 100644
--- a/drivers/ata/pata_rz1000.c
+++ b/drivers/ata/pata_rz1000.c
@@ -53,53 +53,13 @@
 
 
 static struct scsi_host_template rz1000_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_PIO_SHT(DRV_NAME),
 };
 
 static struct ata_port_operations rz1000_port_ops = {
-	.set_mode	= rz1000_set_mode,
-
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.bmdma_setup 	= ata_bmdma_setup,
-	.bmdma_start 	= ata_bmdma_start,
-	.bmdma_stop	= ata_bmdma_stop,
-	.bmdma_status 	= ata_bmdma_status,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= ata_bmdma_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.inherits	= &ata_sff_port_ops,
 	.cable_detect	= ata_cable_40wire,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
+	.set_mode	= rz1000_set_mode,
 };
 
 static int rz1000_fifo_disable(struct pci_dev *pdev)
@@ -129,7 +89,6 @@
 {
 	static int printed_version;
 	static const struct ata_port_info info = {
-		.sht = &rz1000_sht,
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.port_ops = &rz1000_port_ops
@@ -140,7 +99,7 @@
 		printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
 
 	if (rz1000_fifo_disable(pdev) == 0)
-		return ata_pci_init_one(pdev, ppi);
+		return ata_pci_sff_init_one(pdev, ppi, &rz1000_sht, NULL);
 
 	printk(KERN_ERR DRV_NAME ": failed to disable read-ahead on chipset.\n");
 	/* Not safe to use so skip */
diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
index 725a858..cbab397 100644
--- a/drivers/ata/pata_sc1200.c
+++ b/drivers/ata/pata_sc1200.c
@@ -151,7 +151,7 @@
 }
 
 /**
- *	sc1200_qc_issue_prot	-	command issue
+ *	sc1200_qc_issue		-	command issue
  *	@qc: command pending
  *
  *	Called when the libata layer is about to issue a command. We wrap
@@ -160,7 +160,7 @@
  *	one MWDMA/UDMA bit.
  */
 
-static unsigned int sc1200_qc_issue_prot(struct ata_queued_cmd *qc)
+static unsigned int sc1200_qc_issue(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct ata_device *adev = qc->dev;
@@ -175,59 +175,21 @@
 		    	sc1200_set_dmamode(ap, adev);
 	}
 
-	return ata_qc_issue_prot(qc);
+	return ata_sff_qc_issue(qc);
 }
 
 static struct scsi_host_template sc1200_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_DUMB_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
+	.sg_tablesize	= LIBATA_DUMB_MAX_PRD,
 };
 
 static struct ata_port_operations sc1200_port_ops = {
+	.inherits	= &ata_bmdma_port_ops,
+	.qc_prep 	= ata_sff_dumb_qc_prep,
+	.qc_issue	= sc1200_qc_issue,
+	.cable_detect	= ata_cable_40wire,
 	.set_piomode	= sc1200_set_piomode,
 	.set_dmamode	= sc1200_set_dmamode,
-	.mode_filter	= ata_pci_default_filter,
-
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= ata_bmdma_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-	.cable_detect	= ata_cable_40wire,
-
-	.bmdma_setup 	= ata_bmdma_setup,
-	.bmdma_start 	= ata_bmdma_start,
-	.bmdma_stop	= ata_bmdma_stop,
-	.bmdma_status 	= ata_bmdma_status,
-
-	.qc_prep 	= ata_dumb_qc_prep,
-	.qc_issue	= sc1200_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
 };
 
 /**
@@ -242,7 +204,6 @@
 static int sc1200_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 {
 	static const struct ata_port_info info = {
-		.sht = &sc1200_sht,
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
@@ -252,7 +213,7 @@
 	/* Can't enable port 2 yet, see top comments */
 	const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info };
 
-	return ata_pci_init_one(dev, ppi);
+	return ata_pci_sff_init_one(dev, ppi, &sc1200_sht, NULL);
 }
 
 static const struct pci_device_id sc1200[] = {
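The scsi_host_template boilerplate collapses the same way throughout the series: ATA_BMDMA_SHT(), ATA_PIO_SHT() and ATA_BASE_SHT() supply the defaults and a driver overrides individual fields after the macro, as sc1200_sht does with its dumb-PRD sg_tablesize. This relies on a C99 designated-initializer property: a later initializer for the same member overrides an earlier one. A hedged sketch with invented fields, not the real scsi_host_template:

/* Sketch of the designated-initializer trick behind the *_SHT()
 * macros: a later initializer for the same member overrides an
 * earlier one (C99 6.7.8p19), so drivers can take the macro defaults
 * and respecialize a field.  Names below are invented.
 */
struct host_template_sketch {
	const char *name;
	int sg_tablesize;
	int can_queue;
};

#define EXAMPLE_BASE_SHT(drv_name)	\
	.name		= (drv_name),	\
	.sg_tablesize	= 128,		\
	.can_queue	= 1

static struct host_template_sketch example_sht = {
	EXAMPLE_BASE_SHT("example"),
	.sg_tablesize	= 64,	/* override: dumb DMA engine */
};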
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
index 6c016de..e965b25 100644
--- a/drivers/ata/pata_scc.c
+++ b/drivers/ata/pata_scc.c
@@ -266,7 +266,7 @@
 		printk(KERN_INFO "%s: limit ATAPI UDMA to UDMA4\n", DRV_NAME);
 		mask &= ~(0xE0 << ATA_SHIFT_UDMA);
 	}
-	return ata_pci_default_filter(adev, mask);
+	return ata_bmdma_mode_filter(adev, mask);
 }
 
 /**
@@ -274,7 +274,7 @@
  *	@ap: Port to which output is sent
  *	@tf: ATA taskfile register set
  *
- *	Note: Original code is ata_tf_load().
+ *	Note: Original code is ata_sff_tf_load().
  */
 
 static void scc_tf_load (struct ata_port *ap, const struct ata_taskfile *tf)
@@ -341,7 +341,7 @@
  *	@ap: Port from which input is read
  *	@tf: ATA taskfile register set for storing input
  *
- *	Note: Original code is ata_tf_read().
+ *	Note: Original code is ata_sff_tf_read().
  */
 
 static void scc_tf_read (struct ata_port *ap, struct ata_taskfile *tf)
@@ -373,7 +373,7 @@
  *	@ap: port to which command is being issued
  *	@tf: ATA taskfile register set
  *
- *	Note: Original code is ata_exec_command().
+ *	Note: Original code is ata_sff_exec_command().
  */
 
 static void scc_exec_command (struct ata_port *ap,
@@ -382,7 +382,7 @@
 	DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
 
 	out_be32(ap->ioaddr.command_addr, tf->command);
-	ata_pause(ap);
+	ata_sff_pause(ap);
 }
 
 /**
@@ -396,14 +396,14 @@
 }
 
 /**
- *	scc_std_dev_select - Select device 0/1 on ATA bus
+ *	scc_dev_select - Select device 0/1 on ATA bus
  *	@ap: ATA channel to manipulate
  *	@device: ATA device (numbered from zero) to select
  *
- *	Note: Original code is ata_std_dev_select().
+ *	Note: Original code is ata_sff_dev_select().
  */
 
-static void scc_std_dev_select (struct ata_port *ap, unsigned int device)
+static void scc_dev_select (struct ata_port *ap, unsigned int device)
 {
 	u8 tmp;
 
@@ -413,7 +413,7 @@
 		tmp = ATA_DEVICE_OBS | ATA_DEV1;
 
 	out_be32(ap->ioaddr.device_addr, tmp);
-	ata_pause(ap);
+	ata_sff_pause(ap);
 }
 
 /**
@@ -441,7 +441,7 @@
 	out_be32(mmio + SCC_DMA_CMD, dmactl);
 
 	/* issue r/w command */
-	ap->ops->exec_command(ap, &qc->tf);
+	ap->ops->sff_exec_command(ap, &qc->tf);
 }
 
 /**
@@ -476,7 +476,7 @@
 	struct ata_ioports *ioaddr = &ap->ioaddr;
 	u8 nsect, lbal;
 
-	ap->ops->dev_select(ap, device);
+	ap->ops->sff_dev_select(ap, device);
 
 	out_be32(ioaddr->nsect_addr, 0x55);
 	out_be32(ioaddr->lbal_addr, 0xaa);
@@ -497,57 +497,78 @@
 }
 
 /**
- *	scc_bus_post_reset - PATA device post reset
+ *	scc_wait_after_reset - wait for devices to become ready after reset
  *
- *	Note: Original code is ata_bus_post_reset().
+ *	Note: Original code is ata_sff_wait_after_reset().
  */
 
-static int scc_bus_post_reset(struct ata_port *ap, unsigned int devmask,
-                              unsigned long deadline)
+static int scc_wait_after_reset(struct ata_link *link, unsigned int devmask,
+				unsigned long deadline)
 {
+	struct ata_port *ap = link->ap;
 	struct ata_ioports *ioaddr = &ap->ioaddr;
 	unsigned int dev0 = devmask & (1 << 0);
 	unsigned int dev1 = devmask & (1 << 1);
-	int rc;
+	int rc, ret = 0;
 
-	/* if device 0 was found in ata_devchk, wait for its
-	 * BSY bit to clear
+	/* Spec mandates ">= 2ms" before checking status.  We wait
+	 * 150ms, because that was the magic delay used for ATAPI
+	 * devices in Hale Landis's ATADRVR, for the period of time
+	 * between when the ATA command register is written and when
+	 * status is checked.  Because waiting for "a while" before
+	 * checking status is fine, post SRST, we perform this magic
+	 * delay here as well.
+	 *
+	 * Old drivers/ide uses the 2ms rule and then waits for ready.
 	 */
-	if (dev0) {
-		rc = ata_wait_ready(ap, deadline);
-		if (rc && rc != -ENODEV)
-			return rc;
-	}
+	msleep(150);
 
-	/* if device 1 was found in ata_devchk, wait for
-	 * register access, then wait for BSY to clear
+	/* always check readiness of the master device */
+	rc = ata_sff_wait_ready(link, deadline);
+	/* -ENODEV means the odd clown forgot the D7 pulldown resistor
+	 * and TF status is 0xff, bail out on it too.
 	 */
-	while (dev1) {
-		u8 nsect, lbal;
+	if (rc)
+		return rc;
 
-		ap->ops->dev_select(ap, 1);
-		nsect = in_be32(ioaddr->nsect_addr);
-		lbal = in_be32(ioaddr->lbal_addr);
-		if ((nsect == 1) && (lbal == 1))
-			break;
-		if (time_after(jiffies, deadline))
-			return -EBUSY;
-		msleep(50);	/* give drive a breather */
-	}
+	/* if device 1 was found in ata_devchk, wait for register
+	 * access briefly, then wait for BSY to clear.
+	 */
 	if (dev1) {
-		rc = ata_wait_ready(ap, deadline);
-		if (rc && rc != -ENODEV)
-			return rc;
+		int i;
+
+		ap->ops->sff_dev_select(ap, 1);
+
+		/* Wait for register access.  Some ATAPI devices fail
+		 * to set nsect/lbal after reset, so don't waste too
+		 * much time on it.  We're gonna wait for !BSY anyway.
+		 */
+		for (i = 0; i < 2; i++) {
+			u8 nsect, lbal;
+
+			nsect = in_be32(ioaddr->nsect_addr);
+			lbal = in_be32(ioaddr->lbal_addr);
+			if ((nsect == 1) && (lbal == 1))
+				break;
+			msleep(50);	/* give drive a breather */
+		}
+
+		rc = ata_sff_wait_ready(link, deadline);
+		if (rc) {
+			if (rc != -ENODEV)
+				return rc;
+			ret = rc;
+		}
 	}
 
 	/* is all this really necessary? */
-	ap->ops->dev_select(ap, 0);
+	ap->ops->sff_dev_select(ap, 0);
 	if (dev1)
-		ap->ops->dev_select(ap, 1);
+		ap->ops->sff_dev_select(ap, 1);
 	if (dev0)
-		ap->ops->dev_select(ap, 0);
+		ap->ops->sff_dev_select(ap, 0);
 
-	return 0;
+	return ret;
 }
 
 /**
@@ -570,32 +591,22 @@
 	udelay(20);
 	out_be32(ioaddr->ctl_addr, ap->ctl);
 
-	/* wait a while before checking status */
-	ata_wait_after_reset(ap, deadline);
-
-	/* Before we perform post reset processing we want to see if
-	 * the bus shows 0xFF because the odd clown forgets the D7
-	 * pulldown resistor.
-	 */
-	if (scc_check_status(ap) == 0xFF)
-		return 0;
-
-	scc_bus_post_reset(ap, devmask, deadline);
+	scc_wait_after_reset(&ap->link, devmask, deadline);
 
 	return 0;
 }
 
 /**
- *	scc_std_softreset - reset host port via ATA SRST
+ *	scc_softreset - reset host port via ATA SRST
  *	@ap: port to reset
  *	@classes: resulting classes of attached devices
  *	@deadline: deadline jiffies for the operation
  *
- *	Note: Original code is ata_std_softreset().
+ *	Note: Original code is ata_sff_softreset().
  */
 
-static int scc_std_softreset(struct ata_link *link, unsigned int *classes,
-                             unsigned long deadline)
+static int scc_softreset(struct ata_link *link, unsigned int *classes,
+			 unsigned long deadline)
 {
 	struct ata_port *ap = link->ap;
 	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
@@ -604,11 +615,6 @@
 
 	DPRINTK("ENTER\n");
 
-	if (ata_link_offline(link)) {
-		classes[0] = ATA_DEV_NONE;
-		goto out;
-	}
-
 	/* determine if device 0/1 are present */
 	if (scc_devchk(ap, 0))
 		devmask |= (1 << 0);
@@ -616,7 +622,7 @@
 		devmask |= (1 << 1);
 
 	/* select device 0 again */
-	ap->ops->dev_select(ap, 0);
+	ap->ops->sff_dev_select(ap, 0);
 
 	/* issue bus reset */
 	DPRINTK("about to softreset, devmask=%x\n", devmask);
@@ -628,13 +634,12 @@
 	}
 
 	/* determine by signature whether we have ATA or ATAPI devices */
-	classes[0] = ata_dev_try_classify(&ap->link.device[0],
+	classes[0] = ata_sff_dev_classify(&ap->link.device[0],
 					  devmask & (1 << 0), &err);
 	if (slave_possible && err != 0x81)
-		classes[1] = ata_dev_try_classify(&ap->link.device[1],
+		classes[1] = ata_sff_dev_classify(&ap->link.device[1],
 						  devmask & (1 << 1), &err);
 
- out:
 	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
 	return 0;
 }
@@ -695,7 +700,7 @@
 			printk(KERN_WARNING "%s: Internal Bus Error\n", DRV_NAME);
 			out_be32(bmid_base + SCC_DMA_INTST, INTSTS_BMSINT);
 			/* TBD: SW reset */
-			scc_std_softreset(&ap->link, &classes, deadline);
+			scc_softreset(&ap->link, &classes, deadline);
 			continue;
 		}
 
@@ -721,7 +726,7 @@
 		 in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
 
 	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
-	ata_altstatus(ap);	/* dummy read */
+	ata_sff_altstatus(ap);	/* dummy read */
 }
 
 /**
@@ -742,7 +747,7 @@
 		return host_stat;
 
 	/* errata A252,A308 workaround: Step4 */
-	if ((ata_altstatus(ap) & ATA_ERR) && (int_status & INTSTS_INTRQ))
+	if ((ata_sff_altstatus(ap) & ATA_ERR) && (int_status & INTSTS_INTRQ))
 		return (host_stat | ATA_DMA_INTR);
 
 	/* errata A308 workaround Step5 */
@@ -773,7 +778,7 @@
  *	@buflen: buffer length
  *	@rw: read/write
  *
- *	Note: Original code is ata_data_xfer().
+ *	Note: Original code is ata_sff_data_xfer().
  */
 
 static unsigned int scc_data_xfer (struct ata_device *dev, unsigned char *buf,
@@ -782,28 +787,28 @@
 	struct ata_port *ap = dev->link->ap;
 	unsigned int words = buflen >> 1;
 	unsigned int i;
-	u16 *buf16 = (u16 *) buf;
+	__le16 *buf16 = (__le16 *) buf;
 	void __iomem *mmio = ap->ioaddr.data_addr;
 
 	/* Transfer multiple of 2 bytes */
 	if (rw == READ)
 		for (i = 0; i < words; i++)
-			buf16[i] = le16_to_cpu(in_be32(mmio));
+			buf16[i] = cpu_to_le16(in_be32(mmio));
 	else
 		for (i = 0; i < words; i++)
-			out_be32(mmio, cpu_to_le16(buf16[i]));
+			out_be32(mmio, le16_to_cpu(buf16[i]));
 
 	/* Transfer trailing 1 byte, if any. */
 	if (unlikely(buflen & 0x01)) {
-		u16 align_buf[1] = { 0 };
+		__le16 align_buf[1] = { 0 };
 		unsigned char *trailing_buf = buf + buflen - 1;
 
 		if (rw == READ) {
-			align_buf[0] = le16_to_cpu(in_be32(mmio));
+			align_buf[0] = cpu_to_le16(in_be32(mmio));
 			memcpy(trailing_buf, align_buf, 1);
 		} else {
 			memcpy(align_buf, trailing_buf, 1);
-			out_be32(mmio, cpu_to_le16(align_buf[0]));
+			out_be32(mmio, le16_to_cpu(align_buf[0]));
 		}
 		words++;
 	}
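The scc_data_xfer hunk above is a type-correctness fix, not just a rename: the PIO data register carries ATA data that is little-endian on the wire, so the CPU-order value returned by in_be32() must be converted with cpu_to_le16() before being stored in the now honestly annotated __le16 buffer, and converted back with le16_to_cpu() on the way out. On the big-endian platforms this driver targets, both conversions compile to the same byte swap, so behavior is unchanged; what changes is that sparse can now check the annotations. A portable userspace sketch of the same bookkeeping:

/* Userspace sketch of the __le16 bookkeeping above.  cpu_to_le16() is
 * a byte swap on big-endian CPUs and a no-op on little-endian ones;
 * either way the buffer ends up holding raw little-endian ATA data.
 */
#include <stdint.h>

static inline uint16_t cpu_to_le16_sketch(uint16_t v)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return (uint16_t)((v << 8) | (v >> 8));
#else
	return v;
#endif
}

/* store one CPU-order data word into the little-endian PIO buffer */
static inline void store_pio_word(uint16_t *le_buf, uint16_t cpu_val)
{
	*le_buf = cpu_to_le16_sketch(cpu_val);
}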
@@ -815,7 +820,7 @@
  *	scc_irq_on - Enable interrupts on a port.
  *	@ap: Port on which interrupts are enabled.
  *
- *	Note: Original code is ata_irq_on().
+ *	Note: Original code is ata_sff_irq_on().
  */
 
 static u8 scc_irq_on (struct ata_port *ap)
@@ -829,19 +834,19 @@
 	out_be32(ioaddr->ctl_addr, ap->ctl);
 	tmp = ata_wait_idle(ap);
 
-	ap->ops->irq_clear(ap);
+	ap->ops->sff_irq_clear(ap);
 
 	return tmp;
 }
 
 /**
- *	scc_bmdma_freeze - Freeze BMDMA controller port
+ *	scc_freeze - Freeze BMDMA controller port
  *	@ap: port to freeze
  *
- *	Note: Original code is ata_bmdma_freeze().
+ *	Note: Original code is ata_sff_freeze().
  */
 
-static void scc_bmdma_freeze (struct ata_port *ap)
+static void scc_freeze (struct ata_port *ap)
 {
 	struct ata_ioports *ioaddr = &ap->ioaddr;
 
@@ -854,9 +859,9 @@
 	 * ATA_NIEN manipulation.  Also, many controllers fail to mask
 	 * previously pending IRQ on ATA_NIEN assertion.  Clear it.
 	 */
-	ata_chk_status(ap);
+	ap->ops->sff_check_status(ap);
 
-	ap->ops->irq_clear(ap);
+	ap->ops->sff_irq_clear(ap);
 }
 
 /**
@@ -868,18 +873,18 @@
 static int scc_pata_prereset(struct ata_link *link, unsigned long deadline)
 {
 	link->ap->cbl = ATA_CBL_PATA80;
-	return ata_std_prereset(link, deadline);
+	return ata_sff_prereset(link, deadline);
 }
 
 /**
- *	scc_std_postreset - standard postreset callback
+ *	scc_postreset - standard postreset callback
  *	@ap: the target ata_port
  *	@classes: classes of attached devices
  *
- *	Note: Original code is ata_std_postreset().
+ *	Note: Original code is ata_sff_postreset().
  */
 
-static void scc_std_postreset(struct ata_link *link, unsigned int *classes)
+static void scc_postreset(struct ata_link *link, unsigned int *classes)
 {
 	struct ata_port *ap = link->ap;
 
@@ -887,9 +892,9 @@
 
 	/* is double-select really necessary? */
 	if (classes[0] != ATA_DEV_NONE)
-		ap->ops->dev_select(ap, 1);
+		ap->ops->sff_dev_select(ap, 1);
 	if (classes[1] != ATA_DEV_NONE)
-		ap->ops->dev_select(ap, 0);
+		ap->ops->sff_dev_select(ap, 0);
 
 	/* bail out if no device is present */
 	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
@@ -905,24 +910,13 @@
 }
 
 /**
- *	scc_error_handler - Stock error handler for BMDMA controller
- *	@ap: port to handle error for
- */
-
-static void scc_error_handler (struct ata_port *ap)
-{
-	ata_bmdma_drive_eh(ap, scc_pata_prereset, scc_std_softreset, NULL,
-			   scc_std_postreset);
-}
-
-/**
- *	scc_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
+ *	scc_irq_clear - Clear PCI IDE BMDMA interrupt.
  *	@ap: Port associated with this ATA transaction.
  *
- *	Note: Original code is ata_bmdma_irq_clear().
+ *	Note: Original code is ata_sff_irq_clear().
  */
 
-static void scc_bmdma_irq_clear (struct ata_port *ap)
+static void scc_irq_clear (struct ata_port *ap)
 {
 	void __iomem *mmio = ap->ioaddr.bmdma_addr;
 
@@ -968,52 +962,37 @@
 }
 
 static struct scsi_host_template scc_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
 };
 
-static const struct ata_port_operations scc_pata_ops = {
+static struct ata_port_operations scc_pata_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+
 	.set_piomode		= scc_set_piomode,
 	.set_dmamode		= scc_set_dmamode,
 	.mode_filter		= scc_mode_filter,
 
-	.tf_load		= scc_tf_load,
-	.tf_read		= scc_tf_read,
-	.exec_command		= scc_exec_command,
-	.check_status		= scc_check_status,
-	.check_altstatus	= scc_check_altstatus,
-	.dev_select		= scc_std_dev_select,
+	.sff_tf_load		= scc_tf_load,
+	.sff_tf_read		= scc_tf_read,
+	.sff_exec_command	= scc_exec_command,
+	.sff_check_status	= scc_check_status,
+	.sff_check_altstatus	= scc_check_altstatus,
+	.sff_dev_select		= scc_dev_select,
 
 	.bmdma_setup		= scc_bmdma_setup,
 	.bmdma_start		= scc_bmdma_start,
 	.bmdma_stop		= scc_bmdma_stop,
 	.bmdma_status		= scc_bmdma_status,
-	.data_xfer		= scc_data_xfer,
+	.sff_data_xfer		= scc_data_xfer,
 
-	.qc_prep		= ata_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
-
-	.freeze			= scc_bmdma_freeze,
-	.thaw			= ata_bmdma_thaw,
-
-	.error_handler		= scc_error_handler,
+	.freeze			= scc_freeze,
+	.prereset		= scc_pata_prereset,
+	.softreset		= scc_softreset,
+	.postreset		= scc_postreset,
 	.post_internal_cmd	= scc_bmdma_stop,
 
-	.irq_clear		= scc_bmdma_irq_clear,
-	.irq_on			= scc_irq_on,
+	.sff_irq_clear		= scc_irq_clear,
+	.sff_irq_on		= scc_irq_on,
 
 	.port_start		= scc_port_start,
 	.port_stop		= scc_port_stop,
@@ -1166,8 +1145,8 @@
 	if (rc)
 		return rc;
 
-	return ata_host_activate(host, pdev->irq, ata_interrupt, IRQF_SHARED,
-				 &scc_sht);
+	return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+				 IRQF_SHARED, &scc_sht);
 }
 
 static struct pci_driver scc_pci_driver = {
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
index a589c0f..ffd26d0 100644
--- a/drivers/ata/pata_serverworks.c
+++ b/drivers/ata/pata_serverworks.c
@@ -199,7 +199,7 @@
 {
 	if (adev->class == ATA_DEV_ATA)
 		mask &= ~ATA_MASK_UDMA;
-	return ata_pci_default_filter(adev, mask);
+	return ata_bmdma_mode_filter(adev, mask);
 }
 
 
@@ -219,7 +219,7 @@
 
 	/* Disk, UDMA */
 	if (adev->class != ATA_DEV_ATA)
-		return ata_pci_default_filter(adev, mask);
+		return ata_bmdma_mode_filter(adev, mask);
 
 	/* Actually do need to check */
 	ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num));
@@ -228,7 +228,7 @@
 		if (!strcmp(p, model_num))
 			mask &= ~(0xE0 << ATA_SHIFT_UDMA);
 	}
-	return ata_pci_default_filter(adev, mask);
+	return ata_bmdma_mode_filter(adev, mask);
 }
 
 /**
@@ -298,89 +298,20 @@
 }
 
 static struct scsi_host_template serverworks_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
 };
 
 static struct ata_port_operations serverworks_osb4_port_ops = {
+	.inherits	= &ata_bmdma_port_ops,
+	.cable_detect	= serverworks_cable_detect,
+	.mode_filter	= serverworks_osb4_filter,
 	.set_piomode	= serverworks_set_piomode,
 	.set_dmamode	= serverworks_set_dmamode,
-	.mode_filter	= serverworks_osb4_filter,
-
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= ata_bmdma_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-	.cable_detect	= serverworks_cable_detect,
-
-	.bmdma_setup 	= ata_bmdma_setup,
-	.bmdma_start 	= ata_bmdma_start,
-	.bmdma_stop	= ata_bmdma_stop,
-	.bmdma_status 	= ata_bmdma_status,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
 };
 
 static struct ata_port_operations serverworks_csb_port_ops = {
-	.set_piomode	= serverworks_set_piomode,
-	.set_dmamode	= serverworks_set_dmamode,
+	.inherits	= &serverworks_osb4_port_ops,
 	.mode_filter	= serverworks_csb_filter,
-
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= ata_bmdma_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-	.cable_detect	= serverworks_cable_detect,
-
-	.bmdma_setup 	= ata_bmdma_setup,
-	.bmdma_start 	= ata_bmdma_start,
-	.bmdma_stop	= ata_bmdma_stop,
-	.bmdma_status 	= ata_bmdma_status,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
 };
 
 static int serverworks_fixup_osb4(struct pci_dev *pdev)
@@ -468,28 +399,24 @@
 {
 	static const struct ata_port_info info[4] = {
 		{ /* OSB4 */
-			.sht = &serverworks_sht,
 			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
 			.udma_mask = 0x07,
 			.port_ops = &serverworks_osb4_port_ops
 		}, { /* OSB4 no UDMA */
-			.sht = &serverworks_sht,
 			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
 			.udma_mask = 0x00,
 			.port_ops = &serverworks_osb4_port_ops
 		}, { /* CSB5 */
-			.sht = &serverworks_sht,
 			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
 			.udma_mask = ATA_UDMA4,
 			.port_ops = &serverworks_csb_port_ops
 		}, { /* CSB5 - later revisions*/
-			.sht = &serverworks_sht,
 			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
@@ -498,6 +425,11 @@
 		}
 	};
 	const struct ata_port_info *ppi[] = { &info[id->driver_data], NULL };
+	int rc;
+
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
 
 	/* Force master latency timer to 64 PCI clocks */
 	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x40);
@@ -527,24 +459,30 @@
 		serverworks_fixup_ht1000(pdev);
 
 	if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE)
-		ata_pci_clear_simplex(pdev);
+		ata_pci_bmdma_clear_simplex(pdev);
 
-	return ata_pci_init_one(pdev, ppi);
+	return ata_pci_sff_init_one(pdev, ppi, &serverworks_sht, NULL);
 }
 
 #ifdef CONFIG_PM
 static int serverworks_reinit_one(struct pci_dev *pdev)
 {
+	struct ata_host *host = dev_get_drvdata(&pdev->dev);
+	int rc;
+
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc)
+		return rc;
+
 	/* Force master latency timer to 64 PCI clocks */
 	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x40);
 
-	switch (pdev->device)
-	{
+	switch (pdev->device) {
 		case PCI_DEVICE_ID_SERVERWORKS_OSB4IDE:
 			serverworks_fixup_osb4(pdev);
 			break;
 		case PCI_DEVICE_ID_SERVERWORKS_CSB5IDE:
-			ata_pci_clear_simplex(pdev);
+			ata_pci_bmdma_clear_simplex(pdev);
 			/* fall through */
 		case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE:
 		case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2:
@@ -554,7 +492,9 @@
 			serverworks_fixup_ht1000(pdev);
 			break;
 	}
-	return ata_pci_device_resume(pdev);
+
+	ata_host_resume(host);
+	return 0;
 }
 #endif
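serverworks_reinit_one above, like sil680 and via below, trades the all-in-one ata_pci_device_resume() for the split ata_pci_device_do_resume() plus ata_host_resume(), so chipset re-initialization can run after the PCI device is powered and enabled but before libata starts issuing commands again. The shape of the pattern, with foo_fixup_chip() as a placeholder for the driver-specific setup:

/* Shape of the split resume used by the converted drivers (sketch):
 * driver-specific fixups run after the PCI core work but before
 * libata restarts the ports.  foo_fixup_chip() is a placeholder.
 */
#include <linux/pci.h>
#include <linux/libata.h>

static void foo_fixup_chip(struct pci_dev *pdev)
{
	/* redo the chipset setup lost over suspend */
}

static int foo_reinit_one(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);	/* power up + enable */
	if (rc)
		return rc;

	foo_fixup_chip(pdev);

	ata_host_resume(host);			/* EH restarts the ports */
	return 0;
}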
 
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
index 7c5b2dd..720b864 100644
--- a/drivers/ata/pata_sil680.c
+++ b/drivers/ata/pata_sil680.c
@@ -192,54 +192,14 @@
 }
 
 static struct scsi_host_template sil680_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
 };
 
 static struct ata_port_operations sil680_port_ops = {
+	.inherits	= &ata_bmdma_port_ops,
+	.cable_detect	= sil680_cable_detect,
 	.set_piomode	= sil680_set_piomode,
 	.set_dmamode	= sil680_set_dmamode,
-	.mode_filter	= ata_pci_default_filter,
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= ata_bmdma_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-	.cable_detect	= sil680_cable_detect,
-
-	.bmdma_setup 	= ata_bmdma_setup,
-	.bmdma_start 	= ata_bmdma_start,
-	.bmdma_stop	= ata_bmdma_stop,
-	.bmdma_status 	= ata_bmdma_status,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
 };
 
 /**
@@ -322,7 +282,6 @@
 				     const struct pci_device_id *id)
 {
 	static const struct ata_port_info info = {
-		.sht = &sil680_sht,
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
@@ -330,7 +289,6 @@
 		.port_ops = &sil680_port_ops
 	};
 	static const struct ata_port_info info_slow = {
-		.sht = &sil680_sht,
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
@@ -346,6 +304,10 @@
 	if (!printed_version++)
 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
 
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
 	switch (sil680_init_chip(pdev, &try_mmio)) {
 		case 0:
 			ppi[0] = &info_slow;
@@ -388,28 +350,33 @@
 	host->ports[0]->ioaddr.cmd_addr = mmio_base + 0x80;
 	host->ports[0]->ioaddr.ctl_addr = mmio_base + 0x8a;
 	host->ports[0]->ioaddr.altstatus_addr = mmio_base + 0x8a;
-	ata_std_ports(&host->ports[0]->ioaddr);
+	ata_sff_std_ports(&host->ports[0]->ioaddr);
 	host->ports[1]->ioaddr.bmdma_addr = mmio_base + 0x08;
 	host->ports[1]->ioaddr.cmd_addr = mmio_base + 0xc0;
 	host->ports[1]->ioaddr.ctl_addr = mmio_base + 0xca;
 	host->ports[1]->ioaddr.altstatus_addr = mmio_base + 0xca;
-	ata_std_ports(&host->ports[1]->ioaddr);
+	ata_sff_std_ports(&host->ports[1]->ioaddr);
 
 	/* Register & activate */
-	return ata_host_activate(host, pdev->irq, ata_interrupt, IRQF_SHARED,
-				 &sil680_sht);
+	return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+				 IRQF_SHARED, &sil680_sht);
 
 use_ioports:
-	return ata_pci_init_one(pdev, ppi);
+	return ata_pci_sff_init_one(pdev, ppi, &sil680_sht, NULL);
 }
 
 #ifdef CONFIG_PM
 static int sil680_reinit_one(struct pci_dev *pdev)
 {
-	int try_mmio;
+	struct ata_host *host = dev_get_drvdata(&pdev->dev);
+	int try_mmio, rc;
 
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc)
+		return rc;
 	sil680_init_chip(pdev, &try_mmio);
-	return ata_pci_device_resume(pdev);
+	ata_host_resume(host);
+	return 0;
 }
 #endif
 
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index dc7e915..e82c66e 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -156,24 +156,11 @@
 	/* Clear the FIFO settings. We can't enable the FIFO until
 	   we know we are poking at a disk */
 	pci_write_config_byte(pdev, 0x4B, 0);
-	return ata_std_prereset(link, deadline);
+	return ata_sff_prereset(link, deadline);
 }
 
 
 /**
- *	sis_error_handler - Probe specified port on PATA host controller
- *	@ap: Port to probe
- *
- *	LOCKING:
- *	None (inherited from caller).
- */
-
-static void sis_error_handler(struct ata_port *ap)
-{
-	ata_bmdma_drive_eh(ap, sis_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
-}
-
-/**
  *	sis_set_fifo	-	Set RWP fifo bits for this device
  *	@ap: Port
  *	@adev: Device
@@ -514,217 +501,57 @@
 }
 
 static struct scsi_host_template sis_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
 };
 
-static const struct ata_port_operations sis_133_ops = {
+static struct ata_port_operations sis_133_for_sata_ops = {
+	.inherits		= &ata_bmdma_port_ops,
 	.set_piomode		= sis_133_set_piomode,
 	.set_dmamode		= sis_133_set_dmamode,
-	.mode_filter		= ata_pci_default_filter,
-
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
-
-	.freeze			= ata_bmdma_freeze,
-	.thaw			= ata_bmdma_thaw,
-	.error_handler		= sis_error_handler,
-	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
 	.cable_detect		= sis_133_cable_detect,
-
-	.bmdma_setup		= ata_bmdma_setup,
-	.bmdma_start		= ata_bmdma_start,
-	.bmdma_stop		= ata_bmdma_stop,
-	.bmdma_status		= ata_bmdma_status,
-	.qc_prep		= ata_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
-	.data_xfer		= ata_data_xfer,
-
-	.irq_handler		= ata_interrupt,
-	.irq_clear		= ata_bmdma_irq_clear,
-	.irq_on			= ata_irq_on,
-
-	.port_start		= ata_sff_port_start,
 };
 
-static const struct ata_port_operations sis_133_for_sata_ops = {
+static struct ata_port_operations sis_base_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	.prereset		= sis_pre_reset,
+};
+
+static struct ata_port_operations sis_133_ops = {
+	.inherits		= &sis_base_ops,
 	.set_piomode		= sis_133_set_piomode,
 	.set_dmamode		= sis_133_set_dmamode,
-	.mode_filter		= ata_pci_default_filter,
-
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
-
-	.freeze			= ata_bmdma_freeze,
-	.thaw			= ata_bmdma_thaw,
-	.error_handler		= ata_bmdma_error_handler,
-	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
 	.cable_detect		= sis_133_cable_detect,
-
-	.bmdma_setup		= ata_bmdma_setup,
-	.bmdma_start		= ata_bmdma_start,
-	.bmdma_stop		= ata_bmdma_stop,
-	.bmdma_status		= ata_bmdma_status,
-	.qc_prep		= ata_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
-	.data_xfer		= ata_data_xfer,
-
-	.irq_handler		= ata_interrupt,
-	.irq_clear		= ata_bmdma_irq_clear,
-	.irq_on			= ata_irq_on,
-
-	.port_start		= ata_sff_port_start,
 };
 
-static const struct ata_port_operations sis_133_early_ops = {
+static struct ata_port_operations sis_133_early_ops = {
+	.inherits		= &sis_base_ops,
 	.set_piomode		= sis_100_set_piomode,
 	.set_dmamode		= sis_133_early_set_dmamode,
-	.mode_filter		= ata_pci_default_filter,
-
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
-
-	.freeze			= ata_bmdma_freeze,
-	.thaw			= ata_bmdma_thaw,
-	.error_handler		= sis_error_handler,
-	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
 	.cable_detect		= sis_66_cable_detect,
-
-	.bmdma_setup		= ata_bmdma_setup,
-	.bmdma_start		= ata_bmdma_start,
-	.bmdma_stop		= ata_bmdma_stop,
-	.bmdma_status		= ata_bmdma_status,
-	.qc_prep		= ata_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
-	.data_xfer		= ata_data_xfer,
-
-	.irq_handler		= ata_interrupt,
-	.irq_clear		= ata_bmdma_irq_clear,
-	.irq_on			= ata_irq_on,
-
-	.port_start		= ata_sff_port_start,
 };
 
-static const struct ata_port_operations sis_100_ops = {
+static struct ata_port_operations sis_100_ops = {
+	.inherits		= &sis_base_ops,
 	.set_piomode		= sis_100_set_piomode,
 	.set_dmamode		= sis_100_set_dmamode,
-	.mode_filter		= ata_pci_default_filter,
-
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
-
-	.freeze			= ata_bmdma_freeze,
-	.thaw			= ata_bmdma_thaw,
-	.error_handler		= sis_error_handler,
-	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
 	.cable_detect		= sis_66_cable_detect,
-
-	.bmdma_setup		= ata_bmdma_setup,
-	.bmdma_start		= ata_bmdma_start,
-	.bmdma_stop		= ata_bmdma_stop,
-	.bmdma_status		= ata_bmdma_status,
-	.qc_prep		= ata_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
-	.data_xfer		= ata_data_xfer,
-
-	.irq_handler		= ata_interrupt,
-	.irq_clear		= ata_bmdma_irq_clear,
-	.irq_on			= ata_irq_on,
-
-	.port_start		= ata_sff_port_start,
 };
 
-static const struct ata_port_operations sis_66_ops = {
+static struct ata_port_operations sis_66_ops = {
+	.inherits		= &sis_base_ops,
 	.set_piomode		= sis_old_set_piomode,
 	.set_dmamode		= sis_66_set_dmamode,
-	.mode_filter		= ata_pci_default_filter,
-
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
 	.cable_detect		= sis_66_cable_detect,
-
-	.freeze			= ata_bmdma_freeze,
-	.thaw			= ata_bmdma_thaw,
-	.error_handler		= sis_error_handler,
-	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
-
-	.bmdma_setup		= ata_bmdma_setup,
-	.bmdma_start		= ata_bmdma_start,
-	.bmdma_stop		= ata_bmdma_stop,
-	.bmdma_status		= ata_bmdma_status,
-	.qc_prep		= ata_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
-	.data_xfer		= ata_data_xfer,
-
-	.irq_handler		= ata_interrupt,
-	.irq_clear		= ata_bmdma_irq_clear,
-	.irq_on			= ata_irq_on,
-
-	.port_start		= ata_sff_port_start,
 };
 
-static const struct ata_port_operations sis_old_ops = {
+static struct ata_port_operations sis_old_ops = {
+	.inherits		= &sis_base_ops,
 	.set_piomode		= sis_old_set_piomode,
 	.set_dmamode		= sis_old_set_dmamode,
-	.mode_filter		= ata_pci_default_filter,
-
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
-
-	.freeze			= ata_bmdma_freeze,
-	.thaw			= ata_bmdma_thaw,
-	.error_handler		= sis_error_handler,
-	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
 	.cable_detect		= ata_cable_40wire,
-
-	.bmdma_setup		= ata_bmdma_setup,
-	.bmdma_start		= ata_bmdma_start,
-	.bmdma_stop		= ata_bmdma_stop,
-	.bmdma_status		= ata_bmdma_status,
-	.qc_prep		= ata_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
-	.data_xfer		= ata_data_xfer,
-
-	.irq_handler		= ata_interrupt,
-	.irq_clear		= ata_bmdma_irq_clear,
-	.irq_on			= ata_irq_on,
-
-	.port_start		= ata_sff_port_start,
 };
 
 static const struct ata_port_info sis_info = {
-	.sht		= &sis_sht,
 	.flags		= ATA_FLAG_SLAVE_POSS,
 	.pio_mask	= 0x1f,	/* pio0-4 */
 	.mwdma_mask	= 0x07,
@@ -732,7 +559,6 @@
 	.port_ops	= &sis_old_ops,
 };
 static const struct ata_port_info sis_info33 = {
-	.sht		= &sis_sht,
 	.flags		= ATA_FLAG_SLAVE_POSS,
 	.pio_mask	= 0x1f,	/* pio0-4 */
 	.mwdma_mask	= 0x07,
@@ -740,42 +566,36 @@
 	.port_ops	= &sis_old_ops,
 };
 static const struct ata_port_info sis_info66 = {
-	.sht		= &sis_sht,
 	.flags		= ATA_FLAG_SLAVE_POSS,
 	.pio_mask	= 0x1f,	/* pio0-4 */
 	.udma_mask	= ATA_UDMA4,	/* UDMA 66 */
 	.port_ops	= &sis_66_ops,
 };
 static const struct ata_port_info sis_info100 = {
-	.sht		= &sis_sht,
 	.flags		= ATA_FLAG_SLAVE_POSS,
 	.pio_mask	= 0x1f,	/* pio0-4 */
 	.udma_mask	= ATA_UDMA5,
 	.port_ops	= &sis_100_ops,
 };
 static const struct ata_port_info sis_info100_early = {
-	.sht		= &sis_sht,
 	.flags		= ATA_FLAG_SLAVE_POSS,
 	.udma_mask	= ATA_UDMA5,
 	.pio_mask	= 0x1f,	/* pio0-4 */
 	.port_ops	= &sis_66_ops,
 };
 static const struct ata_port_info sis_info133 = {
-	.sht		= &sis_sht,
 	.flags		= ATA_FLAG_SLAVE_POSS,
 	.pio_mask	= 0x1f,	/* pio0-4 */
 	.udma_mask	= ATA_UDMA6,
 	.port_ops	= &sis_133_ops,
 };
 const struct ata_port_info sis_info133_for_sata = {
-	.sht		= &sis_sht,
 	.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
 	.pio_mask	= 0x1f,	/* pio0-4 */
 	.udma_mask	= ATA_UDMA6,
 	.port_ops	= &sis_133_for_sata_ops,
 };
 static const struct ata_port_info sis_info133_early = {
-	.sht		= &sis_sht,
 	.flags		= ATA_FLAG_SLAVE_POSS,
 	.pio_mask	= 0x1f,	/* pio0-4 */
 	.udma_mask	= ATA_UDMA6,
@@ -857,11 +677,11 @@
 static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	static int printed_version;
-	struct ata_port_info port;
-	const struct ata_port_info *ppi[] = { &port, NULL };
+	const struct ata_port_info *ppi[] = { NULL, NULL };
 	struct pci_dev *host = NULL;
 	struct sis_chipset *chipset = NULL;
 	struct sis_chipset *sets;
+	int rc;
 
 	static struct sis_chipset sis_chipsets[] = {
 
@@ -914,8 +734,11 @@
 		dev_printk(KERN_DEBUG, &pdev->dev,
 			   "version " DRV_VERSION "\n");
 
-	/* We have to find the bridge first */
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
 
+	/* We have to find the bridge first */
 	for (sets = &sis_chipsets[0]; sets->device; sets++) {
 		host = pci_get_device(PCI_VENDOR_ID_SI, sets->device, NULL);
 		if (host != NULL) {
@@ -994,12 +817,11 @@
 	if (chipset == NULL)
 		return -ENODEV;
 
-	port = *chipset->info;
-	port.private_data = chipset;
+	ppi[0] = chipset->info;
 
 	sis_fixup(pdev, chipset);
 
-	return ata_pci_init_one(pdev, ppi);
+	return ata_pci_sff_init_one(pdev, ppi, &sis_sht, chipset);
 }
 
 static const struct pci_device_id sis_pci_tbl[] = {
diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
index 81ef207..70d94fb 100644
--- a/drivers/ata/pata_sl82c105.c
+++ b/drivers/ata/pata_sl82c105.c
@@ -60,13 +60,7 @@
 
 	if (ap->port_no && !pci_test_config_bits(pdev, &sl82c105_enable_bits[ap->port_no]))
 		return -ENOENT;
-	return ata_std_prereset(link, deadline);
-}
-
-
-static void sl82c105_error_handler(struct ata_port *ap)
-{
-	ata_bmdma_drive_eh(ap, sl82c105_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
+	return ata_sff_prereset(link, deadline);
 }
 
 
@@ -235,55 +229,17 @@
 }
 
 static struct scsi_host_template sl82c105_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
 };
 
 static struct ata_port_operations sl82c105_port_ops = {
-	.set_piomode	= sl82c105_set_piomode,
-	.mode_filter	= ata_pci_default_filter,
-
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= sl82c105_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-	.cable_detect	= ata_cable_40wire,
-
-	.bmdma_setup 	= ata_bmdma_setup,
+	.inherits	= &ata_bmdma_port_ops,
+	.qc_defer	= sl82c105_qc_defer,
 	.bmdma_start 	= sl82c105_bmdma_start,
 	.bmdma_stop	= sl82c105_bmdma_stop,
-	.bmdma_status 	= ata_bmdma_status,
-
-	.qc_defer	= sl82c105_qc_defer,
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
+	.cable_detect	= ata_cable_40wire,
+	.set_piomode	= sl82c105_set_piomode,
+	.prereset	= sl82c105_pre_reset,
 };
 
 /**
@@ -327,14 +283,12 @@
 static int sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 {
 	static const struct ata_port_info info_dma = {
-		.sht = &sl82c105_sht,
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
 		.port_ops = &sl82c105_port_ops
 	};
 	static const struct ata_port_info info_early = {
-		.sht = &sl82c105_sht,
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.port_ops = &sl82c105_port_ops
@@ -344,6 +298,11 @@
 					       NULL };
 	u32 val;
 	int rev;
+	int rc;
+
+	rc = pcim_enable_device(dev);
+	if (rc)
+		return rc;
 
 	rev = sl82c105_bridge_revision(dev);
 
@@ -358,7 +317,7 @@
 	val |= CTRL_P0EN | CTRL_P0F16 | CTRL_P1F16;
 	pci_write_config_dword(dev, 0x40, val);
 
-	return ata_pci_init_one(dev, ppi);
+	return ata_pci_sff_init_one(dev, ppi, &sl82c105_sht, NULL);
 }
 
 static const struct pci_device_id sl82c105[] = {
diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
index 403eafc..b181261 100644
--- a/drivers/ata/pata_triflex.c
+++ b/drivers/ata/pata_triflex.c
@@ -66,16 +66,11 @@
 	if (!pci_test_config_bits(pdev, &triflex_enable_bits[ap->port_no]))
 		return -ENOENT;
 
-	return ata_std_prereset(link, deadline);
+	return ata_sff_prereset(link, deadline);
 }
 
 
 
-static void triflex_error_handler(struct ata_port *ap)
-{
-	ata_bmdma_drive_eh(ap, triflex_prereset, ata_std_softreset, NULL, ata_std_postreset);
-}
-
 /**
  *	triflex_load_timing		-	timing configuration
  *	@ap: ATA interface
@@ -180,60 +175,21 @@
 }
 
 static struct scsi_host_template triflex_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
 };
 
 static struct ata_port_operations triflex_port_ops = {
-	.set_piomode	= triflex_set_piomode,
-	.mode_filter	= ata_pci_default_filter,
-
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= triflex_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-	.cable_detect	= ata_cable_40wire,
-
-	.bmdma_setup 	= ata_bmdma_setup,
+	.inherits	= &ata_bmdma_port_ops,
 	.bmdma_start 	= triflex_bmdma_start,
 	.bmdma_stop	= triflex_bmdma_stop,
-	.bmdma_status 	= ata_bmdma_status,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
+	.cable_detect	= ata_cable_40wire,
+	.set_piomode	= triflex_set_piomode,
+	.prereset	= triflex_prereset,
 };
 
 static int triflex_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 {
 	static const struct ata_port_info info = {
-		.sht = &triflex_sht,
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
@@ -245,7 +201,7 @@
 	if (!printed_version++)
 		dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n");
 
-	return ata_pci_init_one(dev, ppi);
+	return ata_pci_sff_init_one(dev, ppi, &triflex_sht, NULL);
 }
 
 static const struct pci_device_id triflex[] = {
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index d119a68..d484074 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -210,23 +210,11 @@
 			return -ENOENT;
 	}
 
-	return ata_std_prereset(link, deadline);
+	return ata_sff_prereset(link, deadline);
 }
 
 
 /**
- *	via_error_handler		-	reset for VIA chips
- *	@ap: ATA port
- *
- *	Handle the reset callback for the later chips with cable detect
- */
-
-static void via_error_handler(struct ata_port *ap)
-{
-	ata_bmdma_drive_eh(ap, via_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
-}
-
-/**
  *	via_do_set_mode	-	set initial PIO mode data
  *	@ap: ATA interface
  *	@adev: ATA device
@@ -335,89 +323,20 @@
 }
 
 static struct scsi_host_template via_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
 };
 
 static struct ata_port_operations via_port_ops = {
+	.inherits	= &ata_bmdma_port_ops,
+	.cable_detect	= via_cable_detect,
 	.set_piomode	= via_set_piomode,
 	.set_dmamode	= via_set_dmamode,
-	.mode_filter	= ata_pci_default_filter,
-
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= via_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-	.cable_detect	= via_cable_detect,
-
-	.bmdma_setup 	= ata_bmdma_setup,
-	.bmdma_start 	= ata_bmdma_start,
-	.bmdma_stop	= ata_bmdma_stop,
-	.bmdma_status 	= ata_bmdma_status,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
+	.prereset	= via_pre_reset,
 };
 
 static struct ata_port_operations via_port_ops_noirq = {
-	.set_piomode	= via_set_piomode,
-	.set_dmamode	= via_set_dmamode,
-	.mode_filter	= ata_pci_default_filter,
-
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= via_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-	.cable_detect	= via_cable_detect,
-
-	.bmdma_setup 	= ata_bmdma_setup,
-	.bmdma_start 	= ata_bmdma_start,
-	.bmdma_stop	= ata_bmdma_stop,
-	.bmdma_status 	= ata_bmdma_status,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= ata_data_xfer_noirq,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
+	.inherits	= &via_port_ops,
+	.sff_data_xfer	= ata_sff_data_xfer_noirq,
 };
 
 /**
@@ -467,7 +386,6 @@
 {
 	/* Early VIA without UDMA support */
 	static const struct ata_port_info via_mwdma_info = {
-		.sht = &via_sht,
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
@@ -475,7 +393,6 @@
 	};
 	/* Ditto with IRQ masking required */
 	static const struct ata_port_info via_mwdma_info_borked = {
-		.sht = &via_sht,
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
@@ -483,7 +400,6 @@
 	};
 	/* VIA UDMA 33 devices (and borked 66) */
 	static const struct ata_port_info via_udma33_info = {
-		.sht = &via_sht,
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
@@ -492,7 +408,6 @@
 	};
 	/* VIA UDMA 66 devices */
 	static const struct ata_port_info via_udma66_info = {
-		.sht = &via_sht,
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
@@ -501,7 +416,6 @@
 	};
 	/* VIA UDMA 100 devices */
 	static const struct ata_port_info via_udma100_info = {
-		.sht = &via_sht,
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
@@ -510,24 +424,27 @@
 	};
 	/* UDMA133 with bad AST (All current 133) */
 	static const struct ata_port_info via_udma133_info = {
-		.sht = &via_sht,
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
 		.udma_mask = ATA_UDMA6,	/* FIXME: should check north bridge */
 		.port_ops = &via_port_ops
 	};
-	struct ata_port_info type;
-	const struct ata_port_info *ppi[] = { &type, NULL };
+	const struct ata_port_info *ppi[] = { NULL, NULL };
 	struct pci_dev *isa = NULL;
 	const struct via_isa_bridge *config;
 	static int printed_version;
 	u8 enable;
 	u32 timing;
+	int rc;
 
 	if (!printed_version++)
 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
 
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
 	/* To find out how the IDE will behave and what features we
 	   actually have to look at the bridge not the IDE controller */
 	for (config = via_isa_bridges; config->id; config++)
@@ -561,25 +478,25 @@
 	switch(config->flags & VIA_UDMA) {
 		case VIA_UDMA_NONE:
 			if (config->flags & VIA_NO_UNMASK)
-				type = via_mwdma_info_borked;
+				ppi[0] = &via_mwdma_info_borked;
 			else
-				type = via_mwdma_info;
+				ppi[0] = &via_mwdma_info;
 			break;
 		case VIA_UDMA_33:
-			type = via_udma33_info;
+			ppi[0] = &via_udma33_info;
 			break;
 		case VIA_UDMA_66:
-			type = via_udma66_info;
+			ppi[0] = &via_udma66_info;
 			/* The 66 MHz devices require we enable the clock */
 			pci_read_config_dword(pdev, 0x50, &timing);
 			timing |= 0x80008;
 			pci_write_config_dword(pdev, 0x50, timing);
 			break;
 		case VIA_UDMA_100:
-			type = via_udma100_info;
+			ppi[0] = &via_udma100_info;
 			break;
 		case VIA_UDMA_133:
-			type = via_udma133_info;
+			ppi[0] = &via_udma133_info;
 			break;
 		default:
 			WARN_ON(1);
@@ -594,9 +511,7 @@
 	}
 
 	/* We have established the device type, now fire it up */
-	type.private_data = (void *)config;
-
-	return ata_pci_init_one(pdev, ppi);
+	return ata_pci_sff_init_one(pdev, ppi, &via_sht, (void *)config);
 }
 
 #ifdef CONFIG_PM
@@ -615,6 +530,11 @@
 	u32 timing;
 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
 	const struct via_isa_bridge *config = host->private_data;
+	int rc;
+
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc)
+		return rc;
 
 	via_config_fifo(pdev, config->flags);
 
@@ -630,7 +550,9 @@
 		timing &= ~0x80008;
 		pci_write_config_dword(pdev, 0x50, timing);
 	}
-	return ata_pci_device_resume(pdev);
+
+	ata_host_resume(host);
+	return 0;
 }
 #endif
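pata_via, like pata_sis earlier, no longer copies a port_info onto the stack just to smuggle per-chipset data through .private_data; the ppi[] slots point straight at the const tables, and the config travels as the host_priv argument of ata_pci_sff_init_one(), ending up in host->private_data, exactly where via_reinit_one() already reads it. The consumer side looks like this (the helper is hypothetical; only the lookup pattern matters):

/* Sketch: with the config handed to ata_pci_sff_init_one() as
 * host_priv, any port callback can reach it through the host's
 * private data rather than through a mutated copy of the port_info.
 */
static int example_needs_irq_masking(struct ata_port *ap)
{
	const struct via_isa_bridge *config = ap->host->private_data;

	return (config->flags & VIA_NO_UNMASK) != 0;
}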
 
diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
index 99c92ed..6e52a35 100644
--- a/drivers/ata/pata_winbond.c
+++ b/drivers/ata/pata_winbond.c
@@ -116,53 +116,20 @@
 			buflen += 4 - slop;
 		}
 	} else
-		buflen = ata_data_xfer(dev, buf, buflen, rw);
+		buflen = ata_sff_data_xfer(dev, buf, buflen, rw);
 
 	return buflen;
 }
 
 static struct scsi_host_template winbond_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_PIO_SHT(DRV_NAME),
 };
 
 static struct ata_port_operations winbond_port_ops = {
-	.set_piomode	= winbond_set_piomode,
-
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= ata_bmdma_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.inherits	= &ata_sff_port_ops,
+	.sff_data_xfer	= winbond_data_xfer,
 	.cable_detect	= ata_cable_40wire,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= winbond_data_xfer,
-
-	.irq_clear	= ata_bmdma_irq_clear,
-	.irq_on		= ata_irq_on,
-
-	.port_start	= ata_sff_port_start,
+	.set_piomode	= winbond_set_piomode,
 };
 
 /**
@@ -231,7 +198,7 @@
 		ap->ioaddr.cmd_addr = cmd_addr;
 		ap->ioaddr.altstatus_addr = ctl_addr;
 		ap->ioaddr.ctl_addr = ctl_addr;
-		ata_std_ports(&ap->ioaddr);
+		ata_sff_std_ports(&ap->ioaddr);
 
 		/* hook in a private data structure per channel */
 		host->private_data = &winbond_data[nr_winbond_host];
@@ -239,7 +206,7 @@
 		winbond_data[nr_winbond_host].platform_dev = pdev;
 
 		/* activate */
-		rc = ata_host_activate(host, 14 + i, ata_interrupt, 0,
+		rc = ata_host_activate(host, 14 + i, ata_sff_interrupt, 0,
 				       &winbond_sht);
 		if (rc)
 			goto err_unregister;
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
index 8e1b7e9..be53545 100644
--- a/drivers/ata/pdc_adma.c
+++ b/drivers/ata/pdc_adma.c
@@ -131,56 +131,33 @@
 static int adma_ata_init_one(struct pci_dev *pdev,
 				const struct pci_device_id *ent);
 static int adma_port_start(struct ata_port *ap);
-static void adma_host_stop(struct ata_host *host);
 static void adma_port_stop(struct ata_port *ap);
 static void adma_qc_prep(struct ata_queued_cmd *qc);
 static unsigned int adma_qc_issue(struct ata_queued_cmd *qc);
 static int adma_check_atapi_dma(struct ata_queued_cmd *qc);
-static void adma_bmdma_stop(struct ata_queued_cmd *qc);
-static u8 adma_bmdma_status(struct ata_port *ap);
-static void adma_irq_clear(struct ata_port *ap);
 static void adma_freeze(struct ata_port *ap);
 static void adma_thaw(struct ata_port *ap);
-static void adma_error_handler(struct ata_port *ap);
+static int adma_prereset(struct ata_link *link, unsigned long deadline);
 
 static struct scsi_host_template adma_ata_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
-	.proc_name		= DRV_NAME,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
+	ATA_BASE_SHT(DRV_NAME),
 	.sg_tablesize		= LIBATA_MAX_PRD,
 	.dma_boundary		= ADMA_DMA_BOUNDARY,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.use_clustering		= ENABLE_CLUSTERING,
-	.emulated		= ATA_SHT_EMULATED,
 };
 
-static const struct ata_port_operations adma_ata_ops = {
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.exec_command		= ata_exec_command,
-	.check_status		= ata_check_status,
-	.dev_select		= ata_std_dev_select,
+static struct ata_port_operations adma_ata_ops = {
+	.inherits		= &ata_sff_port_ops,
+
 	.check_atapi_dma	= adma_check_atapi_dma,
-	.data_xfer		= ata_data_xfer,
 	.qc_prep		= adma_qc_prep,
 	.qc_issue		= adma_qc_issue,
+
 	.freeze			= adma_freeze,
 	.thaw			= adma_thaw,
-	.error_handler		= adma_error_handler,
-	.irq_clear		= adma_irq_clear,
-	.irq_on			= ata_irq_on,
+	.prereset		= adma_prereset,
+
 	.port_start		= adma_port_start,
 	.port_stop		= adma_port_stop,
-	.host_stop		= adma_host_stop,
-	.bmdma_stop		= adma_bmdma_stop,
-	.bmdma_status		= adma_bmdma_status,
 };
 
 static struct ata_port_info adma_port_info[] = {
@@ -213,21 +190,6 @@
 	return 1;	/* ATAPI DMA not yet supported */
 }
 
-static void adma_bmdma_stop(struct ata_queued_cmd *qc)
-{
-	/* nothing */
-}
-
-static u8 adma_bmdma_status(struct ata_port *ap)
-{
-	return 0;
-}
-
-static void adma_irq_clear(struct ata_port *ap)
-{
-	/* nothing */
-}
-
 static void adma_reset_engine(struct ata_port *ap)
 {
 	void __iomem *chan = ADMA_PORT_REGS(ap);
@@ -246,7 +208,7 @@
 
 	/* mask/clear ATA interrupts */
 	writeb(ATA_NIEN, ap->ioaddr.ctl_addr);
-	ata_check_status(ap);
+	ata_sff_check_status(ap);
 
 	/* reset the ADMA engine */
 	adma_reset_engine(ap);
@@ -281,7 +243,7 @@
 
 	/* mask/clear ATA interrupts */
 	writeb(ATA_NIEN, ap->ioaddr.ctl_addr);
-	ata_check_status(ap);
+	ata_sff_check_status(ap);
 
 	/* reset ADMA to idle state */
 	writew(aPIOMD4 | aNIEN | aRSTADM, chan + ADMA_CONTROL);
@@ -304,13 +266,7 @@
 		pp->state = adma_state_mmio;
 	adma_reinit_engine(ap);
 
-	return ata_std_prereset(link, deadline);
-}
-
-static void adma_error_handler(struct ata_port *ap)
-{
-	ata_do_eh(ap, adma_prereset, ata_std_softreset, NULL,
-		  ata_std_postreset);
+	return ata_sff_prereset(link, deadline);
 }
 
 static int adma_fill_sg(struct ata_queued_cmd *qc)
@@ -366,7 +322,7 @@
 
 	adma_enter_reg_mode(qc->ap);
 	if (qc->tf.protocol != ATA_PROT_DMA) {
-		ata_qc_prep(qc);
+		ata_sff_qc_prep(qc);
 		return;
 	}
 
@@ -465,7 +421,7 @@
 	}
 
 	pp->state = adma_state_mmio;
-	return ata_qc_issue_prot(qc);
+	return ata_sff_qc_issue(qc);
 }
 
 static inline unsigned int adma_intr_pkt(struct ata_host *host)
@@ -536,7 +492,7 @@
 			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
 
 				/* check main status, clearing INTRQ */
-				u8 status = ata_check_status(ap);
+				u8 status = ata_sff_check_status(ap);
 				if ((status & ATA_BUSY))
 					continue;
 				DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
@@ -633,14 +589,6 @@
 	adma_reset_engine(ap);
 }
 
-static void adma_host_stop(struct ata_host *host)
-{
-	unsigned int port_no;
-
-	for (port_no = 0; port_no < ADMA_PORTS; ++port_no)
-		adma_reset_engine(host->ports[port_no]);
-}
-
 static void adma_host_init(struct ata_host *host, unsigned int chip_id)
 {
 	unsigned int port_no;
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 9d1e3ca..fddd346 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -35,7 +35,6 @@
 	SATA_FSL_HOST_FLAGS	= (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
 				ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
 				ATA_FLAG_NCQ),
-	SATA_FSL_HOST_LFLAGS	= ATA_LFLAG_SKIP_D2H_BSY,
 
 	SATA_FSL_MAX_CMDS	= SATA_FSL_QUEUE_DEPTH,
 	SATA_FSL_CMD_HDR_SIZE	= 16,	/* 4 DWORDS */
@@ -245,17 +244,6 @@
 	dma_addr_t cmdslot_paddr;
 	struct command_desc *cmdentry;
 	dma_addr_t cmdentry_paddr;
-
-	/*
-	 * SATA FSL controller has a Status FIS which should contain the
-	 * received D2H FIS & taskfile registers. This SFIS is present in
-	 * the command descriptor, and to have a ready reference to it,
-	 * we are caching it here, quite similar to what is done in H/W on
-	 * AHCI compliant devices by copying taskfile fields to a 32-bit
-	 * register.
-	 */
-
-	struct ata_taskfile tf;
 };
 
 /*
@@ -465,6 +453,20 @@
 	return 0;
 }
 
+static bool sata_fsl_qc_fill_rtf(struct ata_queued_cmd *qc)
+{
+	struct sata_fsl_port_priv *pp = qc->ap->private_data;
+	struct sata_fsl_host_priv *host_priv = qc->ap->host->private_data;
+	void __iomem *hcr_base = host_priv->hcr_base;
+	unsigned int tag = sata_fsl_tag(qc->tag, hcr_base);
+	struct command_desc *cd;
+
+	cd = pp->cmdentry + tag;
+
+	ata_tf_from_fis(cd->sfis, &qc->result_tf);
+	return true;
+}
+
 static int sata_fsl_scr_write(struct ata_port *ap, unsigned int sc_reg_in,
 			       u32 val)
 {
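sata_fsl_qc_fill_rtf() above replaces the per-port cached taskfile that the following hunks delete: under the new completion path, libata invokes ->qc_fill_rtf() only when a command's result taskfile is actually wanted (ATA_QCFLAG_RESULT_TF), and ata_tf_from_fis() decodes the D2H FIS the controller left in the command descriptor. For reference, a decode of the standard D2H register-FIS byte layout that such a helper walks, with simplified stand-in types rather than libata's struct ata_taskfile:

/* Illustrative decode of a D2H register FIS into taskfile fields,
 * roughly what ata_tf_from_fis() does (offsets per the SATA spec;
 * the struct below is a simplified stand-in).
 */
#include <stdint.h>

struct taskfile_sketch {
	uint8_t status, error;
	uint8_t lbal, lbam, lbah, device;
	uint8_t hob_lbal, hob_lbam, hob_lbah;
	uint8_t nsect, hob_nsect;
};

static void tf_from_d2h_fis(const uint8_t *fis, struct taskfile_sketch *tf)
{
	tf->status	= fis[2];
	tf->error	= fis[3];

	tf->lbal	= fis[4];
	tf->lbam	= fis[5];
	tf->lbah	= fis[6];
	tf->device	= fis[7];

	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->nsect	= fis[12];
	tf->hob_nsect	= fis[13];
}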
@@ -556,38 +558,6 @@
 		ioread32(hcr_base + HCONTROL), ioread32(hcr_base + HSTATUS));
 }
 
-/*
- * NOTE : 1st D2H FIS from device does not update sfis in command descriptor.
- */
-static inline void sata_fsl_cache_taskfile_from_d2h_fis(struct ata_queued_cmd
-							*qc,
-							struct ata_port *ap)
-{
-	struct sata_fsl_port_priv *pp = ap->private_data;
-	struct sata_fsl_host_priv *host_priv = ap->host->private_data;
-	void __iomem *hcr_base = host_priv->hcr_base;
-	unsigned int tag = sata_fsl_tag(qc->tag, hcr_base);
-	struct command_desc *cd;
-
-	cd = pp->cmdentry + tag;
-
-	ata_tf_from_fis(cd->sfis, &pp->tf);
-}
-
-static u8 sata_fsl_check_status(struct ata_port *ap)
-{
-	struct sata_fsl_port_priv *pp = ap->private_data;
-
-	return pp->tf.command;
-}
-
-static void sata_fsl_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
-{
-	struct sata_fsl_port_priv *pp = ap->private_data;
-
-	*tf = pp->tf;
-}
-
 static int sata_fsl_port_start(struct ata_port *ap)
 {
 	struct device *dev = ap->host->dev;
@@ -708,6 +678,15 @@
 	return ata_dev_classify(&tf);
 }
 
+static int sata_fsl_prereset(struct ata_link *link, unsigned long deadline)
+{
+	/* FIXME: Never skip softreset; sata_fsl_softreset() is a
+	 * combination of soft and hard resets.  sata_fsl_softreset()
+	 * needs to be split into separate soft and hard resets.
+	 */
+	return 0;
+}
+
 static int sata_fsl_softreset(struct ata_link *link, unsigned int *class,
 			      unsigned long deadline)
 {
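For reference, the prereset return value drives libata EH: 0 means
"proceed with the reset methods", while -ENOENT means "nothing attached,
skip the reset entirely".  So the stub above forces a reset on every EH
pass, which is exactly what the FIXME wants until the combined reset is
split.  A heavily simplified sketch of the caller (an assumption modeled
on ata_eh_reset(), not verbatim):

	rc = ap->ops->prereset(link, deadline);
	if (rc == -ENOENT)
		return 0;			/* skip reset */
	if (rc == 0 && ap->ops->softreset)
		rc = ap->ops->softreset(link, classes, deadline);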
@@ -913,16 +892,6 @@
 	return -EIO;
 }
 
-static void sata_fsl_error_handler(struct ata_port *ap)
-{
-
-	DPRINTK("in xx_error_handler\n");
-
-	/* perform recovery */
-	ata_do_eh(ap, ata_std_prereset, sata_fsl_softreset, sata_std_hardreset,
-		  ata_std_postreset);
-}
-
 static void sata_fsl_post_internal_cmd(struct ata_queued_cmd *qc)
 {
 	if (qc->flags & ATA_QCFLAG_FAILED)
@@ -934,11 +903,6 @@
 	}
 }
 
-static void sata_fsl_irq_clear(struct ata_port *ap)
-{
-	/* unused */
-}
-
 static void sata_fsl_error_intr(struct ata_port *ap)
 {
 	struct ata_link *link = &ap->link;
@@ -996,7 +960,7 @@
 	/* handle fatal errors */
 	if (hstatus & FATAL_ERROR_DECODE) {
 		err_mask |= AC_ERR_ATA_BUS;
-		action |= ATA_EH_SOFTRESET;
+		action |= ATA_EH_RESET;
 		/* how will fatal error interrupts be completed ?? */
 		freeze = 1;
 	}
@@ -1013,10 +977,9 @@
 	/* record error info */
 	qc = ata_qc_from_tag(ap, link->active_tag);
 
-	if (qc) {
-		sata_fsl_cache_taskfile_from_d2h_fis(qc, qc->ap);
+	if (qc)
 		qc->err_mask |= err_mask;
-	} else
+	else
 		ehi->err_mask |= err_mask;
 
 	ehi->action |= action;
@@ -1029,14 +992,6 @@
 		ata_port_abort(ap);
 }
 
-static void sata_fsl_qc_complete(struct ata_queued_cmd *qc)
-{
-	if (qc->flags & ATA_QCFLAG_RESULT_TF) {
-		DPRINTK("xx_qc_complete called\n");
-		sata_fsl_cache_taskfile_from_d2h_fis(qc, qc->ap);
-	}
-}
-
 static void sata_fsl_host_intr(struct ata_port *ap)
 {
 	struct ata_link *link = &ap->link;
@@ -1077,10 +1032,8 @@
 		for (i = 0; i < SATA_FSL_QUEUE_DEPTH; i++) {
 			if (qc_active & (1 << i)) {
 				qc = ata_qc_from_tag(ap, i);
-				if (qc) {
-					sata_fsl_qc_complete(qc);
+				if (qc)
 					ata_qc_complete(qc);
-				}
 				DPRINTK
 				    ("completing ncq cmd,tag=%d,CC=0x%x,CA=0x%x\n",
 				     i, ioread32(hcr_base + CC),
@@ -1096,10 +1049,8 @@
 		DPRINTK("completing non-ncq cmd, tag=%d,CC=0x%x\n",
 			link->active_tag, ioread32(hcr_base + CC));
 
-		if (qc) {
-			sata_fsl_qc_complete(qc);
+		if (qc)
 			ata_qc_complete(qc);
-		}
 	} else {
 		/* Spurious Interrupt!! */
 		DPRINTK("spurious interrupt!!, CC = 0x%x\n",
@@ -1197,41 +1148,26 @@
  * scsi mid-layer and libata interface structures
  */
 static struct scsi_host_template sata_fsl_sht = {
-	.module = THIS_MODULE,
-	.name = "sata_fsl",
-	.ioctl = ata_scsi_ioctl,
-	.queuecommand = ata_scsi_queuecmd,
-	.change_queue_depth = ata_scsi_change_queue_depth,
+	ATA_NCQ_SHT("sata_fsl"),
 	.can_queue = SATA_FSL_QUEUE_DEPTH,
-	.this_id = ATA_SHT_THIS_ID,
 	.sg_tablesize = SATA_FSL_MAX_PRD_USABLE,
-	.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
-	.emulated = ATA_SHT_EMULATED,
-	.use_clustering = ATA_SHT_USE_CLUSTERING,
-	.proc_name = "sata_fsl",
 	.dma_boundary = ATA_DMA_BOUNDARY,
-	.slave_configure = ata_scsi_slave_config,
-	.slave_destroy = ata_scsi_slave_destroy,
-	.bios_param = ata_std_bios_param,
 };
 
 static const struct ata_port_operations sata_fsl_ops = {
-	.check_status = sata_fsl_check_status,
-	.check_altstatus = sata_fsl_check_status,
-	.dev_select = ata_noop_dev_select,
-
-	.tf_read = sata_fsl_tf_read,
+	.inherits = &sata_port_ops,
 
 	.qc_prep = sata_fsl_qc_prep,
 	.qc_issue = sata_fsl_qc_issue,
-	.irq_clear = sata_fsl_irq_clear,
+	.qc_fill_rtf = sata_fsl_qc_fill_rtf,
 
 	.scr_read = sata_fsl_scr_read,
 	.scr_write = sata_fsl_scr_write,
 
 	.freeze = sata_fsl_freeze,
 	.thaw = sata_fsl_thaw,
-	.error_handler = sata_fsl_error_handler,
+	.prereset = sata_fsl_prereset,
+	.softreset = sata_fsl_softreset,
 	.post_internal_cmd = sata_fsl_post_internal_cmd,
 
 	.port_start = sata_fsl_port_start,
@@ -1241,7 +1177,6 @@
 static const struct ata_port_info sata_fsl_port_info[] = {
 	{
 	 .flags = SATA_FSL_HOST_FLAGS,
-	 .link_flags = SATA_FSL_HOST_LFLAGS,
 	 .pio_mask = 0x1f,	/* pio 0-4 */
 	 .udma_mask = 0x7f,	/* udma 0-6 */
 	 .port_ops = &sata_fsl_ops,
@@ -1297,11 +1232,6 @@
 	/* host->iomap is not used currently */
 	host->private_data = host_priv;
 
-	/* setup port(s) */
-
-	host->ports[0]->ioaddr.cmd_addr = host_priv->hcr_base;
-	host->ports[0]->ioaddr.scr_addr = host_priv->ssr_base;
-
 	/* initialize host controller */
 	sata_fsl_init_controller(host);
 
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index 59e65ed..d27bb9a 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -109,21 +109,7 @@
 };
 
 static struct scsi_host_template inic_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
 };
 
 static const int scr_map[] = {
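The boilerplate removed here is exactly what the new SHT macros carry.
Reconstructing them from the deleted initializers (the authoritative
definitions live in <linux/libata.h>; treat this expansion as a close
approximation):

	#define ATA_BASE_SHT(drv_name)				\
		.module		= THIS_MODULE,			\
		.name		= drv_name,			\
		.ioctl		= ata_scsi_ioctl,		\
		.queuecommand	= ata_scsi_queuecmd,		\
		.can_queue	= ATA_DEF_QUEUE,		\
		.this_id	= ATA_SHT_THIS_ID,		\
		.cmd_per_lun	= ATA_SHT_CMD_PER_LUN,		\
		.emulated	= ATA_SHT_EMULATED,		\
		.use_clustering	= ATA_SHT_USE_CLUSTERING,	\
		.proc_name	= drv_name,			\
		.slave_configure = ata_scsi_slave_config,	\
		.slave_destroy	= ata_scsi_slave_destroy,	\
		.bios_param	= ata_std_bios_param

	#define ATA_BMDMA_SHT(drv_name)				\
		ATA_BASE_SHT(drv_name),				\
		.sg_tablesize	= LIBATA_MAX_PRD,		\
		.dma_boundary	= ATA_DMA_BOUNDARY

Drivers with non-default limits (queue depth, sg table size, DMA
boundary) simply override those fields after the macro, as sata_fsl and
sata_mv do elsewhere in this series.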
@@ -236,7 +222,7 @@
 	writeb(pp->cached_prdctl, port_base + PORT_PRD_CTL);
 
 	/* issue r/w command */
-	ap->ops->exec_command(ap, &qc->tf);
+	ap->ops->sff_exec_command(ap, &qc->tf);
 }
 
 static void inic_bmdma_start(struct ata_queued_cmd *qc)
@@ -266,11 +252,6 @@
 	return ATA_DMA_INTR;
 }
 
-static void inic_irq_clear(struct ata_port *ap)
-{
-	/* noop */
-}
-
 static void inic_host_intr(struct ata_port *ap)
 {
 	void __iomem *port_base = inic_port_base(ap);
@@ -286,14 +267,14 @@
 			ata_qc_from_tag(ap, ap->link.active_tag);
 
 		if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
-			ata_chk_status(ap);	/* clear ATA interrupt */
+			ap->ops->sff_check_status(ap); /* clear ATA interrupt */
 			return;
 		}
 
-		if (likely(ata_host_intr(ap, qc)))
+		if (likely(ata_sff_host_intr(ap, qc)))
 			return;
 
-		ata_chk_status(ap);	/* clear ATA interrupt */
+		ap->ops->sff_check_status(ap); /* clear ATA interrupt */
 		ata_port_printk(ap, KERN_WARNING, "unhandled "
 				"interrupt, irq_stat=%x\n", irq_stat);
 		return;
@@ -370,12 +351,12 @@
 	 */
 	if (unlikely(qc->tf.command == ATA_CMD_ID_ATA ||
 		     qc->tf.command == ATA_CMD_ID_ATAPI)) {
-		u8 stat = ata_chk_status(ap);
+		u8 stat = ap->ops->sff_check_status(ap);
 		if (stat == 0x7f || stat == 0xff)
 			return AC_ERR_HSM;
 	}
 
-	return ata_qc_issue_prot(qc);
+	return ata_sff_qc_issue(qc);
 }
 
 static void inic_freeze(struct ata_port *ap)
@@ -384,7 +365,7 @@
 
 	__inic_set_pirq_mask(ap, PIRQ_MASK_FREEZE);
 
-	ata_chk_status(ap);
+	ap->ops->sff_check_status(ap);
 	writeb(0xff, port_base + PORT_IRQ_STAT);
 
 	readb(port_base + PORT_IRQ_STAT); /* flush */
@@ -394,7 +375,7 @@
 {
 	void __iomem *port_base = inic_port_base(ap);
 
-	ata_chk_status(ap);
+	ap->ops->sff_check_status(ap);
 	writeb(0xff, port_base + PORT_IRQ_STAT);
 
 	__inic_set_pirq_mask(ap, PIRQ_MASK_OTHER);
@@ -436,10 +417,8 @@
 	if (ata_link_online(link)) {
 		struct ata_taskfile tf;
 
-		/* wait a while before checking status */
-		ata_wait_after_reset(ap, deadline);
-
-		rc = ata_wait_ready(ap, deadline);
+		/* wait for link to become ready */
+		rc = ata_sff_wait_after_reset(link, 1, deadline);
 		/* link occupied, -ENODEV too is an error */
 		if (rc) {
 			ata_link_printk(link, KERN_WARNING, "device not ready "
@@ -447,10 +426,8 @@
 			return rc;
 		}
 
-		ata_tf_read(ap, &tf);
+		ata_sff_tf_read(ap, &tf);
 		*class = ata_dev_classify(&tf);
-		if (*class == ATA_DEV_UNKNOWN)
-			*class = ATA_DEV_NONE;
 	}
 
 	return 0;
@@ -471,8 +448,7 @@
 	spin_unlock_irqrestore(ap->lock, flags);
 
 	/* PIO and DMA engines have been stopped, perform recovery */
-	ata_do_eh(ap, ata_std_prereset, NULL, inic_hardreset,
-		  ata_std_postreset);
+	ata_std_error_handler(ap);
 }
 
 static void inic_post_internal_cmd(struct ata_queued_cmd *qc)
@@ -541,35 +517,26 @@
 }
 
 static struct ata_port_operations inic_port_ops = {
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
-
-	.scr_read		= inic_scr_read,
-	.scr_write		= inic_scr_write,
+	.inherits		= &ata_sff_port_ops,
 
 	.bmdma_setup		= inic_bmdma_setup,
 	.bmdma_start		= inic_bmdma_start,
 	.bmdma_stop		= inic_bmdma_stop,
 	.bmdma_status		= inic_bmdma_status,
-
-	.irq_clear		= inic_irq_clear,
-	.irq_on			= ata_irq_on,
-
-	.qc_prep	 	= ata_qc_prep,
 	.qc_issue		= inic_qc_issue,
-	.data_xfer		= ata_data_xfer,
 
 	.freeze			= inic_freeze,
 	.thaw			= inic_thaw,
+	.softreset		= ATA_OP_NULL,	/* softreset is broken */
+	.hardreset		= inic_hardreset,
 	.error_handler		= inic_error_handler,
 	.post_internal_cmd	= inic_post_internal_cmd,
 	.dev_config		= inic_dev_config,
 
-	.port_resume		= inic_port_resume,
+	.scr_read		= inic_scr_read,
+	.scr_write		= inic_scr_write,
 
+	.port_resume		= inic_port_resume,
 	.port_start		= inic_port_start,
 };
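Note how .inherits interacts with ATA_OP_NULL: unset slots are filled in
from the parent chain at host registration time, while ATA_OP_NULL pins a
slot to "no method" so a parent's implementation cannot leak through --
here it suppresses the inherited SFF softreset, which is broken on this
chip.  A sketch of the resolution pass, modeled on
ata_finalize_port_ops() (an assumption; the real function also serializes
against concurrent finalization):

	static void finalize_port_ops(struct ata_port_operations *ops)
	{
		void **slot = (void **)ops;
		const struct ata_port_operations *cur;
		size_t i, n = sizeof(*ops) / sizeof(void *);

		for (cur = ops->inherits; cur; cur = cur->inherits) {
			void **parent = (void **)cur;

			for (i = 0; i < n; i++)	/* fill only unset slots */
				if (!slot[i])
					slot[i] = parent[i];
		}

		for (i = 0; i < n; i++)		/* ATA_OP_NULL -> no method */
			if (slot[i] == (void *)ATA_OP_NULL)
				slot[i] = NULL;

		ops->inherits = NULL;
	}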
 
@@ -692,7 +659,7 @@
 			((unsigned long)iomap[2 * i + 1] | ATA_PCI_CTL_OFS);
 		port->scr_addr = iomap[MMIO_BAR] + offset + PORT_SCR;
 
-		ata_std_ports(port);
+		ata_sff_std_ports(port);
 
 		ata_port_pbar_desc(ap, MMIO_BAR, -1, "mmio");
 		ata_port_pbar_desc(ap, MMIO_BAR, offset, "port");
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 6ebebde..05ff8c7 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -1,6 +1,7 @@
 /*
  * sata_mv.c - Marvell SATA support
  *
+ * Copyright 2008: Marvell Corporation, all rights reserved.
  * Copyright 2005: EMC Corporation, all rights reserved.
  * Copyright 2005 Red Hat, Inc.  All rights reserved.
  *
@@ -39,7 +40,9 @@
 
   5) Investigate problems with PCI Message Signalled Interrupts (MSI).
 
-  6) Add port multiplier support (intermediate)
+  6) Cache frequently-accessed registers in mv_port_priv to reduce overhead.
+
+  7) Fix/reenable hot plug/unplug (should happen as a side-effect of (2) above).
 
   8) Develop a low-power-consumption strategy, and implement it.
 
@@ -61,7 +64,6 @@
 
 */
 
-
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
@@ -131,7 +133,7 @@
 	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
 	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
 	/* SoC integrated controllers, no PCI interface */
-	MV_FLAG_SOC = (1 << 28),
+	MV_FLAG_SOC		= (1 << 28),
 
 	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
 				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
@@ -141,6 +143,7 @@
 	CRQB_FLAG_READ		= (1 << 0),
 	CRQB_TAG_SHIFT		= 1,
 	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
+	CRQB_PMP_SHIFT		= 12,	/* CRQB Gen-II/IIE PMP shift */
 	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
 	CRQB_CMD_ADDR_SHIFT	= 8,
 	CRQB_CMD_CS		= (0x2 << 11),
@@ -199,7 +202,7 @@
 	TWSI_INT		= (1 << 24),
 	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
 	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
-	HC_MAIN_RSVD_SOC 	= (0x3fffffb << 6),     /* bits 31-9, 7-6 */
+	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),     /* bits 31-9, 7-6 */
 	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
 				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
 				   HC_MAIN_RSVD),
@@ -223,13 +226,24 @@
 	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
 	SATA_ACTIVE_OFS		= 0x350,
 	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,
+
+	LTMODE_OFS		= 0x30c,
+	LTMODE_BIT8		= (1 << 8),	/* unknown, but necessary */
+
 	PHY_MODE3		= 0x310,
 	PHY_MODE4		= 0x314,
 	PHY_MODE2		= 0x330,
+	SATA_IFCTL_OFS		= 0x344,
+	SATA_IFSTAT_OFS		= 0x34c,
+	VENDOR_UNIQUE_FIS_OFS	= 0x35c,
+
+	FIS_CFG_OFS		= 0x360,
+	FIS_CFG_SINGLE_SYNC	= (1 << 16),	/* SYNC on DMA activation */
+
 	MV5_PHY_MODE		= 0x74,
 	MV5_LT_MODE		= 0x30,
 	MV5_PHY_CTL		= 0x0C,
-	SATA_INTERFACE_CTL	= 0x050,
+	SATA_INTERFACE_CFG	= 0x050,
 
 	MV_M2_PREAMP_MASK	= 0x7e0,
 
@@ -240,6 +254,8 @@
 	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
 	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
 	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
+	EDMA_CFG_EDMA_FBS	= (1 << 16),	/* EDMA FIS-Based Switching */
+	EDMA_CFG_FBS		= (1 << 26),	/* FIS-Based Switching */
 
 	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
 	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
@@ -282,7 +298,9 @@
 	EDMA_ERR_IRQ_TRANSIENT  = EDMA_ERR_LNK_CTRL_RX_0 |
 				  EDMA_ERR_LNK_CTRL_RX_1 |
 				  EDMA_ERR_LNK_CTRL_RX_3 |
-				  EDMA_ERR_LNK_CTRL_TX,
+				  EDMA_ERR_LNK_CTRL_TX |
+				 /* temporary, until we fix hotplug: */
+				 (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON),
 
 	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
 				  EDMA_ERR_PRD_PAR |
@@ -298,6 +316,7 @@
 				  EDMA_ERR_LNK_DATA_RX |
 				  EDMA_ERR_LNK_DATA_TX |
 				  EDMA_ERR_TRANS_PROTO,
+
 	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
 				  EDMA_ERR_PRD_PAR |
 				  EDMA_ERR_DEV_DCON |
@@ -344,7 +363,6 @@
 	/* Port private flags (pp_flags) */
 	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
 	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
-	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),	/* 1st hard reset complete? */
 };
 
 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
@@ -461,7 +479,6 @@
 	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
 };
 
-static void mv_irq_clear(struct ata_port *ap);
 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
@@ -471,7 +488,8 @@
 static void mv_qc_prep(struct ata_queued_cmd *qc);
 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
-static void mv_error_handler(struct ata_port *ap);
+static int mv_hardreset(struct ata_link *link, unsigned int *class,
+			unsigned long deadline);
 static void mv_eh_freeze(struct ata_port *ap);
 static void mv_eh_thaw(struct ata_port *ap);
 static void mv6_dev_config(struct ata_device *dev);
@@ -504,72 +522,46 @@
 				      void __iomem *mmio);
 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
-static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
+static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
 			     unsigned int port_no);
-static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
-			void __iomem *port_mmio, int want_ncq);
-static int __mv_stop_dma(struct ata_port *ap);
+static int mv_stop_edma(struct ata_port *ap);
+static int mv_stop_edma_engine(void __iomem *port_mmio);
+static void mv_edma_cfg(struct ata_port *ap, int want_ncq);
+
+static void mv_pmp_select(struct ata_port *ap, int pmp);
+static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
+				unsigned long deadline);
+static int  mv_softreset(struct ata_link *link, unsigned int *class,
+				unsigned long deadline);
 
 /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
  * because we have to allow room for worst case splitting of
  * PRDs for 64K boundaries in mv_fill_sg().
  */
 static struct scsi_host_template mv5_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
+	ATA_BASE_SHT(DRV_NAME),
 	.sg_tablesize		= MV_MAX_SG_CT / 2,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= 1,
-	.proc_name		= DRV_NAME,
 	.dma_boundary		= MV_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
 };
 
 static struct scsi_host_template mv6_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.change_queue_depth	= ata_scsi_change_queue_depth,
+	ATA_NCQ_SHT(DRV_NAME),
 	.can_queue		= MV_MAX_Q_DEPTH - 1,
-	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= MV_MAX_SG_CT / 2,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= 1,
-	.proc_name		= DRV_NAME,
 	.dma_boundary		= MV_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
 };
 
-static const struct ata_port_operations mv5_ops = {
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
-
-	.cable_detect		= ata_cable_sata,
+static struct ata_port_operations mv5_ops = {
+	.inherits		= &ata_sff_port_ops,
 
 	.qc_prep		= mv_qc_prep,
 	.qc_issue		= mv_qc_issue,
-	.data_xfer		= ata_data_xfer,
 
-	.irq_clear		= mv_irq_clear,
-	.irq_on			= ata_irq_on,
-
-	.error_handler		= mv_error_handler,
 	.freeze			= mv_eh_freeze,
 	.thaw			= mv_eh_thaw,
+	.hardreset		= mv_hardreset,
+	.error_handler		= ata_std_error_handler, /* avoid SFF EH */
+	.post_internal_cmd	= ATA_OP_NULL,
 
 	.scr_read		= mv5_scr_read,
 	.scr_write		= mv5_scr_write,
@@ -578,61 +570,24 @@
 	.port_stop		= mv_port_stop,
 };
 
-static const struct ata_port_operations mv6_ops = {
+static struct ata_port_operations mv6_ops = {
+	.inherits		= &mv5_ops,
+	.qc_defer		= sata_pmp_qc_defer_cmd_switch,
 	.dev_config             = mv6_dev_config,
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
-
-	.cable_detect		= ata_cable_sata,
-
-	.qc_prep		= mv_qc_prep,
-	.qc_issue		= mv_qc_issue,
-	.data_xfer		= ata_data_xfer,
-
-	.irq_clear		= mv_irq_clear,
-	.irq_on			= ata_irq_on,
-
-	.error_handler		= mv_error_handler,
-	.freeze			= mv_eh_freeze,
-	.thaw			= mv_eh_thaw,
-	.qc_defer		= ata_std_qc_defer,
-
 	.scr_read		= mv_scr_read,
 	.scr_write		= mv_scr_write,
 
-	.port_start		= mv_port_start,
-	.port_stop		= mv_port_stop,
+	.pmp_hardreset		= mv_pmp_hardreset,
+	.pmp_softreset		= mv_softreset,
+	.softreset		= mv_softreset,
+	.error_handler		= sata_pmp_error_handler,
 };
 
-static const struct ata_port_operations mv_iie_ops = {
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
-
-	.cable_detect		= ata_cable_sata,
-
+static struct ata_port_operations mv_iie_ops = {
+	.inherits		= &mv6_ops,
+	.qc_defer		= ata_std_qc_defer, /* FIS-based switching */
+	.dev_config		= ATA_OP_NULL,
 	.qc_prep		= mv_qc_prep_iie,
-	.qc_issue		= mv_qc_issue,
-	.data_xfer		= ata_data_xfer,
-
-	.irq_clear		= mv_irq_clear,
-	.irq_on			= ata_irq_on,
-
-	.error_handler		= mv_error_handler,
-	.freeze			= mv_eh_freeze,
-	.thaw			= mv_eh_thaw,
-	.qc_defer		= ata_std_qc_defer,
-
-	.scr_read		= mv_scr_read,
-	.scr_write		= mv_scr_write,
-
-	.port_start		= mv_port_start,
-	.port_stop		= mv_port_stop,
 };
 
 static const struct ata_port_info mv_port_info[] = {
@@ -656,6 +611,7 @@
 	},
 	{  /* chip_604x */
 		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
 				  ATA_FLAG_NCQ,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.udma_mask	= ATA_UDMA6,
@@ -663,6 +619,7 @@
 	},
 	{  /* chip_608x */
 		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
 				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.udma_mask	= ATA_UDMA6,
@@ -670,6 +627,7 @@
 	},
 	{  /* chip_6042 */
 		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
 				  ATA_FLAG_NCQ,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.udma_mask	= ATA_UDMA6,
@@ -677,16 +635,19 @@
 	},
 	{  /* chip_7042 */
 		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
 				  ATA_FLAG_NCQ,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &mv_iie_ops,
 	},
 	{  /* chip_soc */
-		.flags = MV_COMMON_FLAGS | MV_FLAG_SOC,
-		.pio_mask = 0x1f,      /* pio0-4 */
-		.udma_mask = ATA_UDMA6,
-		.port_ops = &mv_iie_ops,
+		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
+				  ATA_FLAG_NCQ | MV_FLAG_SOC,
+		.pio_mask	= 0x1f,	/* pio0-4 */
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &mv_iie_ops,
 	},
 };
 
@@ -785,6 +746,14 @@
 		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
 }
 
+static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
+{
+	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
+	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
+
+	return hc_mmio + ofs;
+}
+
 static inline void __iomem *mv_host_base(struct ata_host *host)
 {
 	struct mv_host_priv *hpriv = host->private_data;
@@ -801,10 +770,6 @@
 	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
 }
 
-static void mv_irq_clear(struct ata_port *ap)
-{
-}
-
 static void mv_set_edma_ptrs(void __iomem *port_mmio,
 			     struct mv_host_priv *hpriv,
 			     struct mv_port_priv *pp)
@@ -864,7 +829,7 @@
 	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
 		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
 		if (want_ncq != using_ncq)
-			__mv_stop_dma(ap);
+			mv_stop_edma(ap);
 	}
 	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
 		struct mv_host_priv *hpriv = ap->host->private_data;
@@ -885,7 +850,7 @@
 				 hc_mmio + HC_IRQ_CAUSE_OFS);
 		}
 
-		mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);
+		mv_edma_cfg(ap, want_ncq);
 
 		/* clear FIS IRQ Cause */
 		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
@@ -899,58 +864,42 @@
 }
 
 /**
- *      __mv_stop_dma - Disable eDMA engine
- *      @ap: ATA channel to manipulate
- *
- *      Verify the local cache of the eDMA state is accurate with a
- *      WARN_ON.
+ *      mv_stop_edma_engine - Disable eDMA engine
+ *      @port_mmio: io base address
  *
  *      LOCKING:
  *      Inherited from caller.
  */
-static int __mv_stop_dma(struct ata_port *ap)
+static int mv_stop_edma_engine(void __iomem *port_mmio)
 {
-	void __iomem *port_mmio = mv_ap_base(ap);
-	struct mv_port_priv *pp	= ap->private_data;
-	u32 reg;
-	int i, err = 0;
+	int i;
 
-	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
-		/* Disable EDMA if active.   The disable bit auto clears.
-		 */
-		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
-		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
-	} else {
-		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
-	}
+	/* Disable eDMA.  The disable bit auto clears. */
+	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
 
-	/* now properly wait for the eDMA to stop */
-	for (i = 1000; i > 0; i--) {
-		reg = readl(port_mmio + EDMA_CMD_OFS);
+	/* Wait for the chip to confirm eDMA is off. */
+	for (i = 10000; i > 0; i--) {
+		u32 reg = readl(port_mmio + EDMA_CMD_OFS);
 		if (!(reg & EDMA_EN))
-			break;
-
-		udelay(100);
+			return 0;
+		udelay(10);
 	}
-
-	if (reg & EDMA_EN) {
-		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
-		err = -EIO;
-	}
-
-	return err;
+	return -EIO;
 }
 
-static int mv_stop_dma(struct ata_port *ap)
+static int mv_stop_edma(struct ata_port *ap)
 {
-	unsigned long flags;
-	int rc;
+	void __iomem *port_mmio = mv_ap_base(ap);
+	struct mv_port_priv *pp = ap->private_data;
 
-	spin_lock_irqsave(&ap->host->lock, flags);
-	rc = __mv_stop_dma(ap);
-	spin_unlock_irqrestore(&ap->host->lock, flags);
-
-	return rc;
+	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
+		return 0;
+	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
+	if (mv_stop_edma_engine(port_mmio)) {
+		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
+		return -EIO;
+	}
+	return 0;
 }
 
 #ifdef ATA_DEBUG
@@ -1074,18 +1023,50 @@
 static void mv6_dev_config(struct ata_device *adev)
 {
 	/*
+	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
+	 *
+	 * Gen-II does not support NCQ over a port multiplier
+	 *  (no FIS-based switching).
+	 *
 	 * We don't have hob_nsect when doing NCQ commands on Gen-II.
 	 * See mv_qc_prep() for more info.
 	 */
-	if (adev->flags & ATA_DFLAG_NCQ)
-		if (adev->max_sectors > ATA_MAX_SECTORS)
+	if (adev->flags & ATA_DFLAG_NCQ) {
+		if (sata_pmp_attached(adev->link->ap))
+			adev->flags &= ~ATA_DFLAG_NCQ;
+		else if (adev->max_sectors > ATA_MAX_SECTORS)
 			adev->max_sectors = ATA_MAX_SECTORS;
+	}
 }
 
-static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
-			void __iomem *port_mmio, int want_ncq)
+static void mv_config_fbs(void __iomem *port_mmio, int enable_fbs)
+{
+	u32 old_fcfg, new_fcfg, old_ltmode, new_ltmode;
+	/*
+	 * Various bit settings required for operation
+	 * in FIS-based switching (fbs) mode on GenIIe:
+	 */
+	old_fcfg   = readl(port_mmio + FIS_CFG_OFS);
+	old_ltmode = readl(port_mmio + LTMODE_OFS);
+	if (enable_fbs) {
+		new_fcfg   = old_fcfg   |  FIS_CFG_SINGLE_SYNC;
+		new_ltmode = old_ltmode |  LTMODE_BIT8;
+	} else { /* disable fbs */
+		new_fcfg   = old_fcfg   & ~FIS_CFG_SINGLE_SYNC;
+		new_ltmode = old_ltmode & ~LTMODE_BIT8;
+	}
+	if (new_fcfg != old_fcfg)
+		writelfl(new_fcfg, port_mmio + FIS_CFG_OFS);
+	if (new_ltmode != old_ltmode)
+		writelfl(new_ltmode, port_mmio + LTMODE_OFS);
+}
+
+static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
 {
 	u32 cfg;
+	struct mv_port_priv *pp    = ap->private_data;
+	struct mv_host_priv *hpriv = ap->host->private_data;
+	void __iomem *port_mmio    = mv_ap_base(ap);
 
 	/* set up non-NCQ EDMA configuration */
 	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */
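mv_config_fbs() above, like most register pokes in this driver, goes
through writelfl() rather than plain writel(): the dummy read-back forces
the write out of any posted-write buffer before the code proceeds.  The
helper is defined near the top of sata_mv.c; roughly:

	static inline void writelfl(unsigned long data, void __iomem *addr)
	{
		writel(data, addr);
		(void)readl(addr);	/* flush the posted write */
	}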
@@ -1101,6 +1082,13 @@
 		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
 		cfg |= (1 << 18);	/* enab early completion */
 		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
+
+		if (want_ncq && sata_pmp_attached(ap)) {
+			cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
+			mv_config_fbs(port_mmio, 1);
+		} else {
+			mv_config_fbs(port_mmio, 0);
+		}
 	}
 
 	if (want_ncq) {
@@ -1156,8 +1144,6 @@
 	struct device *dev = ap->host->dev;
 	struct mv_host_priv *hpriv = ap->host->private_data;
 	struct mv_port_priv *pp;
-	void __iomem *port_mmio = mv_ap_base(ap);
-	unsigned long flags;
 	int tag;
 
 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
@@ -1190,18 +1176,6 @@
 			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
 		}
 	}
-
-	spin_lock_irqsave(&ap->host->lock, flags);
-
-	mv_edma_cfg(pp, hpriv, port_mmio, 0);
-	mv_set_edma_ptrs(port_mmio, hpriv, pp);
-
-	spin_unlock_irqrestore(&ap->host->lock, flags);
-
-	/* Don't turn on EDMA here...do it before DMA commands only.  Else
-	 * we'll be unable to send non-data, PIO, etc due to restricted access
-	 * to shadow regs.
-	 */
 	return 0;
 
 out_port_free_dma_mem:
@@ -1220,7 +1194,7 @@
  */
 static void mv_port_stop(struct ata_port *ap)
 {
-	mv_stop_dma(ap);
+	mv_stop_edma(ap);
 	mv_port_free_dma_mem(ap);
 }
 
@@ -1306,6 +1280,7 @@
 		flags |= CRQB_FLAG_READ;
 	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
 	flags |= qc->tag << CRQB_TAG_SHIFT;
+	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
 
 	/* get current queue index from software */
 	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
@@ -1390,14 +1365,14 @@
 	    (qc->tf.protocol != ATA_PROT_NCQ))
 		return;
 
-	/* Fill in Gen IIE command request block
-	 */
+	/* Fill in Gen IIE command request block */
 	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
 		flags |= CRQB_FLAG_READ;
 
 	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
 	flags |= qc->tag << CRQB_TAG_SHIFT;
 	flags |= qc->tag << CRQB_HOSTQ_SHIFT;
+	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
 
 	/* get current queue index from software */
 	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
@@ -1455,12 +1430,14 @@
 
 	if ((qc->tf.protocol != ATA_PROT_DMA) &&
 	    (qc->tf.protocol != ATA_PROT_NCQ)) {
-		/* We're about to send a non-EDMA capable command to the
+		/*
+		 * We're about to send a non-EDMA capable command to the
 		 * port.  Turn off EDMA so there won't be problems accessing
 		 * shadow block, etc registers.
 		 */
-		__mv_stop_dma(ap);
-		return ata_qc_issue_prot(qc);
+		mv_stop_edma(ap);
+		mv_pmp_select(ap, qc->dev->link->pmp);
+		return ata_sff_qc_issue(qc);
 	}
 
 	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
@@ -1482,10 +1459,10 @@
  *      @reset_allowed: bool: 0 == don't trigger from reset here
  *
  *      In most cases, just clear the interrupt and move on.  However,
- *      some cases require an eDMA reset, which is done right before
- *      the COMRESET in mv_phy_reset().  The SERR case requires a
- *      clear of pending errors in the SATA SERROR register.  Finally,
- *      if the port disabled DMA, update our cached copy to match.
+ *      some cases require an eDMA reset, which also performs a COMRESET.
+ *      The SERR case requires a clear of pending errors in the SATA
+ *      SERROR register.  Finally, if the port disabled DMA,
+ *      update our cached copy to match.
  *
  *      LOCKING:
  *      Inherited from caller.
@@ -1524,14 +1501,14 @@
 			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
 			EDMA_ERR_INTRL_PAR)) {
 		err_mask |= AC_ERR_ATA_BUS;
-		action |= ATA_EH_HARDRESET;
+		action |= ATA_EH_RESET;
 		ata_ehi_push_desc(ehi, "parity error");
 	}
 	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
 		ata_ehi_hotplugged(ehi);
 		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
 			"dev disconnect" : "dev connect");
-		action |= ATA_EH_HARDRESET;
+		action |= ATA_EH_RESET;
 	}
 
 	if (IS_GEN_I(hpriv)) {
@@ -1555,7 +1532,7 @@
 			sata_scr_read(&ap->link, SCR_ERROR, &serr);
 			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
 			err_mask = AC_ERR_ATA_BUS;
-			action |= ATA_EH_HARDRESET;
+			action |= ATA_EH_RESET;
 		}
 	}
 
@@ -1564,7 +1541,7 @@
 
 	if (!err_mask) {
 		err_mask = AC_ERR_OTHER;
-		action |= ATA_EH_HARDRESET;
+		action |= ATA_EH_RESET;
 	}
 
 	ehi->serror |= serr;
@@ -1723,9 +1700,9 @@
 		pp = ap->private_data;
 
 		shift = port << 1;		/* (port * 2) */
-		if (port >= MV_PORTS_PER_HC) {
+		if (port >= MV_PORTS_PER_HC)
 			shift++;	/* skip bit 8 in the HC Main IRQ reg */
-		}
+
 		have_err_bits = ((PORT0_ERR << shift) & relevant);
 
 		if (unlikely(have_err_bits)) {
@@ -1780,7 +1757,7 @@
 				ata_ehi_push_desc(ehi,
 					"PCI err cause 0x%08x", err_cause);
 			err_mask = AC_ERR_HOST_BUS;
-			ehi->action = ATA_EH_HARDRESET;
+			ehi->action = ATA_EH_RESET;
 			qc = ata_qc_from_tag(ap, ap->link.active_tag);
 			if (qc)
 				qc->err_mask |= err_mask;
@@ -1814,6 +1791,7 @@
 	void __iomem *mmio = hpriv->base;
 	u32 irq_stat, irq_mask;
 
+	/* Note to self: &host->lock == &ap->host->lock == ap->lock */
 	spin_lock(&host->lock);
 
 	irq_stat = readl(hpriv->main_cause_reg_addr);
@@ -1847,14 +1825,6 @@
 	return IRQ_RETVAL(handled);
 }
 
-static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
-{
-	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
-	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
-
-	return hc_mmio + ofs;
-}
-
 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
 {
 	unsigned int ofs;
@@ -1980,9 +1950,12 @@
 {
 	void __iomem *port_mmio = mv_port_base(mmio, port);
 
-	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
-
-	mv_channel_reset(hpriv, mmio, port);
+	/*
+	 * The datasheet warns against setting ATA_RST when EDMA is active
+	 * (but doesn't say what the problem might be).  So we first try
+	 * to disable the EDMA engine before doing the ATA_RST operation.
+	 */
+	mv_reset_channel(hpriv, mmio, port);
 
 	ZERO(0x028);	/* command */
 	writel(0x11f, port_mmio + EDMA_CFG_OFS);
@@ -2132,6 +2105,13 @@
 		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
 		rc = 1;
 	}
+	/*
+	 * Temporary: wait 3 seconds before port-probing can happen,
+	 * so that we don't miss finding sleepy SilXXXX port-multipliers.
+	 * This can go away once hotplug is fully/correctly implemented.
+	 */
+	if (rc == 0)
+		msleep(3000);
 done:
 	return rc;
 }
@@ -2200,14 +2180,15 @@
 		m4 = readl(port_mmio + PHY_MODE4);
 
 		if (hp_flags & MV_HP_ERRATA_60X1B2)
-			tmp = readl(port_mmio + 0x310);
+			tmp = readl(port_mmio + PHY_MODE3);
 
+		/* workaround for errata FEr SATA#10 (part 1) */
 		m4 = (m4 & ~(1 << 1)) | (1 << 0);
 
 		writel(m4, port_mmio + PHY_MODE4);
 
 		if (hp_flags & MV_HP_ERRATA_60X1B2)
-			writel(tmp, port_mmio + 0x310);
+			writel(tmp, port_mmio + PHY_MODE3);
 	}
 
 	/* Revert values of pre-emphasis and signal amps to the saved ones */
@@ -2255,9 +2236,12 @@
 {
 	void __iomem *port_mmio = mv_port_base(mmio, port);
 
-	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
-
-	mv_channel_reset(hpriv, mmio, port);
+	/*
+	 * The datasheet warns against setting ATA_RST when EDMA is active
+	 * (but doesn't say what the problem might be).  So we first try
+	 * to disable the EDMA engine before doing the ATA_RST operation.
+	 */
+	mv_reset_channel(hpriv, mmio, port);
 
 	ZERO(0x028);		/* command */
 	writel(0x101f, port_mmio + EDMA_CFG_OFS);
@@ -2314,25 +2298,39 @@
 	return;
 }
 
-static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
+static void mv_setup_ifctl(void __iomem *port_mmio, int want_gen2i)
+{
+	u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG);
+
+	ifctl = (ifctl & 0xf7f) | 0x9b1000;	/* from chip spec */
+	if (want_gen2i)
+		ifctl |= (1 << 7);		/* enable gen2i speed */
+	writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG);
+}
+
+/*
+ * Caller must ensure that EDMA is not active,
+ * by first doing mv_stop_edma() where needed.
+ */
+static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
 			     unsigned int port_no)
 {
 	void __iomem *port_mmio = mv_port_base(mmio, port_no);
 
+	mv_stop_edma_engine(port_mmio);
 	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
 
-	if (IS_GEN_II(hpriv)) {
-		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
-		ifctl |= (1 << 7);		/* enable gen2i speed */
-		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
-		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
+	if (!IS_GEN_I(hpriv)) {
+		/* Enable 3.0gb/s link speed */
+		mv_setup_ifctl(port_mmio, 1);
 	}
-
-	udelay(25);		/* allow reset propagation */
-
-	/* Spec never mentions clearing the bit.  Marvell's driver does
-	 * clear the bit, however.
+	/*
+	 * Strobing ATA_RST here causes a hard reset of the SATA transport,
+	 * link, and physical layers.  It resets all SATA interface registers
+	 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
 	 */
+	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
+	udelay(25);	/* allow reset propagation */
 	writelfl(0, port_mmio + EDMA_CMD_OFS);
 
 	hpriv->ops->phy_errata(hpriv, mmio, port_no);
@@ -2341,136 +2339,32 @@
 		mdelay(1);
 }
 
-/**
- *      mv_phy_reset - Perform eDMA reset followed by COMRESET
- *      @ap: ATA channel to manipulate
- *
- *      Part of this is taken from __sata_phy_reset and modified to
- *      not sleep since this routine gets called from interrupt level.
- *
- *      LOCKING:
- *      Inherited from caller.  This is coded to safe to call at
- *      interrupt level, i.e. it does not sleep.
- */
-static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
-			 unsigned long deadline)
+static void mv_pmp_select(struct ata_port *ap, int pmp)
 {
-	struct mv_port_priv *pp	= ap->private_data;
-	struct mv_host_priv *hpriv = ap->host->private_data;
-	void __iomem *port_mmio = mv_ap_base(ap);
-	int retry = 5;
-	u32 sstatus;
+	if (sata_pmp_supported(ap)) {
+		void __iomem *port_mmio = mv_ap_base(ap);
+		u32 reg = readl(port_mmio + SATA_IFCTL_OFS);
+		int old = reg & 0xf;
 
-	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
-
-#ifdef DEBUG
-	{
-		u32 sstatus, serror, scontrol;
-
-		mv_scr_read(ap, SCR_STATUS, &sstatus);
-		mv_scr_read(ap, SCR_ERROR, &serror);
-		mv_scr_read(ap, SCR_CONTROL, &scontrol);
-		DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
-			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
+		if (old != pmp) {
+			reg = (reg & ~0xf) | pmp;
+			writelfl(reg, port_mmio + SATA_IFCTL_OFS);
+		}
 	}
-#endif
-
-	/* Issue COMRESET via SControl */
-comreset_retry:
-	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
-	msleep(1);
-
-	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
-	msleep(20);
-
-	do {
-		sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
-		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
-			break;
-
-		msleep(1);
-	} while (time_before(jiffies, deadline));
-
-	/* work around errata */
-	if (IS_GEN_II(hpriv) &&
-	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
-	    (retry-- > 0))
-		goto comreset_retry;
-
-#ifdef DEBUG
-	{
-		u32 sstatus, serror, scontrol;
-
-		mv_scr_read(ap, SCR_STATUS, &sstatus);
-		mv_scr_read(ap, SCR_ERROR, &serror);
-		mv_scr_read(ap, SCR_CONTROL, &scontrol);
-		DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
-			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
-	}
-#endif
-
-	if (ata_link_offline(&ap->link)) {
-		*class = ATA_DEV_NONE;
-		return;
-	}
-
-	/* even after SStatus reflects that device is ready,
-	 * it seems to take a while for link to be fully
-	 * established (and thus Status no longer 0x80/0x7F),
-	 * so we poll a bit for that, here.
-	 */
-	retry = 20;
-	while (1) {
-		u8 drv_stat = ata_check_status(ap);
-		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
-			break;
-		msleep(500);
-		if (retry-- <= 0)
-			break;
-		if (time_after(jiffies, deadline))
-			break;
-	}
-
-	/* FIXME: if we passed the deadline, the following
-	 * code probably produces an invalid result
-	 */
-
-	/* finally, read device signature from TF registers */
-	*class = ata_dev_try_classify(ap->link.device, 1, NULL);
-
-	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
-
-	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
-
-	VPRINTK("EXIT\n");
 }
 
-static int mv_prereset(struct ata_link *link, unsigned long deadline)
+static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
+				unsigned long deadline)
 {
-	struct ata_port *ap = link->ap;
-	struct mv_port_priv *pp	= ap->private_data;
-	struct ata_eh_context *ehc = &link->eh_context;
-	int rc;
+	mv_pmp_select(link->ap, sata_srst_pmp(link));
+	return sata_std_hardreset(link, class, deadline);
+}
 
-	rc = mv_stop_dma(ap);
-	if (rc)
-		ehc->i.action |= ATA_EH_HARDRESET;
-
-	if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
-		pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
-		ehc->i.action |= ATA_EH_HARDRESET;
-	}
-
-	/* if we're about to do hardreset, nothing more to do */
-	if (ehc->i.action & ATA_EH_HARDRESET)
-		return 0;
-
-	if (ata_link_online(link))
-		rc = ata_wait_ready(ap, deadline);
-	else
-		rc = -ENODEV;
-
-	return rc;
+static int mv_softreset(struct ata_link *link, unsigned int *class,
+				unsigned long deadline)
+{
+	mv_pmp_select(link->ap, sata_srst_pmp(link));
+	return ata_sff_softreset(link, class, deadline);
 }
 
 static int mv_hardreset(struct ata_link *link, unsigned int *class,
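Both wrappers above funnel through mv_pmp_select() with sata_srst_pmp(),
which picks the reset target behind a port multiplier: the PMP control
port when resetting the host link, the downstream port number otherwise.
Approximately (assuming the 2.6.26 <linux/libata.h> helper):

	static inline int sata_srst_pmp(struct ata_link *link)
	{
		if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
			return SATA_PMP_CTRL_PORT;
		return link->pmp;
	}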
@@ -2478,43 +2372,34 @@
 {
 	struct ata_port *ap = link->ap;
 	struct mv_host_priv *hpriv = ap->host->private_data;
+	struct mv_port_priv *pp = ap->private_data;
 	void __iomem *mmio = hpriv->base;
+	int rc, attempts = 0, extra = 0;
+	u32 sstatus;
+	bool online;
 
-	mv_stop_dma(ap);
+	mv_reset_channel(hpriv, mmio, ap->port_no);
+	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
 
-	mv_channel_reset(hpriv, mmio, ap->port_no);
+	/* Workaround for errata FEr SATA#10 (part 2) */
+	do {
+		const unsigned long *timing =
+				sata_ehc_deb_timing(&link->eh_context);
 
-	mv_phy_reset(ap, class, deadline);
+		rc = sata_link_hardreset(link, timing, deadline + extra,
+					 &online, NULL);
+		if (rc)
+			return rc;
+		sata_scr_read(link, SCR_STATUS, &sstatus);
+		if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
+			/* Force 1.5gb/s link speed and try again */
+			mv_setup_ifctl(mv_ap_base(ap), 0);
+			if (time_after(jiffies + HZ, deadline))
+				extra = HZ; /* only extend it once, max */
+		}
+	} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
 
-	return 0;
-}
-
-static void mv_postreset(struct ata_link *link, unsigned int *classes)
-{
-	struct ata_port *ap = link->ap;
-	u32 serr;
-
-	/* print link status */
-	sata_print_link_status(link);
-
-	/* clear SError */
-	sata_scr_read(link, SCR_ERROR, &serr);
-	sata_scr_write_flush(link, SCR_ERROR, serr);
-
-	/* bail out if no device is present */
-	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
-		DPRINTK("EXIT, no device\n");
-		return;
-	}
-
-	/* set up device control */
-	iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
-}
-
-static void mv_error_handler(struct ata_port *ap)
-{
-	ata_do_eh(ap, mv_prereset, ata_std_softreset,
-		  mv_hardreset, mv_postreset);
+	return rc;
 }
 
 static void mv_eh_freeze(struct ata_port *ap)
@@ -2808,19 +2693,6 @@
 	hpriv->ops->enable_leds(hpriv, mmio);
 
 	for (port = 0; port < host->n_ports; port++) {
-		if (IS_GEN_II(hpriv)) {
-			void __iomem *port_mmio = mv_port_base(mmio, port);
-
-			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
-			ifctl |= (1 << 7);		/* enable gen2i speed */
-			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
-			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
-		}
-
-		hpriv->ops->phy_errata(hpriv, mmio, port);
-	}
-
-	for (port = 0; port < host->n_ports; port++) {
 		struct ata_port *ap = host->ports[port];
 		void __iomem *port_mmio = mv_port_base(mmio, port);
 
@@ -3192,7 +3064,7 @@
 MODULE_LICENSE("GPL");
 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
 MODULE_VERSION(DRV_VERSION);
-MODULE_ALIAS("platform:sata_mv");
+MODULE_ALIAS("platform:" DRV_NAME);
 
 #ifdef CONFIG_PCI
 module_param(msi, int, 0444);
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index ed5473b..109b074 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -309,7 +309,8 @@
 static void nv_nf2_thaw(struct ata_port *ap);
 static void nv_ck804_freeze(struct ata_port *ap);
 static void nv_ck804_thaw(struct ata_port *ap);
-static void nv_error_handler(struct ata_port *ap);
+static int nv_hardreset(struct ata_link *link, unsigned int *class,
+			unsigned long deadline);
 static int nv_adma_slave_config(struct scsi_device *sdev);
 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
 static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
@@ -385,157 +386,60 @@
 };
 
 static struct scsi_host_template nv_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
 };
 
 static struct scsi_host_template nv_adma_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.change_queue_depth	= ata_scsi_change_queue_depth,
+	ATA_NCQ_SHT(DRV_NAME),
 	.can_queue		= NV_ADMA_MAX_CPBS,
-	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
 	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
 	.slave_configure	= nv_adma_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
 };
 
 static struct scsi_host_template nv_swncq_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.change_queue_depth	= ata_scsi_change_queue_depth,
+	ATA_NCQ_SHT(DRV_NAME),
 	.can_queue		= ATA_MAX_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= nv_swncq_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
 };
 
-static const struct ata_port_operations nv_generic_ops = {
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.exec_command		= ata_exec_command,
-	.check_status		= ata_check_status,
-	.dev_select		= ata_std_dev_select,
-	.bmdma_setup		= ata_bmdma_setup,
-	.bmdma_start		= ata_bmdma_start,
-	.bmdma_stop		= ata_bmdma_stop,
-	.bmdma_status		= ata_bmdma_status,
-	.qc_prep		= ata_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
-	.freeze			= ata_bmdma_freeze,
-	.thaw			= ata_bmdma_thaw,
-	.error_handler		= nv_error_handler,
-	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
-	.data_xfer		= ata_data_xfer,
-	.irq_clear		= ata_bmdma_irq_clear,
-	.irq_on			= ata_irq_on,
+static struct ata_port_operations nv_generic_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	.hardreset		= nv_hardreset,
 	.scr_read		= nv_scr_read,
 	.scr_write		= nv_scr_write,
-	.port_start		= ata_port_start,
 };
 
-static const struct ata_port_operations nv_nf2_ops = {
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.exec_command		= ata_exec_command,
-	.check_status		= ata_check_status,
-	.dev_select		= ata_std_dev_select,
-	.bmdma_setup		= ata_bmdma_setup,
-	.bmdma_start		= ata_bmdma_start,
-	.bmdma_stop		= ata_bmdma_stop,
-	.bmdma_status		= ata_bmdma_status,
-	.qc_prep		= ata_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
+static struct ata_port_operations nv_nf2_ops = {
+	.inherits		= &nv_generic_ops,
 	.freeze			= nv_nf2_freeze,
 	.thaw			= nv_nf2_thaw,
-	.error_handler		= nv_error_handler,
-	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
-	.data_xfer		= ata_data_xfer,
-	.irq_clear		= ata_bmdma_irq_clear,
-	.irq_on			= ata_irq_on,
-	.scr_read		= nv_scr_read,
-	.scr_write		= nv_scr_write,
-	.port_start		= ata_port_start,
 };
 
-static const struct ata_port_operations nv_ck804_ops = {
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.exec_command		= ata_exec_command,
-	.check_status		= ata_check_status,
-	.dev_select		= ata_std_dev_select,
-	.bmdma_setup		= ata_bmdma_setup,
-	.bmdma_start		= ata_bmdma_start,
-	.bmdma_stop		= ata_bmdma_stop,
-	.bmdma_status		= ata_bmdma_status,
-	.qc_prep		= ata_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
+static struct ata_port_operations nv_ck804_ops = {
+	.inherits		= &nv_generic_ops,
 	.freeze			= nv_ck804_freeze,
 	.thaw			= nv_ck804_thaw,
-	.error_handler		= nv_error_handler,
-	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
-	.data_xfer		= ata_data_xfer,
-	.irq_clear		= ata_bmdma_irq_clear,
-	.irq_on			= ata_irq_on,
-	.scr_read		= nv_scr_read,
-	.scr_write		= nv_scr_write,
-	.port_start		= ata_port_start,
 	.host_stop		= nv_ck804_host_stop,
 };
 
-static const struct ata_port_operations nv_adma_ops = {
-	.tf_load		= ata_tf_load,
-	.tf_read		= nv_adma_tf_read,
+static struct ata_port_operations nv_adma_ops = {
+	.inherits		= &nv_generic_ops,
+
 	.check_atapi_dma	= nv_adma_check_atapi_dma,
-	.exec_command		= ata_exec_command,
-	.check_status		= ata_check_status,
-	.dev_select		= ata_std_dev_select,
-	.bmdma_setup		= ata_bmdma_setup,
-	.bmdma_start		= ata_bmdma_start,
-	.bmdma_stop		= ata_bmdma_stop,
-	.bmdma_status		= ata_bmdma_status,
+	.sff_tf_read		= nv_adma_tf_read,
 	.qc_defer		= ata_std_qc_defer,
 	.qc_prep		= nv_adma_qc_prep,
 	.qc_issue		= nv_adma_qc_issue,
+	.sff_irq_clear		= nv_adma_irq_clear,
+
 	.freeze			= nv_adma_freeze,
 	.thaw			= nv_adma_thaw,
 	.error_handler		= nv_adma_error_handler,
 	.post_internal_cmd	= nv_adma_post_internal_cmd,
-	.data_xfer		= ata_data_xfer,
-	.irq_clear		= nv_adma_irq_clear,
-	.irq_on			= ata_irq_on,
-	.scr_read		= nv_scr_read,
-	.scr_write		= nv_scr_write,
+
 	.port_start		= nv_adma_port_start,
 	.port_stop		= nv_adma_port_stop,
 #ifdef CONFIG_PM
@@ -545,28 +449,17 @@
 	.host_stop		= nv_adma_host_stop,
 };
 
-static const struct ata_port_operations nv_swncq_ops = {
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.exec_command		= ata_exec_command,
-	.check_status		= ata_check_status,
-	.dev_select		= ata_std_dev_select,
-	.bmdma_setup		= ata_bmdma_setup,
-	.bmdma_start		= ata_bmdma_start,
-	.bmdma_stop		= ata_bmdma_stop,
-	.bmdma_status		= ata_bmdma_status,
+static struct ata_port_operations nv_swncq_ops = {
+	.inherits		= &nv_generic_ops,
+
 	.qc_defer		= ata_std_qc_defer,
 	.qc_prep		= nv_swncq_qc_prep,
 	.qc_issue		= nv_swncq_qc_issue,
+
 	.freeze			= nv_mcp55_freeze,
 	.thaw			= nv_mcp55_thaw,
 	.error_handler		= nv_swncq_error_handler,
-	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
-	.data_xfer		= ata_data_xfer,
-	.irq_clear		= ata_bmdma_irq_clear,
-	.irq_on			= ata_irq_on,
-	.scr_read		= nv_scr_read,
-	.scr_write		= nv_scr_write,
+
 #ifdef CONFIG_PM
 	.port_suspend		= nv_swncq_port_suspend,
 	.port_resume		= nv_swncq_port_resume,
@@ -574,63 +467,61 @@
 	.port_start		= nv_swncq_port_start,
 };
 
+struct nv_pi_priv {
+	irq_handler_t			irq_handler;
+	struct scsi_host_template	*sht;
+};
+
+#define NV_PI_PRIV(_irq_handler, _sht) \
+	&(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
+
 static const struct ata_port_info nv_port_info[] = {
 	/* generic */
 	{
-		.sht		= &nv_sht,
 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
-		.link_flags	= ATA_LFLAG_HRST_TO_RESUME,
 		.pio_mask	= NV_PIO_MASK,
 		.mwdma_mask	= NV_MWDMA_MASK,
 		.udma_mask	= NV_UDMA_MASK,
 		.port_ops	= &nv_generic_ops,
-		.irq_handler	= nv_generic_interrupt,
+		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
 	},
 	/* nforce2/3 */
 	{
-		.sht		= &nv_sht,
 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
-		.link_flags	= ATA_LFLAG_HRST_TO_RESUME,
 		.pio_mask	= NV_PIO_MASK,
 		.mwdma_mask	= NV_MWDMA_MASK,
 		.udma_mask	= NV_UDMA_MASK,
 		.port_ops	= &nv_nf2_ops,
-		.irq_handler	= nv_nf2_interrupt,
+		.private_data	= NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
 	},
 	/* ck804 */
 	{
-		.sht		= &nv_sht,
 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
-		.link_flags	= ATA_LFLAG_HRST_TO_RESUME,
 		.pio_mask	= NV_PIO_MASK,
 		.mwdma_mask	= NV_MWDMA_MASK,
 		.udma_mask	= NV_UDMA_MASK,
 		.port_ops	= &nv_ck804_ops,
-		.irq_handler	= nv_ck804_interrupt,
+		.private_data	= NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
 	},
 	/* ADMA */
 	{
-		.sht		= &nv_adma_sht,
 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
 				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
-		.link_flags	= ATA_LFLAG_HRST_TO_RESUME,
 		.pio_mask	= NV_PIO_MASK,
 		.mwdma_mask	= NV_MWDMA_MASK,
 		.udma_mask	= NV_UDMA_MASK,
 		.port_ops	= &nv_adma_ops,
-		.irq_handler	= nv_adma_interrupt,
+		.private_data	= NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
 	},
 	/* SWNCQ */
 	{
-		.sht		= &nv_swncq_sht,
 		.flags	        = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
 				  ATA_FLAG_NCQ,
-		.link_flags	= ATA_LFLAG_HRST_TO_RESUME,
 		.pio_mask	= NV_PIO_MASK,
 		.mwdma_mask	= NV_MWDMA_MASK,
 		.udma_mask	= NV_UDMA_MASK,
 		.port_ops	= &nv_swncq_ops,
-		.irq_handler	= nv_swncq_interrupt,
+		.private_data	= NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
 	},
 };
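NV_PI_PRIV() leans on a C99 compound literal: taking the address of
(struct nv_pi_priv){ ... } at file scope yields a pointer to an anonymous
object with static storage duration, so each port_info entry gets its own
private struct without declaring a named variable per entry.  A
standalone demonstration of the idiom (hypothetical names, plain
user-space C):

	#include <stdio.h>

	struct pi_priv {
		void (*handler)(void);
		const char *name;
	};

	#define PI_PRIV(_handler, _name) \
		&(struct pi_priv){ .handler = _handler, .name = _name }

	static void demo_handler(void) { puts("irq"); }

	/* file-scope compound literal: static storage duration */
	static struct pi_priv *cfg = PI_PRIV(demo_handler, "demo");

	int main(void)
	{
		printf("%s\n", cfg->name);
		cfg->handler();
		return 0;
	}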
 
@@ -640,8 +531,8 @@
 MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
 MODULE_VERSION(DRV_VERSION);
 
-static int adma_enabled = 1;
-static int swncq_enabled;
+static int adma_enabled;
+static int swncq_enabled = 1;
 
 static void nv_adma_register_mode(struct ata_port *ap)
 {
@@ -839,7 +730,7 @@
 	   ADMA mode could abort outstanding commands. */
 	nv_adma_register_mode(ap);
 
-	ata_tf_read(ap, tf);
+	ata_sff_tf_read(ap, tf);
 }
 
 static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
@@ -929,7 +820,7 @@
 					"notifier for tag %d with no cmd?\n",
 					cpb_num);
 			ehi->err_mask |= AC_ERR_HSM;
-			ehi->action |= ATA_EH_SOFTRESET;
+			ehi->action |= ATA_EH_RESET;
 			ata_port_freeze(ap);
 			return 1;
 		}
@@ -953,12 +844,12 @@
 
 	/* DEV interrupt w/ no active qc? */
 	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
-		ata_check_status(ap);
+		ata_sff_check_status(ap);
 		return 1;
 	}
 
 	/* handle interrupt */
-	return ata_host_intr(ap, qc);
+	return ata_sff_host_intr(ap, qc);
 }
 
 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
@@ -1137,7 +1028,7 @@
 	u32 notifier_clears[2];
 
 	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
-		ata_bmdma_irq_clear(ap);
+		ata_sff_irq_clear(ap);
 		return;
 	}
 
@@ -1168,7 +1059,7 @@
 	struct nv_adma_port_priv *pp = qc->ap->private_data;
 
 	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
-		ata_bmdma_post_internal_cmd(qc);
+		ata_sff_post_internal_cmd(qc);
 }
 
 static int nv_adma_port_start(struct ata_port *ap)
@@ -1445,7 +1336,7 @@
 		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
 			(qc->flags & ATA_QCFLAG_DMAMAP));
 		nv_adma_register_mode(qc->ap);
-		ata_qc_prep(qc);
+		ata_sff_qc_prep(qc);
 		return;
 	}
 
@@ -1504,7 +1395,7 @@
 		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
 			(qc->flags & ATA_QCFLAG_DMAMAP));
 		nv_adma_register_mode(qc->ap);
-		return ata_qc_issue_prot(qc);
+		return ata_sff_qc_issue(qc);
 	} else
 		nv_adma_mode(qc->ap);
 
@@ -1545,11 +1436,11 @@
 
 			qc = ata_qc_from_tag(ap, ap->link.active_tag);
 			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
-				handled += ata_host_intr(ap, qc);
+				handled += ata_sff_host_intr(ap, qc);
 			else
 				// No request pending?  Clear interrupt status
 				// anyway, in case there's one pending.
-				ap->ops->check_status(ap);
+				ap->ops->sff_check_status(ap);
 		}
 
 	}
@@ -1680,7 +1571,7 @@
 	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
 	mask &= ~(NV_INT_ALL_MCP55 << shift);
 	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
-	ata_bmdma_freeze(ap);
+	ata_sff_freeze(ap);
 }
 
 static void nv_mcp55_thaw(struct ata_port *ap)
@@ -1694,7 +1585,7 @@
 	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
 	mask |= (NV_INT_MASK_MCP55 << shift);
 	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
-	ata_bmdma_thaw(ap);
+	ata_sff_thaw(ap);
 }
 
 static int nv_hardreset(struct ata_link *link, unsigned int *class,
@@ -1706,13 +1597,7 @@
 	 * some controllers.  Don't classify on hardreset.  For more
 	 * info, see http://bugzilla.kernel.org/show_bug.cgi?id=3352
 	 */
-	return sata_std_hardreset(link, &dummy, deadline);
-}
-
-static void nv_error_handler(struct ata_port *ap)
-{
-	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
-			   nv_hardreset, ata_std_postreset);
+	return sata_sff_hardreset(link, &dummy, deadline);
 }
 
 static void nv_adma_error_handler(struct ata_port *ap)
@@ -1768,8 +1653,7 @@
 		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
 	}
 
-	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
-			   nv_hardreset, ata_std_postreset);
+	ata_sff_error_handler(ap);
 }
 
 static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
@@ -1855,7 +1739,7 @@
 		pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
 
 	ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
-			ap->ops->check_status(ap),
+			ap->ops->sff_check_status(ap),
 			ioread8(ap->ioaddr.error_addr));
 
 	sactive = readl(pp->sactive_block);
@@ -1881,7 +1765,7 @@
 	}
 
 	nv_swncq_pp_reinit(ap);
-	ap->ops->irq_clear(ap);
+	ap->ops->sff_irq_clear(ap);
 	__ata_bmdma_stop(ap);
 	nv_swncq_irq_clear(ap, 0xffff);
 }
@@ -1892,11 +1776,10 @@
 
 	if (ap->link.sactive) {
 		nv_swncq_ncq_stop(ap);
-		ehc->i.action |= ATA_EH_HARDRESET;
+		ehc->i.action |= ATA_EH_RESET;
 	}
 
-	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
-			   nv_hardreset, ata_std_postreset);
+	ata_sff_error_handler(ap);
 }
 
 #ifdef CONFIG_PM
@@ -2042,7 +1925,7 @@
 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
 {
 	if (qc->tf.protocol != ATA_PROT_NCQ) {
-		ata_qc_prep(qc);
+		ata_sff_qc_prep(qc);
 		return;
 	}
 
@@ -2104,8 +1987,8 @@
 	pp->dmafis_bits &= ~(1 << qc->tag);
 	pp->qc_active |= (0x1 << qc->tag);
 
-	ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
-	ap->ops->exec_command(ap, &qc->tf);
+	ap->ops->sff_tf_load(ap, &qc->tf);	 /* load tf registers */
+	ap->ops->sff_exec_command(ap, &qc->tf);
 
 	DPRINTK("Issued tag %u\n", qc->tag);
 
@@ -2118,7 +2001,7 @@
 	struct nv_swncq_port_priv *pp = ap->private_data;
 
 	if (qc->tf.protocol != ATA_PROT_NCQ)
-		return ata_qc_issue_prot(qc);
+		return ata_sff_qc_issue(qc);
 
 	DPRINTK("Enter\n");
 
@@ -2173,11 +2056,11 @@
 		ata_ehi_clear_desc(ehi);
 		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
 		ehi->err_mask |= AC_ERR_HOST_BUS;
-		ehi->action |= ATA_EH_SOFTRESET;
+		ehi->action |= ATA_EH_RESET;
 		return -EINVAL;
 	}
 
-	ap->ops->irq_clear(ap);
+	ap->ops->sff_irq_clear(ap);
 	__ata_bmdma_stop(ap);
 
 	sactive = readl(pp->sactive_block);
@@ -2188,7 +2071,7 @@
 		ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition"
 				  "(%08x->%08x)", pp->qc_active, sactive);
 		ehi->err_mask |= AC_ERR_HSM;
-		ehi->action |= ATA_EH_HARDRESET;
+		ehi->action |= ATA_EH_RESET;
 		return -EINVAL;
 	}
 	for (i = 0; i < ATA_MAX_QUEUE; i++) {
@@ -2299,7 +2182,7 @@
 	u8 ata_stat;
 	int rc = 0;
 
-	ata_stat = ap->ops->check_status(ap);
+	ata_stat = ap->ops->sff_check_status(ap);
 	nv_swncq_irq_clear(ap, fis);
 	if (!fis)
 		return;
@@ -2324,7 +2207,7 @@
 		ata_ehi_push_desc(ehi, "Ata error. fis:0x%X", fis);
 		ehi->err_mask |= AC_ERR_DEV;
 		ehi->serror |= serror;
-		ehi->action |= ATA_EH_SOFTRESET;
+		ehi->action |= ATA_EH_RESET;
 		ata_port_freeze(ap);
 		return;
 	}
@@ -2356,13 +2239,13 @@
 		if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
 			ata_ehi_push_desc(ehi, "illegal fis transaction");
 			ehi->err_mask |= AC_ERR_HSM;
-			ehi->action |= ATA_EH_HARDRESET;
+			ehi->action |= ATA_EH_RESET;
 			goto irq_error;
 		}
 
 		if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
 		    !(pp->ncq_flags & ncq_saw_dmas)) {
-			ata_stat = ap->ops->check_status(ap);
+			ata_stat = ap->ops->sff_check_status(ap);
 			if (ata_stat & ATA_BUSY)
 				goto irq_exit;
 
@@ -2429,6 +2312,7 @@
 {
 	static int printed_version;
 	const struct ata_port_info *ppi[] = { NULL, NULL };
+	struct nv_pi_priv *ipriv;
 	struct ata_host *host;
 	struct nv_host_priv *hpriv;
 	int rc;
@@ -2465,7 +2349,8 @@
 	}
 
 	ppi[0] = &nv_port_info[type];
-	rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
+	ipriv = ppi[0]->private_data;
+	rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
 	if (rc)
 		return rc;
 
@@ -2503,8 +2388,8 @@
 		nv_swncq_host_init(host);
 
 	pci_set_master(pdev);
-	return ata_host_activate(host, pdev->irq, ppi[0]->irq_handler,
-				 IRQF_SHARED, ppi[0]->sht);
+	return ata_host_activate(host, pdev->irq, ipriv->irq_handler,
+				 IRQF_SHARED, ipriv->sht);
 }
 
 #ifdef CONFIG_PM
@@ -2600,5 +2485,5 @@
 module_param_named(adma, adma_enabled, bool, 0444);
 MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
 module_param_named(swncq, swncq_enabled, bool, 0444);
-MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: false)");
+MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
 
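Reviewer note: the recurring change in the sata_nv hunks above is the collapse of ATA_EH_SOFTRESET/ATA_EH_HARDRESET into a single ATA_EH_RESET action bit, with nv_error_handler deleted because the reset choice now lives in ops-registered hooks consulted by the shared EH path. A minimal userspace model of that consolidation follows; every name is a local stand-in, not the kernel's, and the "prefer hardreset" policy is an assumption for the sketch (the real EH policy has more cases).

/* Toy model: callers set one "reset needed" bit; the core picks the
 * concrete reset method from whatever hooks the driver registered. */
#include <stdio.h>

#define TOY_EH_RESET (1 << 0)

struct toy_link {
	unsigned int eh_action;
	int (*hardreset)(struct toy_link *link);
	int (*softreset)(struct toy_link *link);
};

static int toy_eh_reset(struct toy_link *link)
{
	if (!(link->eh_action & TOY_EH_RESET))
		return 0;
	if (link->hardreset)		/* assumed policy for this sketch */
		return link->hardreset(link);
	if (link->softreset)
		return link->softreset(link);
	return -1;			/* no reset method available */
}

static int demo_hardreset(struct toy_link *link)
{
	(void)link;
	puts("hardreset");
	return 0;
}

int main(void)
{
	struct toy_link link = {
		.eh_action = TOY_EH_RESET,
		.hardreset = demo_hardreset,
	};
	return toy_eh_reset(&link);
}
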
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 11c1afe..5a10dc5 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -143,103 +143,57 @@
 static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);
 static int pdc_old_sata_check_atapi_dma(struct ata_queued_cmd *qc);
 static void pdc_irq_clear(struct ata_port *ap);
-static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc);
+static unsigned int pdc_qc_issue(struct ata_queued_cmd *qc);
 static void pdc_freeze(struct ata_port *ap);
 static void pdc_sata_freeze(struct ata_port *ap);
 static void pdc_thaw(struct ata_port *ap);
 static void pdc_sata_thaw(struct ata_port *ap);
-static void pdc_pata_error_handler(struct ata_port *ap);
-static void pdc_sata_error_handler(struct ata_port *ap);
+static void pdc_error_handler(struct ata_port *ap);
 static void pdc_post_internal_cmd(struct ata_queued_cmd *qc);
 static int pdc_pata_cable_detect(struct ata_port *ap);
 static int pdc_sata_cable_detect(struct ata_port *ap);
 
 static struct scsi_host_template pdc_ata_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
+	ATA_BASE_SHT(DRV_NAME),
 	.sg_tablesize		= PDC_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
 };
 
-static const struct ata_port_operations pdc_sata_ops = {
-	.tf_load		= pdc_tf_load_mmio,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= pdc_exec_command_mmio,
-	.dev_select		= ata_std_dev_select,
-	.check_atapi_dma	= pdc_check_atapi_dma,
+static const struct ata_port_operations pdc_common_ops = {
+	.inherits		= &ata_sff_port_ops,
 
+	.sff_tf_load		= pdc_tf_load_mmio,
+	.sff_exec_command	= pdc_exec_command_mmio,
+	.check_atapi_dma	= pdc_check_atapi_dma,
 	.qc_prep		= pdc_qc_prep,
-	.qc_issue		= pdc_qc_issue_prot,
+	.qc_issue		= pdc_qc_issue,
+	.sff_irq_clear		= pdc_irq_clear,
+
+	.post_internal_cmd	= pdc_post_internal_cmd,
+	.error_handler		= pdc_error_handler,
+};
+
+static struct ata_port_operations pdc_sata_ops = {
+	.inherits		= &pdc_common_ops,
+	.cable_detect		= pdc_sata_cable_detect,
 	.freeze			= pdc_sata_freeze,
 	.thaw			= pdc_sata_thaw,
-	.error_handler		= pdc_sata_error_handler,
-	.post_internal_cmd	= pdc_post_internal_cmd,
-	.cable_detect		= pdc_sata_cable_detect,
-	.data_xfer		= ata_data_xfer,
-	.irq_clear		= pdc_irq_clear,
-	.irq_on			= ata_irq_on,
-
 	.scr_read		= pdc_sata_scr_read,
 	.scr_write		= pdc_sata_scr_write,
 	.port_start		= pdc_sata_port_start,
 };
 
 /* First-generation chips need a more restrictive ->check_atapi_dma op */
-static const struct ata_port_operations pdc_old_sata_ops = {
-	.tf_load		= pdc_tf_load_mmio,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= pdc_exec_command_mmio,
-	.dev_select		= ata_std_dev_select,
+static struct ata_port_operations pdc_old_sata_ops = {
+	.inherits		= &pdc_sata_ops,
 	.check_atapi_dma	= pdc_old_sata_check_atapi_dma,
-
-	.qc_prep		= pdc_qc_prep,
-	.qc_issue		= pdc_qc_issue_prot,
-	.freeze			= pdc_sata_freeze,
-	.thaw			= pdc_sata_thaw,
-	.error_handler		= pdc_sata_error_handler,
-	.post_internal_cmd	= pdc_post_internal_cmd,
-	.cable_detect		= pdc_sata_cable_detect,
-	.data_xfer		= ata_data_xfer,
-	.irq_clear		= pdc_irq_clear,
-	.irq_on			= ata_irq_on,
-
-	.scr_read		= pdc_sata_scr_read,
-	.scr_write		= pdc_sata_scr_write,
-	.port_start		= pdc_sata_port_start,
 };
 
-static const struct ata_port_operations pdc_pata_ops = {
-	.tf_load		= pdc_tf_load_mmio,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= pdc_exec_command_mmio,
-	.dev_select		= ata_std_dev_select,
-	.check_atapi_dma	= pdc_check_atapi_dma,
-
-	.qc_prep		= pdc_qc_prep,
-	.qc_issue		= pdc_qc_issue_prot,
+static struct ata_port_operations pdc_pata_ops = {
+	.inherits		= &pdc_common_ops,
+	.cable_detect		= pdc_pata_cable_detect,
 	.freeze			= pdc_freeze,
 	.thaw			= pdc_thaw,
-	.error_handler		= pdc_pata_error_handler,
-	.post_internal_cmd	= pdc_post_internal_cmd,
-	.cable_detect		= pdc_pata_cable_detect,
-	.data_xfer		= ata_data_xfer,
-	.irq_clear		= pdc_irq_clear,
-	.irq_on			= ata_irq_on,
-
 	.port_start		= pdc_common_port_start,
 };
 
@@ -451,7 +405,7 @@
 	u8 *cdb = qc->cdb;
 	struct pdc_port_priv *pp = ap->private_data;
 	u8 *buf = pp->pkt;
-	u32 *buf32 = (u32 *) buf;
+	__le32 *buf32 = (__le32 *) buf;
 	unsigned int dev_sel, feature;
 
 	/* set control bits (byte 0), zero delay seq id (byte 3),
@@ -738,24 +692,12 @@
 	readl(host_mmio + hotplug_offset); /* flush */
 }
 
-static void pdc_common_error_handler(struct ata_port *ap, ata_reset_fn_t hardreset)
+static void pdc_error_handler(struct ata_port *ap)
 {
 	if (!(ap->pflags & ATA_PFLAG_FROZEN))
 		pdc_reset_port(ap);
 
-	/* perform recovery */
-	ata_do_eh(ap, ata_std_prereset, ata_std_softreset, hardreset,
-		  ata_std_postreset);
-}
-
-static void pdc_pata_error_handler(struct ata_port *ap)
-{
-	pdc_common_error_handler(ap, NULL);
-}
-
-static void pdc_sata_error_handler(struct ata_port *ap)
-{
-	pdc_common_error_handler(ap, sata_std_hardreset);
+	ata_std_error_handler(ap);
 }
 
 static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
@@ -952,7 +894,7 @@
 	readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); /* flush */
 }
 
-static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc)
+static unsigned int pdc_qc_issue(struct ata_queued_cmd *qc)
 {
 	switch (qc->tf.protocol) {
 	case ATAPI_PROT_NODATA:
@@ -972,20 +914,20 @@
 		break;
 	}
 
-	return ata_qc_issue_prot(qc);
+	return ata_sff_qc_issue(qc);
 }
 
 static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
 {
 	WARN_ON(tf->protocol == ATA_PROT_DMA || tf->protocol == ATAPI_PROT_DMA);
-	ata_tf_load(ap, tf);
+	ata_sff_tf_load(ap, tf);
 }
 
 static void pdc_exec_command_mmio(struct ata_port *ap,
 				  const struct ata_taskfile *tf)
 {
 	WARN_ON(tf->protocol == ATA_PROT_DMA || tf->protocol == ATAPI_PROT_DMA);
-	ata_exec_command(ap, tf);
+	ata_sff_exec_command(ap, tf);
 }
 
 static int pdc_check_atapi_dma(struct ata_queued_cmd *qc)
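
Reviewer note: the sata_promise conversion above is the clearest example of the new ->inherits mechanism; three near-identical ops tables shrink to pdc_common_ops plus small per-flavor tables that only name their deltas. The sketch below models how unset slots get filled from the ancestor chain; the types and resolver are simplified stand-ins for libata's ata_port_operations and its finalize step, not the real code.

/* Toy ->inherits chaining: leave slots NULL, name a parent, and a
 * finalize pass copies every still-unset slot from the nearest
 * ancestor that provides it. */
#include <stdio.h>

struct toy_ops {
	const struct toy_ops *inherits;
	void (*freeze)(void);
	void (*error_handler)(void);
};

static void toy_finalize_ops(struct toy_ops *ops)
{
	const struct toy_ops *p;

	/* nearest ancestor wins for any slot still unset */
	for (p = ops->inherits; p; p = p->inherits) {
		if (!ops->freeze)
			ops->freeze = p->freeze;
		if (!ops->error_handler)
			ops->error_handler = p->error_handler;
	}
}

static void common_eh(void)   { puts("common error_handler"); }
static void sata_freeze(void) { puts("sata-specific freeze"); }

static const struct toy_ops common_ops = {
	.error_handler = common_eh,
};

static struct toy_ops sata_ops = {
	.inherits = &common_ops,
	.freeze   = sata_freeze,	/* local override */
};

int main(void)
{
	toy_finalize_ops(&sata_ops);
	sata_ops.freeze();		/* from sata_ops itself */
	sata_ops.error_handler();	/* inherited from common_ops */
	return 0;
}
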
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index 91cc12c..1600107 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -121,50 +121,38 @@
 static int qs_check_atapi_dma(struct ata_queued_cmd *qc);
 static void qs_bmdma_stop(struct ata_queued_cmd *qc);
 static u8 qs_bmdma_status(struct ata_port *ap);
-static void qs_irq_clear(struct ata_port *ap);
 static void qs_freeze(struct ata_port *ap);
 static void qs_thaw(struct ata_port *ap);
+static int qs_prereset(struct ata_link *link, unsigned long deadline);
 static void qs_error_handler(struct ata_port *ap);
 
 static struct scsi_host_template qs_ata_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
+	ATA_BASE_SHT(DRV_NAME),
 	.sg_tablesize		= QS_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ENABLE_CLUSTERING,
-	.proc_name		= DRV_NAME,
 	.dma_boundary		= QS_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
 };
 
-static const struct ata_port_operations qs_ata_ops = {
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
+static struct ata_port_operations qs_ata_ops = {
+	.inherits		= &ata_sff_port_ops,
+
 	.check_atapi_dma	= qs_check_atapi_dma,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
-	.qc_prep		= qs_qc_prep,
-	.qc_issue		= qs_qc_issue,
-	.data_xfer		= ata_data_xfer,
-	.freeze			= qs_freeze,
-	.thaw			= qs_thaw,
-	.error_handler		= qs_error_handler,
-	.irq_clear		= qs_irq_clear,
-	.irq_on			= ata_irq_on,
-	.scr_read		= qs_scr_read,
-	.scr_write		= qs_scr_write,
-	.port_start		= qs_port_start,
-	.host_stop		= qs_host_stop,
 	.bmdma_stop		= qs_bmdma_stop,
 	.bmdma_status		= qs_bmdma_status,
+	.qc_prep		= qs_qc_prep,
+	.qc_issue		= qs_qc_issue,
+
+	.freeze			= qs_freeze,
+	.thaw			= qs_thaw,
+	.prereset		= qs_prereset,
+	.softreset		= ATA_OP_NULL,
+	.error_handler		= qs_error_handler,
+	.post_internal_cmd	= ATA_OP_NULL,
+
+	.scr_read		= qs_scr_read,
+	.scr_write		= qs_scr_write,
+
+	.port_start		= qs_port_start,
+	.host_stop		= qs_host_stop,
 };
 
 static const struct ata_port_info qs_port_info[] = {
@@ -211,11 +199,6 @@
 	return 0;
 }
 
-static void qs_irq_clear(struct ata_port *ap)
-{
-	/* nothing */
-}
-
 static inline void qs_enter_reg_mode(struct ata_port *ap)
 {
 	u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);
@@ -256,7 +239,7 @@
 	struct ata_port *ap = link->ap;
 
 	qs_reset_channel_logic(ap);
-	return ata_std_prereset(link, deadline);
+	return ata_sff_prereset(link, deadline);
 }
 
 static int qs_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
@@ -270,8 +253,7 @@
 static void qs_error_handler(struct ata_port *ap)
 {
 	qs_enter_reg_mode(ap);
-	ata_do_eh(ap, qs_prereset, NULL, sata_std_hardreset,
-		  ata_std_postreset);
+	ata_std_error_handler(ap);
 }
 
 static int qs_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
@@ -321,7 +303,7 @@
 
 	qs_enter_reg_mode(qc->ap);
 	if (qc->tf.protocol != ATA_PROT_DMA) {
-		ata_qc_prep(qc);
+		ata_sff_qc_prep(qc);
 		return;
 	}
 
@@ -380,7 +362,7 @@
 	}
 
 	pp->state = qs_state_mmio;
-	return ata_qc_issue_prot(qc);
+	return ata_sff_qc_issue(qc);
 }
 
 static void qs_do_or_die(struct ata_queued_cmd *qc, u8 status)
@@ -469,7 +451,7 @@
 				 * and pretend we knew it was ours.. (ugh).
 				 * This does not affect packet mode.
 				 */
-				ata_check_status(ap);
+				ata_sff_check_status(ap);
 				handled = 1;
 				continue;
 			}
@@ -477,7 +459,7 @@
 			if (!pp || pp->state != qs_state_mmio)
 				continue;
 			if (!(qc->tf.flags & ATA_TFLAG_POLLING))
-				handled |= ata_host_intr(ap, qc);
+				handled |= ata_sff_host_intr(ap, qc);
 		}
 	}
 	return handled;
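
Reviewer note: qs_ata_ops above sets .softreset and .post_internal_cmd to ATA_OP_NULL rather than leaving them NULL. With ->inherits, NULL means "fill me from the parent", so a distinct sentinel is needed to mask an inherited method outright. The toy below continues the stand-in model from the sata_promise note; the sentinel mirrors the kernel's cast-of-minus-one style, but the real definition is in include/linux/libata.h.

/* A child masks the parent's softreset by installing the sentinel,
 * which the resolver then collapses back to "really none". */
#include <stddef.h>

typedef int (*toy_reset_fn)(void);

#define TOY_OP_NULL ((toy_reset_fn)(unsigned long)-1)	/* "explicitly none" */

struct toy_reset_ops {
	const struct toy_reset_ops *inherits;
	toy_reset_fn softreset;
};

static void toy_resolve(struct toy_reset_ops *ops)
{
	if (!ops->softreset && ops->inherits)
		ops->softreset = ops->inherits->softreset;
	if (ops->softreset == TOY_OP_NULL)
		ops->softreset = NULL;	/* masked despite the parent */
}

static int parent_softreset(void) { return 0; }

int main(void)
{
	struct toy_reset_ops parent = { .softreset = parent_softreset };
	struct toy_reset_ops child  = { .inherits  = &parent,
					.softreset = TOY_OP_NULL };

	toy_resolve(&child);
	return child.softreset ? 1 : 0;	/* 0: parent's hook masked */
}
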
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 0b8191b..88bf421 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -60,7 +60,6 @@
 
 	SIL_DFL_PORT_FLAGS	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
 				  ATA_FLAG_MMIO,
-	SIL_DFL_LINK_FLAGS	= ATA_LFLAG_HRST_TO_RESUME,
 
 	/*
 	 * Controller IDs
@@ -168,54 +167,23 @@
 };
 
 static struct scsi_host_template sil_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
 };
 
-static const struct ata_port_operations sil_ops = {
+static struct ata_port_operations sil_ops = {
+	.inherits		= &ata_bmdma_port_ops,
 	.dev_config		= sil_dev_config,
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
 	.set_mode		= sil_set_mode,
-	.bmdma_setup            = ata_bmdma_setup,
-	.bmdma_start            = ata_bmdma_start,
-	.bmdma_stop		= ata_bmdma_stop,
-	.bmdma_status		= ata_bmdma_status,
-	.qc_prep		= ata_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
-	.data_xfer		= ata_data_xfer,
 	.freeze			= sil_freeze,
 	.thaw			= sil_thaw,
-	.error_handler		= ata_bmdma_error_handler,
-	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
-	.irq_clear		= ata_bmdma_irq_clear,
-	.irq_on			= ata_irq_on,
 	.scr_read		= sil_scr_read,
 	.scr_write		= sil_scr_write,
-	.port_start		= ata_port_start,
 };
 
 static const struct ata_port_info sil_port_info[] = {
 	/* sil_3112 */
 	{
 		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE,
-		.link_flags	= SIL_DFL_LINK_FLAGS,
 		.pio_mask	= 0x1f,			/* pio0-4 */
 		.mwdma_mask	= 0x07,			/* mwdma0-2 */
 		.udma_mask	= ATA_UDMA5,
@@ -225,7 +193,6 @@
 	{
 		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE |
 				  SIL_FLAG_NO_SATA_IRQ,
-		.link_flags	= SIL_DFL_LINK_FLAGS,
 		.pio_mask	= 0x1f,			/* pio0-4 */
 		.mwdma_mask	= 0x07,			/* mwdma0-2 */
 		.udma_mask	= ATA_UDMA5,
@@ -234,7 +201,6 @@
 	/* sil_3512 */
 	{
 		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
-		.link_flags	= SIL_DFL_LINK_FLAGS,
 		.pio_mask	= 0x1f,			/* pio0-4 */
 		.mwdma_mask	= 0x07,			/* mwdma0-2 */
 		.udma_mask	= ATA_UDMA5,
@@ -243,7 +209,6 @@
 	/* sil_3114 */
 	{
 		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
-		.link_flags	= SIL_DFL_LINK_FLAGS,
 		.pio_mask	= 0x1f,			/* pio0-4 */
 		.mwdma_mask	= 0x07,			/* mwdma0-2 */
 		.udma_mask	= ATA_UDMA5,
@@ -404,7 +369,7 @@
 
 	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
 		/* this sometimes happens, just clear IRQ */
-		ata_chk_status(ap);
+		ap->ops->sff_check_status(ap);
 		return;
 	}
 
@@ -440,15 +405,15 @@
 	}
 
 	/* check main status, clearing INTRQ */
-	status = ata_chk_status(ap);
+	status = ap->ops->sff_check_status(ap);
 	if (unlikely(status & ATA_BUSY))
 		goto err_hsm;
 
 	/* ack bmdma irq events */
-	ata_bmdma_irq_clear(ap);
+	ata_sff_irq_clear(ap);
 
 	/* kick HSM in the ass */
-	ata_hsm_move(ap, qc, status, 0);
+	ata_sff_hsm_move(ap, qc, status, 0);
 
 	if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
 		ata_ehi_push_desc(ehi, "BMDMA2 stat 0x%x", bmdma2);
@@ -515,8 +480,8 @@
 	u32 tmp;
 
 	/* clear IRQ */
-	ata_chk_status(ap);
-	ata_bmdma_irq_clear(ap);
+	ap->ops->sff_check_status(ap);
+	ata_sff_irq_clear(ap);
 
 	/* turn on SATA IRQ if supported */
 	if (!(ap->flags & SIL_FLAG_NO_SATA_IRQ))
@@ -690,7 +655,7 @@
 		ioaddr->ctl_addr = mmio_base + sil_port[i].ctl;
 		ioaddr->bmdma_addr = mmio_base + sil_port[i].bmdma;
 		ioaddr->scr_addr = mmio_base + sil_port[i].scr;
-		ata_std_ports(ioaddr);
+		ata_sff_std_ports(ioaddr);
 
 		ata_port_pbar_desc(ap, SIL_MMIO_BAR, -1, "mmio");
 		ata_port_pbar_desc(ap, SIL_MMIO_BAR, sil_port[i].tf, "tf");
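
Reviewer note: the ATA_BASE_SHT()/ATA_BMDMA_SHT()/ATA_NCQ_SHT() macros replacing the scsi_host_template boilerplate above expand to a run of designated initializers, so a driver can append its own fields after the macro and can even re-specify one the macro already set (sata_sil24 below re-initializes .can_queue), relying on C99's rule that the last designated initializer for a member wins. The exact macro contents live in include/linux/libata.h; the struct and macro below are toys demonstrating the mechanism.

/* Duplicate designated initializers are legal C99: the later one
 * replaces the earlier, so macro defaults can be selectively
 * overridden (GCC only warns with -Woverride-init). */
#include <stdio.h>

struct toy_sht {
	const char *name;
	int can_queue;
	int sg_tablesize;
};

#define TOY_BASE_SHT(drv_name)		\
	.name = (drv_name),		\
	.can_queue = 32			/* default, like ATA_DEF_QUEUE */

static struct toy_sht sil24_style_sht = {
	TOY_BASE_SHT("toy24"),
	.can_queue	= 31,		/* override a macro-set default */
	.sg_tablesize	= 22,		/* plain addition after the macro */
};

int main(void)
{
	printf("%s: can_queue=%d sg=%d\n", sil24_style_sht.name,
	       sil24_style_sht.can_queue, sil24_style_sht.sg_tablesize);
	return 0;
}
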
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index df7988d..27a1101 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -254,7 +254,6 @@
 				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
 				  ATA_FLAG_NCQ | ATA_FLAG_ACPI_SATA |
 				  ATA_FLAG_AN | ATA_FLAG_PMP,
-	SIL24_COMMON_LFLAGS	= ATA_LFLAG_SKIP_D2H_BSY,
 	SIL24_FLAG_PCIX_IRQ_WOC	= (1 << 24), /* IRQ loss errata on PCI-X */
 
 	IRQ_STAT_4PORTS		= 0xf,
@@ -286,45 +285,45 @@
 				    "device error via D2H FIS" },
 	[PORT_CERR_SDB]		= { AC_ERR_DEV, 0,
 				    "device error via SDB FIS" },
-	[PORT_CERR_DATA]	= { AC_ERR_ATA_BUS, ATA_EH_SOFTRESET,
+	[PORT_CERR_DATA]	= { AC_ERR_ATA_BUS, ATA_EH_RESET,
 				    "error in data FIS" },
-	[PORT_CERR_SEND]	= { AC_ERR_ATA_BUS, ATA_EH_SOFTRESET,
+	[PORT_CERR_SEND]	= { AC_ERR_ATA_BUS, ATA_EH_RESET,
 				    "failed to transmit command FIS" },
-	[PORT_CERR_INCONSISTENT] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
+	[PORT_CERR_INCONSISTENT] = { AC_ERR_HSM, ATA_EH_RESET,
 				     "protocol mismatch" },
-	[PORT_CERR_DIRECTION]	= { AC_ERR_HSM, ATA_EH_SOFTRESET,
+	[PORT_CERR_DIRECTION]	= { AC_ERR_HSM, ATA_EH_RESET,
 				    "data directon mismatch" },
-	[PORT_CERR_UNDERRUN]	= { AC_ERR_HSM, ATA_EH_SOFTRESET,
+	[PORT_CERR_UNDERRUN]	= { AC_ERR_HSM, ATA_EH_RESET,
 				    "ran out of SGEs while writing" },
-	[PORT_CERR_OVERRUN]	= { AC_ERR_HSM, ATA_EH_SOFTRESET,
+	[PORT_CERR_OVERRUN]	= { AC_ERR_HSM, ATA_EH_RESET,
 				    "ran out of SGEs while reading" },
-	[PORT_CERR_PKT_PROT]	= { AC_ERR_HSM, ATA_EH_SOFTRESET,
+	[PORT_CERR_PKT_PROT]	= { AC_ERR_HSM, ATA_EH_RESET,
 				    "invalid data directon for ATAPI CDB" },
-	[PORT_CERR_SGT_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_SOFTRESET,
+	[PORT_CERR_SGT_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_RESET,
 				     "SGT not on qword boundary" },
-	[PORT_CERR_SGT_TGTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
+	[PORT_CERR_SGT_TGTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
 				    "PCI target abort while fetching SGT" },
-	[PORT_CERR_SGT_MSTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
+	[PORT_CERR_SGT_MSTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
 				    "PCI master abort while fetching SGT" },
-	[PORT_CERR_SGT_PCIPERR]	= { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
+	[PORT_CERR_SGT_PCIPERR]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
 				    "PCI parity error while fetching SGT" },
-	[PORT_CERR_CMD_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_SOFTRESET,
+	[PORT_CERR_CMD_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_RESET,
 				     "PRB not on qword boundary" },
-	[PORT_CERR_CMD_TGTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
+	[PORT_CERR_CMD_TGTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
 				    "PCI target abort while fetching PRB" },
-	[PORT_CERR_CMD_MSTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
+	[PORT_CERR_CMD_MSTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
 				    "PCI master abort while fetching PRB" },
-	[PORT_CERR_CMD_PCIPERR]	= { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
+	[PORT_CERR_CMD_PCIPERR]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
 				    "PCI parity error while fetching PRB" },
-	[PORT_CERR_XFR_UNDEF]	= { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
+	[PORT_CERR_XFR_UNDEF]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
 				    "undefined error while transferring data" },
-	[PORT_CERR_XFR_TGTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
+	[PORT_CERR_XFR_TGTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
 				    "PCI target abort while transferring data" },
-	[PORT_CERR_XFR_MSTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
+	[PORT_CERR_XFR_MSTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
 				    "PCI master abort while transferring data" },
-	[PORT_CERR_XFR_PCIPERR]	= { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
+	[PORT_CERR_XFR_PCIPERR]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
 				    "PCI parity error while transferring data" },
-	[PORT_CERR_SENDSERVICE]	= { AC_ERR_HSM, ATA_EH_SOFTRESET,
+	[PORT_CERR_SENDSERVICE]	= { AC_ERR_HSM, ATA_EH_RESET,
 				    "FIS received while sending service FIS" },
 };
 
@@ -337,23 +336,26 @@
 struct sil24_port_priv {
 	union sil24_cmd_block *cmd_block;	/* 32 cmd blocks */
 	dma_addr_t cmd_block_dma;		/* DMA base addr for them */
-	struct ata_taskfile tf;			/* Cached taskfile registers */
 	int do_port_rst;
 };
 
 static void sil24_dev_config(struct ata_device *dev);
-static u8 sil24_check_status(struct ata_port *ap);
 static int sil24_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val);
 static int sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val);
-static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
 static int sil24_qc_defer(struct ata_queued_cmd *qc);
 static void sil24_qc_prep(struct ata_queued_cmd *qc);
 static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
-static void sil24_irq_clear(struct ata_port *ap);
+static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc);
 static void sil24_pmp_attach(struct ata_port *ap);
 static void sil24_pmp_detach(struct ata_port *ap);
 static void sil24_freeze(struct ata_port *ap);
 static void sil24_thaw(struct ata_port *ap);
+static int sil24_softreset(struct ata_link *link, unsigned int *class,
+			   unsigned long deadline);
+static int sil24_hardreset(struct ata_link *link, unsigned int *class,
+			   unsigned long deadline);
+static int sil24_pmp_hardreset(struct ata_link *link, unsigned int *class,
+			       unsigned long deadline);
 static void sil24_error_handler(struct ata_port *ap);
 static void sil24_post_internal_cmd(struct ata_queued_cmd *qc);
 static int sil24_port_start(struct ata_port *ap);
@@ -386,52 +388,36 @@
 };
 
 static struct scsi_host_template sil24_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.change_queue_depth	= ata_scsi_change_queue_depth,
+	ATA_NCQ_SHT(DRV_NAME),
 	.can_queue		= SIL24_MAX_CMDS,
-	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= SIL24_MAX_SGE,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
 };
 
-static const struct ata_port_operations sil24_ops = {
-	.dev_config		= sil24_dev_config,
-
-	.check_status		= sil24_check_status,
-	.check_altstatus	= sil24_check_status,
-	.dev_select		= ata_noop_dev_select,
-
-	.tf_read		= sil24_tf_read,
+static struct ata_port_operations sil24_ops = {
+	.inherits		= &sata_pmp_port_ops,
 
 	.qc_defer		= sil24_qc_defer,
 	.qc_prep		= sil24_qc_prep,
 	.qc_issue		= sil24_qc_issue,
-
-	.irq_clear		= sil24_irq_clear,
-
-	.scr_read		= sil24_scr_read,
-	.scr_write		= sil24_scr_write,
-
-	.pmp_attach		= sil24_pmp_attach,
-	.pmp_detach		= sil24_pmp_detach,
+	.qc_fill_rtf		= sil24_qc_fill_rtf,
 
 	.freeze			= sil24_freeze,
 	.thaw			= sil24_thaw,
+	.softreset		= sil24_softreset,
+	.hardreset		= sil24_hardreset,
+	.pmp_softreset		= sil24_softreset,
+	.pmp_hardreset		= sil24_pmp_hardreset,
 	.error_handler		= sil24_error_handler,
 	.post_internal_cmd	= sil24_post_internal_cmd,
+	.dev_config		= sil24_dev_config,
+
+	.scr_read		= sil24_scr_read,
+	.scr_write		= sil24_scr_write,
+	.pmp_attach		= sil24_pmp_attach,
+	.pmp_detach		= sil24_pmp_detach,
 
 	.port_start		= sil24_port_start,
-
 #ifdef CONFIG_PM
 	.port_resume		= sil24_port_resume,
 #endif
@@ -449,7 +435,6 @@
 	{
 		.flags		= SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(4) |
 				  SIL24_FLAG_PCIX_IRQ_WOC,
-		.link_flags	= SIL24_COMMON_LFLAGS,
 		.pio_mask	= 0x1f,			/* pio0-4 */
 		.mwdma_mask	= 0x07,			/* mwdma0-2 */
 		.udma_mask	= ATA_UDMA5,		/* udma0-5 */
@@ -458,7 +443,6 @@
 	/* sil_3132 */
 	{
 		.flags		= SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(2),
-		.link_flags	= SIL24_COMMON_LFLAGS,
 		.pio_mask	= 0x1f,			/* pio0-4 */
 		.mwdma_mask	= 0x07,			/* mwdma0-2 */
 		.udma_mask	= ATA_UDMA5,		/* udma0-5 */
@@ -467,7 +451,6 @@
 	/* sil_3131/sil_3531 */
 	{
 		.flags		= SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(1),
-		.link_flags	= SIL24_COMMON_LFLAGS,
 		.pio_mask	= 0x1f,			/* pio0-4 */
 		.mwdma_mask	= 0x07,			/* mwdma0-2 */
 		.udma_mask	= ATA_UDMA5,		/* udma0-5 */
@@ -482,9 +465,19 @@
 	return tag;
 }
 
+static unsigned long sil24_port_offset(struct ata_port *ap)
+{
+	return ap->port_no * PORT_REGS_SIZE;
+}
+
+static void __iomem *sil24_port_base(struct ata_port *ap)
+{
+	return ap->host->iomap[SIL24_PORT_BAR] + sil24_port_offset(ap);
+}
+
 static void sil24_dev_config(struct ata_device *dev)
 {
-	void __iomem *port = dev->link->ap->ioaddr.cmd_addr;
+	void __iomem *port = sil24_port_base(dev->link->ap);
 
 	if (dev->cdb_len == 16)
 		writel(PORT_CS_CDB16, port + PORT_CTRL_STAT);
@@ -494,7 +487,7 @@
 
 static void sil24_read_tf(struct ata_port *ap, int tag, struct ata_taskfile *tf)
 {
-	void __iomem *port = ap->ioaddr.cmd_addr;
+	void __iomem *port = sil24_port_base(ap);
 	struct sil24_prb __iomem *prb;
 	u8 fis[6 * 4];
 
@@ -503,12 +496,6 @@
 	ata_tf_from_fis(fis, tf);
 }
 
-static u8 sil24_check_status(struct ata_port *ap)
-{
-	struct sil24_port_priv *pp = ap->private_data;
-	return pp->tf.command;
-}
-
 static int sil24_scr_map[] = {
 	[SCR_CONTROL]	= 0,
 	[SCR_STATUS]	= 1,
@@ -518,7 +505,7 @@
 
 static int sil24_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val)
 {
-	void __iomem *scr_addr = ap->ioaddr.scr_addr;
+	void __iomem *scr_addr = sil24_port_base(ap) + PORT_SCONTROL;
 
 	if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
 		void __iomem *addr;
@@ -531,7 +518,7 @@
 
 static int sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val)
 {
-	void __iomem *scr_addr = ap->ioaddr.scr_addr;
+	void __iomem *scr_addr = sil24_port_base(ap) + PORT_SCONTROL;
 
 	if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
 		void __iomem *addr;
@@ -542,15 +529,9 @@
 	return -EINVAL;
 }
 
-static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
-{
-	struct sil24_port_priv *pp = ap->private_data;
-	*tf = pp->tf;
-}
-
 static void sil24_config_port(struct ata_port *ap)
 {
-	void __iomem *port = ap->ioaddr.cmd_addr;
+	void __iomem *port = sil24_port_base(ap);
 
 	/* configure IRQ WoC */
 	if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
@@ -575,7 +556,7 @@
 
 static void sil24_config_pmp(struct ata_port *ap, int attached)
 {
-	void __iomem *port = ap->ioaddr.cmd_addr;
+	void __iomem *port = sil24_port_base(ap);
 
 	if (attached)
 		writel(PORT_CS_PMP_EN, port + PORT_CTRL_STAT);
@@ -585,7 +566,7 @@
 
 static void sil24_clear_pmp(struct ata_port *ap)
 {
-	void __iomem *port = ap->ioaddr.cmd_addr;
+	void __iomem *port = sil24_port_base(ap);
 	int i;
 
 	writel(PORT_CS_PMP_RESUME, port + PORT_CTRL_CLR);
@@ -600,12 +581,12 @@
 
 static int sil24_init_port(struct ata_port *ap)
 {
-	void __iomem *port = ap->ioaddr.cmd_addr;
+	void __iomem *port = sil24_port_base(ap);
 	struct sil24_port_priv *pp = ap->private_data;
 	u32 tmp;
 
 	/* clear PMP error status */
-	if (ap->nr_pmp_links)
+	if (sata_pmp_attached(ap))
 		sil24_clear_pmp(ap);
 
 	writel(PORT_CS_INIT, port + PORT_CTRL_STAT);
@@ -616,7 +597,7 @@
 
 	if ((tmp & (PORT_CS_INIT | PORT_CS_RDY)) != PORT_CS_RDY) {
 		pp->do_port_rst = 1;
-		ap->link.eh_context.i.action |= ATA_EH_HARDRESET;
+		ap->link.eh_context.i.action |= ATA_EH_RESET;
 		return -EIO;
 	}
 
@@ -628,7 +609,7 @@
 				 int is_cmd, u32 ctrl,
 				 unsigned long timeout_msec)
 {
-	void __iomem *port = ap->ioaddr.cmd_addr;
+	void __iomem *port = sil24_port_base(ap);
 	struct sil24_port_priv *pp = ap->private_data;
 	struct sil24_prb *prb = &pp->cmd_block[0].ata.prb;
 	dma_addr_t paddr = pp->cmd_block_dma;
@@ -670,10 +651,11 @@
 	return rc;
 }
 
-static int sil24_do_softreset(struct ata_link *link, unsigned int *class,
-			      int pmp, unsigned long deadline)
+static int sil24_softreset(struct ata_link *link, unsigned int *class,
+			   unsigned long deadline)
 {
 	struct ata_port *ap = link->ap;
+	int pmp = sata_srst_pmp(link);
 	unsigned long timeout_msec = 0;
 	struct ata_taskfile tf;
 	const char *reason;
@@ -681,12 +663,6 @@
 
 	DPRINTK("ENTER\n");
 
-	if (ata_link_offline(link)) {
-		DPRINTK("PHY reports no device\n");
-		*class = ATA_DEV_NONE;
-		goto out;
-	}
-
 	/* put the port into known state */
 	if (sil24_init_port(ap)) {
 		reason = "port not ready";
@@ -711,10 +687,6 @@
 	sil24_read_tf(ap, 0, &tf);
 	*class = ata_dev_classify(&tf);
 
-	if (*class == ATA_DEV_UNKNOWN)
-		*class = ATA_DEV_NONE;
-
- out:
 	DPRINTK("EXIT, class=%u\n", *class);
 	return 0;
 
@@ -723,17 +695,11 @@
 	return -EIO;
 }
 
-static int sil24_softreset(struct ata_link *link, unsigned int *class,
-			   unsigned long deadline)
-{
-	return sil24_do_softreset(link, class, SATA_PMP_CTRL_PORT, deadline);
-}
-
 static int sil24_hardreset(struct ata_link *link, unsigned int *class,
 			   unsigned long deadline)
 {
 	struct ata_port *ap = link->ap;
-	void __iomem *port = ap->ioaddr.cmd_addr;
+	void __iomem *port = sil24_port_base(ap);
 	struct sil24_port_priv *pp = ap->private_data;
 	int did_port_rst = 0;
 	const char *reason;
@@ -911,7 +877,7 @@
 {
 	struct ata_port *ap = qc->ap;
 	struct sil24_port_priv *pp = ap->private_data;
-	void __iomem *port = ap->ioaddr.cmd_addr;
+	void __iomem *port = sil24_port_base(ap);
 	unsigned int tag = sil24_tag(qc->tag);
 	dma_addr_t paddr;
 	void __iomem *activate;
@@ -925,9 +891,10 @@
 	return 0;
 }
 
-static void sil24_irq_clear(struct ata_port *ap)
+static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc)
 {
-	/* unused */
+	sil24_read_tf(qc->ap, qc->tag, &qc->result_tf);
+	return true;
 }
 
 static void sil24_pmp_attach(struct ata_port *ap)
@@ -942,12 +909,6 @@
 	sil24_config_pmp(ap, 0);
 }
 
-static int sil24_pmp_softreset(struct ata_link *link, unsigned int *class,
-			       unsigned long deadline)
-{
-	return sil24_do_softreset(link, class, link->pmp, deadline);
-}
-
 static int sil24_pmp_hardreset(struct ata_link *link, unsigned int *class,
 			       unsigned long deadline)
 {
@@ -960,12 +921,12 @@
 		return rc;
 	}
 
-	return sata_pmp_std_hardreset(link, class, deadline);
+	return sata_std_hardreset(link, class, deadline);
 }
 
 static void sil24_freeze(struct ata_port *ap)
 {
-	void __iomem *port = ap->ioaddr.cmd_addr;
+	void __iomem *port = sil24_port_base(ap);
 
 	/* Port-wide IRQ mask in HOST_CTRL doesn't really work, clear
 	 * PORT_IRQ_ENABLE instead.
@@ -975,7 +936,7 @@
 
 static void sil24_thaw(struct ata_port *ap)
 {
-	void __iomem *port = ap->ioaddr.cmd_addr;
+	void __iomem *port = sil24_port_base(ap);
 	u32 tmp;
 
 	/* clear IRQ */
@@ -988,7 +949,7 @@
 
 static void sil24_error_intr(struct ata_port *ap)
 {
-	void __iomem *port = ap->ioaddr.cmd_addr;
+	void __iomem *port = sil24_port_base(ap);
 	struct sil24_port_priv *pp = ap->private_data;
 	struct ata_queued_cmd *qc = NULL;
 	struct ata_link *link;
@@ -1022,7 +983,7 @@
 
 	if (irq_stat & PORT_IRQ_UNK_FIS) {
 		ehi->err_mask |= AC_ERR_HSM;
-		ehi->action |= ATA_EH_SOFTRESET;
+		ehi->action |= ATA_EH_RESET;
 		ata_ehi_push_desc(ehi, "unknown FIS");
 		freeze = 1;
 	}
@@ -1043,14 +1004,14 @@
 		 */
 		if (ap->nr_active_links >= 3) {
 			ehi->err_mask |= AC_ERR_OTHER;
-			ehi->action |= ATA_EH_HARDRESET;
+			ehi->action |= ATA_EH_RESET;
 			ata_ehi_push_desc(ehi, "PMP DMA CS errata");
 			pp->do_port_rst = 1;
 			freeze = 1;
 		}
 
 		/* find out the offending link and qc */
-		if (ap->nr_pmp_links) {
+		if (sata_pmp_attached(ap)) {
 			context = readl(port + PORT_CONTEXT);
 			pmp = (context >> 5) & 0xf;
 
@@ -1064,7 +1025,7 @@
 						  irq_stat);
 			} else {
 				err_mask |= AC_ERR_HSM;
-				action |= ATA_EH_HARDRESET;
+				action |= ATA_EH_RESET;
 				freeze = 1;
 			}
 		} else
@@ -1078,28 +1039,27 @@
 		if (ci && ci->desc) {
 			err_mask |= ci->err_mask;
 			action |= ci->action;
-			if (action & ATA_EH_RESET_MASK)
+			if (action & ATA_EH_RESET)
 				freeze = 1;
 			ata_ehi_push_desc(ehi, "%s", ci->desc);
 		} else {
 			err_mask |= AC_ERR_OTHER;
-			action |= ATA_EH_SOFTRESET;
+			action |= ATA_EH_RESET;
 			freeze = 1;
 			ata_ehi_push_desc(ehi, "unknown command error %d",
 					  cerr);
 		}
 
 		/* record error info */
-		if (qc) {
-			sil24_read_tf(ap, qc->tag, &pp->tf);
+		if (qc)
 			qc->err_mask |= err_mask;
-		} else
+		else
 			ehi->err_mask |= err_mask;
 
 		ehi->action |= action;
 
 		/* if PMP, resume */
-		if (ap->nr_pmp_links)
+		if (sata_pmp_attached(ap))
 			writel(PORT_CS_PMP_RESUME, port + PORT_CTRL_STAT);
 	}
 
@@ -1114,18 +1074,9 @@
 	}
 }
 
-static void sil24_finish_qc(struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
-	struct sil24_port_priv *pp = ap->private_data;
-
-	if (qc->flags & ATA_QCFLAG_RESULT_TF)
-		sil24_read_tf(ap, qc->tag, &pp->tf);
-}
-
 static inline void sil24_host_intr(struct ata_port *ap)
 {
-	void __iomem *port = ap->ioaddr.cmd_addr;
+	void __iomem *port = sil24_port_base(ap);
 	u32 slot_stat, qc_active;
 	int rc;
 
@@ -1147,13 +1098,13 @@
 	}
 
 	qc_active = slot_stat & ~HOST_SSTAT_ATTN;
-	rc = ata_qc_complete_multiple(ap, qc_active, sil24_finish_qc);
+	rc = ata_qc_complete_multiple(ap, qc_active);
 	if (rc > 0)
 		return;
 	if (rc < 0) {
 		struct ata_eh_info *ehi = &ap->link.eh_info;
 		ehi->err_mask |= AC_ERR_HSM;
-		ehi->action |= ATA_EH_SOFTRESET;
+		ehi->action |= ATA_EH_RESET;
 		ata_port_freeze(ap);
 		return;
 	}
@@ -1209,11 +1160,7 @@
 	if (sil24_init_port(ap))
 		ata_eh_freeze_port(ap);
 
-	/* perform recovery */
-	sata_pmp_do_eh(ap, ata_std_prereset, sil24_softreset, sil24_hardreset,
-		       ata_std_postreset, sata_pmp_std_prereset,
-		       sil24_pmp_softreset, sil24_pmp_hardreset,
-		       sata_pmp_std_postreset);
+	sata_pmp_error_handler(ap);
 
 	pp->do_port_rst = 0;
 }
@@ -1239,8 +1186,6 @@
 	if (!pp)
 		return -ENOMEM;
 
-	pp->tf.command = ATA_DRDY;
-
 	cb = dmam_alloc_coherent(dev, cb_size, &cb_dma, GFP_KERNEL);
 	if (!cb)
 		return -ENOMEM;
@@ -1251,6 +1196,9 @@
 
 	ap->private_data = pp;
 
+	ata_port_pbar_desc(ap, SIL24_HOST_BAR, -1, "host");
+	ata_port_pbar_desc(ap, SIL24_PORT_BAR, sil24_port_offset(ap), "port");
+
 	return 0;
 }
 
@@ -1269,7 +1217,8 @@
 	/* init ports */
 	for (i = 0; i < host->n_ports; i++) {
 		struct ata_port *ap = host->ports[i];
-		void __iomem *port = ap->ioaddr.cmd_addr;
+		void __iomem *port = sil24_port_base(ap);
+
 
 		/* Initial PHY setting */
 		writel(0x20c, port + PORT_PHY_CFG);
@@ -1302,7 +1251,7 @@
 	const struct ata_port_info *ppi[] = { &pi, NULL };
 	void __iomem * const *iomap;
 	struct ata_host *host;
-	int i, rc;
+	int rc;
 	u32 tmp;
 
 	/* cause link error if sil24_cmd_block is sized wrongly */
@@ -1342,18 +1291,6 @@
 		return -ENOMEM;
 	host->iomap = iomap;
 
-	for (i = 0; i < host->n_ports; i++) {
-		struct ata_port *ap = host->ports[i];
-		size_t offset = ap->port_no * PORT_REGS_SIZE;
-		void __iomem *port = iomap[SIL24_PORT_BAR] + offset;
-
-		host->ports[i]->ioaddr.cmd_addr = port;
-		host->ports[i]->ioaddr.scr_addr = port + PORT_SCONTROL;
-
-		ata_port_pbar_desc(ap, SIL24_HOST_BAR, -1, "host");
-		ata_port_pbar_desc(ap, SIL24_PORT_BAR, offset, "port");
-	}
-
 	/* configure and activate the device */
 	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
 		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
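
Reviewer note: the deepest change in the sata_sil24 conversion above is result-taskfile handling. The cached pp->tf, sil24_check_status() and sil24_tf_read() are gone, ata_qc_complete_multiple() loses its per-qc finish callback, and instead ->qc_fill_rtf() reads hardware state only when a result taskfile is actually wanted. A pull-style model of that, with illustrative stand-in types rather than libata's:

/* Pull-style result taskfile: no cached copy kept hot; the hardware
 * is consulted on demand when completion/EH asks for the taskfile. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct toy_taskfile { unsigned char command; unsigned char device; };

struct toy_qc {
	int tag;
	struct toy_taskfile result_tf;
};

/* stand-in for an MMIO read of the FIS the device returned for @tag */
static void toy_hw_read_tf(int tag, struct toy_taskfile *tf)
{
	(void)tag;
	memset(tf, 0, sizeof(*tf));
	tf->command = 0x50;		/* DRDY, as sample data */
}

static bool toy_qc_fill_rtf(struct toy_qc *qc)
{
	toy_hw_read_tf(qc->tag, &qc->result_tf);
	return true;			/* result taskfile is valid */
}

int main(void)
{
	struct toy_qc qc = { .tag = 0 };

	if (toy_qc_fill_rtf(&qc))
		printf("status=%#x\n", qc.result_tf.command);
	return 0;
}
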
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index a01260a..6b8e45b 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -86,45 +86,13 @@
 };
 
 static struct scsi_host_template sis_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
 };
 
-static const struct ata_port_operations sis_ops = {
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
-	.bmdma_setup            = ata_bmdma_setup,
-	.bmdma_start            = ata_bmdma_start,
-	.bmdma_stop		= ata_bmdma_stop,
-	.bmdma_status		= ata_bmdma_status,
-	.qc_prep		= ata_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
-	.data_xfer		= ata_data_xfer,
-	.freeze			= ata_bmdma_freeze,
-	.thaw			= ata_bmdma_thaw,
-	.error_handler		= ata_bmdma_error_handler,
-	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
-	.irq_clear		= ata_bmdma_irq_clear,
-	.irq_on			= ata_irq_on,
+static struct ata_port_operations sis_ops = {
+	.inherits		= &ata_bmdma_port_ops,
 	.scr_read		= sis_scr_read,
 	.scr_write		= sis_scr_write,
-	.port_start		= ata_port_start,
 };
 
 static const struct ata_port_info sis_port_info = {
@@ -341,7 +309,7 @@
 		break;
 	}
 
-	rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
+	rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
 	if (rc)
 		return rc;
 
@@ -359,8 +327,8 @@
 
 	pci_set_master(pdev);
 	pci_intx(pdev, 1);
-	return ata_host_activate(host, pdev->irq, ata_interrupt, IRQF_SHARED,
-				 &sis_sht);
+	return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+				 IRQF_SHARED, &sis_sht);
 }
 
 static int __init sis_init(void)
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index 019e367..16aa683 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -233,7 +233,7 @@
 
 	/* issue r/w command if this is not a ATA DMA command*/
 	if (qc->tf.protocol != ATA_PROT_DMA)
-		ap->ops->exec_command(ap, &qc->tf);
+		ap->ops->sff_exec_command(ap, &qc->tf);
 }
 
 /**
@@ -269,7 +269,7 @@
 	   and the start command. */
 	/* issue r/w command if the access is to ATA*/
 	if (qc->tf.protocol == ATA_PROT_DMA)
-		ap->ops->exec_command(ap, &qc->tf);
+		ap->ops->sff_exec_command(ap, &qc->tf);
 }
 
 
@@ -327,50 +327,23 @@
 
 
 static struct scsi_host_template k2_sata_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
+	ATA_BMDMA_SHT(DRV_NAME),
 #ifdef CONFIG_PPC_OF
 	.proc_info		= k2_sata_proc_info,
 #endif
-	.bios_param		= ata_std_bios_param,
 };
 
 
-static const struct ata_port_operations k2_sata_ops = {
-	.tf_load		= k2_sata_tf_load,
-	.tf_read		= k2_sata_tf_read,
-	.check_status		= k2_stat_check_status,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
+static struct ata_port_operations k2_sata_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	.sff_tf_load		= k2_sata_tf_load,
+	.sff_tf_read		= k2_sata_tf_read,
+	.sff_check_status	= k2_stat_check_status,
 	.check_atapi_dma	= k2_sata_check_atapi_dma,
 	.bmdma_setup		= k2_bmdma_setup_mmio,
 	.bmdma_start		= k2_bmdma_start_mmio,
-	.bmdma_stop		= ata_bmdma_stop,
-	.bmdma_status		= ata_bmdma_status,
-	.qc_prep		= ata_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
-	.data_xfer		= ata_data_xfer,
-	.freeze			= ata_bmdma_freeze,
-	.thaw			= ata_bmdma_thaw,
-	.error_handler		= ata_bmdma_error_handler,
-	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
-	.irq_clear		= ata_bmdma_irq_clear,
-	.irq_on			= ata_irq_on,
 	.scr_read		= k2_sata_scr_read,
 	.scr_write		= k2_sata_scr_write,
-	.port_start		= ata_port_start,
 };
 
 static const struct ata_port_info k2_port_info[] = {
@@ -519,8 +492,8 @@
 	writel(0x0, mmio_base + K2_SATA_SIM_OFFSET);
 
 	pci_set_master(pdev);
-	return ata_host_activate(host, pdev->irq, ata_interrupt, IRQF_SHARED,
-				 &k2_sata_sht);
+	return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+				 IRQF_SHARED, &k2_sata_sht);
 }
 
 /* 0x240 is device ID for Apple K2 device
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index e3d56bc..ec04b8d 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -232,40 +232,30 @@
 static void pdc20621_put_to_dimm(struct ata_host *host,
 				 void *psource, u32 offset, u32 size);
 static void pdc20621_irq_clear(struct ata_port *ap);
-static unsigned int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc);
+static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc);
 
 
 static struct scsi_host_template pdc_sata_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
+	ATA_BASE_SHT(DRV_NAME),
 	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
 };
 
-static const struct ata_port_operations pdc_20621_ops = {
-	.tf_load		= pdc_tf_load_mmio,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= pdc_exec_command_mmio,
-	.dev_select		= ata_std_dev_select,
+/* TODO: inherit from base port_ops after converting to new EH */
+static struct ata_port_operations pdc_20621_ops = {
+	.sff_tf_load		= pdc_tf_load_mmio,
+	.sff_tf_read		= ata_sff_tf_read,
+	.sff_check_status	= ata_sff_check_status,
+	.sff_exec_command	= pdc_exec_command_mmio,
+	.sff_dev_select		= ata_sff_dev_select,
 	.phy_reset		= pdc_20621_phy_reset,
 	.qc_prep		= pdc20621_qc_prep,
-	.qc_issue		= pdc20621_qc_issue_prot,
-	.data_xfer		= ata_data_xfer,
+	.qc_issue		= pdc20621_qc_issue,
+	.qc_fill_rtf		= ata_sff_qc_fill_rtf,
+	.sff_data_xfer		= ata_sff_data_xfer,
 	.eng_timeout		= pdc_eng_timeout,
-	.irq_clear		= pdc20621_irq_clear,
-	.irq_on			= ata_irq_on,
+	.sff_irq_clear		= pdc20621_irq_clear,
+	.sff_irq_on		= ata_sff_irq_on,
 	.port_start		= pdc_port_start,
 };
 
@@ -475,7 +465,7 @@
 	void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
 	unsigned int portno = ap->port_no;
 	unsigned int i, si, idx, total_len = 0, sgt_len;
-	u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
+	__le32 *buf = (__le32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
 
 	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
 
@@ -693,7 +683,7 @@
 	}
 }
 
-static unsigned int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc)
+static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc)
 {
 	switch (qc->tf.protocol) {
 	case ATA_PROT_DMA:
@@ -709,7 +699,7 @@
 		break;
 	}
 
-	return ata_qc_issue_prot(qc);
+	return ata_sff_qc_issue(qc);
 }
 
 static inline unsigned int pdc20621_host_intr(struct ata_port *ap,
@@ -781,7 +771,7 @@
 	/* command completion, but no data xfer */
 	} else if (qc->tf.protocol == ATA_PROT_NODATA) {
 
-		status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
+		status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
 		DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status);
 		qc->err_mask |= ac_err_mask(status);
 		ata_qc_complete(qc);
@@ -890,7 +880,7 @@
 		break;
 
 	default:
-		drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
+		drv_stat = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
 
 		ata_port_printk(ap, KERN_ERR,
 				"unknown timeout, cmd 0x%x stat 0x%x\n",
@@ -909,7 +899,7 @@
 {
 	WARN_ON(tf->protocol == ATA_PROT_DMA ||
 		tf->protocol == ATA_PROT_NODATA);
-	ata_tf_load(ap, tf);
+	ata_sff_tf_load(ap, tf);
 }
 
 
@@ -917,7 +907,7 @@
 {
 	WARN_ON(tf->protocol == ATA_PROT_DMA ||
 		tf->protocol == ATA_PROT_NODATA);
-	ata_exec_command(ap, tf);
+	ata_sff_exec_command(ap, tf);
 }
 
 
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
index e710e71..f277cea 100644
--- a/drivers/ata/sata_uli.c
+++ b/drivers/ata/sata_uli.c
@@ -76,50 +76,13 @@
 };
 
 static struct scsi_host_template uli_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
 };
 
-static const struct ata_port_operations uli_ops = {
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
-
-	.bmdma_setup            = ata_bmdma_setup,
-	.bmdma_start            = ata_bmdma_start,
-	.bmdma_stop		= ata_bmdma_stop,
-	.bmdma_status		= ata_bmdma_status,
-	.qc_prep		= ata_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
-	.data_xfer		= ata_data_xfer,
-
-	.freeze			= ata_bmdma_freeze,
-	.thaw			= ata_bmdma_thaw,
-	.error_handler		= ata_bmdma_error_handler,
-	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
-
-	.irq_clear		= ata_bmdma_irq_clear,
-	.irq_on			= ata_irq_on,
-
+static struct ata_port_operations uli_ops = {
+	.inherits		= &ata_bmdma_port_ops,
 	.scr_read		= uli_scr_read,
 	.scr_write		= uli_scr_write,
-
-	.port_start		= ata_port_start,
 };
 
 static const struct ata_port_info uli_port_info = {
@@ -212,11 +175,11 @@
 	host->private_data = hpriv;
 
 	/* the first two ports are standard SFF */
-	rc = ata_pci_init_sff_host(host);
+	rc = ata_pci_sff_init_host(host);
 	if (rc)
 		return rc;
 
-	rc = ata_pci_init_bmdma(host);
+	rc = ata_pci_bmdma_init(host);
 	if (rc)
 		return rc;
 
@@ -237,7 +200,7 @@
 			((unsigned long)iomap[1] | ATA_PCI_CTL_OFS) + 4;
 		ioaddr->bmdma_addr = iomap[4] + 16;
 		hpriv->scr_cfg_addr[2] = ULI5287_BASE + ULI5287_OFFS*4;
-		ata_std_ports(ioaddr);
+		ata_sff_std_ports(ioaddr);
 
 		ata_port_desc(host->ports[2],
 			"cmd 0x%llx ctl 0x%llx bmdma 0x%llx",
@@ -252,7 +215,7 @@
 			((unsigned long)iomap[3] | ATA_PCI_CTL_OFS) + 4;
 		ioaddr->bmdma_addr = iomap[4] + 24;
 		hpriv->scr_cfg_addr[3] = ULI5287_BASE + ULI5287_OFFS*5;
-		ata_std_ports(ioaddr);
+		ata_sff_std_ports(ioaddr);
 
 		ata_port_desc(host->ports[2],
 			"cmd 0x%llx ctl 0x%llx bmdma 0x%llx",
@@ -279,8 +242,8 @@
 
 	pci_set_master(pdev);
 	pci_intx(pdev, 1);
-	return ata_host_activate(host, pdev->irq, ata_interrupt, IRQF_SHARED,
-				 &uli_sht);
+	return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+				 IRQF_SHARED, &uli_sht);
 }
 
 static int __init uli_init(void)
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 0d03f44..96deeb3 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -71,7 +71,7 @@
 static int svia_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
 static int svia_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
 static void svia_noop_freeze(struct ata_port *ap);
-static void vt6420_error_handler(struct ata_port *ap);
+static int vt6420_prereset(struct ata_link *link, unsigned long deadline);
 static int vt6421_pata_cable_detect(struct ata_port *ap);
 static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev);
 static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev);
@@ -100,110 +100,26 @@
 };
 
 static struct scsi_host_template svia_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
 };
 
-static const struct ata_port_operations vt6420_sata_ops = {
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
-
-	.bmdma_setup            = ata_bmdma_setup,
-	.bmdma_start            = ata_bmdma_start,
-	.bmdma_stop		= ata_bmdma_stop,
-	.bmdma_status		= ata_bmdma_status,
-
-	.qc_prep		= ata_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
-	.data_xfer		= ata_data_xfer,
-
+static struct ata_port_operations vt6420_sata_ops = {
+	.inherits		= &ata_bmdma_port_ops,
 	.freeze			= svia_noop_freeze,
-	.thaw			= ata_bmdma_thaw,
-	.error_handler		= vt6420_error_handler,
-	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
-
-	.irq_clear		= ata_bmdma_irq_clear,
-	.irq_on			= ata_irq_on,
-
-	.port_start		= ata_port_start,
+	.prereset		= vt6420_prereset,
 };
 
-static const struct ata_port_operations vt6421_pata_ops = {
+static struct ata_port_operations vt6421_pata_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	.cable_detect		= vt6421_pata_cable_detect,
 	.set_piomode		= vt6421_set_pio_mode,
 	.set_dmamode		= vt6421_set_dma_mode,
-
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
-
-	.bmdma_setup            = ata_bmdma_setup,
-	.bmdma_start            = ata_bmdma_start,
-	.bmdma_stop		= ata_bmdma_stop,
-	.bmdma_status		= ata_bmdma_status,
-
-	.qc_prep		= ata_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
-	.data_xfer		= ata_data_xfer,
-
-	.freeze			= ata_bmdma_freeze,
-	.thaw			= ata_bmdma_thaw,
-	.error_handler		= ata_bmdma_error_handler,
-	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
-	.cable_detect		= vt6421_pata_cable_detect,
-
-	.irq_clear		= ata_bmdma_irq_clear,
-	.irq_on			= ata_irq_on,
-
-	.port_start		= ata_port_start,
 };
 
-static const struct ata_port_operations vt6421_sata_ops = {
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
-
-	.bmdma_setup            = ata_bmdma_setup,
-	.bmdma_start            = ata_bmdma_start,
-	.bmdma_stop		= ata_bmdma_stop,
-	.bmdma_status		= ata_bmdma_status,
-
-	.qc_prep		= ata_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
-	.data_xfer		= ata_data_xfer,
-
-	.freeze			= ata_bmdma_freeze,
-	.thaw			= ata_bmdma_thaw,
-	.error_handler		= ata_bmdma_error_handler,
-	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
-	.cable_detect		= ata_cable_sata,
-
-	.irq_clear		= ata_bmdma_irq_clear,
-	.irq_on			= ata_irq_on,
-
+static struct ata_port_operations vt6421_sata_ops = {
+	.inherits		= &ata_bmdma_port_ops,
 	.scr_read		= svia_scr_read,
 	.scr_write		= svia_scr_write,
-
-	.port_start		= ata_port_start,
 };
 
 static const struct ata_port_info vt6420_port_info = {
@@ -257,8 +173,8 @@
 	/* Some VIA controllers choke if ATA_NIEN is manipulated in
 	 * certain way.  Leave it alone and just clear pending IRQ.
 	 */
-	ata_chk_status(ap);
-	ata_bmdma_irq_clear(ap);
+	ap->ops->sff_check_status(ap);
+	ata_sff_irq_clear(ap);
 }
 
 /**
@@ -320,23 +236,17 @@
 
 	if (!online) {
 		/* tell EH to bail */
-		ehc->i.action &= ~ATA_EH_RESET_MASK;
+		ehc->i.action &= ~ATA_EH_RESET;
 		return 0;
 	}
 
  skip_scr:
 	/* wait for !BSY */
-	ata_wait_ready(ap, deadline);
+	ata_sff_wait_ready(link, deadline);
 
 	return 0;
 }
 
-static void vt6420_error_handler(struct ata_port *ap)
-{
-	ata_bmdma_drive_eh(ap, vt6420_prereset, ata_std_softreset, NULL,
-			   ata_std_postreset);
-}
-
 static int vt6421_pata_cable_detect(struct ata_port *ap)
 {
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
@@ -394,7 +304,7 @@
 	ioaddr->bmdma_addr = bmdma_addr;
 	ioaddr->scr_addr = vt6421_scr_addr(iomap[5], ap->port_no);
 
-	ata_std_ports(ioaddr);
+	ata_sff_std_ports(ioaddr);
 
 	ata_port_pbar_desc(ap, ap->port_no, -1, "port");
 	ata_port_pbar_desc(ap, 4, ap->port_no * 8, "bmdma");
@@ -406,7 +316,7 @@
 	struct ata_host *host;
 	int rc;
 
-	rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
+	rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
 	if (rc)
 		return rc;
 	*r_host = host;
@@ -538,8 +448,8 @@
 	svia_configure(pdev);
 
 	pci_set_master(pdev);
-	return ata_host_activate(host, pdev->irq, ata_interrupt, IRQF_SHARED,
-				 &svia_sht);
+	return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+				 IRQF_SHARED, &svia_sht);
 }
 
 static int __init svia_init(void)
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index 95ae3ed..f3d635c 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -200,7 +200,7 @@
 	struct ata_ioports *ioaddr = &ap->ioaddr;
 	u16 nsect, lbal, lbam, lbah, feature;
 
-	tf->command = ata_check_status(ap);
+	tf->command = ata_sff_check_status(ap);
 	tf->device = readw(ioaddr->device_addr);
 	feature = readw(ioaddr->error_addr);
 	nsect = readw(ioaddr->nsect_addr);
@@ -243,7 +243,7 @@
 
 	qc = ata_qc_from_tag(ap, ap->link.active_tag);
 	if (qc && likely(!(qc->tf.flags & ATA_TFLAG_POLLING)))
-		handled = ata_host_intr(ap, qc);
+		handled = ata_sff_host_intr(ap, qc);
 
 	/* We received an interrupt during a polled command,
 	 * or some other spurious condition.  Interrupt reporting
@@ -251,7 +251,7 @@
 	 * simply clear the interrupt
 	 */
 	if (unlikely(!handled))
-		ata_chk_status(ap);
+		ap->ops->sff_check_status(ap);
 }
 
 /*
@@ -300,46 +300,18 @@
 
 
 static struct scsi_host_template vsc_sata_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
 };
 
 
-static const struct ata_port_operations vsc_sata_ops = {
-	.tf_load		= vsc_sata_tf_load,
-	.tf_read		= vsc_sata_tf_read,
-	.exec_command		= ata_exec_command,
-	.check_status		= ata_check_status,
-	.dev_select		= ata_std_dev_select,
-	.bmdma_setup            = ata_bmdma_setup,
-	.bmdma_start            = ata_bmdma_start,
-	.bmdma_stop		= ata_bmdma_stop,
-	.bmdma_status		= ata_bmdma_status,
-	.qc_prep		= ata_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
-	.data_xfer		= ata_data_xfer,
+static struct ata_port_operations vsc_sata_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	.sff_tf_load		= vsc_sata_tf_load,
+	.sff_tf_read		= vsc_sata_tf_read,
 	.freeze			= vsc_freeze,
 	.thaw			= vsc_thaw,
-	.error_handler		= ata_bmdma_error_handler,
-	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
-	.irq_clear		= ata_bmdma_irq_clear,
-	.irq_on			= ata_irq_on,
 	.scr_read		= vsc_sata_scr_read,
 	.scr_write		= vsc_sata_scr_write,
-	.port_start		= ata_port_start,
 };
 
 static void __devinit vsc_sata_setup_port(struct ata_ioports *port,
diff --git a/drivers/base/transport_class.c b/drivers/base/transport_class.c
index 40bca48..cabd0ed 100644
--- a/drivers/base/transport_class.c
+++ b/drivers/base/transport_class.c
@@ -108,7 +108,8 @@
  */
 void anon_transport_class_unregister(struct anon_transport_class *atc)
 {
-	attribute_container_unregister(&atc->container);
+	if (unlikely(attribute_container_unregister(&atc->container)))
+		BUG();
 }
 EXPORT_SYMBOL_GPL(anon_transport_class_unregister);
 
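Reviewer note: the transport_class hunk above stops discarding the return value of attribute_container_unregister(), presumably to satisfy a __must_check annotation; since this caller has no recovery path, failure becomes BUG(). A userspace analogue of the same pattern (GCC attribute assumed available):

/* A warn_unused_result function whose only sane handling at this call
 * site is to assert success, mirroring the BUG() in the hunk. */
#include <assert.h>

__attribute__((warn_unused_result))
static int must_check_op(void)
{
	return 0;	/* 0 == success, as with the kernel call */
}

int main(void)
{
	int ret = must_check_op();

	assert(ret == 0);	/* kernel version does BUG() instead */
	return 0;
}
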
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 84cdf90..349b6ed 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -116,6 +116,10 @@
 		err = -EAGAIN;
 		if (!bytes_read && (filp->f_flags & O_NONBLOCK))
 			goto out;
+		if (bytes_read < 0) {
+			err = bytes_read;
+			goto out;
+		}
 
 		err = -EFAULT;
 		while (bytes_read && size) {
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 613ec81..4d3c701 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -1155,6 +1155,48 @@
 	return NULL;
 }
 
+#ifdef CONFIG_CONSOLE_POLL
+
+/**
+ *	tty_find_polling_driver	-	find device of a polled tty
+ *	@name: name string to match
+ *	@line: pointer to resulting tty line nr
+ *
+ *	This routine returns a tty driver structure, given a name,
+ *	on the condition that the tty driver is capable of polled
+ *	operation.
+ */
+struct tty_driver *tty_find_polling_driver(char *name, int *line)
+{
+	struct tty_driver *p, *res = NULL;
+	int tty_line = 0;
+	char *str;
+
+	mutex_lock(&tty_mutex);
+	/* Search through the tty devices to look for a match */
+	list_for_each_entry(p, &tty_drivers, tty_drivers) {
+		str = name + strlen(p->name);
+		tty_line = simple_strtoul(str, &str, 10);
+		if (*str == ',')
+			str++;
+		if (*str == '\0')
+			str = NULL;
+
+		if (tty_line >= 0 && tty_line <= p->num && p->poll_init &&
+				!p->poll_init(p, tty_line, str)) {
+
+			res = p;
+			*line = tty_line;
+			break;
+		}
+	}
+	mutex_unlock(&tty_mutex);
+
+	return res;
+}
+EXPORT_SYMBOL_GPL(tty_find_polling_driver);
+#endif
+
 /**
  *	tty_check_change	-	check for POSIX terminal changes
  *	@tty: tty to check
@@ -3850,6 +3892,11 @@
 	driver->write_proc = op->write_proc;
 	driver->tiocmget = op->tiocmget;
 	driver->tiocmset = op->tiocmset;
+#ifdef CONFIG_CONSOLE_POLL
+	driver->poll_init = op->poll_init;
+	driver->poll_get_char = op->poll_get_char;
+	driver->poll_put_char = op->poll_put_char;
+#endif
 }
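tty_find_polling_driver() above resolves a "name<line>[,options]" string to a driver and line number, using the driver's poll_init() hook as the capability test. A hedged sketch of a caller, roughly how a kgdb-style console might use it (attach_poll_console and its error handling are illustrative, not part of this patch):

/* Sketch: resolve a console spec such as "ttyS0,115200" to a polled tty. */
static int attach_poll_console(char *spec)
{
	struct tty_driver *drv;
	int line;

	drv = tty_find_polling_driver(spec, &line);
	if (!drv)
		return -ENODEV;	/* no match, or the driver's poll_init failed */

	/* The debugger may now call drv->poll_get_char()/poll_put_char()
	 * on this line with interrupts disabled. */
	return 0;
}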
 
 
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 6b658d8..6d2f0c8 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -64,6 +64,7 @@
 	tristate "Support for PCI-attached cryptographic adapters"
 	depends on S390
 	select ZCRYPT_MONOLITHIC if ZCRYPT="y"
+	select HW_RANDOM
 	help
 	  Select this option if you want to use a PCI-attached cryptographic
 	  adapter like:
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 27340a7..6239c3d 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -46,14 +46,6 @@
 	  MPC8560/40, MPC8555, MPC8548 and MPC8641 processors.
 	  The MPC8349 and MPC8360 are also supported.
 
-config FSL_DMA_SELFTEST
-	bool "Enable the self test for each DMA channel"
-	depends on FSL_DMA
-	default y
-	---help---
-	  Enable the self test for each DMA channel. A self test will be
-	  performed after the channel probed to ensure the DMA works well.
-
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 8db0e7f..d6dc70f 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -362,7 +362,6 @@
 
 	BUG_ON(!device->device_alloc_chan_resources);
 	BUG_ON(!device->device_free_chan_resources);
-	BUG_ON(!device->device_dependency_added);
 	BUG_ON(!device->device_is_tx_complete);
 	BUG_ON(!device->device_issue_pending);
 	BUG_ON(!device->dev);
@@ -479,7 +478,8 @@
 
 	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
 	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
-	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
+	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
+					 DMA_CTRL_ACK);
 
 	if (!tx) {
 		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
@@ -487,7 +487,6 @@
 		return -ENOMEM;
 	}
 
-	tx->ack = 1;
 	tx->callback = NULL;
 	cookie = tx->tx_submit(tx);
 
@@ -525,7 +524,8 @@
 
 	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
 	dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
-	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
+	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
+					 DMA_CTRL_ACK);
 
 	if (!tx) {
 		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
@@ -533,7 +533,6 @@
 		return -ENOMEM;
 	}
 
-	tx->ack = 1;
 	tx->callback = NULL;
 	cookie = tx->tx_submit(tx);
 
@@ -574,7 +573,8 @@
 	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
 	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
 				DMA_FROM_DEVICE);
-	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
+	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
+					 DMA_CTRL_ACK);
 
 	if (!tx) {
 		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
@@ -582,7 +582,6 @@
 		return -ENOMEM;
 	}
 
-	tx->ack = 1;
 	tx->callback = NULL;
 	cookie = tx->tx_submit(tx);
 
@@ -600,8 +599,6 @@
 {
 	tx->chan = chan;
 	spin_lock_init(&tx->lock);
-	INIT_LIST_HEAD(&tx->depend_node);
-	INIT_LIST_HEAD(&tx->depend_list);
 }
 EXPORT_SYMBOL(dma_async_tx_descriptor_init);
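Throughout this series the descriptor ack bit moves from a post-hoc assignment (tx->ack = 1) into the flags argument of the prep routines, so a client declares at prep time whether the engine may recycle the descriptor on completion. A sketch of the client side, mirroring the dma_async_memcpy_* helpers above (issue_copy is a hypothetical wrapper):

/* Sketch: request an acked copy; the engine may reuse the descriptor as
 * soon as it completes.  Omit DMA_CTRL_ACK to keep the descriptor alive
 * for dependent operations. */
static dma_cookie_t issue_copy(struct dma_chan *chan, dma_addr_t dst,
			       dma_addr_t src, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;

	tx = dev->device_prep_dma_memcpy(chan, dst, src, len, DMA_CTRL_ACK);
	if (!tx)
		return -ENOMEM;

	tx->callback = NULL;		/* no completion callback needed */
	return tx->tx_submit(tx);
}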
 
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index df16368..054eabf 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -412,7 +412,7 @@
 }
 
 static struct dma_async_tx_descriptor *
-fsl_dma_prep_interrupt(struct dma_chan *chan)
+fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags)
 {
 	struct fsl_dma_chan *fsl_chan;
 	struct fsl_desc_sw *new;
@@ -429,7 +429,7 @@
 	}
 
 	new->async_tx.cookie = -EBUSY;
-	new->async_tx.ack = 0;
+	new->async_tx.flags = flags;
 
 	/* Insert the link descriptor to the LD ring */
 	list_add_tail(&new->node, &new->async_tx.tx_list);
@@ -482,7 +482,7 @@
 			set_desc_next(fsl_chan, &prev->hw, new->async_tx.phys);
 
 		new->async_tx.cookie = 0;
-		new->async_tx.ack = 1;
+		async_tx_ack(&new->async_tx);
 
 		prev = new;
 		len -= copy;
@@ -493,7 +493,7 @@
 		list_add_tail(&new->node, &first->async_tx.tx_list);
 	} while (len);
 
-	new->async_tx.ack = 0; /* client is in control of this ack */
+	new->async_tx.flags = flags; /* client is in control of this ack */
 	new->async_tx.cookie = -EBUSY;
 
 	/* Set End-of-link to the last link descriptor of new list*/
@@ -658,13 +658,6 @@
 	fsl_chan_xfer_ld_queue(fsl_chan);
 }
 
-static void fsl_dma_dependency_added(struct dma_chan *chan)
-{
-	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
-
-	fsl_chan_ld_cleanup(fsl_chan);
-}
-
 /**
  * fsl_dma_is_complete - Determine the DMA status
  * @fsl_chan : Freescale DMA channel
@@ -696,6 +689,8 @@
 {
 	struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
 	u32 stat;
+	int update_cookie = 0;
+	int xfer_ld_q = 0;
 
 	stat = get_sr(fsl_chan);
 	dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n",
@@ -720,8 +715,8 @@
 			 * Now, update the completed cookie, and continue the
 			 * next uncompleted transfer.
 			 */
-			fsl_dma_update_completed_cookie(fsl_chan);
-			fsl_chan_xfer_ld_queue(fsl_chan);
+			update_cookie = 1;
+			xfer_ld_q = 1;
 		}
 		stat &= ~FSL_DMA_SR_PE;
 	}
@@ -734,19 +729,33 @@
 		dev_dbg(fsl_chan->dev, "event: clndar %p, nlndar %p\n",
 			(void *)get_cdar(fsl_chan), (void *)get_ndar(fsl_chan));
 		stat &= ~FSL_DMA_SR_EOSI;
-		fsl_dma_update_completed_cookie(fsl_chan);
+		update_cookie = 1;
+	}
+
+	/* For MPC8349, the EOCDI event needs to update the cookie
+	 * and start the next transfer if one exists.
+	 */
+	if (stat & FSL_DMA_SR_EOCDI) {
+		dev_dbg(fsl_chan->dev, "event: End-of-Chain link INT\n");
+		stat &= ~FSL_DMA_SR_EOCDI;
+		update_cookie = 1;
+		xfer_ld_q = 1;
 	}
 
 	/* If the current transfer is the end-of-transfer,
 	 * we should clear the Channel Start bit to
 	 * prepare for the next transfer.
 	 */
-	if (stat & (FSL_DMA_SR_EOLNI | FSL_DMA_SR_EOCDI)) {
+	if (stat & FSL_DMA_SR_EOLNI) {
 		dev_dbg(fsl_chan->dev, "event: End-of-link INT\n");
 		stat &= ~FSL_DMA_SR_EOLNI;
-		fsl_chan_xfer_ld_queue(fsl_chan);
+		xfer_ld_q = 1;
 	}
 
+	if (update_cookie)
+		fsl_dma_update_completed_cookie(fsl_chan);
+	if (xfer_ld_q)
+		fsl_chan_xfer_ld_queue(fsl_chan);
 	if (stat)
 		dev_dbg(fsl_chan->dev, "event: unhandled sr 0x%02x\n",
 					stat);
@@ -776,15 +785,13 @@
 	fsl_chan_ld_cleanup(fsl_chan);
 }
 
-#ifdef FSL_DMA_CALLBACKTEST
-static void fsl_dma_callback_test(struct fsl_dma_chan *fsl_chan)
+static void fsl_dma_callback_test(void *param)
 {
+	struct fsl_dma_chan *fsl_chan = param;
 	if (fsl_chan)
-		dev_info(fsl_chan->dev, "selftest: callback is ok!\n");
+		dev_dbg(fsl_chan->dev, "selftest: callback is ok!\n");
 }
-#endif
 
-#ifdef CONFIG_FSL_DMA_SELFTEST
 static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
 {
 	struct dma_chan *chan;
@@ -867,7 +874,7 @@
 	async_tx_ack(tx3);
 
 	/* Interrupt tx test */
-	tx1 = fsl_dma_prep_interrupt(chan);
+	tx1 = fsl_dma_prep_interrupt(chan, 0);
 	async_tx_ack(tx1);
 	cookie = fsl_dma_tx_submit(tx1);
 
@@ -875,13 +882,11 @@
 	cookie = fsl_dma_tx_submit(tx3);
 	cookie = fsl_dma_tx_submit(tx2);
 
-#ifdef FSL_DMA_CALLBACKTEST
 	if (dma_has_cap(DMA_INTERRUPT, ((struct fsl_dma_device *)
 	    dev_get_drvdata(fsl_chan->dev->parent))->common.cap_mask)) {
 		tx3->callback = fsl_dma_callback_test;
 		tx3->callback_param = fsl_chan;
 	}
-#endif
 	fsl_dma_memcpy_issue_pending(chan);
 	msleep(2);
 
@@ -906,7 +911,6 @@
 	kfree(src);
 	return err;
 }
-#endif
 
 static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
 			const struct of_device_id *match)
@@ -997,11 +1001,9 @@
 		}
 	}
 
-#ifdef CONFIG_FSL_DMA_SELFTEST
 	err = fsl_dma_self_test(new_fsl_chan);
 	if (err)
 		goto err;
-#endif
 
 	dev_info(&dev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id,
 				match->compatible, new_fsl_chan->irq);
@@ -1080,7 +1082,6 @@
 	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
 	fdev->common.device_is_tx_complete = fsl_dma_is_complete;
 	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
-	fdev->common.device_dependency_added = fsl_dma_dependency_added;
 	fdev->common.dev = &dev->dev;
 
 	irq = irq_of_parse_and_map(dev->node, 0);
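The fsldma event handler above is reorganized so that decoding the status bits only records which actions are needed (update_cookie, xfer_ld_q), and each action then runs at most once. Reduced to a sketch with hypothetical event and helper names:

/* Sketch: gather work from all status bits first, then act once. */
static void handle_channel_events(u32 stat)
{
	int update_cookie = 0, restart_queue = 0;

	if (stat & EVT_PROGRAM_ERR) {		/* hypothetical bit masks */
		update_cookie = 1;
		restart_queue = 1;
	}
	if (stat & EVT_END_OF_SEGMENT)
		update_cookie = 1;
	if (stat & EVT_END_OF_CHAIN) {
		update_cookie = 1;
		restart_queue = 1;
	}

	if (update_cookie)		/* once, however many bits were set */
		update_completed_cookie();
	if (restart_queue)
		kick_descriptor_queue();
}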
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 4017d9e..318e8a2 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -212,14 +212,14 @@
 	u32 copy;
 	size_t len;
 	dma_addr_t src, dst;
-	int orig_ack;
+	unsigned long orig_flags;
 	unsigned int desc_count = 0;
 
 	/* src and dest and len are stored in the initial descriptor */
 	len = first->len;
 	src = first->src;
 	dst = first->dst;
-	orig_ack = first->async_tx.ack;
+	orig_flags = first->async_tx.flags;
 	new = first;
 
 	spin_lock_bh(&ioat_chan->desc_lock);
@@ -228,7 +228,7 @@
 	do {
 		copy = min_t(size_t, len, ioat_chan->xfercap);
 
-		new->async_tx.ack = 1;
+		async_tx_ack(&new->async_tx);
 
 		hw = new->hw;
 		hw->size = copy;
@@ -264,7 +264,7 @@
 	}
 
 	new->tx_cnt = desc_count;
-	new->async_tx.ack = orig_ack; /* client is in control of this ack */
+	new->async_tx.flags = orig_flags; /* client is in control of this ack */
 
 	/* store the original values for use in later cleanup */
 	if (new != first) {
@@ -304,14 +304,14 @@
 	u32 copy;
 	size_t len;
 	dma_addr_t src, dst;
-	int orig_ack;
+	unsigned long orig_flags;
 	unsigned int desc_count = 0;
 
 	/* src and dest and len are stored in the initial descriptor */
 	len = first->len;
 	src = first->src;
 	dst = first->dst;
-	orig_ack = first->async_tx.ack;
+	orig_flags = first->async_tx.flags;
 	new = first;
 
 	/*
@@ -321,7 +321,7 @@
 	do {
 		copy = min_t(size_t, len, ioat_chan->xfercap);
 
-		new->async_tx.ack = 1;
+		async_tx_ack(&new->async_tx);
 
 		hw = new->hw;
 		hw->size = copy;
@@ -349,7 +349,7 @@
 	}
 
 	new->tx_cnt = desc_count;
-	new->async_tx.ack = orig_ack; /* client is in control of this ack */
+	new->async_tx.flags = orig_flags; /* client is in control of this ack */
 
 	/* store the original values for use in later cleanup */
 	if (new != first) {
@@ -714,7 +714,7 @@
 		new->len = len;
 		new->dst = dma_dest;
 		new->src = dma_src;
-		new->async_tx.ack = 0;
+		new->async_tx.flags = flags;
 		return &new->async_tx;
 	} else
 		return NULL;
@@ -742,7 +742,7 @@
 		new->len = len;
 		new->dst = dma_dest;
 		new->src = dma_src;
-		new->async_tx.ack = 0;
+		new->async_tx.flags = flags;
 		return &new->async_tx;
 	} else
 		return NULL;
@@ -842,7 +842,7 @@
 				 * a completed entry, but not the last, so clean
 				 * up if the client is done with the descriptor
 				 */
-				if (desc->async_tx.ack) {
+				if (async_tx_test_ack(&desc->async_tx)) {
 					list_del(&desc->node);
 					list_add_tail(&desc->node,
 						      &ioat_chan->free_desc);
@@ -924,17 +924,6 @@
 	spin_unlock_bh(&ioat_chan->cleanup_lock);
 }
 
-static void ioat_dma_dependency_added(struct dma_chan *chan)
-{
-	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
-	spin_lock_bh(&ioat_chan->desc_lock);
-	if (ioat_chan->pending == 0) {
-		spin_unlock_bh(&ioat_chan->desc_lock);
-		ioat_dma_memcpy_cleanup(ioat_chan);
-	} else
-		spin_unlock_bh(&ioat_chan->desc_lock);
-}
-
 /**
  * ioat_dma_is_complete - poll the status of a IOAT DMA transaction
  * @chan: IOAT DMA channel handle
@@ -990,7 +979,7 @@
 	desc->hw->size = 0;
 	desc->hw->src_addr = 0;
 	desc->hw->dst_addr = 0;
-	desc->async_tx.ack = 1;
+	async_tx_ack(&desc->async_tx);
 	switch (ioat_chan->device->version) {
 	case IOAT_VER_1_2:
 		desc->hw->next = 0;
@@ -1316,7 +1305,6 @@
 
 	dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
 	device->common.device_is_tx_complete = ioat_dma_is_complete;
-	device->common.device_dependency_added = ioat_dma_dependency_added;
 	switch (device->version) {
 	case IOAT_VER_1_2:
 		device->common.device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
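With the ack state folded into tx->flags, ioat (like the other engines in this series) goes through async_tx_ack() and async_tx_test_ack() rather than poking a dedicated field. The helpers amount to flag accessors; a sketch of the idiom (the real ones live in the dmaengine headers):

/* Sketch of the accessor idiom replacing direct ->ack reads and writes. */
static inline void sketch_tx_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags |= DMA_CTRL_ACK;
}

static inline int sketch_tx_test_ack(struct dma_async_tx_descriptor *tx)
{
	return tx->flags & DMA_CTRL_ACK;
}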
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index f82b090..762b729 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -63,7 +63,6 @@
 	struct iop_adma_chan *iop_chan, dma_cookie_t cookie)
 {
 	BUG_ON(desc->async_tx.cookie < 0);
-	spin_lock_bh(&desc->async_tx.lock);
 	if (desc->async_tx.cookie > 0) {
 		cookie = desc->async_tx.cookie;
 		desc->async_tx.cookie = 0;
@@ -101,7 +100,6 @@
 
 	/* run dependent operations */
 	async_tx_run_dependencies(&desc->async_tx);
-	spin_unlock_bh(&desc->async_tx.lock);
 
 	return cookie;
 }
@@ -113,7 +111,7 @@
 	/* the client is allowed to attach dependent operations
 	 * until 'ack' is set
 	 */
-	if (!desc->async_tx.ack)
+	if (!async_tx_test_ack(&desc->async_tx))
 		return 0;
 
 	/* leave the last descriptor in the chain
@@ -150,7 +148,7 @@
 			"this_desc: %#x next_desc: %#x ack: %d\n",
 			iter->async_tx.cookie, iter->idx, busy,
 			iter->async_tx.phys, iop_desc_get_next_desc(iter),
-			iter->async_tx.ack);
+			async_tx_test_ack(&iter->async_tx));
 		prefetch(_iter);
 		prefetch(&_iter->async_tx);
 
@@ -257,8 +255,6 @@
 
 	BUG_ON(!seen_current);
 
-	iop_chan_idle(busy, iop_chan);
-
 	if (cookie > 0) {
 		iop_chan->completed_cookie = cookie;
 		pr_debug("\tcompleted cookie %d\n", cookie);
@@ -275,8 +271,11 @@
 
 static void iop_adma_tasklet(unsigned long data)
 {
-	struct iop_adma_chan *chan = (struct iop_adma_chan *) data;
-	__iop_adma_slot_cleanup(chan);
+	struct iop_adma_chan *iop_chan = (struct iop_adma_chan *) data;
+
+	spin_lock(&iop_chan->lock);
+	__iop_adma_slot_cleanup(iop_chan);
+	spin_unlock(&iop_chan->lock);
 }
 
 static struct iop_adma_desc_slot *
@@ -339,9 +338,7 @@
 
 				/* pre-ack all but the last descriptor */
 				if (num_slots != slots_per_op)
-					iter->async_tx.ack = 1;
-				else
-					iter->async_tx.ack = 0;
+					async_tx_ack(&iter->async_tx);
 
 				list_add_tail(&iter->chain_node, &chain);
 				alloc_tail = iter;
@@ -514,7 +511,7 @@
 }
 
 static struct dma_async_tx_descriptor *
-iop_adma_prep_dma_interrupt(struct dma_chan *chan)
+iop_adma_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
 {
 	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 	struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -529,6 +526,7 @@
 		grp_start = sw_desc->group_head;
 		iop_desc_init_interrupt(grp_start, iop_chan);
 		grp_start->unmap_len = 0;
+		sw_desc->async_tx.flags = flags;
 	}
 	spin_unlock_bh(&iop_chan->lock);
 
@@ -561,6 +559,7 @@
 		iop_desc_set_memcpy_src_addr(grp_start, dma_src);
 		sw_desc->unmap_src_cnt = 1;
 		sw_desc->unmap_len = len;
+		sw_desc->async_tx.flags = flags;
 	}
 	spin_unlock_bh(&iop_chan->lock);
 
@@ -593,6 +592,7 @@
 		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
 		sw_desc->unmap_src_cnt = 1;
 		sw_desc->unmap_len = len;
+		sw_desc->async_tx.flags = flags;
 	}
 	spin_unlock_bh(&iop_chan->lock);
 
@@ -626,6 +626,7 @@
 		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
 		sw_desc->unmap_src_cnt = src_cnt;
 		sw_desc->unmap_len = len;
+		sw_desc->async_tx.flags = flags;
 		while (src_cnt--)
 			iop_desc_set_xor_src_addr(grp_start, src_cnt,
 						  dma_src[src_cnt]);
@@ -662,6 +663,7 @@
 			__func__, grp_start->xor_check_result);
 		sw_desc->unmap_src_cnt = src_cnt;
 		sw_desc->unmap_len = len;
+		sw_desc->async_tx.flags = flags;
 		while (src_cnt--)
 			iop_desc_set_zero_sum_src_addr(grp_start, src_cnt,
 						       dma_src[src_cnt]);
@@ -671,12 +673,6 @@
 	return sw_desc ? &sw_desc->async_tx : NULL;
 }
 
-static void iop_adma_dependency_added(struct dma_chan *chan)
-{
-	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
-	tasklet_schedule(&iop_chan->irq_tasklet);
-}
-
 static void iop_adma_free_chan_resources(struct dma_chan *chan)
 {
 	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
@@ -854,11 +850,11 @@
 	src_dma = dma_map_single(dma_chan->device->dev, src,
 				IOP_ADMA_TEST_SIZE, DMA_TO_DEVICE);
 	tx = iop_adma_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
-				      IOP_ADMA_TEST_SIZE, 1);
+				      IOP_ADMA_TEST_SIZE,
+				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 
 	cookie = iop_adma_tx_submit(tx);
 	iop_adma_issue_pending(dma_chan);
-	async_tx_ack(tx);
 	msleep(1);
 
 	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
@@ -954,11 +950,11 @@
 		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
 					   0, PAGE_SIZE, DMA_TO_DEVICE);
 	tx = iop_adma_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
-				   IOP_ADMA_NUM_SRC_TEST, PAGE_SIZE, 1);
+				   IOP_ADMA_NUM_SRC_TEST, PAGE_SIZE,
+				   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 
 	cookie = iop_adma_tx_submit(tx);
 	iop_adma_issue_pending(dma_chan);
-	async_tx_ack(tx);
 	msleep(8);
 
 	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
@@ -1001,11 +997,11 @@
 					   DMA_TO_DEVICE);
 	tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs,
 					IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
-					&zero_sum_result, 1);
+					&zero_sum_result,
+					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 
 	cookie = iop_adma_tx_submit(tx);
 	iop_adma_issue_pending(dma_chan);
-	async_tx_ack(tx);
 	msleep(8);
 
 	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
@@ -1025,11 +1021,11 @@
 	/* test memset */
 	dma_addr = dma_map_page(dma_chan->device->dev, dest, 0,
 			PAGE_SIZE, DMA_FROM_DEVICE);
-	tx = iop_adma_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE, 1);
+	tx = iop_adma_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE,
+				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 
 	cookie = iop_adma_tx_submit(tx);
 	iop_adma_issue_pending(dma_chan);
-	async_tx_ack(tx);
 	msleep(8);
 
 	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
@@ -1057,11 +1053,11 @@
 					   DMA_TO_DEVICE);
 	tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs,
 					IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
-					&zero_sum_result, 1);
+					&zero_sum_result,
+					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 
 	cookie = iop_adma_tx_submit(tx);
 	iop_adma_issue_pending(dma_chan);
-	async_tx_ack(tx);
 	msleep(8);
 
 	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
@@ -1177,7 +1173,6 @@
 	dma_dev->device_free_chan_resources = iop_adma_free_chan_resources;
 	dma_dev->device_is_tx_complete = iop_adma_is_complete;
 	dma_dev->device_issue_pending = iop_adma_issue_pending;
-	dma_dev->device_dependency_added = iop_adma_dependency_added;
 	dma_dev->dev = &pdev->dev;
 
 	/* set prep routines based on capability */
@@ -1232,9 +1227,6 @@
 	}
 
 	spin_lock_init(&iop_chan->lock);
-	init_timer(&iop_chan->cleanup_watchdog);
-	iop_chan->cleanup_watchdog.data = (unsigned long) iop_chan;
-	iop_chan->cleanup_watchdog.function = iop_adma_tasklet;
 	INIT_LIST_HEAD(&iop_chan->chain);
 	INIT_LIST_HEAD(&iop_chan->all_slots);
 	INIT_RCU_HEAD(&iop_chan->common.rcu);
@@ -1298,7 +1290,7 @@
 		grp_start = sw_desc->group_head;
 
 		list_splice_init(&sw_desc->async_tx.tx_list, &iop_chan->chain);
-		sw_desc->async_tx.ack = 1;
+		async_tx_ack(&sw_desc->async_tx);
 		iop_desc_init_memcpy(grp_start, 0);
 		iop_desc_set_byte_count(grp_start, iop_chan, 0);
 		iop_desc_set_dest_addr(grp_start, iop_chan, 0);
@@ -1354,7 +1346,7 @@
 	if (sw_desc) {
 		grp_start = sw_desc->group_head;
 		list_splice_init(&sw_desc->async_tx.tx_list, &iop_chan->chain);
-		sw_desc->async_tx.ack = 1;
+		async_tx_ack(&sw_desc->async_tx);
 		iop_desc_init_null_xor(grp_start, 2, 0);
 		iop_desc_set_byte_count(grp_start, iop_chan, 0);
 		iop_desc_set_dest_addr(grp_start, iop_chan, 0);
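iop_adma_tasklet() now takes the channel lock around the whole cleanup pass, replacing the per-descriptor spin_lock_bh that was removed from the cookie update path above. A tasklet already runs in softirq context, so the plain lock form suffices there, while process-context paths keep using the _bh variants. The shape of it, with hypothetical names:

/* Sketch: serialize softirq cleanup against prep/submit paths. */
static void chan_tasklet(unsigned long data)
{
	struct sketch_chan *chan = (struct sketch_chan *)data;

	spin_lock(&chan->lock);		/* softirq context: no _bh needed */
	__cleanup_completed_slots(chan);
	spin_unlock(&chan->lock);
}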
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 25bdc2d..fb4d391 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -54,6 +54,11 @@
 	  directive, use "install modulename /bin/true" for the modules to be
 	  blacklisted.
 
+config FIREWIRE_OHCI_DEBUG
+	bool
+	depends on FIREWIRE_OHCI
+	default y
+
 config FIREWIRE_SBP2
 	tristate "Support for storage devices (SBP-2 protocol driver)"
 	depends on FIREWIRE && SCSI
diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c
index a034627..5b4c0d9 100644
--- a/drivers/firewire/fw-card.c
+++ b/drivers/firewire/fw-card.c
@@ -167,7 +167,6 @@
 
 	return 0;
 }
-EXPORT_SYMBOL(fw_core_add_descriptor);
 
 void
 fw_core_remove_descriptor(struct fw_descriptor *desc)
@@ -182,7 +181,6 @@
 
 	mutex_unlock(&card_mutex);
 }
-EXPORT_SYMBOL(fw_core_remove_descriptor);
 
 static const char gap_count_table[] = {
 	63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
@@ -220,7 +218,7 @@
 	struct bm_data bmd;
 	unsigned long flags;
 	int root_id, new_root_id, irm_id, gap_count, generation, grace;
-	int do_reset = 0;
+	bool do_reset = false;
 
 	spin_lock_irqsave(&card->lock, flags);
 	local_node = card->local_node;
@@ -331,7 +329,7 @@
 		 */
 		spin_unlock_irqrestore(&card->lock, flags);
 		goto out;
-	} else if (root_device->config_rom[2] & BIB_CMC) {
+	} else if (root_device->cmc) {
 		/*
 		 * FIXME: I suppose we should set the cmstr bit in the
 		 * STATE_CLEAR register of this node, as described in
@@ -360,14 +358,14 @@
 		gap_count = 63;
 
 	/*
-	 * Finally, figure out if we should do a reset or not.  If we've
-	 * done less that 5 resets with the same physical topology and we
+	 * Finally, figure out if we should do a reset or not.  If we have
+	 * done less than 5 resets with the same physical topology and we
 	 * have either a new root or a new gap count setting, let's do it.
 	 */
 
 	if (card->bm_retries++ < 5 &&
 	    (card->gap_count != gap_count || new_root_id != root_id))
-		do_reset = 1;
+		do_reset = true;
 
 	spin_unlock_irqrestore(&card->lock, flags);
 
@@ -398,7 +396,6 @@
 {
 	static atomic_t index = ATOMIC_INIT(-1);
 
-	kref_init(&card->kref);
 	atomic_set(&card->device_count, 0);
 	card->index = atomic_inc_return(&index);
 	card->driver = driver;
@@ -429,12 +426,6 @@
 	card->link_speed = link_speed;
 	card->guid = guid;
 
-	/*
-	 * The subsystem grabs a reference when the card is added and
-	 * drops it when the driver calls fw_core_remove_card.
-	 */
-	fw_card_get(card);
-
 	mutex_lock(&card_mutex);
 	config_rom = generate_config_rom(card, &length);
 	list_add_tail(&card->link, &card_list);
@@ -540,40 +531,9 @@
 	cancel_delayed_work_sync(&card->work);
 	fw_flush_transactions(card);
 	del_timer_sync(&card->flush_timer);
-
-	fw_card_put(card);
 }
 EXPORT_SYMBOL(fw_core_remove_card);
 
-struct fw_card *
-fw_card_get(struct fw_card *card)
-{
-	kref_get(&card->kref);
-
-	return card;
-}
-EXPORT_SYMBOL(fw_card_get);
-
-static void
-release_card(struct kref *kref)
-{
-	struct fw_card *card = container_of(kref, struct fw_card, kref);
-
-	kfree(card);
-}
-
-/*
- * An assumption for fw_card_put() is that the card driver allocates
- * the fw_card struct with kalloc and that it has been shut down
- * before the last ref is dropped.
- */
-void
-fw_card_put(struct fw_card *card)
-{
-	kref_put(&card->kref, release_card);
-}
-EXPORT_SYMBOL(fw_card_put);
-
 int
 fw_core_initiate_bus_reset(struct fw_card *card, int short_reset)
 {
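With the kref gone, a fw_card has no lifetime of its own any more: it is embedded in the controller driver's per-device structure and lives exactly as long as that allocation (which is why fw-ohci below frees via the embedded card). A sketch of the embedding, with hypothetical names:

/* Sketch: card lifetime tied to the structure that embeds it. */
struct sketch_ctrl {
	struct fw_card card;	/* first member, so &ctrl->card == ctrl */
	void __iomem *registers;
};

static inline struct sketch_ctrl *to_ctrl(struct fw_card *card)
{
	return container_of(card, struct sketch_ctrl, card);
}
/* kfree() on the containing structure releases the card; no kref needed. */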
diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c
index 46bc197..4a54192 100644
--- a/drivers/firewire/fw-cdev.c
+++ b/drivers/firewire/fw-cdev.c
@@ -269,21 +269,28 @@
 {
 	struct fw_cdev_get_info *get_info = buffer;
 	struct fw_cdev_event_bus_reset bus_reset;
+	unsigned long ret = 0;
 
 	client->version = get_info->version;
 	get_info->version = FW_CDEV_VERSION;
 
+	down_read(&fw_device_rwsem);
+
 	if (get_info->rom != 0) {
 		void __user *uptr = u64_to_uptr(get_info->rom);
 		size_t want = get_info->rom_length;
 		size_t have = client->device->config_rom_length * 4;
 
-		if (copy_to_user(uptr, client->device->config_rom,
-				 min(want, have)))
-			return -EFAULT;
+		ret = copy_to_user(uptr, client->device->config_rom,
+				   min(want, have));
 	}
 	get_info->rom_length = client->device->config_rom_length * 4;
 
+	up_read(&fw_device_rwsem);
+
+	if (ret != 0)
+		return -EFAULT;
+
 	client->bus_reset_closure = get_info->bus_reset_closure;
 	if (get_info->bus_reset != 0) {
 		void __user *uptr = u64_to_uptr(get_info->bus_reset);
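The reworked ioctl above holds fw_device_rwsem across copy_to_user(), which is legal because a semaphore, unlike a spinlock, may be held by a sleeping task; the copy result is converted to -EFAULT only after unlocking. As a sketch:

/* Sketch: copy a rwsem-protected buffer out to user space. */
static int copy_rom_to_user(void __user *uptr, struct fw_device *device,
			    size_t want)
{
	size_t have;
	unsigned long ret;

	down_read(&fw_device_rwsem);	/* config_rom may be swapped under us */
	have = device->config_rom_length * 4;
	ret = copy_to_user(uptr, device->config_rom, min(want, have));
	up_read(&fw_device_rwsem);

	return ret != 0 ? -EFAULT : 0;	/* translate only after unlocking */
}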
diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c
index 870125a..2d01bc1 100644
--- a/drivers/firewire/fw-device.c
+++ b/drivers/firewire/fw-device.c
@@ -25,7 +25,7 @@
 #include <linux/device.h>
 #include <linux/delay.h>
 #include <linux/idr.h>
-#include <linux/rwsem.h>
+#include <linux/string.h>
 #include <asm/semaphore.h>
 #include <asm/system.h>
 #include <linux/ctype.h>
@@ -160,9 +160,9 @@
 	 * Take the card lock so we don't set this to NULL while a
 	 * FW_NODE_UPDATED callback is being handled.
 	 */
-	spin_lock_irqsave(&device->card->lock, flags);
+	spin_lock_irqsave(&card->lock, flags);
 	device->node->data = NULL;
-	spin_unlock_irqrestore(&device->card->lock, flags);
+	spin_unlock_irqrestore(&card->lock, flags);
 
 	fw_node_put(device->node);
 	kfree(device->config_rom);
@@ -195,7 +195,9 @@
 		container_of(dattr, struct config_rom_attribute, attr);
 	struct fw_csr_iterator ci;
 	u32 *dir;
-	int key, value;
+	int key, value, ret = -ENOENT;
+
+	down_read(&fw_device_rwsem);
 
 	if (is_fw_unit(dev))
 		dir = fw_unit(dev)->directory;
@@ -204,11 +206,15 @@
 
 	fw_csr_iterator_init(&ci, dir);
 	while (fw_csr_iterator_next(&ci, &key, &value))
-		if (attr->key == key)
-			return snprintf(buf, buf ? PAGE_SIZE : 0,
-					"0x%06x\n", value);
+		if (attr->key == key) {
+			ret = snprintf(buf, buf ? PAGE_SIZE : 0,
+				       "0x%06x\n", value);
+			break;
+		}
 
-	return -ENOENT;
+	up_read(&fw_device_rwsem);
+
+	return ret;
 }
 
 #define IMMEDIATE_ATTR(name, key)				\
@@ -221,9 +227,11 @@
 		container_of(dattr, struct config_rom_attribute, attr);
 	struct fw_csr_iterator ci;
 	u32 *dir, *block = NULL, *p, *end;
-	int length, key, value, last_key = 0;
+	int length, key, value, last_key = 0, ret = -ENOENT;
 	char *b;
 
+	down_read(&fw_device_rwsem);
+
 	if (is_fw_unit(dev))
 		dir = fw_unit(dev)->directory;
 	else
@@ -238,18 +246,20 @@
 	}
 
 	if (block == NULL)
-		return -ENOENT;
+		goto out;
 
 	length = min(block[0] >> 16, 256U);
 	if (length < 3)
-		return -ENOENT;
+		goto out;
 
 	if (block[1] != 0 || block[2] != 0)
 		/* Unknown encoding. */
-		return -ENOENT;
+		goto out;
 
-	if (buf == NULL)
-		return length * 4;
+	if (buf == NULL) {
+		ret = length * 4;
+		goto out;
+	}
 
 	b = buf;
 	end = &block[length + 1];
@@ -259,8 +269,11 @@
 	/* Strip trailing whitespace and add newline. */
 	while (b--, (isspace(*b) || *b == '\0') && b > buf);
 	strcpy(b + 1, "\n");
+	ret = b + 2 - buf;
+ out:
+	up_read(&fw_device_rwsem);
 
-	return b + 2 - buf;
+	return ret;
 }
 
 #define TEXT_LEAF_ATTR(name, key)				\
@@ -337,19 +350,28 @@
 config_rom_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct fw_device *device = fw_device(dev);
+	size_t length;
 
-	memcpy(buf, device->config_rom, device->config_rom_length * 4);
+	down_read(&fw_device_rwsem);
+	length = device->config_rom_length * 4;
+	memcpy(buf, device->config_rom, length);
+	up_read(&fw_device_rwsem);
 
-	return device->config_rom_length * 4;
+	return length;
 }
 
 static ssize_t
 guid_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct fw_device *device = fw_device(dev);
+	int ret;
 
-	return snprintf(buf, PAGE_SIZE, "0x%08x%08x\n",
-			device->config_rom[3], device->config_rom[4]);
+	down_read(&fw_device_rwsem);
+	ret = snprintf(buf, PAGE_SIZE, "0x%08x%08x\n",
+		       device->config_rom[3], device->config_rom[4]);
+	up_read(&fw_device_rwsem);
+
+	return ret;
 }
 
 static struct device_attribute fw_device_attributes[] = {
@@ -388,7 +410,7 @@
 
 	init_completion(&callback_data.done);
 
-	offset = 0xfffff0000400ULL + index * 4;
+	offset = (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + index * 4;
 	fw_send_request(device->card, &t, TCODE_READ_QUADLET_REQUEST,
 			device->node_id, generation, device->max_speed,
 			offset, NULL, 4, complete_transaction, &callback_data);
@@ -400,6 +422,9 @@
 	return callback_data.rcode;
 }
 
+#define READ_BIB_ROM_SIZE	256
+#define READ_BIB_STACK_SIZE	16
+
 /*
  * Read the bus info block, perform a speed probe, and read all of the rest of
  * the config ROM.  We do all this with a cached bus generation.  If the bus
@@ -409,16 +434,23 @@
  */
 static int read_bus_info_block(struct fw_device *device, int generation)
 {
-	static u32 rom[256];
-	u32 stack[16], sp, key;
-	int i, end, length;
+	u32 *rom, *stack, *old_rom, *new_rom;
+	u32 sp, key;
+	int i, end, length, ret = -1;
+
+	rom = kmalloc(sizeof(*rom) * READ_BIB_ROM_SIZE +
+		      sizeof(*stack) * READ_BIB_STACK_SIZE, GFP_KERNEL);
+	if (rom == NULL)
+		return -ENOMEM;
+
+	stack = &rom[READ_BIB_ROM_SIZE];
 
 	device->max_speed = SCODE_100;
 
 	/* First read the bus info block. */
 	for (i = 0; i < 5; i++) {
 		if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE)
-			return -1;
+			goto out;
 		/*
 		 * As per IEEE1212 7.2, during power-up, devices can
 		 * reply with a 0 for the first quadlet of the config
@@ -428,7 +460,7 @@
 		 * retry mechanism will try again later.
 		 */
 		if (i == 0 && rom[i] == 0)
-			return -1;
+			goto out;
 	}
 
 	device->max_speed = device->node->max_speed;
@@ -478,26 +510,26 @@
 		 */
 		key = stack[--sp];
 		i = key & 0xffffff;
-		if (i >= ARRAY_SIZE(rom))
+		if (i >= READ_BIB_ROM_SIZE)
 			/*
 			 * The reference points outside the standard
 			 * config rom area, something's fishy.
 			 */
-			return -1;
+			goto out;
 
 		/* Read header quadlet for the block to get the length. */
 		if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE)
-			return -1;
+			goto out;
 		end = i + (rom[i] >> 16) + 1;
 		i++;
-		if (end > ARRAY_SIZE(rom))
+		if (end > READ_BIB_ROM_SIZE)
 			/*
 			 * This block extends outside standard config
 			 * area (and the array we're reading it
 			 * into).  That's broken, so ignore this
 			 * device.
 			 */
-			return -1;
+			goto out;
 
 		/*
 		 * Now read in the block.  If this is a directory
@@ -507,9 +539,9 @@
 		while (i < end) {
 			if (read_rom(device, generation, i, &rom[i]) !=
 			    RCODE_COMPLETE)
-				return -1;
+				goto out;
 			if ((key >> 30) == 3 && (rom[i] >> 30) > 1 &&
-			    sp < ARRAY_SIZE(stack))
+			    sp < READ_BIB_STACK_SIZE)
 				stack[sp++] = i + rom[i];
 			i++;
 		}
@@ -517,13 +549,23 @@
 			length = i;
 	}
 
-	device->config_rom = kmalloc(length * 4, GFP_KERNEL);
-	if (device->config_rom == NULL)
-		return -1;
-	memcpy(device->config_rom, rom, length * 4);
-	device->config_rom_length = length;
+	old_rom = device->config_rom;
+	new_rom = kmemdup(rom, length * 4, GFP_KERNEL);
+	if (new_rom == NULL)
+		goto out;
 
-	return 0;
+	down_write(&fw_device_rwsem);
+	device->config_rom = new_rom;
+	device->config_rom_length = length;
+	up_write(&fw_device_rwsem);
+
+	kfree(old_rom);
+	ret = 0;
+	device->cmc = rom[2] & 1 << 30;
+ out:
+	kfree(rom);
+
+	return ret;
 }
 
 static void fw_unit_release(struct device *dev)
@@ -592,7 +634,14 @@
 	return 0;
 }
 
-static DECLARE_RWSEM(idr_rwsem);
+/*
+ * fw_device_rwsem acts as a dual-purpose mutex:
+ *   - serializes accesses to fw_device_idr,
+ *   - serializes accesses to fw_device.config_rom/.config_rom_length and
+ *     fw_unit.directory, unless those accesses happen at safe occasions
+ */
+DECLARE_RWSEM(fw_device_rwsem);
+
 static DEFINE_IDR(fw_device_idr);
 int fw_cdev_major;
 
@@ -600,11 +649,11 @@
 {
 	struct fw_device *device;
 
-	down_read(&idr_rwsem);
+	down_read(&fw_device_rwsem);
 	device = idr_find(&fw_device_idr, MINOR(devt));
 	if (device)
 		fw_device_get(device);
-	up_read(&idr_rwsem);
+	up_read(&fw_device_rwsem);
 
 	return device;
 }
@@ -619,9 +668,9 @@
 	device_for_each_child(&device->device, NULL, shutdown_unit);
 	device_unregister(&device->device);
 
-	down_write(&idr_rwsem);
+	down_write(&fw_device_rwsem);
 	idr_remove(&fw_device_idr, minor);
-	up_write(&idr_rwsem);
+	up_write(&fw_device_rwsem);
 	fw_device_put(device);
 }
 
@@ -674,10 +723,10 @@
 	err = -ENOMEM;
 
 	fw_device_get(device);
-	down_write(&idr_rwsem);
+	down_write(&fw_device_rwsem);
 	if (idr_pre_get(&fw_device_idr, GFP_KERNEL))
 		err = idr_get_new(&fw_device_idr, device, &minor);
-	up_write(&idr_rwsem);
+	up_write(&fw_device_rwsem);
 
 	if (err < 0)
 		goto error;
@@ -711,7 +760,7 @@
 	if (atomic_cmpxchg(&device->state,
 		    FW_DEVICE_INITIALIZING,
 		    FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN) {
-		fw_device_shutdown(&device->work.work);
+		fw_device_shutdown(work);
 	} else {
 		if (device->config_rom_retries)
 			fw_notify("created device %s: GUID %08x%08x, S%d00, "
@@ -725,6 +774,7 @@
 				  device->device.bus_id,
 				  device->config_rom[3], device->config_rom[4],
 				  1 << device->max_speed);
+		device->config_rom_retries = 0;
 	}
 
 	/*
@@ -739,9 +789,9 @@
 	return;
 
  error_with_cdev:
-	down_write(&idr_rwsem);
+	down_write(&fw_device_rwsem);
 	idr_remove(&fw_device_idr, minor);
-	up_write(&idr_rwsem);
+	up_write(&fw_device_rwsem);
  error:
 	fw_device_put(device);		/* fw_device_idr's reference */
 
@@ -771,6 +821,106 @@
 	device_for_each_child(&device->device, NULL, update_unit);
 }
 
+enum {
+	REREAD_BIB_ERROR,
+	REREAD_BIB_GONE,
+	REREAD_BIB_UNCHANGED,
+	REREAD_BIB_CHANGED,
+};
+
+/* Reread and compare bus info block and header of root directory */
+static int reread_bus_info_block(struct fw_device *device, int generation)
+{
+	u32 q;
+	int i;
+
+	for (i = 0; i < 6; i++) {
+		if (read_rom(device, generation, i, &q) != RCODE_COMPLETE)
+			return REREAD_BIB_ERROR;
+
+		if (i == 0 && q == 0)
+			return REREAD_BIB_GONE;
+
+		if (i > device->config_rom_length || q != device->config_rom[i])
+			return REREAD_BIB_CHANGED;
+	}
+
+	return REREAD_BIB_UNCHANGED;
+}
+
+static void fw_device_refresh(struct work_struct *work)
+{
+	struct fw_device *device =
+		container_of(work, struct fw_device, work.work);
+	struct fw_card *card = device->card;
+	int node_id = device->node_id;
+
+	switch (reread_bus_info_block(device, device->generation)) {
+	case REREAD_BIB_ERROR:
+		if (device->config_rom_retries < MAX_RETRIES / 2 &&
+		    atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
+			device->config_rom_retries++;
+			schedule_delayed_work(&device->work, RETRY_DELAY / 2);
+
+			return;
+		}
+		goto give_up;
+
+	case REREAD_BIB_GONE:
+		goto gone;
+
+	case REREAD_BIB_UNCHANGED:
+		if (atomic_cmpxchg(&device->state,
+			    FW_DEVICE_INITIALIZING,
+			    FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN)
+			goto gone;
+
+		fw_device_update(work);
+		device->config_rom_retries = 0;
+		goto out;
+
+	case REREAD_BIB_CHANGED:
+		break;
+	}
+
+	/*
+	 * Something changed.  We keep things simple and don't investigate
+	 * further.  We just destroy all previous units and create new ones.
+	 */
+	device_for_each_child(&device->device, NULL, shutdown_unit);
+
+	if (read_bus_info_block(device, device->generation) < 0) {
+		if (device->config_rom_retries < MAX_RETRIES &&
+		    atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
+			device->config_rom_retries++;
+			schedule_delayed_work(&device->work, RETRY_DELAY);
+
+			return;
+		}
+		goto give_up;
+	}
+
+	create_units(device);
+
+	if (atomic_cmpxchg(&device->state,
+		    FW_DEVICE_INITIALIZING,
+		    FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN)
+		goto gone;
+
+	fw_notify("refreshed device %s\n", device->device.bus_id);
+	device->config_rom_retries = 0;
+	goto out;
+
+ give_up:
+	fw_notify("giving up on refresh of device %s\n", device->device.bus_id);
+ gone:
+	atomic_set(&device->state, FW_DEVICE_SHUTDOWN);
+	fw_device_shutdown(work);
+ out:
+	if (node_id == card->root_node->node_id)
+		schedule_delayed_work(&card->work, 0);
+}
+
 void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
 {
 	struct fw_device *device;
@@ -780,7 +930,7 @@
 	case FW_NODE_LINK_ON:
 		if (!node->link_on)
 			break;
-
+ create:
 		device = kzalloc(sizeof(*device), GFP_ATOMIC);
 		if (device == NULL)
 			break;
@@ -819,6 +969,23 @@
 		schedule_delayed_work(&device->work, INITIAL_DELAY);
 		break;
 
+	case FW_NODE_INITIATED_RESET:
+		device = node->data;
+		if (device == NULL)
+			goto create;
+
+		device->node_id = node->node_id;
+		smp_wmb();  /* update node_id before generation */
+		device->generation = card->generation;
+		if (atomic_cmpxchg(&device->state,
+			    FW_DEVICE_RUNNING,
+			    FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) {
+			PREPARE_DELAYED_WORK(&device->work, fw_device_refresh);
+			schedule_delayed_work(&device->work,
+				node == card->local_node ? 0 : INITIAL_DELAY);
+		}
+		break;
+
 	case FW_NODE_UPDATED:
 		if (!node->link_on || node->data == NULL)
 			break;
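The key move in read_bus_info_block() above: the new ROM image is assembled in scratch memory, then published with a single pointer/length swap under down_write(&fw_device_rwsem), so readers never observe a mismatched config_rom/config_rom_length pair. Its essentials as a sketch:

/* Sketch: publish a freshly read buffer with a writer-side swap. */
static int publish_rom(struct fw_device *device, u32 *scratch, int length)
{
	u32 *old_rom = device->config_rom;
	u32 *new_rom = kmemdup(scratch, length * 4, GFP_KERNEL);

	if (new_rom == NULL)
		return -ENOMEM;

	down_write(&fw_device_rwsem);	/* excludes sysfs and cdev readers */
	device->config_rom = new_rom;
	device->config_rom_length = length;
	up_write(&fw_device_rwsem);

	kfree(old_rom);			/* no reader can still hold it */
	return 0;
}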
diff --git a/drivers/firewire/fw-device.h b/drivers/firewire/fw-device.h
index 78ecd39..5f131f5 100644
--- a/drivers/firewire/fw-device.h
+++ b/drivers/firewire/fw-device.h
@@ -21,6 +21,7 @@
 
 #include <linux/fs.h>
 #include <linux/cdev.h>
+#include <linux/rwsem.h>
 #include <asm/atomic.h>
 
 enum fw_device_state {
@@ -46,6 +47,11 @@
  * fw_device.node_id is guaranteed to be current too.
  *
  * The same applies to fw_device.card->node_id vs. fw_device.generation.
+ *
+ * fw_device.config_rom and fw_device.config_rom_length may be accessed during
+ * the lifetime of any fw_unit belonging to the fw_device, before device_del()
+ * was called on the last fw_unit.  Alternatively, they may be accessed while
+ * holding fw_device_rwsem.
  */
 struct fw_device {
 	atomic_t state;
@@ -53,6 +59,7 @@
 	int node_id;
 	int generation;
 	unsigned max_speed;
+	bool cmc;
 	struct fw_card *card;
 	struct device device;
 	struct list_head link;
@@ -64,28 +71,24 @@
 	struct fw_attribute_group attribute_group;
 };
 
-static inline struct fw_device *
-fw_device(struct device *dev)
+static inline struct fw_device *fw_device(struct device *dev)
 {
 	return container_of(dev, struct fw_device, device);
 }
 
-static inline int
-fw_device_is_shutdown(struct fw_device *device)
+static inline int fw_device_is_shutdown(struct fw_device *device)
 {
 	return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN;
 }
 
-static inline struct fw_device *
-fw_device_get(struct fw_device *device)
+static inline struct fw_device *fw_device_get(struct fw_device *device)
 {
 	get_device(&device->device);
 
 	return device;
 }
 
-static inline void
-fw_device_put(struct fw_device *device)
+static inline void fw_device_put(struct fw_device *device)
 {
 	put_device(&device->device);
 }
@@ -96,20 +99,35 @@
 void fw_device_cdev_update(struct fw_device *device);
 void fw_device_cdev_remove(struct fw_device *device);
 
+extern struct rw_semaphore fw_device_rwsem;
 extern int fw_cdev_major;
 
+/*
+ * fw_unit.directory must not be accessed after device_del(&fw_unit.device).
+ */
 struct fw_unit {
 	struct device device;
 	u32 *directory;
 	struct fw_attribute_group attribute_group;
 };
 
-static inline struct fw_unit *
-fw_unit(struct device *dev)
+static inline struct fw_unit *fw_unit(struct device *dev)
 {
 	return container_of(dev, struct fw_unit, device);
 }
 
+static inline struct fw_unit *fw_unit_get(struct fw_unit *unit)
+{
+	get_device(&unit->device);
+
+	return unit;
+}
+
+static inline void fw_unit_put(struct fw_unit *unit)
+{
+	put_device(&unit->device);
+}
+
 #define CSR_OFFSET	0x40
 #define CSR_LEAF	0x80
 #define CSR_DIRECTORY	0xc0
diff --git a/drivers/firewire/fw-iso.c b/drivers/firewire/fw-iso.c
index 2b640e9..bcbe794 100644
--- a/drivers/firewire/fw-iso.c
+++ b/drivers/firewire/fw-iso.c
@@ -126,7 +126,6 @@
 
 	return ctx;
 }
-EXPORT_SYMBOL(fw_iso_context_create);
 
 void fw_iso_context_destroy(struct fw_iso_context *ctx)
 {
@@ -134,14 +133,12 @@
 
 	card->driver->free_iso_context(ctx);
 }
-EXPORT_SYMBOL(fw_iso_context_destroy);
 
 int
 fw_iso_context_start(struct fw_iso_context *ctx, int cycle, int sync, int tags)
 {
 	return ctx->card->driver->start_iso(ctx, cycle, sync, tags);
 }
-EXPORT_SYMBOL(fw_iso_context_start);
 
 int
 fw_iso_context_queue(struct fw_iso_context *ctx,
@@ -153,11 +150,9 @@
 
 	return card->driver->queue_iso(ctx, packet, buffer, payload);
 }
-EXPORT_SYMBOL(fw_iso_context_queue);
 
 int
 fw_iso_context_stop(struct fw_iso_context *ctx)
 {
 	return ctx->card->driver->stop_iso(ctx);
 }
-EXPORT_SYMBOL(fw_iso_context_stop);
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index ca6d51e..4f02c55 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -27,6 +27,7 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/module.h>
+#include <linux/moduleparam.h>
 #include <linux/pci.h>
 #include <linux/spinlock.h>
 
@@ -177,9 +178,10 @@
 	struct tasklet_struct bus_reset_tasklet;
 	int node_id;
 	int generation;
-	int request_generation;
+	int request_generation;	/* for timestamping incoming requests */
 	u32 bus_seconds;
 	bool old_uninorth;
+	bool bus_reset_packet_quirk;
 
 	/*
 	 * Spinlock for accessing fw_ohci data.  Never call out of
@@ -237,6 +239,196 @@
 
 static char ohci_driver_name[] = KBUILD_MODNAME;
 
+#ifdef CONFIG_FIREWIRE_OHCI_DEBUG
+
+#define OHCI_PARAM_DEBUG_AT_AR		1
+#define OHCI_PARAM_DEBUG_SELFIDS	2
+#define OHCI_PARAM_DEBUG_IRQS		4
+#define OHCI_PARAM_DEBUG_BUSRESETS	8 /* only effective before chip init */
+
+static int param_debug;
+module_param_named(debug, param_debug, int, 0644);
+MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
+	", AT/AR events = "	__stringify(OHCI_PARAM_DEBUG_AT_AR)
+	", self-IDs = "		__stringify(OHCI_PARAM_DEBUG_SELFIDS)
+	", IRQs = "		__stringify(OHCI_PARAM_DEBUG_IRQS)
+	", busReset events = "	__stringify(OHCI_PARAM_DEBUG_BUSRESETS)
+	", or a combination, or all = -1)");
+
+static void log_irqs(u32 evt)
+{
+	if (likely(!(param_debug &
+			(OHCI_PARAM_DEBUG_IRQS | OHCI_PARAM_DEBUG_BUSRESETS))))
+		return;
+
+	if (!(param_debug & OHCI_PARAM_DEBUG_IRQS) &&
+	    !(evt & OHCI1394_busReset))
+		return;
+
+	printk(KERN_DEBUG KBUILD_MODNAME ": IRQ "
+	       "%08x%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+	       evt,
+	       evt & OHCI1394_selfIDComplete	? " selfID"		: "",
+	       evt & OHCI1394_RQPkt		? " AR_req"		: "",
+	       evt & OHCI1394_RSPkt		? " AR_resp"		: "",
+	       evt & OHCI1394_reqTxComplete	? " AT_req"		: "",
+	       evt & OHCI1394_respTxComplete	? " AT_resp"		: "",
+	       evt & OHCI1394_isochRx		? " IR"			: "",
+	       evt & OHCI1394_isochTx		? " IT"			: "",
+	       evt & OHCI1394_postedWriteErr	? " postedWriteErr"	: "",
+	       evt & OHCI1394_cycleTooLong	? " cycleTooLong"	: "",
+	       evt & OHCI1394_cycle64Seconds	? " cycle64Seconds"	: "",
+	       evt & OHCI1394_regAccessFail	? " regAccessFail"	: "",
+	       evt & OHCI1394_busReset		? " busReset"		: "",
+	       evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
+		       OHCI1394_RSPkt | OHCI1394_reqTxComplete |
+		       OHCI1394_respTxComplete | OHCI1394_isochRx |
+		       OHCI1394_isochTx | OHCI1394_postedWriteErr |
+		       OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
+		       OHCI1394_regAccessFail | OHCI1394_busReset)
+						? " ?"			: "");
+}
+
+static const char *speed[] = {
+	[0] = "S100", [1] = "S200", [2] = "S400",    [3] = "beta",
+};
+static const char *power[] = {
+	[0] = "+0W",  [1] = "+15W", [2] = "+30W",    [3] = "+45W",
+	[4] = "-3W",  [5] = " ?W",  [6] = "-3..-6W", [7] = "-3..-10W",
+};
+static const char port[] = { '.', '-', 'p', 'c', };
+
+static char _p(u32 *s, int shift)
+{
+	return port[*s >> shift & 3];
+}
+
+static void log_selfids(int node_id, int generation, int self_id_count, u32 *s)
+{
+	if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
+		return;
+
+	printk(KERN_DEBUG KBUILD_MODNAME ": %d selfIDs, generation %d, "
+	       "local node ID %04x\n", self_id_count, generation, node_id);
+
+	for (; self_id_count--; ++s)
+		if ((*s & 1 << 23) == 0)
+			printk(KERN_DEBUG "selfID 0: %08x, phy %d [%c%c%c] "
+			       "%s gc=%d %s %s%s%s\n",
+			       *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2),
+			       speed[*s >> 14 & 3], *s >> 16 & 63,
+			       power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
+			       *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
+		else
+			printk(KERN_DEBUG "selfID n: %08x, phy %d "
+			       "[%c%c%c%c%c%c%c%c]\n",
+			       *s, *s >> 24 & 63,
+			       _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10),
+			       _p(s,  8), _p(s,  6), _p(s,  4), _p(s,  2));
+}
+
+static const char *evts[] = {
+	[0x00] = "evt_no_status",	[0x01] = "-reserved-",
+	[0x02] = "evt_long_packet",	[0x03] = "evt_missing_ack",
+	[0x04] = "evt_underrun",	[0x05] = "evt_overrun",
+	[0x06] = "evt_descriptor_read",	[0x07] = "evt_data_read",
+	[0x08] = "evt_data_write",	[0x09] = "evt_bus_reset",
+	[0x0a] = "evt_timeout",		[0x0b] = "evt_tcode_err",
+	[0x0c] = "-reserved-",		[0x0d] = "-reserved-",
+	[0x0e] = "evt_unknown",		[0x0f] = "evt_flushed",
+	[0x10] = "-reserved-",		[0x11] = "ack_complete",
+	[0x12] = "ack_pending ",	[0x13] = "-reserved-",
+	[0x14] = "ack_busy_X",		[0x15] = "ack_busy_A",
+	[0x16] = "ack_busy_B",		[0x17] = "-reserved-",
+	[0x18] = "-reserved-",		[0x19] = "-reserved-",
+	[0x1a] = "-reserved-",		[0x1b] = "ack_tardy",
+	[0x1c] = "-reserved-",		[0x1d] = "ack_data_error",
+	[0x1e] = "ack_type_error",	[0x1f] = "-reserved-",
+	[0x20] = "pending/cancelled",
+};
+static const char *tcodes[] = {
+	[0x0] = "QW req",		[0x1] = "BW req",
+	[0x2] = "W resp",		[0x3] = "-reserved-",
+	[0x4] = "QR req",		[0x5] = "BR req",
+	[0x6] = "QR resp",		[0x7] = "BR resp",
+	[0x8] = "cycle start",		[0x9] = "Lk req",
+	[0xa] = "async stream packet",	[0xb] = "Lk resp",
+	[0xc] = "-reserved-",		[0xd] = "-reserved-",
+	[0xe] = "link internal",	[0xf] = "-reserved-",
+};
+static const char *phys[] = {
+	[0x0] = "phy config packet",	[0x1] = "link-on packet",
+	[0x2] = "self-id packet",	[0x3] = "-reserved-",
+};
+
+static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
+{
+	int tcode = header[0] >> 4 & 0xf;
+	char specific[12];
+
+	if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR)))
+		return;
+
+	if (unlikely(evt >= ARRAY_SIZE(evts)))
+		evt = 0x1f;
+
+	if (evt == OHCI1394_evt_bus_reset) {
+		printk(KERN_DEBUG "A%c evt_bus_reset, generation %d\n",
+		       dir, (header[2] >> 16) & 0xff);
+		return;
+	}
+
+	if (header[0] == ~header[1]) {
+		printk(KERN_DEBUG "A%c %s, %s, %08x\n",
+		       dir, evts[evt], phys[header[0] >> 30 & 0x3],
+		       header[0]);
+		return;
+	}
+
+	switch (tcode) {
+	case 0x0: case 0x6: case 0x8:
+		snprintf(specific, sizeof(specific), " = %08x",
+			 be32_to_cpu((__force __be32)header[3]));
+		break;
+	case 0x1: case 0x5: case 0x7: case 0x9: case 0xb:
+		snprintf(specific, sizeof(specific), " %x,%x",
+			 header[3] >> 16, header[3] & 0xffff);
+		break;
+	default:
+		specific[0] = '\0';
+	}
+
+	switch (tcode) {
+	case 0xe: case 0xa:
+		printk(KERN_DEBUG "A%c %s, %s\n",
+		       dir, evts[evt], tcodes[tcode]);
+		break;
+	case 0x0: case 0x1: case 0x4: case 0x5: case 0x9:
+		printk(KERN_DEBUG "A%c spd %x tl %02x, "
+		       "%04x -> %04x, %s, "
+		       "%s, %04x%08x%s\n",
+		       dir, speed, header[0] >> 10 & 0x3f,
+		       header[1] >> 16, header[0] >> 16, evts[evt],
+		       tcodes[tcode], header[1] & 0xffff, header[2], specific);
+		break;
+	default:
+		printk(KERN_DEBUG "A%c spd %x tl %02x, "
+		       "%04x -> %04x, %s, "
+		       "%s%s\n",
+		       dir, speed, header[0] >> 10 & 0x3f,
+		       header[1] >> 16, header[0] >> 16, evts[evt],
+		       tcodes[tcode], specific);
+	}
+}
+
+#else
+
+#define log_irqs(evt)
+#define log_selfids(node_id, generation, self_id_count, sid)
+#define log_ar_at_event(dir, speed, header, evt)
+
+#endif /* CONFIG_FIREWIRE_OHCI_DEBUG */
+
 static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
 {
 	writel(data, ohci->registers + offset);
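The debug block above is the now-common bitmask module parameter: each log class costs one bit, the parameter is writable at runtime via sysfs (mode 0644), and __stringify() keeps the help text in step with the #defines. The same pattern for a hypothetical driver:

/* Sketch: runtime-tunable debug mask, e.g.
 *   echo -1 > /sys/module/sketchdrv/parameters/debug */
#define SKETCH_DEBUG_IRQS	1
#define SKETCH_DEBUG_EVENTS	2

static int param_debug;
module_param_named(debug, param_debug, int, 0644);
MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
	", IRQs = "	__stringify(SKETCH_DEBUG_IRQS)
	", events = "	__stringify(SKETCH_DEBUG_EVENTS)
	", or a combination, or all = -1)");

static void log_irq(u32 evt)
{
	if (likely(!(param_debug & SKETCH_DEBUG_IRQS)))
		return;			/* compiled in, nearly free when off */
	printk(KERN_DEBUG "sketchdrv: IRQ %08x\n", evt);
}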
@@ -320,6 +512,7 @@
 	struct fw_ohci *ohci = ctx->ohci;
 	struct fw_packet p;
 	u32 status, length, tcode;
+	int evt;
 
 	p.header[0] = cond_le32_to_cpu(buffer[0]);
 	p.header[1] = cond_le32_to_cpu(buffer[1]);
@@ -362,12 +555,15 @@
 	/* FIXME: What to do about evt_* errors? */
 	length = (p.header_length + p.payload_length + 3) / 4;
 	status = cond_le32_to_cpu(buffer[length]);
+	evt    = (status >> 16) & 0x1f;
 
-	p.ack        = ((status >> 16) & 0x1f) - 16;
+	p.ack        = evt - 16;
 	p.speed      = (status >> 21) & 0x7;
 	p.timestamp  = status & 0xffff;
 	p.generation = ohci->request_generation;
 
+	log_ar_at_event('R', p.speed, p.header, evt);
+
 	/*
 	 * The OHCI bus reset handler synthesizes a phy packet with
 	 * the new generation number when a bus reset happens (see
@@ -376,14 +572,19 @@
 	 * generation.  We only need this for requests; for responses
 	 * we use the unique tlabel for finding the matching
 	 * request.
+	 *
+	 * Alas some chips sometimes emit bus reset packets with a
+	 * wrong generation.  We set the correct generation for these
+	 * at a slightly incorrect time (in bus_reset_tasklet).
 	 */
-
-	if (p.ack + 16 == 0x09)
-		ohci->request_generation = (p.header[2] >> 16) & 0xff;
-	else if (ctx == &ohci->ar_request_ctx)
+	if (evt == OHCI1394_evt_bus_reset) {
+		if (!ohci->bus_reset_packet_quirk)
+			ohci->request_generation = (p.header[2] >> 16) & 0xff;
+	} else if (ctx == &ohci->ar_request_ctx) {
 		fw_core_handle_request(&ohci->card, &p);
-	else
+	} else {
 		fw_core_handle_response(&ohci->card, &p);
+	}
 
 	return buffer + length + 1;
 }
@@ -770,8 +971,19 @@
 				     DESCRIPTOR_IRQ_ALWAYS |
 				     DESCRIPTOR_BRANCH_ALWAYS);
 
-	/* FIXME: Document how the locking works. */
-	if (ohci->generation != packet->generation) {
+	/*
+	 * If the controller and packet generations don't match, we need to
+	 * bail out and try again.  If IntEvent.busReset is set, the AT context
+	 * is halted, so appending to the context and trying to run it is
+	 * futile.  Most controllers do the right thing and just flush the AT
+	 * queue (per section 7.2.3.2 of the OHCI 1.1 specification), but
+	 * some controllers (like a JMicron JMB381 PCI-e) misbehave and wind
+	 * up stalling out.  So we just bail out in software and try again
+	 * later, and everyone is happy.
+	 * FIXME: Document how the locking works.
+	 */
+	if (ohci->generation != packet->generation ||
+	    reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
 		if (packet->payload_length > 0)
 			dma_unmap_single(ohci->card.device, payload_bus,
 					 packet->payload_length, DMA_TO_DEVICE);
@@ -817,6 +1029,8 @@
 	evt = le16_to_cpu(last->transfer_status) & 0x1f;
 	packet->timestamp = le16_to_cpu(last->res_count);
 
+	log_ar_at_event('T', packet->speed, packet->header, evt);
+
 	switch (evt) {
 	case OHCI1394_evt_timeout:
 		/* Async response transmit timed out. */
@@ -1019,20 +1233,30 @@
 	ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
 			       OHCI1394_NodeID_nodeNumber);
 
+	reg = reg_read(ohci, OHCI1394_SelfIDCount);
+	if (reg & OHCI1394_SelfIDCount_selfIDError) {
+		fw_notify("inconsistent self IDs\n");
+		return;
+	}
 	/*
 	 * The count in the SelfIDCount register is the number of
 	 * bytes in the self ID receive buffer.  Since we also receive
 	 * the inverted quadlets and a header quadlet, we shift one
 	 * bit extra to get the actual number of self IDs.
 	 */
-
-	self_id_count = (reg_read(ohci, OHCI1394_SelfIDCount) >> 3) & 0x3ff;
+	self_id_count = (reg >> 3) & 0x3ff;
+	if (self_id_count == 0) {
+		fw_notify("inconsistent self IDs\n");
+		return;
+	}
 	generation = (cond_le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;
 	rmb();
 
 	for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
-		if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1])
-			fw_error("inconsistent self IDs\n");
+		if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) {
+			fw_notify("inconsistent self IDs\n");
+			return;
+		}
 		ohci->self_id_buffer[j] =
 				cond_le32_to_cpu(ohci->self_id_cpu[i]);
 	}
@@ -1067,6 +1291,9 @@
 	context_stop(&ohci->at_response_ctx);
 	reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
 
+	if (ohci->bus_reset_packet_quirk)
+		ohci->request_generation = generation;
+
 	/*
 	 * This next bit is unrelated to the AT context stuff but we
 	 * have to do it under the spinlock also.  If a new config rom
@@ -1097,12 +1324,20 @@
 		reg_write(ohci, OHCI1394_ConfigROMhdr, ohci->next_header);
 	}
 
+#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
+	reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
+	reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
+#endif
+
 	spin_unlock_irqrestore(&ohci->lock, flags);
 
 	if (free_rom)
 		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
 				  free_rom, free_rom_bus);
 
+	log_selfids(ohci->node_id, generation,
+		    self_id_count, ohci->self_id_buffer);
+
 	fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
 				 self_id_count, ohci->self_id_buffer);
 }
@@ -1118,7 +1353,9 @@
 	if (!event || !~event)
 		return IRQ_NONE;
 
-	reg_write(ohci, OHCI1394_IntEventClear, event);
+	/* busReset must not be cleared yet, see OHCI 1.1 clause 7.2.3.2 */
+	reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
+	log_irqs(event);
 
 	if (event & OHCI1394_selfIDComplete)
 		tasklet_schedule(&ohci->bus_reset_tasklet);
@@ -1153,6 +1390,10 @@
 		iso_event &= ~(1 << i);
 	}
 
+	if (unlikely(event & OHCI1394_regAccessFail))
+		fw_error("Register access failure - "
+			 "please notify linux1394-devel@lists.sf.net\n");
+
 	if (unlikely(event & OHCI1394_postedWriteErr))
 		fw_error("PCI posted write error\n");
 
@@ -1192,6 +1433,8 @@
 {
 	struct fw_ohci *ohci = fw_ohci(card);
 	struct pci_dev *dev = to_pci_dev(card->device);
+	u32 lps;
+	int i;
 
 	if (software_reset(ohci)) {
 		fw_error("Failed to reset ohci card.\n");
@@ -1203,13 +1446,24 @@
 	 * most of the registers.  In fact, on some cards (ALI M5251),
 	 * accessing registers in the SClk domain without LPS enabled
 	 * will lock up the machine.  Wait 50msec to make sure we have
-	 * full link enabled.
+	 * full link enabled.  However, with some cards (well, at least
+	 * a JMicron PCIe card), we have to try again sometimes.
 	 */
 	reg_write(ohci, OHCI1394_HCControlSet,
 		  OHCI1394_HCControl_LPS |
 		  OHCI1394_HCControl_postedWriteEnable);
 	flush_writes(ohci);
-	msleep(50);
+
+	for (lps = 0, i = 0; !lps && i < 3; i++) {
+		msleep(50);
+		lps = reg_read(ohci, OHCI1394_HCControlSet) &
+		      OHCI1394_HCControl_LPS;
+	}
+
+	if (!lps) {
+		fw_error("Failed to set Link Power Status\n");
+		return -EIO;
+	}
 
 	reg_write(ohci, OHCI1394_HCControlClear,
 		  OHCI1394_HCControl_noByteSwapData);
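The LPS change above turns one fixed msleep(50) into a bounded poll: set the bit, then re-read HCControl up to three times before giving up with -EIO. The retry skeleton, abstracted with hypothetical register and helper names:

/* Sketch: bounded poll for a hardware ready bit, instead of one blind sleep. */
static int wait_link_power(struct sketch_hw *hw)
{
	u32 lps = 0;
	int i;

	for (i = 0; !lps && i < 3; i++) {
		msleep(50);			/* give the chip time to settle */
		lps = hw_read(hw, HW_CONTROL) & HW_CONTROL_LPS;
	}

	return lps ? 0 : -EIO;			/* fail loudly after 3 tries */
}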
@@ -1237,7 +1491,10 @@
 		  OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
 		  OHCI1394_isochRx | OHCI1394_isochTx |
 		  OHCI1394_postedWriteErr | OHCI1394_cycleTooLong |
-		  OHCI1394_cycle64Seconds | OHCI1394_masterIntEnable);
+		  OHCI1394_cycle64Seconds | OHCI1394_regAccessFail |
+		  OHCI1394_masterIntEnable);
+	if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
+		reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
 
 	/* Activate link_on bit and contender bit in our self ID packets.*/
 	if (ohci_update_phy_reg(card, 4, 0,
@@ -1421,6 +1678,7 @@
 	if (packet->ack != 0)
 		goto out;
 
+	log_ar_at_event('T', packet->speed, packet->header, 0x20);
 	driver_data->packet = NULL;
 	packet->ack = RCODE_CANCELLED;
 	packet->callback(packet, &ohci->card, packet->ack);
@@ -1435,6 +1693,9 @@
 static int
 ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
 {
+#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
+	return 0;
+#else
 	struct fw_ohci *ohci = fw_ohci(card);
 	unsigned long flags;
 	int n, retval = 0;
@@ -1466,6 +1727,7 @@
  out:
 	spin_unlock_irqrestore(&ohci->lock, flags);
 	return retval;
+#endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
 }
 
 static u64
@@ -2045,6 +2307,35 @@
 	.stop_iso		= ohci_stop_iso,
 };
 
+#ifdef CONFIG_PPC_PMAC
+static void ohci_pmac_on(struct pci_dev *dev)
+{
+	if (machine_is(powermac)) {
+		struct device_node *ofn = pci_device_to_OF_node(dev);
+
+		if (ofn) {
+			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
+			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
+		}
+	}
+}
+
+static void ohci_pmac_off(struct pci_dev *dev)
+{
+	if (machine_is(powermac)) {
+		struct device_node *ofn = pci_device_to_OF_node(dev);
+
+		if (ofn) {
+			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
+			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
+		}
+	}
+}
+#else
+#define ohci_pmac_on(dev)
+#define ohci_pmac_off(dev)
+#endif /* CONFIG_PPC_PMAC */
+
 static int __devinit
 pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
 {
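
The two helpers above follow a standard kernel pattern: real functions when CONFIG_PPC_PMAC is set, empty macro stubs otherwise, so the probe, remove, suspend, and resume paths can all call them unconditionally. A minimal stand-alone sketch of the pattern, using a hypothetical HAVE_PLATFORM_POWER symbol (the do-while(0) form is the slightly safer variant of the empty stub):

#include <stdio.h>

/* #define HAVE_PLATFORM_POWER */	/* hypothetical config symbol */

#ifdef HAVE_PLATFORM_POWER
static void platform_power_on(int dev)
{
	printf("powering on device %d\n", dev);	/* platform hooks here */
}
#else
/* No-op stub: callers need no #ifdef of their own. */
#define platform_power_on(dev) do { } while (0)
#endif

int main(void)
{
	platform_power_on(0);	/* compiles and runs either way */
	return 0;
}
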
@@ -2054,18 +2345,6 @@
 	int err;
 	size_t size;
 
-#ifdef CONFIG_PPC_PMAC
-	/* Necessary on some machines if fw-ohci was loaded/ unloaded before */
-	if (machine_is(powermac)) {
-		struct device_node *ofn = pci_device_to_OF_node(dev);
-
-		if (ofn) {
-			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
-			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
-		}
-	}
-#endif /* CONFIG_PPC_PMAC */
-
 	ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
 	if (ohci == NULL) {
 		fw_error("Could not malloc fw_ohci data.\n");
@@ -2074,10 +2353,12 @@
 
 	fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);
 
+	ohci_pmac_on(dev);
+
 	err = pci_enable_device(dev);
 	if (err) {
 		fw_error("Failed to enable OHCI hardware.\n");
-		goto fail_put_card;
+		goto fail_free;
 	}
 
 	pci_set_master(dev);
@@ -2088,6 +2369,8 @@
 	ohci->old_uninorth = dev->vendor == PCI_VENDOR_ID_APPLE &&
 			     dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW;
 #endif
+	ohci->bus_reset_packet_quirk = dev->vendor == PCI_VENDOR_ID_TI;
+
 	spin_lock_init(&ohci->lock);
 
 	tasklet_init(&ohci->bus_reset_tasklet,
@@ -2173,8 +2456,9 @@
 	pci_release_region(dev, 0);
  fail_disable:
 	pci_disable_device(dev);
- fail_put_card:
-	fw_card_put(&ohci->card);
+ fail_free:
+	kfree(&ohci->card);
+	ohci_pmac_off(dev);
 
 	return err;
 }
@@ -2202,72 +2486,42 @@
 	pci_iounmap(dev, ohci->registers);
 	pci_release_region(dev, 0);
 	pci_disable_device(dev);
-	fw_card_put(&ohci->card);
-
-#ifdef CONFIG_PPC_PMAC
-	/* On UniNorth, power down the cable and turn off the chip clock
-	 * to save power on laptops */
-	if (machine_is(powermac)) {
-		struct device_node *ofn = pci_device_to_OF_node(dev);
-
-		if (ofn) {
-			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
-			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
-		}
-	}
-#endif /* CONFIG_PPC_PMAC */
+	kfree(&ohci->card);
+	ohci_pmac_off(dev);
 
 	fw_notify("Removed fw-ohci device.\n");
 }
 
 #ifdef CONFIG_PM
-static int pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int pci_suspend(struct pci_dev *dev, pm_message_t state)
 {
-	struct fw_ohci *ohci = pci_get_drvdata(pdev);
+	struct fw_ohci *ohci = pci_get_drvdata(dev);
 	int err;
 
 	software_reset(ohci);
-	free_irq(pdev->irq, ohci);
-	err = pci_save_state(pdev);
+	free_irq(dev->irq, ohci);
+	err = pci_save_state(dev);
 	if (err) {
 		fw_error("pci_save_state failed\n");
 		return err;
 	}
-	err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
+	err = pci_set_power_state(dev, pci_choose_state(dev, state));
 	if (err)
 		fw_error("pci_set_power_state failed with %d\n", err);
-
-/* PowerMac suspend code comes last */
-#ifdef CONFIG_PPC_PMAC
-	if (machine_is(powermac)) {
-		struct device_node *ofn = pci_device_to_OF_node(pdev);
-
-		if (ofn)
-			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
-	}
-#endif /* CONFIG_PPC_PMAC */
+	ohci_pmac_off(dev);
 
 	return 0;
 }
 
-static int pci_resume(struct pci_dev *pdev)
+static int pci_resume(struct pci_dev *dev)
 {
-	struct fw_ohci *ohci = pci_get_drvdata(pdev);
+	struct fw_ohci *ohci = pci_get_drvdata(dev);
 	int err;
 
-/* PowerMac resume code comes first */
-#ifdef CONFIG_PPC_PMAC
-	if (machine_is(powermac)) {
-		struct device_node *ofn = pci_device_to_OF_node(pdev);
-
-		if (ofn)
-			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
-	}
-#endif /* CONFIG_PPC_PMAC */
-
-	pci_set_power_state(pdev, PCI_D0);
-	pci_restore_state(pdev);
-	err = pci_enable_device(pdev);
+	ohci_pmac_on(dev);
+	pci_set_power_state(dev, PCI_D0);
+	pci_restore_state(dev);
+	err = pci_enable_device(dev);
 	if (err) {
 		fw_error("pci_enable_device failed\n");
 		return err;
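
The LPS hunk above replaces a single fixed 50 ms delay with a bounded poll: sleep, re-read HCControl, and give up with -EIO after three attempts. A minimal user-space sketch of the same retry shape, with a hypothetical read_status() standing in for the OHCI register read:

#include <stdio.h>
#include <unistd.h>

#define LPS_BIT 0x00080000u	/* OHCI1394_HCControl_LPS */

/* Hypothetical register read: here the link "powers up" on the second
 * poll, as a slow card might. */
static unsigned int read_status(void)
{
	static int polls;

	return ++polls >= 2 ? LPS_BIT : 0;
}

static int wait_for_lps(void)
{
	unsigned int lps = 0;
	int i;

	for (i = 0; !lps && i < 3; i++) {
		usleep(50 * 1000);	/* the driver's msleep(50) */
		lps = read_status() & LPS_BIT;
	}

	return lps ? 0 : -1;	/* the driver returns -EIO */
}

int main(void)
{
	printf("LPS %s\n", wait_for_lps() ? "failed" : "set");
	return 0;
}
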
diff --git a/drivers/firewire/fw-ohci.h b/drivers/firewire/fw-ohci.h
index dec4f04..a2fbb62 100644
--- a/drivers/firewire/fw-ohci.h
+++ b/drivers/firewire/fw-ohci.h
@@ -30,6 +30,7 @@
 #define  OHCI1394_HCControl_softReset		0x00010000
 #define OHCI1394_SelfIDBuffer                 0x064
 #define OHCI1394_SelfIDCount                  0x068
+#define  OHCI1394_SelfIDCount_selfIDError	0x80000000
 #define OHCI1394_IRMultiChanMaskHiSet         0x070
 #define OHCI1394_IRMultiChanMaskHiClear       0x074
 #define OHCI1394_IRMultiChanMaskLoSet         0x078
@@ -124,6 +125,7 @@
 #define OHCI1394_lockRespErr		0x00000200
 #define OHCI1394_selfIDComplete		0x00010000
 #define OHCI1394_busReset		0x00020000
+#define OHCI1394_regAccessFail		0x00040000
 #define OHCI1394_phy			0x00080000
 #define OHCI1394_cycleSynch		0x00100000
 #define OHCI1394_cycle64Seconds		0x00200000
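
These new bit definitions pair with the interrupt-handler hunk earlier in the patch: everything except busReset is cleared (per the OHCI 1.1 clause 7.2.3.2 note above, busReset must stay pending for now), and regAccessFail is reported. A rough sketch of that masking logic, with the MMIO accesses replaced by hypothetical in-memory stand-ins:

#include <stdint.h>
#include <stdio.h>

#define OHCI1394_busReset	0x00020000u
#define OHCI1394_regAccessFail	0x00040000u

/* Hypothetical in-memory stand-ins for the IntEvent MMIO registers. */
static uint32_t int_event = OHCI1394_busReset | OHCI1394_regAccessFail;

static uint32_t reg_read_int_event(void)
{
	return int_event;
}

static void reg_write_int_event_clear(uint32_t mask)
{
	int_event &= ~mask;
}

static void irq_handler(void)
{
	uint32_t event = reg_read_int_event();

	if (!event || !~event)	/* nothing pending, or card gone (all ones) */
		return;

	/* busReset must not be cleared yet (OHCI 1.1 clause 7.2.3.2). */
	reg_write_int_event_clear(event & ~OHCI1394_busReset);

	if (event & OHCI1394_regAccessFail)
		fprintf(stderr, "register access failure\n");
}

int main(void)
{
	irq_handler();
	printf("still pending: 0x%08x\n", (unsigned)reg_read_int_event());
	return 0;
}
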
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
index 62b4e47..2a99937 100644
--- a/drivers/firewire/fw-sbp2.c
+++ b/drivers/firewire/fw-sbp2.c
@@ -153,6 +153,7 @@
 	struct list_head lu_list;
 
 	u64 management_agent_address;
+	u64 guid;
 	int directory_id;
 	int node_id;
 	int address_high;
@@ -173,10 +174,8 @@
 #define SBP2_ORB_TIMEOUT		2000U	/* Timeout in ms */
 #define SBP2_ORB_NULL			0x80000000
 #define SBP2_MAX_SG_ELEMENT_LENGTH	0xf000
-#define SBP2_RETRY_LIMIT		0xf	/* 15 retries */
-
-#define SBP2_DIRECTION_TO_MEDIA		0x0
-#define SBP2_DIRECTION_FROM_MEDIA	0x1
+#define SBP2_RETRY_LIMIT		0xf		/* 15 retries */
+#define SBP2_CYCLE_LIMIT		(0xc8 << 12)	/* 200 125us cycles */
 
 /* Unit directory keys */
 #define SBP2_CSR_UNIT_CHARACTERISTICS	0x3a
@@ -224,8 +223,8 @@
 };
 
 struct sbp2_pointer {
-	u32 high;
-	u32 low;
+	__be32 high;
+	__be32 low;
 };
 
 struct sbp2_orb {
@@ -253,8 +252,8 @@
 	struct {
 		struct sbp2_pointer password;
 		struct sbp2_pointer response;
-		u32 misc;
-		u32 length;
+		__be32 misc;
+		__be32 length;
 		struct sbp2_pointer status_fifo;
 	} request;
 	__be32 response[4];
@@ -263,20 +262,17 @@
 	struct sbp2_status status;
 };
 
-#define LOGIN_RESPONSE_GET_LOGIN_ID(v)	((v).misc & 0xffff)
-#define LOGIN_RESPONSE_GET_LENGTH(v)	(((v).misc >> 16) & 0xffff)
-
 struct sbp2_login_response {
-	u32 misc;
+	__be32 misc;
 	struct sbp2_pointer command_block_agent;
-	u32 reconnect_hold;
+	__be32 reconnect_hold;
 };
 #define COMMAND_ORB_DATA_SIZE(v)	((v))
 #define COMMAND_ORB_PAGE_SIZE(v)	((v) << 16)
 #define COMMAND_ORB_PAGE_TABLE_PRESENT	((1) << 19)
 #define COMMAND_ORB_MAX_PAYLOAD(v)	((v) << 20)
 #define COMMAND_ORB_SPEED(v)		((v) << 24)
-#define COMMAND_ORB_DIRECTION(v)	((v) << 27)
+#define COMMAND_ORB_DIRECTION		((1) << 27)
 #define COMMAND_ORB_REQUEST_FORMAT(v)	((v) << 29)
 #define COMMAND_ORB_NOTIFY		((1) << 31)
 
@@ -285,7 +281,7 @@
 	struct {
 		struct sbp2_pointer next;
 		struct sbp2_pointer data_descriptor;
-		u32 misc;
+		__be32 misc;
 		u8 command_block[12];
 	} request;
 	struct scsi_cmnd *cmd;
@@ -459,8 +455,7 @@
 	unsigned long flags;
 
 	orb->pointer.high = 0;
-	orb->pointer.low = orb->request_bus;
-	fw_memcpy_to_be32(&orb->pointer, &orb->pointer, sizeof(orb->pointer));
+	orb->pointer.low = cpu_to_be32(orb->request_bus);
 
 	spin_lock_irqsave(&device->card->lock, flags);
 	list_add_tail(&orb->link, &lu->orb_list);
@@ -536,31 +531,31 @@
 	if (dma_mapping_error(orb->response_bus))
 		goto fail_mapping_response;
 
-	orb->request.response.high    = 0;
-	orb->request.response.low     = orb->response_bus;
+	orb->request.response.high = 0;
+	orb->request.response.low  = cpu_to_be32(orb->response_bus);
 
-	orb->request.misc =
+	orb->request.misc = cpu_to_be32(
 		MANAGEMENT_ORB_NOTIFY |
 		MANAGEMENT_ORB_FUNCTION(function) |
-		MANAGEMENT_ORB_LUN(lun_or_login_id);
-	orb->request.length =
-		MANAGEMENT_ORB_RESPONSE_LENGTH(sizeof(orb->response));
+		MANAGEMENT_ORB_LUN(lun_or_login_id));
+	orb->request.length = cpu_to_be32(
+		MANAGEMENT_ORB_RESPONSE_LENGTH(sizeof(orb->response)));
 
-	orb->request.status_fifo.high = lu->address_handler.offset >> 32;
-	orb->request.status_fifo.low  = lu->address_handler.offset;
+	orb->request.status_fifo.high =
+		cpu_to_be32(lu->address_handler.offset >> 32);
+	orb->request.status_fifo.low  =
+		cpu_to_be32(lu->address_handler.offset);
 
 	if (function == SBP2_LOGIN_REQUEST) {
 		/* Ask for 2^2 == 4 seconds reconnect grace period */
-		orb->request.misc |=
+		orb->request.misc |= cpu_to_be32(
 			MANAGEMENT_ORB_RECONNECT(2) |
-			MANAGEMENT_ORB_EXCLUSIVE(sbp2_param_exclusive_login);
+			MANAGEMENT_ORB_EXCLUSIVE(sbp2_param_exclusive_login));
 		timeout = lu->tgt->mgt_orb_timeout;
 	} else {
 		timeout = SBP2_ORB_TIMEOUT;
 	}
 
-	fw_memcpy_to_be32(&orb->request, &orb->request, sizeof(orb->request));
-
 	init_completion(&orb->done);
 	orb->base.callback = complete_management_orb;
 
@@ -605,8 +600,7 @@
 			 sizeof(orb->response), DMA_FROM_DEVICE);
  fail_mapping_response:
 	if (response)
-		fw_memcpy_from_be32(response,
-				    orb->response, sizeof(orb->response));
+		memcpy(response, orb->response, sizeof(orb->response));
 	kref_put(&orb->base.kref, free_orb);
 
 	return retval;
@@ -701,10 +695,8 @@
 	if (!tgt->dont_block && !lu->blocked &&
 	    lu->generation != card->generation) {
 		lu->blocked = true;
-		if (++tgt->blocked == 1) {
+		if (++tgt->blocked == 1)
 			scsi_block_requests(shost);
-			fw_notify("blocked %s\n", lu->tgt->bus_id);
-		}
 	}
 	spin_unlock_irqrestore(&card->lock, flags);
 }
@@ -731,10 +723,8 @@
 	}
 	spin_unlock_irqrestore(&card->lock, flags);
 
-	if (unblock) {
+	if (unblock)
 		scsi_unblock_requests(shost);
-		fw_notify("unblocked %s\n", lu->tgt->bus_id);
-	}
 }
 
 /*
@@ -796,7 +786,7 @@
 	scsi_remove_host(shost);
 	fw_notify("released %s\n", tgt->bus_id);
 
-	put_device(&tgt->unit->device);
+	fw_unit_put(tgt->unit);
 	scsi_host_put(shost);
 	fw_device_put(device);
 }
@@ -825,6 +815,22 @@
 	complete(done);
 }
 
+/*
+ * Write retransmit retry values into the BUSY_TIMEOUT register.
+ * - The single-phase retry protocol is supported by all SBP-2 devices, but the
+ *   default retry_limit value is 0 (i.e. never retry transmission). We write a
+ *   saner value after logging into the device.
+ * - The dual-phase retry protocol is optional to implement, and if not
+ *   supported, writes to the dual-phase portion of the register will be
+ *   ignored. We try to write the original 1394-1995 default here.
+ * - In the case of devices that are also SBP-3-compliant, all writes are
+ *   ignored, as the register is read-only, but contains single-phase retry of
+ *   15, which is what we're trying to set for all SBP-2 devices anyway, so this
+ *   write attempt is safe and yields more consistent behavior for all devices.
+ *
+ * See section 8.3.2.3.5 of the 1394-1995 spec, section 6.2 of the SBP-2 spec,
+ * and section 6.4 of the SBP-3 spec for further details.
+ */
 static void sbp2_set_busy_timeout(struct sbp2_logical_unit *lu)
 {
 	struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
@@ -832,8 +838,7 @@
 	struct fw_transaction t;
 	static __be32 busy_timeout;
 
-	/* FIXME: we should try to set dual-phase cycle_limit too */
-	busy_timeout = cpu_to_be32(SBP2_RETRY_LIMIT);
+	busy_timeout = cpu_to_be32(SBP2_CYCLE_LIMIT | SBP2_RETRY_LIMIT);
 
 	fw_send_request(device->card, &t, TCODE_WRITE_QUADLET_REQUEST,
 			lu->tgt->node_id, lu->generation, device->max_speed,
@@ -885,11 +890,10 @@
 	tgt->address_high = local_node_id << 16;
 	sbp2_set_generation(lu, generation);
 
-	/* Get command block agent offset and login id. */
 	lu->command_block_agent_address =
-		((u64) (response.command_block_agent.high & 0xffff) << 32) |
-		response.command_block_agent.low;
-	lu->login_id = LOGIN_RESPONSE_GET_LOGIN_ID(response);
+		((u64)(be32_to_cpu(response.command_block_agent.high) & 0xffff)
+		      << 32) | be32_to_cpu(response.command_block_agent.low);
+	lu->login_id = be32_to_cpu(response.misc) & 0xffff;
 
 	fw_notify("%s: logged in to LUN %04x (%d retries)\n",
 		  tgt->bus_id, lu->lun, lu->retries);
@@ -1111,6 +1115,7 @@
 	kref_init(&tgt->kref);
 	INIT_LIST_HEAD(&tgt->lu_list);
 	tgt->bus_id = unit->device.bus_id;
+	tgt->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];
 
 	if (fw_device_enable_phys_dma(device) < 0)
 		goto fail_shost_put;
@@ -1119,6 +1124,7 @@
 		goto fail_shost_put;
 
 	fw_device_get(device);
+	fw_unit_get(unit);
 
 	/* Initialize to values that won't match anything in our table. */
 	firmware_revision = 0xff000000;
@@ -1134,8 +1140,6 @@
 
 	sbp2_init_workarounds(tgt, model, firmware_revision);
 
-	get_device(&unit->device);
-
 	/* Do the login in a workqueue so we can easily reschedule retries. */
 	list_for_each_entry(lu, &tgt->lu_list, link)
 		sbp2_queue_work(lu, 0);
@@ -1367,9 +1371,12 @@
 	 * tables.
 	 */
 	if (count == 1 && sg_dma_len(sg) < SBP2_MAX_SG_ELEMENT_LENGTH) {
-		orb->request.data_descriptor.high = lu->tgt->address_high;
-		orb->request.data_descriptor.low  = sg_dma_address(sg);
-		orb->request.misc |= COMMAND_ORB_DATA_SIZE(sg_dma_len(sg));
+		orb->request.data_descriptor.high =
+			cpu_to_be32(lu->tgt->address_high);
+		orb->request.data_descriptor.low  =
+			cpu_to_be32(sg_dma_address(sg));
+		orb->request.misc |=
+			cpu_to_be32(COMMAND_ORB_DATA_SIZE(sg_dma_len(sg)));
 		return 0;
 	}
 
@@ -1390,16 +1397,14 @@
 				goto fail_page_table;
 			}
 			l = min(sg_len, SBP2_MAX_SG_ELEMENT_LENGTH);
-			orb->page_table[j].low = sg_addr;
-			orb->page_table[j].high = (l << 16);
+			orb->page_table[j].low = cpu_to_be32(sg_addr);
+			orb->page_table[j].high = cpu_to_be32(l << 16);
 			sg_addr += l;
 			sg_len -= l;
 			j++;
 		}
 	}
 
-	fw_memcpy_to_be32(orb->page_table, orb->page_table,
-			  sizeof(orb->page_table[0]) * j);
 	orb->page_table_bus =
 		dma_map_single(device->card->device, orb->page_table,
 			       sizeof(orb->page_table), DMA_TO_DEVICE);
@@ -1413,11 +1418,10 @@
 	 * initiator (i.e. us), but data_descriptor can refer to data
 	 * on other nodes so we need to put our ID in descriptor.high.
 	 */
-	orb->request.data_descriptor.high = lu->tgt->address_high;
-	orb->request.data_descriptor.low  = orb->page_table_bus;
-	orb->request.misc |=
-		COMMAND_ORB_PAGE_TABLE_PRESENT |
-		COMMAND_ORB_DATA_SIZE(j);
+	orb->request.data_descriptor.high = cpu_to_be32(lu->tgt->address_high);
+	orb->request.data_descriptor.low  = cpu_to_be32(orb->page_table_bus);
+	orb->request.misc |= cpu_to_be32(COMMAND_ORB_PAGE_TABLE_PRESENT |
+					 COMMAND_ORB_DATA_SIZE(j));
 
 	return 0;
 
@@ -1463,8 +1467,7 @@
 	orb->done = done;
 	orb->cmd  = cmd;
 
-	orb->request.next.high   = SBP2_ORB_NULL;
-	orb->request.next.low    = 0x0;
+	orb->request.next.high   = cpu_to_be32(SBP2_ORB_NULL);
 	/*
 	 * At speed 100 we can do 512 bytes per packet, at speed 200,
 	 * 1024 bytes per packet etc.  The SBP-2 max_payload field
@@ -1473,25 +1476,17 @@
 	 */
 	max_payload = min(device->max_speed + 7,
 			  device->card->max_receive - 1);
-	orb->request.misc =
+	orb->request.misc = cpu_to_be32(
 		COMMAND_ORB_MAX_PAYLOAD(max_payload) |
 		COMMAND_ORB_SPEED(device->max_speed) |
-		COMMAND_ORB_NOTIFY;
+		COMMAND_ORB_NOTIFY);
 
 	if (cmd->sc_data_direction == DMA_FROM_DEVICE)
-		orb->request.misc |=
-			COMMAND_ORB_DIRECTION(SBP2_DIRECTION_FROM_MEDIA);
-	else if (cmd->sc_data_direction == DMA_TO_DEVICE)
-		orb->request.misc |=
-			COMMAND_ORB_DIRECTION(SBP2_DIRECTION_TO_MEDIA);
+		orb->request.misc |= cpu_to_be32(COMMAND_ORB_DIRECTION);
 
 	if (scsi_sg_count(cmd) && sbp2_map_scatterlist(orb, device, lu) < 0)
 		goto out;
 
-	fw_memcpy_to_be32(&orb->request, &orb->request, sizeof(orb->request));
-
-	memset(orb->request.command_block,
-	       0, sizeof(orb->request.command_block));
 	memcpy(orb->request.command_block, cmd->cmnd, COMMAND_SIZE(*cmd->cmnd));
 
 	orb->base.callback = complete_command_orb;
@@ -1519,11 +1514,8 @@
 
 	sdev->allow_restart = 1;
 
-	/*
-	 * Update the dma alignment (minimum alignment requirements for
-	 * start and end of DMA transfers) to be a sector
-	 */
-	blk_queue_update_dma_alignment(sdev->request_queue, 511);
+	/* SBP-2 requires quadlet alignment of the data buffers. */
+	blk_queue_update_dma_alignment(sdev->request_queue, 4 - 1);
 
 	if (lu->tgt->workarounds & SBP2_WORKAROUND_INQUIRY_36)
 		sdev->inquiry_len = 36;
@@ -1581,16 +1573,14 @@
 {
 	struct scsi_device *sdev = to_scsi_device(dev);
 	struct sbp2_logical_unit *lu;
-	struct fw_device *device;
 
 	if (!sdev)
 		return 0;
 
 	lu = sdev->hostdata;
-	device = fw_device(lu->tgt->unit->device.parent);
 
-	return sprintf(buf, "%08x%08x:%06x:%04x\n",
-			device->config_rom[3], device->config_rom[4],
+	return sprintf(buf, "%016llx:%06x:%04x\n",
+			(unsigned long long)lu->tgt->guid,
 			lu->tgt->directory_id, lu->lun);
 }
 
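
The scatterlist hunks above fill the command ORB's page table the same way throughout: each element is split into chunks of at most SBP2_MAX_SG_ELEMENT_LENGTH (0xf000) bytes, with the chunk length in the upper 16 bits of the entry's high word and the bus address in the low word, both stored big-endian as they are written. A simplified sketch of that splitting, assuming one contiguous (addr, len) region instead of a real scatterlist:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* htonl(): stand-in for cpu_to_be32() */

#define MAX_ELEMENT_LEN 0xf000u	/* SBP2_MAX_SG_ELEMENT_LENGTH */

struct page_table_entry {
	uint32_t high;	/* chunk length << 16, big-endian */
	uint32_t low;	/* bus address, big-endian */
};

/* Fill entries covering [addr, addr + len); returns the entry count,
 * or -1 if the table is too small. */
static int fill_page_table(struct page_table_entry *pt, int max,
			   uint32_t addr, uint32_t len)
{
	int j = 0;

	while (len) {
		uint32_t l = len < MAX_ELEMENT_LEN ? len : MAX_ELEMENT_LEN;

		if (j == max)
			return -1;
		pt[j].low  = htonl(addr);
		pt[j].high = htonl(l << 16);
		addr += l;
		len  -= l;
		j++;
	}

	return j;
}

int main(void)
{
	struct page_table_entry pt[8];

	/* 0x20000 bytes split at 0xf000 -> 3 entries */
	printf("%d entries\n", fill_page_table(pt, 8, 0x10000000u, 0x20000u));
	return 0;
}
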
diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/fw-topology.c
index d2c7a3d..213b0ff 100644
--- a/drivers/firewire/fw-topology.c
+++ b/drivers/firewire/fw-topology.c
@@ -108,6 +108,7 @@
 	node->node_id = LOCAL_BUS | SELF_ID_PHY_ID(sid);
 	node->link_on = SELF_ID_LINK_ON(sid);
 	node->phy_speed = SELF_ID_PHY_SPEED(sid);
+	node->initiated_reset = SELF_ID_PHY_INITIATOR(sid);
 	node->port_count = port_count;
 
 	atomic_set(&node->ref_count, 1);
@@ -289,12 +290,11 @@
 			beta_repeaters_present = true;
 
 		/*
-		 * If all PHYs does not report the same gap count
-		 * setting, we fall back to 63 which will force a gap
-		 * count reconfiguration and a reset.
+		 * If PHYs report different gap counts, set an invalid count
+		 * which will force a gap count reconfiguration and a reset.
 		 */
 		if (SELF_ID_GAP_COUNT(q) != gap_count)
-			gap_count = 63;
+			gap_count = 0;
 
 		update_hop_count(node);
 
@@ -431,6 +431,8 @@
 			event = FW_NODE_LINK_OFF;
 		else if (!node0->link_on && node1->link_on)
 			event = FW_NODE_LINK_ON;
+		else if (node1->initiated_reset && node1->link_on)
+			event = FW_NODE_INITIATED_RESET;
 		else
 			event = FW_NODE_UPDATED;
 
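
The gap-count hunk above only changes the fallback value: any disagreement among the PHYs still forces a reconfiguration and bus reset, but 0 (an invalid gap count) guarantees it, where the old fallback of 63 was a legal setting. A sketch of the consistency scan over the self-ID gap-count fields; the field position is an assumption, not taken from this patch:

#include <stdio.h>

/* Assumed self-ID layout: gap count in bits 16-21 of the first quadlet. */
#define SELF_ID_GAP_COUNT(q)	(((q) >> 16) & 0x3f)

/* Return the common gap count, or 0 (invalid, forces reconfiguration
 * plus a bus reset) when the PHYs disagree. */
static int consistent_gap_count(const unsigned int *sid, int count)
{
	int gap_count = SELF_ID_GAP_COUNT(sid[0]);
	int i;

	for (i = 1; i < count; i++)
		if (SELF_ID_GAP_COUNT(sid[i]) != gap_count)
			return 0;

	return gap_count;
}

int main(void)
{
	unsigned int sids[] = { 0x003f0000, 0x003f0000, 0x00250000 };

	printf("gap count: %d\n", consistent_gap_count(sids, 3));	/* 0 */
	return 0;
}
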
diff --git a/drivers/firewire/fw-topology.h b/drivers/firewire/fw-topology.h
index cedc1ec..addb9f8 100644
--- a/drivers/firewire/fw-topology.h
+++ b/drivers/firewire/fw-topology.h
@@ -20,11 +20,12 @@
 #define __fw_topology_h
 
 enum {
-	FW_NODE_CREATED =   0x00,
-	FW_NODE_UPDATED =   0x01,
-	FW_NODE_DESTROYED = 0x02,
-	FW_NODE_LINK_ON =   0x03,
-	FW_NODE_LINK_OFF =  0x04,
+	FW_NODE_CREATED,
+	FW_NODE_UPDATED,
+	FW_NODE_DESTROYED,
+	FW_NODE_LINK_ON,
+	FW_NODE_LINK_OFF,
+	FW_NODE_INITIATED_RESET,
 };
 
 struct fw_node {
diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c
index e6f1bda..3a59e9b 100644
--- a/drivers/firewire/fw-transaction.c
+++ b/drivers/firewire/fw-transaction.c
@@ -18,6 +18,7 @@
  * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
+#include <linux/completion.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -294,42 +295,40 @@
 }
 EXPORT_SYMBOL(fw_send_request);
 
+struct fw_phy_packet {
+	struct fw_packet packet;
+	struct completion done;
+};
+
 static void
 transmit_phy_packet_callback(struct fw_packet *packet,
 			     struct fw_card *card, int status)
 {
-	kfree(packet);
-}
+	struct fw_phy_packet *p =
+			container_of(packet, struct fw_phy_packet, packet);
 
-static void send_phy_packet(struct fw_card *card, u32 data, int generation)
-{
-	struct fw_packet *packet;
-
-	packet = kzalloc(sizeof(*packet), GFP_ATOMIC);
-	if (packet == NULL)
-		return;
-
-	packet->header[0] = data;
-	packet->header[1] = ~data;
-	packet->header_length = 8;
-	packet->payload_length = 0;
-	packet->speed = SCODE_100;
-	packet->generation = generation;
-	packet->callback = transmit_phy_packet_callback;
-
-	card->driver->send_request(card, packet);
+	complete(&p->done);
 }
 
 void fw_send_phy_config(struct fw_card *card,
 			int node_id, int generation, int gap_count)
 {
-	u32 q;
+	struct fw_phy_packet p;
+	u32 data = PHY_IDENTIFIER(PHY_PACKET_CONFIG) |
+		   PHY_CONFIG_ROOT_ID(node_id) |
+		   PHY_CONFIG_GAP_COUNT(gap_count);
 
-	q = PHY_IDENTIFIER(PHY_PACKET_CONFIG) |
-		PHY_CONFIG_ROOT_ID(node_id) |
-		PHY_CONFIG_GAP_COUNT(gap_count);
+	p.packet.header[0] = data;
+	p.packet.header[1] = ~data;
+	p.packet.header_length = 8;
+	p.packet.payload_length = 0;
+	p.packet.speed = SCODE_100;
+	p.packet.generation = generation;
+	p.packet.callback = transmit_phy_packet_callback;
+	init_completion(&p.done);
 
-	send_phy_packet(card, q, generation);
+	card->driver->send_request(card, &p.packet);
+	wait_for_completion(&p.done);
 }
 
 void fw_flush_transactions(struct fw_card *card)
@@ -389,21 +388,21 @@
 static DEFINE_SPINLOCK(address_handler_lock);
 static LIST_HEAD(address_handler_list);
 
-const struct fw_address_region fw_low_memory_region =
-	{ .start = 0x000000000000ULL, .end = 0x000100000000ULL,  };
 const struct fw_address_region fw_high_memory_region =
 	{ .start = 0x000100000000ULL, .end = 0xffffe0000000ULL,  };
+EXPORT_SYMBOL(fw_high_memory_region);
+
+#if 0
+const struct fw_address_region fw_low_memory_region =
+	{ .start = 0x000000000000ULL, .end = 0x000100000000ULL,  };
 const struct fw_address_region fw_private_region =
 	{ .start = 0xffffe0000000ULL, .end = 0xfffff0000000ULL,  };
 const struct fw_address_region fw_csr_region =
-	{ .start = 0xfffff0000000ULL, .end = 0xfffff0000800ULL,  };
+	{ .start = CSR_REGISTER_BASE,
+	  .end   = CSR_REGISTER_BASE | CSR_CONFIG_ROM_END,  };
 const struct fw_address_region fw_unit_space_region =
 	{ .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, };
-EXPORT_SYMBOL(fw_low_memory_region);
-EXPORT_SYMBOL(fw_high_memory_region);
-EXPORT_SYMBOL(fw_private_region);
-EXPORT_SYMBOL(fw_csr_region);
-EXPORT_SYMBOL(fw_unit_space_region);
+#endif  /*  0  */
 
 /**
  * Allocate a range of addresses in the node space of the OHCI
@@ -747,7 +746,8 @@
 EXPORT_SYMBOL(fw_core_handle_response);
 
 static const struct fw_address_region topology_map_region =
-	{ .start = 0xfffff0001000ull, .end = 0xfffff0001400ull, };
+	{ .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP,
+	  .end   = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, };
 
 static void
 handle_topology_map(struct fw_card *card, struct fw_request *request,
@@ -785,7 +785,8 @@
 };
 
 static const struct fw_address_region registers_region =
-	{ .start = 0xfffff0000000ull, .end = 0xfffff0000400ull, };
+	{ .start = CSR_REGISTER_BASE,
+	  .end   = CSR_REGISTER_BASE | CSR_CONFIG_ROM, };
 
 static void
 handle_registers(struct fw_card *card, struct fw_request *request,
@@ -794,7 +795,7 @@
 		 unsigned long long offset,
 		 void *payload, size_t length, void *callback_data)
 {
-	int reg = offset - CSR_REGISTER_BASE;
+	int reg = offset & ~CSR_REGISTER_BASE;
 	unsigned long long bus_time;
 	__be32 *data = payload;
 
diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h
index a43bb22..04d3854 100644
--- a/drivers/firewire/fw-transaction.h
+++ b/drivers/firewire/fw-transaction.h
@@ -201,11 +201,7 @@
 	u64 end;
 };
 
-extern const struct fw_address_region fw_low_memory_region;
 extern const struct fw_address_region fw_high_memory_region;
-extern const struct fw_address_region fw_private_region;
-extern const struct fw_address_region fw_csr_region;
-extern const struct fw_address_region fw_unit_space_region;
 
 int fw_core_add_address_handler(struct fw_address_handler *handler,
 				const struct fw_address_region *region);
@@ -221,12 +217,9 @@
 	const struct fw_card_driver *driver;
 	struct device *device;
 	atomic_t device_count;
-	struct kref kref;
 
 	int node_id;
 	int generation;
-	/* This is the generation used for timestamping incoming requests. */
-	int request_generation;
 	int current_tlabel, tlabel_mask;
 	struct list_head transaction_list;
 	struct timer_list flush_timer;
@@ -263,9 +256,6 @@
 	int bm_generation;
 };
 
-struct fw_card *fw_card_get(struct fw_card *card);
-void fw_card_put(struct fw_card *card);
-
 /*
  * The iso packet format allows for an immediate header/payload part
  * stored in 'header' immediately after the packet info plus an
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index eed6d8e..8753203 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -122,24 +122,6 @@
 
 	  If unsure, say N.
 
-config BLK_DEV_HD_IDE
-	bool "Use old disk-only driver on primary interface"
-	depends on (X86 || SH_MPC1211)
-	---help---
-	  There are two drivers for MFM/RLL/IDE disks.  Most people use just
-	  the new enhanced driver by itself.  This option however installs the
-	  old hard disk driver to control the primary IDE/disk interface in
-	  the system, leaving the new enhanced IDE driver to take care of only
-	  the 2nd/3rd/4th IDE interfaces.  Doing this will prevent you from
-	  having an IDE/ATAPI CD-ROM or tape drive connected to the primary
-	  IDE interface.  Choosing this option may be useful for older systems
-	  which have MFM/RLL/ESDI controller+drives at the primary port
-	  address (0x1f0), along with IDE drives at the secondary/3rd/4th port
-	  addresses.
-
-	  Normally, just say N here; you will then use the new driver for all
-	  4 interfaces.
-
 config BLK_DEV_IDEDISK
 	tristate "Include IDE/ATA-2 DISK support"
 	---help---
@@ -325,6 +307,7 @@
 
 config IDE_GENERIC
 	tristate "generic/default IDE chipset support"
+	depends on ALPHA || X86 || IA64 || M32R || MIPS || PPC32
 	help
 	  If unsure, say N.
 
@@ -416,12 +399,6 @@
 	  This can improve the usability of some boot managers such as lilo
 	  when booting from a drive on an off-board controller.
 
-	  If you say Y here, and you actually want to reverse the device scan
-	  order as explained above, you also need to issue the kernel command
-	  line option "ide=reverse". (Try "man bootparam" or see the
-	  documentation of your boot loader (lilo or loadlin) about how to
-	  pass options to the kernel at boot time.)
-
 	  Note that, if you do this, the order of the hd* devices will be
 	  rearranged which may require modification of fstab and other files.
 
@@ -615,8 +592,7 @@
 	  reference to device 0x80. The other solution is to say Y to "Boot
 	  off-board chipsets first support" (CONFIG_BLK_DEV_OFFBOARD) unless
 	  your mother board has the chipset natively mounted. Regardless one
-	  should use the fore mentioned option and call at LILO or include
-	  "ide=reverse" in LILO's append-line.
+	  should use the aforementioned option and boot via LILO.
 
 	  This driver requires dynamic tuning of the chipset during the
 	  ide-probe at boot. It is reported to support DVD II drives, by the
@@ -1049,7 +1025,7 @@
 endchoice
 
 # no isa -> no vlb
-if ISA
+if ISA && (ALPHA || X86 || MIPS)
 
 comment "Other IDE chipsets support"
 comment "Note: most of these also require special kernel boot parameters"
@@ -1060,8 +1036,8 @@
 	  Certain older chipsets, including the Tekram 690CD, use a single set
 	  of I/O ports at 0x1f0 to control up to four drives, instead of the
 	  customary two drives per port. Support for this can be enabled at
-	  runtime using the "ide0=four" kernel boot parameter if you say Y
-	  here.
+	  runtime using the "ide-4drives.probe" kernel boot parameter if you
+	  say Y here.
 
 config BLK_DEV_ALI14XX
 	tristate "ALI M14xx support"
@@ -1114,14 +1090,10 @@
 	def_bool BLK_DEV_IDEDMA_SFF || BLK_DEV_IDEDMA_PMAC || \
 		 BLK_DEV_IDEDMA_ICS || BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
 
-config IDE_ARCH_OBSOLETE_INIT
-	def_bool ALPHA || (ARM && !ARCH_L7200) || BLACKFIN || X86 || IA64 || M32R || MIPS || PARISC || PPC || (SUPERH64 && BLK_DEV_IDEPCI) || SPARC
-
 endif
 
 config BLK_DEV_HD_ONLY
 	bool "Old hard disk (MFM/RLL/IDE) driver"
-	depends on BLK_DEV_IDE=n
 	help
 	  There are two drivers for MFM/RLL/IDE hard disks. Most people use
 	  the newer enhanced driver, but this old one is still around for two
@@ -1133,12 +1105,16 @@
 	  for systems with only older MFM/RLL/ESDI drives. Choosing the old
 	  driver can save 13 KB or so of kernel memory.
 
+	  If you want to use this driver together with the new one, you have
+	  to pass the "hda=noprobe hdb=noprobe" kernel parameters to prevent
+	  the new driver from probing the primary interface.
+
 	  If you are unsure, then just choose the Enhanced IDE/MFM/RLL driver
 	  instead of this one. For more detailed information, read the
 	  Disk-HOWTO, available from
 	  <http://www.tldp.org/docs.html#howto>.
 
 config BLK_DEV_HD
-	def_bool BLK_DEV_HD_IDE || BLK_DEV_HD_ONLY
+	def_bool BLK_DEV_HD_ONLY
 
 endif # IDE
diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile
index a4a4323..571544c 100644
--- a/drivers/ide/Makefile
+++ b/drivers/ide/Makefile
@@ -36,9 +36,9 @@
 endif
 
 obj-$(CONFIG_BLK_DEV_IDE)		+= cris/ ppc/
-obj-$(CONFIG_BLK_DEV_IDEPNP)		+= ide-pnp.o
 obj-$(CONFIG_IDE_H8300)			+= h8300/
 obj-$(CONFIG_IDE_GENERIC)		+= ide-generic.o
+obj-$(CONFIG_BLK_DEV_IDEPNP)		+= ide-pnp.o
 
 ide-cd_mod-y += ide-cd.o ide-cd_ioctl.o ide-cd_verbose.o
 
diff --git a/drivers/ide/arm/bast-ide.c b/drivers/ide/arm/bast-ide.c
index 161d30c..ec46c44 100644
--- a/drivers/ide/arm/bast-ide.c
+++ b/drivers/ide/arm/bast-ide.c
@@ -41,15 +41,15 @@
 	hw.io_ports[IDE_CONTROL_OFFSET] = aux + (6 * 0x20);
 	hw.irq = irq;
 
-	hwif = ide_deprecated_find_port(hw.io_ports[IDE_DATA_OFFSET]);
+	hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
 	if (hwif == NULL)
 		goto out;
 
 	i = hwif->index;
 
 	if (hwif->present)
-		ide_unregister(i, 0, 0);
-	else if (!hwif->hold)
+		ide_unregister(i);
+	else
 		ide_init_port_data(hwif, i);
 
 	ide_init_port_hw(hwif, &hw);
diff --git a/drivers/ide/arm/palm_bk3710.c b/drivers/ide/arm/palm_bk3710.c
index 8e1f6bd..474162c 100644
--- a/drivers/ide/arm/palm_bk3710.c
+++ b/drivers/ide/arm/palm_bk3710.c
@@ -378,15 +378,15 @@
 	hw.irq = irq->start;
 	hw.chipset = ide_palm3710;
 
-	hwif = ide_deprecated_find_port(hw.io_ports[IDE_DATA_OFFSET]);
+	hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
 	if (hwif == NULL)
 		goto out;
 
 	i = hwif->index;
 
 	if (hwif->present)
-		ide_unregister(i, 0, 0);
-	else if (!hwif->hold)
+		ide_unregister(i);
+	else
 		ide_init_port_data(hwif, i);
 
 	ide_init_port_hw(hwif, &hw);
diff --git a/drivers/ide/arm/rapide.c b/drivers/ide/arm/rapide.c
index efba00d..b30adcf 100644
--- a/drivers/ide/arm/rapide.c
+++ b/drivers/ide/arm/rapide.c
@@ -76,7 +76,7 @@
 
 	ecard_set_drvdata(ec, NULL);
 
-	ide_unregister(hwif->index, 0, 0);
+	ide_unregister(hwif->index);
 
 	ecard_release_resources(ec);
 }
diff --git a/drivers/ide/cris/ide-cris.c b/drivers/ide/cris/ide-cris.c
index c8ffbaf..31266d2 100644
--- a/drivers/ide/cris/ide-cris.c
+++ b/drivers/ide/cris/ide-cris.c
@@ -228,7 +228,10 @@
 static void
 cris_ide_start_dma(ide_drive_t *drive, cris_dma_descr_type *d, int dir,int type,int len)
 {
-	reg_ata_rw_ctrl2 ctrl2 = REG_TYPE_CONV(reg_ata_rw_ctrl2, int, IDE_DATA_REG);
+	ide_hwif_t *hwif = drive->hwif;
+
+	reg_ata_rw_ctrl2 ctrl2 = REG_TYPE_CONV(reg_ata_rw_ctrl2, int,
+					       hwif->io_ports[IDE_DATA_OFFSET]);
 	reg_ata_rw_trf_cnt trf_cnt = {0};
 
 	mycontext.saved_data = (dma_descr_data*)virt_to_phys(d);
@@ -264,8 +267,12 @@
 
 static int cris_dma_test_irq(ide_drive_t *drive)
 {
+	ide_hwif_t *hwif = drive->hwif;
 	int intr = REG_RD_INT(ata, regi_ata, r_intr);
-	reg_ata_rw_ctrl2 ctrl2 = REG_TYPE_CONV(reg_ata_rw_ctrl2, int, IDE_DATA_REG);
+
+	reg_ata_rw_ctrl2 ctrl2 = REG_TYPE_CONV(reg_ata_rw_ctrl2, int,
+					       hwif->io_ports[IDE_DATA_OFFSET]);
+
 	return intr & (1 << ctrl2.sel) ? 1 : 0;
 }
 
@@ -523,7 +530,8 @@
 	                          IO_STATE(R_ATA_CTRL_DATA, handsh, dma);
 	*R_ATA_CTRL_DATA =
 		cmd |
-		IO_FIELD(R_ATA_CTRL_DATA, data, IDE_DATA_REG) |
+		IO_FIELD(R_ATA_CTRL_DATA, data,
+			 drive->hwif->io_ports[IDE_DATA_OFFSET]) |
 		IO_STATE(R_ATA_CTRL_DATA, src_dst,  dma)  |
 		IO_STATE(R_ATA_CTRL_DATA, multi,    on)   |
 		IO_STATE(R_ATA_CTRL_DATA, dma_size, word);
@@ -541,7 +549,9 @@
 static int cris_dma_test_irq(ide_drive_t *drive)
 {
 	int intr = *R_IRQ_MASK0_RD;
-	int bus = IO_EXTRACT(R_ATA_CTRL_DATA, sel, IDE_DATA_REG);
+	int bus = IO_EXTRACT(R_ATA_CTRL_DATA, sel,
+			     drive->hwif->io_ports[IDE_DATA_OFFSET]);
+
 	return intr & (1 << (bus + IO_BITNR(R_IRQ_MASK0_RD, ata_irq0))) ? 1 : 0;
 }
 
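
The cris hunks above, like the ide-cd and ide-floppy changes below, replace the implicit IDE_DATA_REG macro (which assumed a particular variable in scope) with an explicit hwif->io_ports[IDE_DATA_OFFSET] lookup, so the port address flows through visible per-interface state. A minimal sketch of the resulting shape:

#include <stdio.h>

#define IDE_DATA_OFFSET 0	/* index into the port array */

struct hwif {
	unsigned long io_ports[10];
};

struct drive {
	struct hwif *hwif;
};

/* The port comes from explicit per-interface state, not from a macro
 * that quietly dereferences whatever 'drive' is in scope. */
static unsigned long data_port(const struct drive *drive)
{
	return drive->hwif->io_ports[IDE_DATA_OFFSET];
}

int main(void)
{
	struct hwif h = { .io_ports = { 0x1f0 } };
	struct drive d = { .hwif = &h };

	printf("data port: 0x%lx\n", data_port(&d));
	return 0;
}
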
diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c
index e07b189..0f6fb6b 100644
--- a/drivers/ide/ide-acpi.c
+++ b/drivers/ide/ide-acpi.c
@@ -710,6 +710,8 @@
 	for (i = 0; i < MAX_DRIVES; i++) {
 		drive = &hwif->drives[i];
 
+		memset(drive->acpidata, 0, sizeof(*drive->acpidata));
+
 		if (!drive->present)
 			continue;
 
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index c8d0e87..3960002 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -542,7 +542,8 @@
 
 		/* packet command */
 		spin_lock_irqsave(&ide_lock, flags);
-		hwif->OUTBSYNC(drive, WIN_PACKETCMD, IDE_COMMAND_REG);
+		hwif->OUTBSYNC(drive, WIN_PACKETCMD,
+			       hwif->io_ports[IDE_COMMAND_OFFSET]);
 		ndelay(400);
 		spin_unlock_irqrestore(&ide_lock, flags);
 
@@ -992,6 +993,7 @@
 
 static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
 {
+	ide_hwif_t *hwif = drive->hwif;
 	struct cdrom_info *info = drive->driver_data;
 	struct request *rq = HWGROUP(drive)->rq;
 	xfer_func_t *xferfunc;
@@ -1032,9 +1034,9 @@
 	/*
 	 * ok we fall to pio :/
 	 */
-	ireason = HWIF(drive)->INB(IDE_IREASON_REG) & 0x3;
-	lowcyl  = HWIF(drive)->INB(IDE_BCOUNTL_REG);
-	highcyl = HWIF(drive)->INB(IDE_BCOUNTH_REG);
+	ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]) & 0x3;
+	lowcyl  = hwif->INB(hwif->io_ports[IDE_BCOUNTL_OFFSET]);
+	highcyl = hwif->INB(hwif->io_ports[IDE_BCOUNTH_OFFSET]);
 
 	len = lowcyl + (256 * highcyl);
 
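
The ide-cd hunk above and the ide-floppy hunks below read the ATAPI byte count the same way: the low and high byte-count registers are fetched separately through hwif->INB() and combined into a 16-bit length. A tiny sketch of the combination, with hypothetical inb_bcount_low()/inb_bcount_high() reads:

#include <stdio.h>

/* Hypothetical 8-bit register reads standing in for hwif->INB(). */
static unsigned char inb_bcount_low(void)  { return 0x34; }
static unsigned char inb_bcount_high(void) { return 0x12; }

int main(void)
{
	unsigned int lowcyl  = inb_bcount_low();	/* IDE_BCOUNTL */
	unsigned int highcyl = inb_bcount_high();	/* IDE_BCOUNTH */
	unsigned int len = lowcyl + (256 * highcyl);

	printf("byte count: 0x%04x\n", len);	/* 0x1234 */
	return 0;
}
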
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index faf22d7..5f133df 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -78,40 +78,6 @@
  */
 #define IDEFLOPPY_PC_STACK		(10 + IDEFLOPPY_MAX_PC_RETRIES)
 
-typedef struct idefloppy_packet_command_s {
-	u8 c[12];				/* Actual packet bytes */
-	int retries;				/* On each retry, we increment
-						   retries */
-	int error;				/* Error code */
-	int request_transfer;			/* Bytes to transfer */
-	int actually_transferred;		/* Bytes actually transferred */
-	int buffer_size;			/* Size of our data buffer */
-	int b_count;				/* Missing/Available data on
-						   the current buffer */
-	struct request *rq;			/* The corresponding request */
-	u8 *buffer;				/* Data buffer */
-	u8 *current_position;			/* Pointer into above buffer */
-	void (*callback) (ide_drive_t *);	/* Called when this packet
-						   command is completed */
-	u8 pc_buffer[IDEFLOPPY_PC_BUFFER_SIZE];	/* Temporary buffer */
-	unsigned long flags;			/* Status/Action bit flags: long
-						   for set_bit */
-} idefloppy_pc_t;
-
-/* Packet command flag bits. */
-enum {
-	/* 1 when we prefer to use DMA if possible */
-	PC_FLAG_DMA_RECOMMENDED	= (1 << 0),
-	/* 1 while DMA in progress */
-	PC_FLAG_DMA_IN_PROGRESS	= (1 << 1),
-	/* 1 when encountered problem during DMA */
-	PC_FLAG_DMA_ERROR	= (1 << 2),
-	/* Data direction */
-	PC_FLAG_WRITING		= (1 << 3),
-	/* Suppress error reporting */
-	PC_FLAG_SUPPRESS_ERROR	= (1 << 4),
-};
-
 /* format capacities descriptor codes */
 #define CAPACITY_INVALID	0x00
 #define CAPACITY_UNFORMATTED	0x01
@@ -131,11 +97,11 @@
 	unsigned int	openers;	/* protected by BKL for now */
 
 	/* Current packet command */
-	idefloppy_pc_t *pc;
+	struct ide_atapi_pc *pc;
 	/* Last failed packet command */
-	idefloppy_pc_t *failed_pc;
+	struct ide_atapi_pc *failed_pc;
 	/* Packet command stack */
-	idefloppy_pc_t pc_stack[IDEFLOPPY_PC_STACK];
+	struct ide_atapi_pc pc_stack[IDEFLOPPY_PC_STACK];
 	/* Next free packet command storage space */
 	int pc_stack_index;
 	struct request rq_stack[IDEFLOPPY_PC_STACK];
@@ -195,32 +161,6 @@
 #define	IDEFLOPPY_ERROR_GENERAL		101
 
 /*
- * The following is used to format the general configuration word of the
- * ATAPI IDENTIFY DEVICE command.
- */
-struct idefloppy_id_gcw {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
-	unsigned packet_size		:2;	/* Packet Size */
-	unsigned reserved234		:3;	/* Reserved */
-	unsigned drq_type		:2;	/* Command packet DRQ type */
-	unsigned removable		:1;	/* Removable media */
-	unsigned device_type		:5;	/* Device type */
-	unsigned reserved13		:1;	/* Reserved */
-	unsigned protocol		:2;	/* Protocol type */
-#elif defined(__BIG_ENDIAN_BITFIELD)
-	unsigned protocol		:2;	/* Protocol type */
-	unsigned reserved13		:1;	/* Reserved */
-	unsigned device_type		:5;	/* Device type */
-	unsigned removable		:1;	/* Removable media */
-	unsigned drq_type		:2;	/* Command packet DRQ type */
-	unsigned reserved234		:3;	/* Reserved */
-	unsigned packet_size		:2;	/* Packet Size */
-#else
-#error "Bitfield endianness not defined! Check your byteorder.h"
-#endif
-};
-
-/*
  * Pages of the SELECT SENSE / MODE SENSE packet commands.
  * See SFF-8070i spec.
  */
@@ -256,27 +196,10 @@
 }
 
 /*
- * Too bad. The drive wants to send us data which we are not ready to accept.
- * Just throw it away.
- */
-static void idefloppy_discard_data(ide_drive_t *drive, unsigned int bcount)
-{
-	while (bcount--)
-		(void) HWIF(drive)->INB(IDE_DATA_REG);
-}
-
-static void idefloppy_write_zeros(ide_drive_t *drive, unsigned int bcount)
-{
-	while (bcount--)
-		HWIF(drive)->OUTB(0, IDE_DATA_REG);
-}
-
-
-/*
  * Used to finish servicing a request. For read/write requests, we will call
  * ide_end_request to pass to the next buffer.
  */
-static int idefloppy_do_end_request(ide_drive_t *drive, int uptodate, int nsecs)
+static int idefloppy_end_request(ide_drive_t *drive, int uptodate, int nsecs)
 {
 	idefloppy_floppy_t *floppy = drive->driver_data;
 	struct request *rq = HWGROUP(drive)->rq;
@@ -305,7 +228,7 @@
 	return 0;
 }
 
-static void ide_floppy_io_buffers(ide_drive_t *drive, idefloppy_pc_t *pc,
+static void ide_floppy_io_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
 				  unsigned int bcount, int direction)
 {
 	struct request *rq = pc->rq;
@@ -333,26 +256,26 @@
 		done += count;
 	}
 
-	idefloppy_do_end_request(drive, 1, done >> 9);
+	idefloppy_end_request(drive, 1, done >> 9);
 
 	if (bcount) {
 		printk(KERN_ERR "%s: leftover data in %s, bcount == %d\n",
 				drive->name, __func__, bcount);
 		if (direction)
-			idefloppy_write_zeros(drive, bcount);
+			ide_atapi_write_zeros(drive, bcount);
 		else
-			idefloppy_discard_data(drive, bcount);
-
+			ide_atapi_discard_data(drive, bcount);
 	}
 }
 
-static void idefloppy_update_buffers(ide_drive_t *drive, idefloppy_pc_t *pc)
+static void idefloppy_update_buffers(ide_drive_t *drive,
+				struct ide_atapi_pc *pc)
 {
 	struct request *rq = pc->rq;
 	struct bio *bio = rq->bio;
 
 	while ((bio = rq->bio) != NULL)
-		idefloppy_do_end_request(drive, 1, 0);
+		idefloppy_end_request(drive, 1, 0);
 }
 
 /*
@@ -360,7 +283,7 @@
  * the current request so that it will be processed immediately, on the next
  * pass through the driver.
  */
-static void idefloppy_queue_pc_head(ide_drive_t *drive, idefloppy_pc_t *pc,
+static void idefloppy_queue_pc_head(ide_drive_t *drive, struct ide_atapi_pc *pc,
 		struct request *rq)
 {
 	struct ide_floppy_obj *floppy = drive->driver_data;
@@ -372,7 +295,7 @@
 	(void) ide_do_drive_cmd(drive, rq, ide_preempt);
 }
 
-static idefloppy_pc_t *idefloppy_next_pc_storage(ide_drive_t *drive)
+static struct ide_atapi_pc *idefloppy_next_pc_storage(ide_drive_t *drive)
 {
 	idefloppy_floppy_t *floppy = drive->driver_data;
 
@@ -393,7 +316,7 @@
 static void idefloppy_request_sense_callback(ide_drive_t *drive)
 {
 	idefloppy_floppy_t *floppy = drive->driver_data;
-	u8 *buf = floppy->pc->buffer;
+	u8 *buf = floppy->pc->buf;
 
 	debug_log("Reached %s\n", __func__);
 
@@ -418,11 +341,11 @@
 					floppy->ascq);
 
 
-		idefloppy_do_end_request(drive, 1, 0);
+		idefloppy_end_request(drive, 1, 0);
 	} else {
 		printk(KERN_ERR "Error in REQUEST SENSE itself - Aborting"
 				" request!\n");
-		idefloppy_do_end_request(drive, 0, 0);
+		idefloppy_end_request(drive, 0, 0);
 	}
 }
 
@@ -433,27 +356,27 @@
 
 	debug_log("Reached %s\n", __func__);
 
-	idefloppy_do_end_request(drive, floppy->pc->error ? 0 : 1, 0);
+	idefloppy_end_request(drive, floppy->pc->error ? 0 : 1, 0);
 }
 
-static void idefloppy_init_pc(idefloppy_pc_t *pc)
+static void idefloppy_init_pc(struct ide_atapi_pc *pc)
 {
 	memset(pc->c, 0, 12);
 	pc->retries = 0;
 	pc->flags = 0;
-	pc->request_transfer = 0;
-	pc->buffer = pc->pc_buffer;
-	pc->buffer_size = IDEFLOPPY_PC_BUFFER_SIZE;
-	pc->callback = &idefloppy_pc_callback;
+	pc->req_xfer = 0;
+	pc->buf = pc->pc_buf;
+	pc->buf_size = IDEFLOPPY_PC_BUFFER_SIZE;
+	pc->idefloppy_callback = &idefloppy_pc_callback;
 }
 
-static void idefloppy_create_request_sense_cmd(idefloppy_pc_t *pc)
+static void idefloppy_create_request_sense_cmd(struct ide_atapi_pc *pc)
 {
 	idefloppy_init_pc(pc);
 	pc->c[0] = GPCMD_REQUEST_SENSE;
 	pc->c[4] = 255;
-	pc->request_transfer = 18;
-	pc->callback = &idefloppy_request_sense_callback;
+	pc->req_xfer = 18;
+	pc->idefloppy_callback = &idefloppy_request_sense_callback;
 }
 
 /*
@@ -462,7 +385,7 @@
  */
 static void idefloppy_retry_pc(ide_drive_t *drive)
 {
-	idefloppy_pc_t *pc;
+	struct ide_atapi_pc *pc;
 	struct request *rq;
 
 	(void)ide_read_error(drive);
@@ -477,7 +400,7 @@
 {
 	idefloppy_floppy_t *floppy = drive->driver_data;
 	ide_hwif_t *hwif = drive->hwif;
-	idefloppy_pc_t *pc = floppy->pc;
+	struct ide_atapi_pc *pc = floppy->pc;
 	struct request *rq = pc->rq;
 	xfer_func_t *xferfunc;
 	unsigned int temp;
@@ -494,7 +417,7 @@
 					rq_data_dir(rq) ? "write" : "read");
 			pc->flags |= PC_FLAG_DMA_ERROR;
 		} else {
-			pc->actually_transferred = pc->request_transfer;
+			pc->xferred = pc->req_xfer;
 			idefloppy_update_buffers(drive, pc);
 		}
 		debug_log("DMA finished\n");
@@ -506,7 +429,7 @@
 	/* No more interrupts */
 	if ((stat & DRQ_STAT) == 0) {
 		debug_log("Packet command completed, %d bytes transferred\n",
-				pc->actually_transferred);
+				pc->xferred);
 		pc->flags &= ~PC_FLAG_DMA_IN_PROGRESS;
 
 		local_irq_enable_in_hardirq();
@@ -529,7 +452,7 @@
 		if (floppy->failed_pc == pc)
 			floppy->failed_pc = NULL;
 		/* Command finished - Call the callback function */
-		pc->callback(drive);
+		pc->idefloppy_callback(drive);
 		return ide_stopped;
 	}
 
@@ -542,10 +465,10 @@
 	}
 
 	/* Get the number of bytes to transfer */
-	bcount = (hwif->INB(IDE_BCOUNTH_REG) << 8) |
-		  hwif->INB(IDE_BCOUNTL_REG);
+	bcount = (hwif->INB(hwif->io_ports[IDE_BCOUNTH_OFFSET]) << 8) |
+		  hwif->INB(hwif->io_ports[IDE_BCOUNTL_OFFSET]);
 	/* on this interrupt */
-	ireason = hwif->INB(IDE_IREASON_REG);
+	ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
 
 	if (ireason & CD) {
 		printk(KERN_ERR "ide-floppy: CoD != 0 in %s\n", __func__);
@@ -561,13 +484,13 @@
 	}
 	if (!(pc->flags & PC_FLAG_WRITING)) {
 		/* Reading - Check that we have enough space */
-		temp = pc->actually_transferred + bcount;
-		if (temp > pc->request_transfer) {
-			if (temp > pc->buffer_size) {
+		temp = pc->xferred + bcount;
+		if (temp > pc->req_xfer) {
+			if (temp > pc->buf_size) {
 				printk(KERN_ERR "ide-floppy: The floppy wants "
 					"to send us more data than expected "
 					"- discarding data\n");
-				idefloppy_discard_data(drive, bcount);
+				ide_atapi_discard_data(drive, bcount);
 
 				ide_set_handler(drive,
 						&idefloppy_pc_intr,
@@ -584,15 +507,15 @@
 	else
 		xferfunc = hwif->atapi_input_bytes;
 
-	if (pc->buffer)
-		xferfunc(drive, pc->current_position, bcount);
+	if (pc->buf)
+		xferfunc(drive, pc->cur_pos, bcount);
 	else
 		ide_floppy_io_buffers(drive, pc, bcount,
 				      !!(pc->flags & PC_FLAG_WRITING));
 
 	/* Update the current position */
-	pc->actually_transferred += bcount;
-	pc->current_position += bcount;
+	pc->xferred += bcount;
+	pc->cur_pos += bcount;
 
 	/* And set the interrupt handler again */
 	ide_set_handler(drive, &idefloppy_pc_intr, IDEFLOPPY_WAIT_CMD, NULL);
@@ -606,6 +529,7 @@
  */
 static ide_startstop_t idefloppy_transfer_pc(ide_drive_t *drive)
 {
+	ide_hwif_t *hwif = drive->hwif;
 	ide_startstop_t startstop;
 	idefloppy_floppy_t *floppy = drive->driver_data;
 	u8 ireason;
@@ -615,7 +539,7 @@
 				"initiated yet DRQ isn't asserted\n");
 		return startstop;
 	}
-	ireason = drive->hwif->INB(IDE_IREASON_REG);
+	ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
 	if ((ireason & CD) == 0 || (ireason & IO)) {
 		printk(KERN_ERR "ide-floppy: (IO,CoD) != (0,1) while "
 				"issuing a packet command\n");
@@ -652,6 +576,7 @@
 
 static ide_startstop_t idefloppy_transfer_pc1(ide_drive_t *drive)
 {
+	ide_hwif_t *hwif = drive->hwif;
 	idefloppy_floppy_t *floppy = drive->driver_data;
 	ide_startstop_t startstop;
 	u8 ireason;
@@ -661,7 +586,7 @@
 				"initiated yet DRQ isn't asserted\n");
 		return startstop;
 	}
-	ireason = drive->hwif->INB(IDE_IREASON_REG);
+	ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
 	if ((ireason & CD) == 0 || (ireason & IO)) {
 		printk(KERN_ERR "ide-floppy: (IO,CoD) != (0,1) "
 				"while issuing a packet command\n");
@@ -682,7 +607,7 @@
 }
 
 static void ide_floppy_report_error(idefloppy_floppy_t *floppy,
-				    idefloppy_pc_t *pc)
+				    struct ide_atapi_pc *pc)
 {
 	/* suppress error messages resulting from Medium not present */
 	if (floppy->sense_key == 0x02 &&
@@ -698,7 +623,7 @@
 }
 
 static ide_startstop_t idefloppy_issue_pc(ide_drive_t *drive,
-		idefloppy_pc_t *pc)
+		struct ide_atapi_pc *pc)
 {
 	idefloppy_floppy_t *floppy = drive->driver_data;
 	ide_hwif_t *hwif = drive->hwif;
@@ -719,7 +644,7 @@
 		pc->error = IDEFLOPPY_ERROR_GENERAL;
 
 		floppy->failed_pc = NULL;
-		pc->callback(drive);
+		pc->idefloppy_callback(drive);
 		return ide_stopped;
 	}
 
@@ -727,9 +652,9 @@
 
 	pc->retries++;
 	/* We haven't transferred any data yet */
-	pc->actually_transferred = 0;
-	pc->current_position = pc->buffer;
-	bcount = min(pc->request_transfer, 63 * 1024);
+	pc->xferred = 0;
+	pc->cur_pos = pc->buf;
+	bcount = min(pc->req_xfer, 63 * 1024);
 
 	if (pc->flags & PC_FLAG_DMA_ERROR) {
 		pc->flags &= ~PC_FLAG_DMA_ERROR;
@@ -757,7 +682,7 @@
 		/* immediate */
 		pkt_xfer_routine = &idefloppy_transfer_pc;
 	}
-	
+
 	if (floppy->flags & IDEFLOPPY_FLAG_DRQ_INTERRUPT) {
 		/* Issue the packet command */
 		ide_execute_command(drive, WIN_PACKETCMD,
@@ -767,7 +692,7 @@
 		return ide_started;
 	} else {
 		/* Issue the packet command */
-		HWIF(drive)->OUTB(WIN_PACKETCMD, IDE_COMMAND_REG);
+		hwif->OUTB(WIN_PACKETCMD, hwif->io_ports[IDE_COMMAND_OFFSET]);
 		return (*pkt_xfer_routine) (drive);
 	}
 }
@@ -776,11 +701,11 @@
 {
 	debug_log("Reached %s\n", __func__);
 
-	idefloppy_do_end_request(drive, 1, 0);
+	idefloppy_end_request(drive, 1, 0);
 	return;
 }
 
-static void idefloppy_create_prevent_cmd(idefloppy_pc_t *pc, int prevent)
+static void idefloppy_create_prevent_cmd(struct ide_atapi_pc *pc, int prevent)
 {
 	debug_log("creating prevent removal command, prevent = %d\n", prevent);
 
@@ -789,39 +714,39 @@
 	pc->c[4] = prevent;
 }
 
-static void idefloppy_create_read_capacity_cmd(idefloppy_pc_t *pc)
+static void idefloppy_create_read_capacity_cmd(struct ide_atapi_pc *pc)
 {
 	idefloppy_init_pc(pc);
 	pc->c[0] = GPCMD_READ_FORMAT_CAPACITIES;
 	pc->c[7] = 255;
 	pc->c[8] = 255;
-	pc->request_transfer = 255;
+	pc->req_xfer = 255;
 }
 
-static void idefloppy_create_format_unit_cmd(idefloppy_pc_t *pc, int b, int l,
-					      int flags)
+static void idefloppy_create_format_unit_cmd(struct ide_atapi_pc *pc, int b,
+		int l, int flags)
 {
 	idefloppy_init_pc(pc);
 	pc->c[0] = GPCMD_FORMAT_UNIT;
 	pc->c[1] = 0x17;
 
-	memset(pc->buffer, 0, 12);
-	pc->buffer[1] = 0xA2;
+	memset(pc->buf, 0, 12);
+	pc->buf[1] = 0xA2;
 	/* Default format list header, u8 1: FOV/DCRT/IMM bits set */
 
 	if (flags & 1)				/* Verify bit on... */
-		pc->buffer[1] ^= 0x20;		/* ... turn off DCRT bit */
-	pc->buffer[3] = 8;
+		pc->buf[1] ^= 0x20;		/* ... turn off DCRT bit */
+	pc->buf[3] = 8;
 
-	put_unaligned(cpu_to_be32(b), (unsigned int *)(&pc->buffer[4]));
-	put_unaligned(cpu_to_be32(l), (unsigned int *)(&pc->buffer[8]));
-	pc->buffer_size = 12;
+	put_unaligned(cpu_to_be32(b), (unsigned int *)(&pc->buf[4]));
+	put_unaligned(cpu_to_be32(l), (unsigned int *)(&pc->buf[8]));
+	pc->buf_size = 12;
 	pc->flags |= PC_FLAG_WRITING;
 }
 
 /* A mode sense command is used to "sense" floppy parameters. */
-static void idefloppy_create_mode_sense_cmd(idefloppy_pc_t *pc, u8 page_code,
-		u8 type)
+static void idefloppy_create_mode_sense_cmd(struct ide_atapi_pc *pc,
+		u8 page_code, u8 type)
 {
 	u16 length = 8; /* sizeof(Mode Parameter Header) = 8 Bytes */
 
@@ -842,24 +767,24 @@
 				"in create_mode_sense_cmd\n");
 	}
 	put_unaligned(cpu_to_be16(length), (u16 *) &pc->c[7]);
-	pc->request_transfer = length;
+	pc->req_xfer = length;
 }
 
-static void idefloppy_create_start_stop_cmd(idefloppy_pc_t *pc, int start)
+static void idefloppy_create_start_stop_cmd(struct ide_atapi_pc *pc, int start)
 {
 	idefloppy_init_pc(pc);
 	pc->c[0] = GPCMD_START_STOP_UNIT;
 	pc->c[4] = start;
 }
 
-static void idefloppy_create_test_unit_ready_cmd(idefloppy_pc_t *pc)
+static void idefloppy_create_test_unit_ready_cmd(struct ide_atapi_pc *pc)
 {
 	idefloppy_init_pc(pc);
 	pc->c[0] = GPCMD_TEST_UNIT_READY;
 }
 
 static void idefloppy_create_rw_cmd(idefloppy_floppy_t *floppy,
-				    idefloppy_pc_t *pc, struct request *rq,
+				    struct ide_atapi_pc *pc, struct request *rq,
 				    unsigned long sector)
 {
 	int block = sector / floppy->bs_factor;
@@ -874,41 +799,41 @@
 	put_unaligned(cpu_to_be16(blocks), (unsigned short *)&pc->c[7]);
 	put_unaligned(cpu_to_be32(block), (unsigned int *) &pc->c[2]);
 
-	pc->callback = &idefloppy_rw_callback;
+	pc->idefloppy_callback = &idefloppy_rw_callback;
 	pc->rq = rq;
 	pc->b_count = cmd == READ ? 0 : rq->bio->bi_size;
 	if (rq->cmd_flags & REQ_RW)
 		pc->flags |= PC_FLAG_WRITING;
-	pc->buffer = NULL;
-	pc->request_transfer = pc->buffer_size = blocks * floppy->block_size;
+	pc->buf = NULL;
+	pc->req_xfer = pc->buf_size = blocks * floppy->block_size;
 	pc->flags |= PC_FLAG_DMA_RECOMMENDED;
 }
 
 static void idefloppy_blockpc_cmd(idefloppy_floppy_t *floppy,
-		idefloppy_pc_t *pc, struct request *rq)
+		struct ide_atapi_pc *pc, struct request *rq)
 {
 	idefloppy_init_pc(pc);
-	pc->callback = &idefloppy_rw_callback;
+	pc->idefloppy_callback = &idefloppy_rw_callback;
 	memcpy(pc->c, rq->cmd, sizeof(pc->c));
 	pc->rq = rq;
 	pc->b_count = rq->data_len;
 	if (rq->data_len && rq_data_dir(rq) == WRITE)
 		pc->flags |= PC_FLAG_WRITING;
-	pc->buffer = rq->data;
+	pc->buf = rq->data;
 	if (rq->bio)
 		pc->flags |= PC_FLAG_DMA_RECOMMENDED;
 	/*
 	 * possibly problematic, doesn't look like ide-floppy correctly
 	 * handled scattered requests if dma fails...
 	 */
-	pc->request_transfer = pc->buffer_size = rq->data_len;
+	pc->req_xfer = pc->buf_size = rq->data_len;
 }
 
 static ide_startstop_t idefloppy_do_request(ide_drive_t *drive,
 		struct request *rq, sector_t block_s)
 {
 	idefloppy_floppy_t *floppy = drive->driver_data;
-	idefloppy_pc_t *pc;
+	struct ide_atapi_pc *pc;
 	unsigned long block = (unsigned long)block_s;
 
 	debug_log("dev: %s, cmd_type: %x, errors: %d\n",
@@ -924,7 +849,7 @@
 		else
 			printk(KERN_ERR "ide-floppy: %s: I/O error\n",
 				drive->name);
-		idefloppy_do_end_request(drive, 0, 0);
+		idefloppy_end_request(drive, 0, 0);
 		return ide_stopped;
 	}
 	if (blk_fs_request(rq)) {
@@ -932,20 +857,20 @@
 		    (rq->nr_sectors % floppy->bs_factor)) {
 			printk(KERN_ERR "%s: unsupported r/w request size\n",
 					drive->name);
-			idefloppy_do_end_request(drive, 0, 0);
+			idefloppy_end_request(drive, 0, 0);
 			return ide_stopped;
 		}
 		pc = idefloppy_next_pc_storage(drive);
 		idefloppy_create_rw_cmd(floppy, pc, rq, block);
 	} else if (blk_special_request(rq)) {
-		pc = (idefloppy_pc_t *) rq->buffer;
+		pc = (struct ide_atapi_pc *) rq->buffer;
 	} else if (blk_pc_request(rq)) {
 		pc = idefloppy_next_pc_storage(drive);
 		idefloppy_blockpc_cmd(floppy, pc, rq);
 	} else {
 		blk_dump_rq_flags(rq,
 			"ide-floppy: unsupported command in queue");
-		idefloppy_do_end_request(drive, 0, 0);
+		idefloppy_end_request(drive, 0, 0);
 		return ide_stopped;
 	}
 
@@ -957,7 +882,7 @@
  * Add a special packet command request to the tail of the request queue,
  * and wait for it to be serviced.
  */
-static int idefloppy_queue_pc_tail(ide_drive_t *drive, idefloppy_pc_t *pc)
+static int idefloppy_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc)
 {
 	struct ide_floppy_obj *floppy = drive->driver_data;
 	struct request rq;
@@ -977,7 +902,7 @@
 static int ide_floppy_get_flexible_disk_page(ide_drive_t *drive)
 {
 	idefloppy_floppy_t *floppy = drive->driver_data;
-	idefloppy_pc_t pc;
+	struct ide_atapi_pc pc;
 	u8 *page;
 	int capacity, lba_capacity;
 	u16 transfer_rate, sector_size, cyls, rpm;
@@ -991,16 +916,16 @@
 				" parameters\n");
 		return 1;
 	}
-	floppy->wp = !!(pc.buffer[3] & 0x80);
+	floppy->wp = !!(pc.buf[3] & 0x80);
 	set_disk_ro(floppy->disk, floppy->wp);
-	page = &pc.buffer[8];
+	page = &pc.buf[8];
 
-	transfer_rate = be16_to_cpu(*(u16 *)&pc.buffer[8 + 2]);
-	sector_size   = be16_to_cpu(*(u16 *)&pc.buffer[8 + 6]);
-	cyls          = be16_to_cpu(*(u16 *)&pc.buffer[8 + 8]);
-	rpm           = be16_to_cpu(*(u16 *)&pc.buffer[8 + 28]);
-	heads         = pc.buffer[8 + 4];
-	sectors       = pc.buffer[8 + 5];
+	transfer_rate = be16_to_cpu(*(u16 *)&pc.buf[8 + 2]);
+	sector_size   = be16_to_cpu(*(u16 *)&pc.buf[8 + 6]);
+	cyls          = be16_to_cpu(*(u16 *)&pc.buf[8 + 8]);
+	rpm           = be16_to_cpu(*(u16 *)&pc.buf[8 + 28]);
+	heads         = pc.buf[8 + 4];
+	sectors       = pc.buf[8 + 5];
 
 	capacity = cyls * heads * sectors * sector_size;
 
@@ -1029,7 +954,7 @@
 static int idefloppy_get_sfrp_bit(ide_drive_t *drive)
 {
 	idefloppy_floppy_t *floppy = drive->driver_data;
-	idefloppy_pc_t pc;
+	struct ide_atapi_pc pc;
 
 	floppy->srfp = 0;
 	idefloppy_create_mode_sense_cmd(&pc, IDEFLOPPY_CAPABILITIES_PAGE,
@@ -1039,7 +964,7 @@
 	if (idefloppy_queue_pc_tail(drive, &pc))
 		return 1;
 
-	floppy->srfp = pc.buffer[8 + 2] & 0x40;
+	floppy->srfp = pc.buf[8 + 2] & 0x40;
 	return (0);
 }
 
@@ -1050,7 +975,7 @@
 static int ide_floppy_get_capacity(ide_drive_t *drive)
 {
 	idefloppy_floppy_t *floppy = drive->driver_data;
-	idefloppy_pc_t pc;
+	struct ide_atapi_pc pc;
 	u8 *cap_desc;
 	u8 header_len, desc_cnt;
 	int i, rc = 1, blocks, length;
@@ -1066,15 +991,15 @@
 		printk(KERN_ERR "ide-floppy: Can't get floppy parameters\n");
 		return 1;
 	}
-	header_len = pc.buffer[3];
-	cap_desc = &pc.buffer[4];
+	header_len = pc.buf[3];
+	cap_desc = &pc.buf[4];
 	desc_cnt = header_len / 8; /* capacity descriptor of 8 bytes */
 
 	for (i = 0; i < desc_cnt; i++) {
 		unsigned int desc_start = 4 + i*8;
 
-		blocks = be32_to_cpu(*(u32 *)&pc.buffer[desc_start]);
-		length = be16_to_cpu(*(u16 *)&pc.buffer[desc_start + 6]);
+		blocks = be32_to_cpu(*(u32 *)&pc.buf[desc_start]);
+		length = be16_to_cpu(*(u16 *)&pc.buf[desc_start + 6]);
 
 		debug_log("Descriptor %d: %dkB, %d blocks, %d sector size\n",
 				i, blocks * length / 1024, blocks, length);
@@ -1085,7 +1010,7 @@
 		 * the code below is valid only for the 1st descriptor, ie i=0
 		 */
 
-		switch (pc.buffer[desc_start + 4] & 0x03) {
+		switch (pc.buf[desc_start + 4] & 0x03) {
 		/* Clik! drive returns this instead of CAPACITY_CURRENT */
 		case CAPACITY_UNFORMATTED:
 			if (!(floppy->flags & IDEFLOPPY_FLAG_CLIK_DRIVE))
@@ -1130,7 +1055,7 @@
 			break;
 		}
 		debug_log("Descriptor 0 Code: %d\n",
-			  pc.buffer[desc_start + 4] & 0x03);
+			  pc.buf[desc_start + 4] & 0x03);
 	}
 
 	/* Clik! disk does not support get_flexible_disk_page */
@@ -1162,7 +1087,7 @@
 
 static int ide_floppy_get_format_capacities(ide_drive_t *drive, int __user *arg)
 {
-	idefloppy_pc_t pc;
+	struct ide_atapi_pc pc;
 	u8 header_len, desc_cnt;
 	int i, blocks, length, u_array_size, u_index;
 	int __user *argp;
@@ -1178,7 +1103,7 @@
 		printk(KERN_ERR "ide-floppy: Can't get floppy parameters\n");
 		return (-EIO);
 	}
-	header_len = pc.buffer[3];
+	header_len = pc.buf[3];
 	desc_cnt = header_len / 8; /* capacity descriptor of 8 bytes */
 
 	u_index = 0;
@@ -1195,8 +1120,8 @@
 		if (u_index >= u_array_size)
 			break;	/* User-supplied buffer too small */
 
-		blocks = be32_to_cpu(*(u32 *)&pc.buffer[desc_start]);
-		length = be16_to_cpu(*(u16 *)&pc.buffer[desc_start + 6]);
+		blocks = be32_to_cpu(*(u32 *)&pc.buf[desc_start]);
+		length = be16_to_cpu(*(u16 *)&pc.buf[desc_start + 6]);
 
 		if (put_user(blocks, argp))
 			return(-EFAULT);
@@ -1227,7 +1152,7 @@
 static int idefloppy_get_format_progress(ide_drive_t *drive, int __user *arg)
 {
 	idefloppy_floppy_t *floppy = drive->driver_data;
-	idefloppy_pc_t pc;
+	struct ide_atapi_pc pc;
 	int progress_indication = 0x10000;
 
 	if (floppy->srfp) {
@@ -1271,33 +1196,39 @@
  */
 static int idefloppy_identify_device(ide_drive_t *drive, struct hd_driveid *id)
 {
-	struct idefloppy_id_gcw gcw;
+	u8 gcw[2];
+	u8 device_type, protocol, removable, drq_type, packet_size;
 
 	*((u16 *) &gcw) = id->config;
 
+	device_type =  gcw[1] & 0x1F;
+	removable   = (gcw[0] & 0x80) >> 7;
+	protocol    = (gcw[1] & 0xC0) >> 6;
+	drq_type    = (gcw[0] & 0x60) >> 5;
+	packet_size =  gcw[0] & 0x03;
+
 #ifdef CONFIG_PPC
 	/* kludge for Apple PowerBook internal zip */
-	if ((gcw.device_type == 5) &&
-	    !strstr(id->model, "CD-ROM") &&
-	    strstr(id->model, "ZIP"))
-		gcw.device_type = 0;
+	if (device_type == 5 &&
+	    !strstr(id->model, "CD-ROM") && strstr(id->model, "ZIP"))
+		device_type = 0;
 #endif
 
-	if (gcw.protocol != 2)
+	if (protocol != 2)
 		printk(KERN_ERR "ide-floppy: Protocol (0x%02x) is not ATAPI\n",
-				gcw.protocol);
-	else if (gcw.device_type != 0)
+			protocol);
+	else if (device_type != 0)
 		printk(KERN_ERR "ide-floppy: Device type (0x%02x) is not set "
-				"to floppy\n", gcw.device_type);
-	else if (!gcw.removable)
+				"to floppy\n", device_type);
+	else if (!removable)
 		printk(KERN_ERR "ide-floppy: The removable flag is not set\n");
-	else if (gcw.drq_type == 3) {
+	else if (drq_type == 3)
 		printk(KERN_ERR "ide-floppy: Sorry, DRQ type (0x%02x) not "
-				"supported\n", gcw.drq_type);
-	} else if (gcw.packet_size != 0) {
+				"supported\n", drq_type);
+	else if (packet_size != 0)
 		printk(KERN_ERR "ide-floppy: Packet size (0x%02x) is not 12 "
-				"bytes long\n", gcw.packet_size);
-	} else
+				"bytes\n", packet_size);
+	else
 		return 1;
 	return 0;
 }
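
The hunk above drops the packed struct idefloppy_id_gcw bitfield in favour of explicit shifts and masks over the two bytes of the general configuration word (IDENTIFY word 0: protocol in bits 15:14, device type in bits 12:8, removable media in bit 7, DRQ type in bits 6:5, packet size in bits 1:0). A minimal sketch of the same decoding done on the u16 directly, which also avoids the byte-order assumption hidden in the *((u16 *) &gcw) punning; an illustration only, not part of the patch:

	u16 config = id->config;		/* IDENTIFY word 0 */

	u8 protocol    = (config >> 14) & 0x3;	/* 2 = ATAPI */
	u8 device_type = (config >>  8) & 0x1f;	/* 0 = direct-access (floppy) */
	u8 removable   = (config >>  7) & 0x1;
	u8 drq_type    = (config >>  5) & 0x3;
	u8 packet_size =  config        & 0x3;	/* 0 = 12-byte packets */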
@@ -1322,11 +1253,12 @@
 
 static void idefloppy_setup(ide_drive_t *drive, idefloppy_floppy_t *floppy)
 {
-	struct idefloppy_id_gcw gcw;
+	u8 gcw[2];
 
 	*((u16 *) &gcw) = drive->id->config;
 	floppy->pc = floppy->pc_stack;
-	if (gcw.drq_type == 1)
+
+	if (((gcw[0] & 0x60) >> 5) == 1)
 		floppy->flags |= IDEFLOPPY_FLAG_DRQ_INTERRUPT;
 	/*
 	 * We used to check revisions here. At this point however I'm giving up.
@@ -1413,7 +1345,7 @@
 	.media			= ide_floppy,
 	.supports_dsc_overlap	= 0,
 	.do_request		= idefloppy_do_request,
-	.end_request		= idefloppy_do_end_request,
+	.end_request		= idefloppy_end_request,
 	.error			= __ide_error,
 	.abort			= __ide_abort,
 #ifdef CONFIG_IDE_PROC_FS
@@ -1426,7 +1358,7 @@
 	struct gendisk *disk = inode->i_bdev->bd_disk;
 	struct ide_floppy_obj *floppy;
 	ide_drive_t *drive;
-	idefloppy_pc_t pc;
+	struct ide_atapi_pc pc;
 	int ret = 0;
 
 	debug_log("Reached %s\n", __func__);
@@ -1489,7 +1421,7 @@
 	struct gendisk *disk = inode->i_bdev->bd_disk;
 	struct ide_floppy_obj *floppy = ide_floppy_g(disk);
 	ide_drive_t *drive = floppy->drive;
-	idefloppy_pc_t pc;
+	struct ide_atapi_pc pc;
 
 	debug_log("Reached %s\n", __func__);
 
@@ -1521,8 +1453,8 @@
 	return 0;
 }
 
-static int ide_floppy_lockdoor(idefloppy_floppy_t *floppy, idefloppy_pc_t *pc,
-			       unsigned long arg, unsigned int cmd)
+static int ide_floppy_lockdoor(idefloppy_floppy_t *floppy,
+		struct ide_atapi_pc *pc, unsigned long arg, unsigned int cmd)
 {
 	if (floppy->openers > 1)
 		return -EBUSY;
@@ -1551,7 +1483,7 @@
 				  int __user *arg)
 {
 	int blocks, length, flags, err = 0;
-	idefloppy_pc_t pc;
+	struct ide_atapi_pc pc;
 
 	if (floppy->openers > 1) {
 		/* Don't format if someone is using the disk */
@@ -1602,7 +1534,7 @@
 	struct block_device *bdev = inode->i_bdev;
 	struct ide_floppy_obj *floppy = ide_floppy_g(bdev->bd_disk);
 	ide_drive_t *drive = floppy->drive;
-	idefloppy_pc_t pc;
+	struct ide_atapi_pc pc;
 	void __user *argp = (void __user *)arg;
 	int err;
 
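A note on the renamed buffer accesses throughout this file: multi-byte big-endian fields are still read by casting into the byte buffer, as in be16_to_cpu(*(u16 *)&pc.buf[8 + 2]), where 8 is the mode-parameter header length and the remaining offsets index fields of the flexible disk page. The ide-tape hunks below wrap the same pattern in get_unaligned(); a stricter spelling of these reads, as a sketch, would be:

	transfer_rate = be16_to_cpu(get_unaligned((u16 *)&pc.buf[8 + 2]));
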
diff --git a/drivers/ide/ide-generic.c b/drivers/ide/ide-generic.c
index 9ebec08..25fda0a 100644
--- a/drivers/ide/ide-generic.c
+++ b/drivers/ide/ide-generic.c
@@ -1,17 +1,89 @@
 /*
  * generic/default IDE host driver
  *
- * Copyright (C) 2004 Bartlomiej Zolnierkiewicz
+ * Copyright (C) 2004, 2008 Bartlomiej Zolnierkiewicz
  * This code was split off from ide.c.  See it for original copyrights.
  *
  * May be copied or modified under the terms of the GNU General Public License.
  */
 
+/*
+ * For special cases new interfaces may be added using sysfs, i.e.
+ *
+ *	echo -n "0x168:0x36e:10" > /sys/class/ide_generic/add
+ *
+ * will add an interface using I/O ports 0x168-0x16f/0x36e and IRQ 10.
+ */
+
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/ide.h>
 
+#define DRV_NAME	"ide_generic"
+
+static ssize_t store_add(struct class *cls, const char *buf, size_t n)
+{
+	ide_hwif_t *hwif;
+	unsigned int base, ctl;
+	int irq;
+	hw_regs_t hw;
+	u8 idx[] = { 0xff, 0xff, 0xff, 0xff };
+
+	if (sscanf(buf, "%x:%x:%d", &base, &ctl, &irq) != 3)
+		return -EINVAL;
+
+	hwif = ide_find_port(base);
+	if (hwif == NULL)
+		return -ENOENT;
+
+	memset(&hw, 0, sizeof(hw));
+	ide_std_init_ports(&hw, base, ctl);
+	hw.irq = irq;
+	hw.chipset = ide_generic;
+
+	ide_init_port_hw(hwif, &hw);
+
+	idx[0] = hwif->index;
+
+	ide_device_add(idx, NULL);
+
+	return n;
+}
+
+static struct class_attribute ide_generic_class_attrs[] = {
+	__ATTR(add, S_IWUSR, NULL, store_add),
+	__ATTR_NULL
+};
+
+static void ide_generic_class_release(struct class *cls)
+{
+	kfree(cls);
+}
+
+static int __init ide_generic_sysfs_init(void)
+{
+	struct class *cls;
+	int rc;
+
+	cls = kzalloc(sizeof(*cls), GFP_KERNEL);
+	if (!cls)
+		return -ENOMEM;
+
+	cls->name = DRV_NAME;
+	cls->owner = THIS_MODULE;
+	cls->class_release = ide_generic_class_release;
+	cls->class_attrs = ide_generic_class_attrs;
+
+	rc = class_register(cls);
+	if (rc) {
+		kfree(cls);
+		return rc;
+	}
+
+	return 0;
+}
+
 static int __init ide_generic_init(void)
 {
 	u8 idx[MAX_HWIFS];
@@ -19,15 +91,26 @@
 
 	for (i = 0; i < MAX_HWIFS; i++) {
 		ide_hwif_t *hwif = &ide_hwifs[i];
+		unsigned long io_addr = ide_default_io_base(i);
+		hw_regs_t hw;
 
-		if (hwif->io_ports[IDE_DATA_OFFSET] && !hwif->present)
+		if (hwif->chipset == ide_unknown && io_addr) {
+			memset(&hw, 0, sizeof(hw));
+			ide_std_init_ports(&hw, io_addr, io_addr + 0x206);
+			hw.irq = ide_default_irq(io_addr);
+			ide_init_port_hw(hwif, &hw);
+
 			idx[i] = i;
-		else
+		} else
 			idx[i] = 0xff;
 	}
 
 	ide_device_add_all(idx, NULL);
 
+	if (ide_generic_sysfs_init())
+		printk(KERN_ERR DRV_NAME ": failed to create ide_generic "
+					 "class\n");
+
 	return 0;
 }
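
store_add() parses the base:ctl:irq triple with sscanf(buf, "%x:%x:%d", ...) and returns -ENOENT when ide_find_port() cannot supply a hwif for the requested base. A small user-space sketch of the usage documented in the comment at the top of the file (path as given there, error handling trimmed):

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/class/ide_generic/add", "w");

		if (!f)
			return 1;
		/* probe I/O ports 0x168-0x16f/0x36e, IRQ 10 */
		fprintf(f, "0x168:0x36e:10");
		return fclose(f) != 0;	/* a write error surfaces on close */
	}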
 
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 7153796..31e5afa 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -301,39 +301,45 @@
 	struct ide_taskfile *tf = &task->tf;
 
 	if (task->tf_flags & IDE_TFLAG_IN_DATA) {
-		u16 data = hwif->INW(IDE_DATA_REG);
+		u16 data = hwif->INW(hwif->io_ports[IDE_DATA_OFFSET]);
 
 		tf->data = data & 0xff;
 		tf->hob_data = (data >> 8) & 0xff;
 	}
 
 	/* be sure we're looking at the low order bits */
-	hwif->OUTB(drive->ctl & ~0x80, IDE_CONTROL_REG);
+	hwif->OUTB(drive->ctl & ~0x80, hwif->io_ports[IDE_CONTROL_OFFSET]);
 
 	if (task->tf_flags & IDE_TFLAG_IN_NSECT)
-		tf->nsect  = hwif->INB(IDE_NSECTOR_REG);
+		tf->nsect  = hwif->INB(hwif->io_ports[IDE_NSECTOR_OFFSET]);
 	if (task->tf_flags & IDE_TFLAG_IN_LBAL)
-		tf->lbal   = hwif->INB(IDE_SECTOR_REG);
+		tf->lbal   = hwif->INB(hwif->io_ports[IDE_SECTOR_OFFSET]);
 	if (task->tf_flags & IDE_TFLAG_IN_LBAM)
-		tf->lbam   = hwif->INB(IDE_LCYL_REG);
+		tf->lbam   = hwif->INB(hwif->io_ports[IDE_LCYL_OFFSET]);
 	if (task->tf_flags & IDE_TFLAG_IN_LBAH)
-		tf->lbah   = hwif->INB(IDE_HCYL_REG);
+		tf->lbah   = hwif->INB(hwif->io_ports[IDE_HCYL_OFFSET]);
 	if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
-		tf->device = hwif->INB(IDE_SELECT_REG);
+		tf->device = hwif->INB(hwif->io_ports[IDE_SELECT_OFFSET]);
 
 	if (task->tf_flags & IDE_TFLAG_LBA48) {
-		hwif->OUTB(drive->ctl | 0x80, IDE_CONTROL_REG);
+		hwif->OUTB(drive->ctl | 0x80,
+			   hwif->io_ports[IDE_CONTROL_OFFSET]);
 
 		if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
-			tf->hob_feature = hwif->INB(IDE_FEATURE_REG);
+			tf->hob_feature =
+				hwif->INB(hwif->io_ports[IDE_FEATURE_OFFSET]);
 		if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
-			tf->hob_nsect   = hwif->INB(IDE_NSECTOR_REG);
+			tf->hob_nsect   =
+				hwif->INB(hwif->io_ports[IDE_NSECTOR_OFFSET]);
 		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
-			tf->hob_lbal    = hwif->INB(IDE_SECTOR_REG);
+			tf->hob_lbal    =
+				hwif->INB(hwif->io_ports[IDE_SECTOR_OFFSET]);
 		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
-			tf->hob_lbam    = hwif->INB(IDE_LCYL_REG);
+			tf->hob_lbam    =
+				hwif->INB(hwif->io_ports[IDE_LCYL_OFFSET]);
 		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
-			tf->hob_lbah    = hwif->INB(IDE_HCYL_REG);
+			tf->hob_lbah    =
+				hwif->INB(hwif->io_ports[IDE_HCYL_OFFSET]);
 	}
 }
 
@@ -448,7 +454,8 @@
 		if (err == ABRT_ERR) {
 			if (drive->select.b.lba &&
 			    /* some newer drives don't support WIN_SPECIFY */
-			    hwif->INB(IDE_COMMAND_REG) == WIN_SPECIFY)
+			    hwif->INB(hwif->io_ports[IDE_COMMAND_OFFSET]) ==
+				WIN_SPECIFY)
 				return ide_stopped;
 		} else if ((err & BAD_CRC) == BAD_CRC) {
 			/* UDMA crc error, just retry the operation */
@@ -500,7 +507,8 @@
 
 	if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT))
 		/* force an abort */
-		hwif->OUTB(WIN_IDLEIMMEDIATE, IDE_COMMAND_REG);
+		hwif->OUTB(WIN_IDLEIMMEDIATE,
+			   hwif->io_ports[IDE_COMMAND_OFFSET]);
 
 	if (rq->errors >= ERROR_MAX) {
 		ide_kill_rq(drive, rq);
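
The mechanical change in this file (and in ide-iops.c and ide-probe.c below) replaces the IDE_*_REG shorthands with explicit indexing into the port array. The old names were macros that implicitly required a variable named drive in scope, roughly of this assumed shape from the ide.h of this era:

	#define IDE_DATA_REG	(HWIF(drive)->io_ports[IDE_DATA_OFFSET])

so hwif->io_ports[IDE_DATA_OFFSET] is the same access spelled out against a local hwif rather than hidden behind the macro.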
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index e77cee0..4594421 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -158,9 +158,12 @@
 
 void SELECT_DRIVE (ide_drive_t *drive)
 {
-	if (HWIF(drive)->selectproc)
-		HWIF(drive)->selectproc(drive);
-	HWIF(drive)->OUTB(drive->select.all, IDE_SELECT_REG);
+	ide_hwif_t *hwif = drive->hwif;
+
+	if (hwif->selectproc)
+		hwif->selectproc(drive);
+
+	hwif->OUTB(drive->select.all, hwif->io_ports[IDE_SELECT_OFFSET]);
 }
 
 void SELECT_MASK (ide_drive_t *drive, int mask)
@@ -194,15 +197,18 @@
 	if (io_32bit) {
 		if (io_32bit & 2) {
 			unsigned long flags;
+
 			local_irq_save(flags);
-			ata_vlb_sync(drive, IDE_NSECTOR_REG);
-			hwif->INSL(IDE_DATA_REG, buffer, wcount);
+			ata_vlb_sync(drive, hwif->io_ports[IDE_NSECTOR_OFFSET]);
+			hwif->INSL(hwif->io_ports[IDE_DATA_OFFSET], buffer,
+				   wcount);
 			local_irq_restore(flags);
 		} else
-			hwif->INSL(IDE_DATA_REG, buffer, wcount);
-	} else {
-		hwif->INSW(IDE_DATA_REG, buffer, wcount<<1);
-	}
+			hwif->INSL(hwif->io_ports[IDE_DATA_OFFSET], buffer,
+				   wcount);
+	} else
+		hwif->INSW(hwif->io_ports[IDE_DATA_OFFSET], buffer,
+			   wcount << 1);
 }
 
 /*
@@ -216,15 +222,18 @@
 	if (io_32bit) {
 		if (io_32bit & 2) {
 			unsigned long flags;
+
 			local_irq_save(flags);
-			ata_vlb_sync(drive, IDE_NSECTOR_REG);
-			hwif->OUTSL(IDE_DATA_REG, buffer, wcount);
+			ata_vlb_sync(drive, hwif->io_ports[IDE_NSECTOR_OFFSET]);
+			hwif->OUTSL(hwif->io_ports[IDE_DATA_OFFSET], buffer,
+				    wcount);
 			local_irq_restore(flags);
 		} else
-			hwif->OUTSL(IDE_DATA_REG, buffer, wcount);
-	} else {
-		hwif->OUTSW(IDE_DATA_REG, buffer, wcount<<1);
-	}
+			hwif->OUTSL(hwif->io_ports[IDE_DATA_OFFSET], buffer,
+				    wcount);
+	} else
+		hwif->OUTSW(hwif->io_ports[IDE_DATA_OFFSET], buffer,
+			    wcount << 1);
 }
 
 /*
@@ -243,13 +252,15 @@
 #if defined(CONFIG_ATARI) || defined(CONFIG_Q40)
 	if (MACH_IS_ATARI || MACH_IS_Q40) {
 		/* Atari has a byte-swapped IDE interface */
-		insw_swapw(IDE_DATA_REG, buffer, bytecount / 2);
+		insw_swapw(hwif->io_ports[IDE_DATA_OFFSET], buffer,
+			   bytecount / 2);
 		return;
 	}
 #endif /* CONFIG_ATARI || CONFIG_Q40 */
 	hwif->ata_input_data(drive, buffer, bytecount / 4);
 	if ((bytecount & 0x03) >= 2)
-		hwif->INSW(IDE_DATA_REG, ((u8 *)buffer)+(bytecount & ~0x03), 1);
+		hwif->INSW(hwif->io_ports[IDE_DATA_OFFSET],
+			   (u8 *)buffer + (bytecount & ~0x03), 1);
 }
 
 static void atapi_output_bytes(ide_drive_t *drive, void *buffer, u32 bytecount)
@@ -260,13 +271,15 @@
 #if defined(CONFIG_ATARI) || defined(CONFIG_Q40)
 	if (MACH_IS_ATARI || MACH_IS_Q40) {
 		/* Atari has a byte-swapped IDE interface */
-		outsw_swapw(IDE_DATA_REG, buffer, bytecount / 2);
+		outsw_swapw(hwif->io_ports[IDE_DATA_OFFSET], buffer,
+			    bytecount / 2);
 		return;
 	}
 #endif /* CONFIG_ATARI || CONFIG_Q40 */
 	hwif->ata_output_data(drive, buffer, bytecount / 4);
 	if ((bytecount & 0x03) >= 2)
-		hwif->OUTSW(IDE_DATA_REG, ((u8*)buffer)+(bytecount & ~0x03), 1);
+		hwif->OUTSW(hwif->io_ports[IDE_DATA_OFFSET],
+			    (u8 *)buffer + (bytecount & ~0x03), 1);
 }
 
 void default_hwif_transport(ide_hwif_t *hwif)
@@ -429,7 +442,7 @@
 	 * an interrupt with another pci card/device.  We make no assumptions
 	 * about possible isa-pnp and pci-pnp issues yet.
 	 */
-	if (IDE_CONTROL_REG)
+	if (hwif->io_ports[IDE_CONTROL_OFFSET])
 		stat = ide_read_altstatus(drive);
 	else
 		/* Note: this may clear a pending IRQ!! */
@@ -631,7 +644,7 @@
 	SELECT_MASK(drive, 1);
 	ide_set_irq(drive, 1);
 	msleep(50);
-	hwif->OUTB(WIN_IDENTIFY, IDE_COMMAND_REG);
+	hwif->OUTB(WIN_IDENTIFY, hwif->io_ports[IDE_COMMAND_OFFSET]);
 	timeout = jiffies + WAIT_WORSTCASE;
 	do {
 		if (time_after(jiffies, timeout)) {
@@ -718,9 +731,10 @@
 	SELECT_MASK(drive, 0);
 	udelay(1);
 	ide_set_irq(drive, 0);
-	hwif->OUTB(speed, IDE_NSECTOR_REG);
-	hwif->OUTB(SETFEATURES_XFER, IDE_FEATURE_REG);
-	hwif->OUTBSYNC(drive, WIN_SETFEATURES, IDE_COMMAND_REG);
+	hwif->OUTB(speed, hwif->io_ports[IDE_NSECTOR_OFFSET]);
+	hwif->OUTB(SETFEATURES_XFER, hwif->io_ports[IDE_FEATURE_OFFSET]);
+	hwif->OUTBSYNC(drive, WIN_SETFEATURES,
+		       hwif->io_ports[IDE_COMMAND_OFFSET]);
 	if (drive->quirk_list == 2)
 		ide_set_irq(drive, 1);
 
@@ -828,7 +842,7 @@
 
 	spin_lock_irqsave(&ide_lock, flags);
 	__ide_set_handler(drive, handler, timeout, expiry);
-	hwif->OUTBSYNC(drive, cmd, IDE_COMMAND_REG);
+	hwif->OUTBSYNC(drive, cmd, hwif->io_ports[IDE_COMMAND_OFFSET]);
 	/*
 	 * Drive takes 400nS to respond, we must avoid the IRQ being
 	 * serviced before that.
@@ -1009,7 +1023,8 @@
 	unsigned long flags;
 	ide_hwif_t *hwif;
 	ide_hwgroup_t *hwgroup;
-	
+	u8 ctl;
+
 	spin_lock_irqsave(&ide_lock, flags);
 	hwif = HWIF(drive);
 	hwgroup = HWGROUP(drive);
@@ -1023,7 +1038,8 @@
 		pre_reset(drive);
 		SELECT_DRIVE(drive);
 		udelay (20);
-		hwif->OUTBSYNC(drive, WIN_SRST, IDE_COMMAND_REG);
+		hwif->OUTBSYNC(drive, WIN_SRST,
+			       hwif->io_ports[IDE_COMMAND_OFFSET]);
 		ndelay(400);
 		hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
 		hwgroup->polling = 1;
@@ -1039,7 +1055,7 @@
 	for (unit = 0; unit < MAX_DRIVES; ++unit)
 		pre_reset(&hwif->drives[unit]);
 
-	if (!IDE_CONTROL_REG) {
+	if (hwif->io_ports[IDE_CONTROL_OFFSET] == 0) {
 		spin_unlock_irqrestore(&ide_lock, flags);
 		return ide_stopped;
 	}
@@ -1054,16 +1070,14 @@
 	 * recover from reset very quickly, saving us the first 50ms wait time.
 	 */
 	/* set SRST and nIEN */
-	hwif->OUTBSYNC(drive, drive->ctl|6,IDE_CONTROL_REG);
+	hwif->OUTBSYNC(drive, drive->ctl|6, hwif->io_ports[IDE_CONTROL_OFFSET]);
 	/* more than enough time */
 	udelay(10);
-	if (drive->quirk_list == 2) {
-		/* clear SRST and nIEN */
-		hwif->OUTBSYNC(drive, drive->ctl, IDE_CONTROL_REG);
-	} else {
-		/* clear SRST, leave nIEN */
-		hwif->OUTBSYNC(drive, drive->ctl|2, IDE_CONTROL_REG);
-	}
+	if (drive->quirk_list == 2)
+		ctl = drive->ctl;	/* clear SRST and nIEN */
+	else
+		ctl = drive->ctl | 2;	/* clear SRST, leave nIEN */
+	hwif->OUTBSYNC(drive, ctl, hwif->io_ports[IDE_CONTROL_OFFSET]);
 	/* more than enough time */
 	udelay(10);
 	hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
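
For reference, the values written to the control register in the reset hunk above are the ATA device-control bits SRST (bit 2, 0x04) and nIEN (bit 1, 0x02): drive->ctl | 6 asserts soft reset with interrupts masked, and the second write deasserts SRST, keeping nIEN unless the drive is on quirk_list 2. The refactored sequence, shown in one place:

	/* assert SRST + nIEN, hold well past the 5us minimum, drop SRST */
	hwif->OUTBSYNC(drive, drive->ctl | 6, hwif->io_ports[IDE_CONTROL_OFFSET]);
	udelay(10);
	ctl = (drive->quirk_list == 2) ? drive->ctl : drive->ctl | 2;
	hwif->OUTBSYNC(drive, ctl, hwif->io_ports[IDE_CONTROL_OFFSET]);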
diff --git a/drivers/ide/ide-pnp.c b/drivers/ide/ide-pnp.c
index c14bb53..34c2ad3 100644
--- a/drivers/ide/ide-pnp.c
+++ b/drivers/ide/ide-pnp.c
@@ -62,7 +62,7 @@
 	ide_hwif_t *hwif = pnp_get_drvdata(dev);
 
 	if (hwif)
-		ide_unregister(hwif->index, 0, 0);
+		ide_unregister(hwif->index);
 	else
 		printk(KERN_ERR "idepnp: Unable to remove device, please report.\n");
 }
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 47a1149..6a196c2 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -271,7 +271,7 @@
 	/* take a deep breath */
 	msleep(50);
 
-	if (IDE_CONTROL_REG) {
+	if (hwif->io_ports[IDE_CONTROL_OFFSET]) {
 		a = ide_read_altstatus(drive);
 		s = ide_read_status(drive);
 		if ((a ^ s) & ~INDEX_STAT)
@@ -289,10 +289,10 @@
 	 */
 	if ((cmd == WIN_PIDENTIFY))
 		/* disable dma & overlap */
-		hwif->OUTB(0, IDE_FEATURE_REG);
+		hwif->OUTB(0, hwif->io_ports[IDE_FEATURE_OFFSET]);
 
 	/* ask drive for ID */
-	hwif->OUTB(cmd, IDE_COMMAND_REG);
+	hwif->OUTB(cmd, hwif->io_ports[IDE_COMMAND_OFFSET]);
 
 	timeout = ((cmd == WIN_IDENTIFY) ? WAIT_WORSTCASE : WAIT_PIDENTIFY) / 2;
 	timeout += jiffies;
@@ -353,7 +353,7 @@
 	 * interrupts during the identify-phase that
 	 * the irq handler isn't expecting.
 	 */
-	if (IDE_CONTROL_REG) {
+	if (hwif->io_ports[IDE_CONTROL_OFFSET]) {
 		if (!hwif->irq) {
 			autoprobe = 1;
 			cookie = probe_irq_on();
@@ -445,7 +445,8 @@
 	msleep(50);
 	SELECT_DRIVE(drive);
 	msleep(50);
-	if (hwif->INB(IDE_SELECT_REG) != drive->select.all && !drive->present) {
+	if (hwif->INB(hwif->io_ports[IDE_SELECT_OFFSET]) != drive->select.all &&
+	    !drive->present) {
 		if (drive->select.b.unit != 0) {
 			/* exit with drive0 selected */
 			SELECT_DRIVE(&hwif->drives[0]);
@@ -477,9 +478,11 @@
 			printk(KERN_ERR "%s: no response (status = 0x%02x), "
 					"resetting drive\n", drive->name, stat);
 			msleep(50);
-			hwif->OUTB(drive->select.all, IDE_SELECT_REG);
+			hwif->OUTB(drive->select.all,
+				   hwif->io_ports[IDE_SELECT_OFFSET]);
 			msleep(50);
-			hwif->OUTB(WIN_SRST, IDE_COMMAND_REG);
+			hwif->OUTB(WIN_SRST,
+				   hwif->io_ports[IDE_COMMAND_OFFSET]);
 			(void)ide_busy_sleep(hwif);
 			rc = try_to_identify(drive, cmd);
 		}
@@ -515,7 +518,7 @@
 	printk("%s: enabling %s -- ", hwif->name, drive->id->model);
 	SELECT_DRIVE(drive);
 	msleep(50);
-	hwif->OUTB(EXABYTE_ENABLE_NEST, IDE_COMMAND_REG);
+	hwif->OUTB(EXABYTE_ENABLE_NEST, hwif->io_ports[IDE_COMMAND_OFFSET]);
 
 	if (ide_busy_sleep(hwif)) {
 		printk(KERN_CONT "failed (timeout)\n");
@@ -623,7 +626,7 @@
 	complete(&hwif->gendev_rel_comp);
 }
 
-static void ide_register_port(ide_hwif_t *hwif)
+static int ide_register_port(ide_hwif_t *hwif)
 {
 	int ret;
 
@@ -639,9 +642,24 @@
 	}
 	hwif->gendev.release = hwif_release_dev;
 	ret = device_register(&hwif->gendev);
-	if (ret < 0)
+	if (ret < 0) {
 		printk(KERN_WARNING "IDE: %s: device_register error: %d\n",
 			__FUNCTION__, ret);
+		goto out;
+	}
+
+	get_device(&hwif->gendev);
+
+	hwif->portdev = device_create(ide_port_class, &hwif->gendev,
+				      MKDEV(0, 0), hwif->name);
+	if (IS_ERR(hwif->portdev)) {
+		ret = PTR_ERR(hwif->portdev);
+		device_unregister(&hwif->gendev);
+		goto out;	/* portdev is an ERR_PTR; don't dereference it */
+	}
+	dev_set_drvdata(hwif->portdev, hwif);
+out:
+	return ret;
 }
 
 /**
@@ -949,6 +966,7 @@
 {
 	int i;
 
+	mutex_lock(&ide_cfg_mtx);
 	for (i = 0; i < MAX_DRIVES; i++) {
 		ide_drive_t *drive = &hwif->drives[i];
 
@@ -963,6 +981,7 @@
 
 		ide_add_drive_to_hwgroup(drive);
 	}
+	mutex_unlock(&ide_cfg_mtx);
 }
 
 /*
@@ -1088,8 +1107,6 @@
 			hwif->sharing_irq ? "shar" : "serializ", match->name);
 	printk("\n");
 
-	ide_port_setup_devices(hwif);
-
 	mutex_unlock(&ide_cfg_mtx);
 	return 0;
 out_unlink:
@@ -1199,6 +1216,8 @@
 {
 	ide_drive_t *drive = container_of(dev, ide_drive_t, gendev);
 
+	ide_proc_unregister_device(drive);
+
 	spin_lock_irq(&ide_lock);
 	ide_remove_drive_from_hwgroup(drive);
 	kfree(drive->id);
@@ -1214,6 +1233,10 @@
 	complete(&drive->gendev_rel_comp);
 }
 
+#ifndef ide_default_irq
+#define ide_default_irq(irq) 0
+#endif
+
 static int hwif_init(ide_hwif_t *hwif)
 {
 	int old_irq;
@@ -1225,13 +1248,6 @@
 			return 0;
 		}
 	}
-#ifdef CONFIG_BLK_DEV_HD
-	if (hwif->irq == HD_IRQ && hwif->io_ports[IDE_DATA_OFFSET] != HD_DATA) {
-		printk("%s: CANNOT SHARE IRQ WITH OLD "
-			"HARDDISK DRIVER (hd.c)\n", hwif->name);
-		return 0;
-	}
-#endif /* CONFIG_BLK_DEV_HD */
 
 	if (register_blkdev(hwif->major, hwif->name))
 		return 0;
@@ -1366,13 +1382,68 @@
 	/* call chipset specific routine for each enabled port */
 	if (d->init_hwif)
 		d->init_hwif(hwif);
+}
 
+static void ide_port_cable_detect(ide_hwif_t *hwif)
+{
 	if (hwif->cable_detect && (hwif->ultra_mask & 0x78)) {
 		if (hwif->cbl != ATA_CBL_PATA40_SHORT)
 			hwif->cbl = hwif->cable_detect(hwif);
 	}
 }
 
+static ssize_t store_delete_devices(struct device *portdev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t n)
+{
+	ide_hwif_t *hwif = dev_get_drvdata(portdev);
+
+	if (strncmp(buf, "1", n))
+		return -EINVAL;
+
+	ide_port_unregister_devices(hwif);
+
+	return n;
+}
+
+static DEVICE_ATTR(delete_devices, S_IWUSR, NULL, store_delete_devices);
+
+static ssize_t store_scan(struct device *portdev,
+			  struct device_attribute *attr,
+			  const char *buf, size_t n)
+{
+	ide_hwif_t *hwif = dev_get_drvdata(portdev);
+
+	if (strncmp(buf, "1", n))
+		return -EINVAL;
+
+	ide_port_unregister_devices(hwif);
+	ide_port_scan(hwif);
+
+	return n;
+}
+
+static DEVICE_ATTR(scan, S_IWUSR, NULL, store_scan);
+
+static struct device_attribute *ide_port_attrs[] = {
+	&dev_attr_delete_devices,
+	&dev_attr_scan,
+	NULL
+};
+
+static int ide_sysfs_register_port(ide_hwif_t *hwif)
+{
+	int i, rc = 0;
+
+	for (i = 0; ide_port_attrs[i]; i++) {
+		rc = device_create_file(hwif->portdev, ide_port_attrs[i]);
+		if (rc)
+			break;
+	}
+
+	return rc;
+}
+
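
ide_sysfs_register_port() attaches the two write-only attributes above to the port's class device created in ide_register_port() earlier in this file. Note that strncmp(buf, "1", n) compares the full write length, so a trailing newline is rejected and echo needs -n. Assumed usage (the ide_port class name and the device naming come from code outside this hunk):

	/*
	 *	echo -n 1 > /sys/class/ide_port/ide0/delete_devices
	 *	echo -n 1 > /sys/class/ide_port/ide0/scan
	 */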
 int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
 {
 	ide_hwif_t *hwif, *mate = NULL;
@@ -1394,6 +1465,7 @@
 		mate = (i & 1) ? NULL : hwif;
 
 		ide_init_port(hwif, i & 1, d);
+		ide_port_cable_detect(hwif);
 		ide_port_init_devices(hwif);
 	}
 
@@ -1441,6 +1513,8 @@
 			continue;
 		}
 
+		ide_port_setup_devices(hwif);
+
 		ide_acpi_init(hwif);
 		ide_acpi_port_init_devices(hwif);
 	}
@@ -1452,8 +1526,7 @@
 		hwif = &ide_hwifs[idx[i]];
 
 		if (hwif->present) {
-			if (hwif->chipset == ide_unknown ||
-			    hwif->chipset == ide_forced)
+			if (hwif->chipset == ide_unknown)
 				hwif->chipset = ide_generic;
 			hwif_register_devices(hwif);
 		}
@@ -1466,6 +1539,7 @@
 		hwif = &ide_hwifs[idx[i]];
 
 		if (hwif->present) {
+			ide_sysfs_register_port(hwif);
 			ide_proc_register_port(hwif);
 			ide_proc_port_register_devices(hwif);
 		}
@@ -1486,3 +1560,21 @@
 	return ide_device_add_all(idx_all, d);
 }
 EXPORT_SYMBOL_GPL(ide_device_add);
+
+void ide_port_scan(ide_hwif_t *hwif)
+{
+	ide_port_cable_detect(hwif);
+	ide_port_init_devices(hwif);
+
+	if (ide_probe_port(hwif) < 0)
+		return;
+
+	hwif->present = 1;
+
+	ide_port_tune_devices(hwif);
+	ide_acpi_port_init_devices(hwif);
+	ide_port_setup_devices(hwif);
+	hwif_register_devices(hwif);
+	ide_proc_port_register_devices(hwif);
+}
+EXPORT_SYMBOL_GPL(ide_port_scan);
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index bab88ca..edd7f18 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -46,9 +46,6 @@
 	int		len;
 	const char	*name;
 
-	/*
-	 * Neither ide_unknown nor ide_forced should be set at this point.
-	 */
 	switch (hwif->chipset) {
 		case ide_generic:	name = "generic";	break;
 		case ide_pci:		name = "pci";		break;
@@ -764,27 +761,16 @@
 	}
 }
 
-static void destroy_proc_ide_device(ide_hwif_t *hwif, ide_drive_t *drive)
+void ide_proc_unregister_device(ide_drive_t *drive)
 {
 	if (drive->proc) {
 		ide_remove_proc_entries(drive->proc, generic_drive_entries);
 		remove_proc_entry(drive->name, proc_ide_root);
-		remove_proc_entry(drive->name, hwif->proc);
+		remove_proc_entry(drive->name, drive->hwif->proc);
 		drive->proc = NULL;
 	}
 }
 
-static void destroy_proc_ide_drives(ide_hwif_t *hwif)
-{
-	int	d;
-
-	for (d = 0; d < MAX_DRIVES; d++) {
-		ide_drive_t *drive = &hwif->drives[d];
-		if (drive->proc)
-			destroy_proc_ide_device(hwif, drive);
-	}
-}
-
 static ide_proc_entry_t hwif_entries[] = {
 	{ "channel",	S_IFREG|S_IRUGO,	proc_ide_read_channel,	NULL },
 	{ "mate",	S_IFREG|S_IRUGO,	proc_ide_read_mate,	NULL },
@@ -816,7 +802,6 @@
 void ide_proc_unregister_port(ide_hwif_t *hwif)
 {
 	if (hwif->proc) {
-		destroy_proc_ide_drives(hwif);
 		ide_remove_proc_entries(hwif->proc, hwif_entries);
 		remove_proc_entry(hwif->name, proc_ide_root);
 		hwif->proc = NULL;
diff --git a/drivers/ide/ide-scan-pci.c b/drivers/ide/ide-scan-pci.c
index 93d2e41..98888da 100644
--- a/drivers/ide/ide-scan-pci.c
+++ b/drivers/ide/ide-scan-pci.c
@@ -88,13 +88,8 @@
 	struct list_head *l, *n;
 
 	pre_init = 0;
-	if (!ide_scan_direction)
-		while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)))
-			ide_scan_pcidev(dev);
-	else
-		while ((dev = pci_get_device_reverse(PCI_ANY_ID, PCI_ANY_ID,
-						     dev)))
-			ide_scan_pcidev(dev);
+	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)))
+		ide_scan_pcidev(dev);
 
 	/*
 	 *	Hand the drivers over to the PCI layer now we
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 0598ecf..f43fd07 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -181,49 +181,63 @@
 	char *b_data;
 };
 
-typedef struct idetape_packet_command_s {
-	/* Actual packet bytes */
-	u8 c[12];
-	/* On each retry, we increment retries */
-	int retries;
-	/* Error code */
-	int error;
-	/* Bytes to transfer */
-	int request_transfer;
-	/* Bytes actually transferred */
-	int actually_transferred;
-	/* Size of our data buffer */
-	int buffer_size;
-	struct idetape_bh *bh;
-	char *b_data;
-	int b_count;
-	/* Data buffer */
-	u8 *buffer;
-	/* Pointer into the above buffer */
-	u8 *current_position;
-	/* Called when this packet command is completed */
-	ide_startstop_t (*callback) (ide_drive_t *);
-	/* Temporary buffer */
-	u8 pc_buffer[IDETAPE_PC_BUFFER_SIZE];
-	/* Status/Action bit flags: long for set_bit */
-	unsigned long flags;
-} idetape_pc_t;
+/* Tape door status */
+#define DOOR_UNLOCKED			0
+#define DOOR_LOCKED			1
+#define DOOR_EXPLICITLY_LOCKED		2
+
+/* Some defines for the SPACE command */
+#define IDETAPE_SPACE_OVER_FILEMARK	1
+#define IDETAPE_SPACE_TO_EOD		3
+
+/* Some defines for the LOAD UNLOAD command */
+#define IDETAPE_LU_LOAD_MASK		1
+#define IDETAPE_LU_RETENSION_MASK	2
+#define IDETAPE_LU_EOT_MASK		4
 
 /*
- *	Packet command flag bits.
+ * Special requests for our block device strategy routine.
+ *
+ * In order to service a character device command, we add special requests to
+ * the tail of our block device request queue and wait for their completion.
  */
-/* Set when an error is considered normal - We won't retry */
-#define	PC_ABORT			0
-/* 1 When polling for DSC on a media access command */
-#define PC_WAIT_FOR_DSC			1
-/* 1 when we prefer to use DMA if possible */
-#define PC_DMA_RECOMMENDED		2
-/* 1 while DMA in progress */
-#define	PC_DMA_IN_PROGRESS		3
-/* 1 when encountered problem during DMA */
-#define	PC_DMA_ERROR			4
-/* Data direction */
-#define	PC_WRITING			5
+
+enum {
+	REQ_IDETAPE_PC1		= (1 << 0), /* packet command (first stage) */
+	REQ_IDETAPE_PC2		= (1 << 1), /* packet command (second stage) */
+	REQ_IDETAPE_READ	= (1 << 2),
+	REQ_IDETAPE_WRITE	= (1 << 3),
+};
+
+/* Error codes returned in rq->errors to the higher part of the driver. */
+#define IDETAPE_ERROR_GENERAL		101
+#define IDETAPE_ERROR_FILEMARK		102
+#define IDETAPE_ERROR_EOD		103
+
+/* Structures related to the SELECT SENSE / MODE SENSE packet commands. */
+#define IDETAPE_BLOCK_DESCRIPTOR	0
+#define IDETAPE_CAPABILITIES_PAGE	0x2a
+
+/* Tape flag bits values. */
+enum {
+	IDETAPE_FLAG_IGNORE_DSC		= (1 << 0),
+	/* 0 When the tape position is unknown */
+	IDETAPE_FLAG_ADDRESS_VALID	= (1 <<	1),
+	/* Device already opened */
+	IDETAPE_FLAG_BUSY			= (1 << 2),
+	/* Error detected in a pipeline stage */
+	IDETAPE_FLAG_PIPELINE_ERR	= (1 <<	3),
+	/* Attempt to auto-detect the current user block size */
+	IDETAPE_FLAG_DETECT_BS		= (1 << 4),
+	/* Currently on a filemark */
+	IDETAPE_FLAG_FILEMARK		= (1 << 5),
+	/* DRQ interrupt device */
+	IDETAPE_FLAG_DRQ_INTERRUPT	= (1 << 6),
+	/* pipeline active */
+	IDETAPE_FLAG_PIPELINE_ACTIVE	= (1 << 7),
+	/* 0 = no tape is loaded, so we don't rewind after ejecting */
+	IDETAPE_FLAG_MEDIUM_PRESENT	= (1 << 8),
+};
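
A subtlety in this enum: the IDETAPE_FLAG_* values are defined as masks, yet every user in the rest of the patch passes them to set_bit()/clear_bit()/test_bit(), which take bit numbers. The code stays self-consistent because setters and testers agree on the same values; it just means the bits used are sparser than they look, for example:

	/* sets bit number (1 << 3) == 8 of tape->flags, not bit 3 */
	set_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags);

Contrast the PC_FLAG_* values introduced for struct ide_atapi_pc, which are applied as genuine masks with |=, &= and &.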
 
 /* A pipeline stage. */
 typedef struct idetape_stage_s {
@@ -258,11 +272,11 @@
 	 *	retry, to get detailed information on what went wrong.
 	 */
 	/* Current packet command */
-	idetape_pc_t *pc;
+	struct ide_atapi_pc *pc;
 	/* Last failed packet command */
-	idetape_pc_t *failed_pc;
+	struct ide_atapi_pc *failed_pc;
 	/* Packet command stack */
-	idetape_pc_t pc_stack[IDETAPE_PC_STACK];
+	struct ide_atapi_pc pc_stack[IDETAPE_PC_STACK];
 	/* Next free packet command storage space */
 	int pc_stack_index;
 	struct request rq_stack[IDETAPE_PC_STACK];
@@ -446,58 +460,6 @@
 	mutex_unlock(&idetape_ref_mutex);
 }
 
-/* Tape door status */
-#define DOOR_UNLOCKED			0
-#define DOOR_LOCKED			1
-#define DOOR_EXPLICITLY_LOCKED		2
-
-/*
- *	Tape flag bits values.
- */
-#define IDETAPE_IGNORE_DSC		0
-#define IDETAPE_ADDRESS_VALID		1	/* 0 When the tape position is unknown */
-#define IDETAPE_BUSY			2	/* Device already opened */
-#define IDETAPE_PIPELINE_ERROR		3	/* Error detected in a pipeline stage */
-#define IDETAPE_DETECT_BS		4	/* Attempt to auto-detect the current user block size */
-#define IDETAPE_FILEMARK		5	/* Currently on a filemark */
-#define IDETAPE_DRQ_INTERRUPT		6	/* DRQ interrupt device */
-#define IDETAPE_READ_ERROR		7
-#define IDETAPE_PIPELINE_ACTIVE		8	/* pipeline active */
-/* 0 = no tape is loaded, so we don't rewind after ejecting */
-#define IDETAPE_MEDIUM_PRESENT		9
-
-/* Some defines for the SPACE command */
-#define IDETAPE_SPACE_OVER_FILEMARK	1
-#define IDETAPE_SPACE_TO_EOD		3
-
-/* Some defines for the LOAD UNLOAD command */
-#define IDETAPE_LU_LOAD_MASK		1
-#define IDETAPE_LU_RETENSION_MASK	2
-#define IDETAPE_LU_EOT_MASK		4
-
-/*
- * Special requests for our block device strategy routine.
- *
- * In order to service a character device command, we add special requests to
- * the tail of our block device request queue and wait for their completion.
- */
-
-enum {
-	REQ_IDETAPE_PC1		= (1 << 0), /* packet command (first stage) */
-	REQ_IDETAPE_PC2		= (1 << 1), /* packet command (second stage) */
-	REQ_IDETAPE_READ	= (1 << 2),
-	REQ_IDETAPE_WRITE	= (1 << 3),
-};
-
-/* Error codes returned in rq->errors to the higher part of the driver. */
-#define	IDETAPE_ERROR_GENERAL		101
-#define	IDETAPE_ERROR_FILEMARK		102
-#define	IDETAPE_ERROR_EOD		103
-
-/* Structures related to the SELECT SENSE / MODE SENSE packet commands. */
-#define IDETAPE_BLOCK_DESCRIPTOR	0
-#define	IDETAPE_CAPABILITIES_PAGE	0x2a
-
 /*
  * The variables below are used for the character device interface. Additional
  * state variables are defined in our ide_drive_t structure.
@@ -518,17 +480,7 @@
 	return tape;
 }
 
-/*
- * Too bad. The drive wants to send us data which we are not ready to accept.
- * Just throw it away.
- */
-static void idetape_discard_data(ide_drive_t *drive, unsigned int bcount)
-{
-	while (bcount--)
-		(void) HWIF(drive)->INB(IDE_DATA_REG);
-}
-
-static void idetape_input_buffers(ide_drive_t *drive, idetape_pc_t *pc,
+static void idetape_input_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
 				  unsigned int bcount)
 {
 	struct idetape_bh *bh = pc->bh;
@@ -538,7 +490,7 @@
 		if (bh == NULL) {
 			printk(KERN_ERR "ide-tape: bh == NULL in "
 				"idetape_input_buffers\n");
-			idetape_discard_data(drive, bcount);
+			ide_atapi_discard_data(drive, bcount);
 			return;
 		}
 		count = min(
@@ -557,7 +509,7 @@
 	pc->bh = bh;
 }
 
-static void idetape_output_buffers(ide_drive_t *drive, idetape_pc_t *pc,
+static void idetape_output_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
 				   unsigned int bcount)
 {
 	struct idetape_bh *bh = pc->bh;
@@ -585,13 +537,13 @@
 	}
 }
 
-static void idetape_update_buffers(idetape_pc_t *pc)
+static void idetape_update_buffers(struct ide_atapi_pc *pc)
 {
 	struct idetape_bh *bh = pc->bh;
 	int count;
-	unsigned int bcount = pc->actually_transferred;
+	unsigned int bcount = pc->xferred;
 
-	if (test_bit(PC_WRITING, &pc->flags))
+	if (pc->flags & PC_FLAG_WRITING)
 		return;
 	while (bcount) {
 		if (bh == NULL) {
@@ -614,7 +566,7 @@
  *	driver. A storage space for a maximum of IDETAPE_PC_STACK packet
  *	commands is allocated at initialization time.
  */
-static idetape_pc_t *idetape_next_pc_storage(ide_drive_t *drive)
+static struct ide_atapi_pc *idetape_next_pc_storage(ide_drive_t *drive)
 {
 	idetape_tape_t *tape = drive->driver_data;
 
@@ -649,14 +601,14 @@
 	return (&tape->rq_stack[tape->rq_stack_index++]);
 }
 
-static void idetape_init_pc(idetape_pc_t *pc)
+static void idetape_init_pc(struct ide_atapi_pc *pc)
 {
 	memset(pc->c, 0, 12);
 	pc->retries = 0;
 	pc->flags = 0;
-	pc->request_transfer = 0;
-	pc->buffer = pc->pc_buffer;
-	pc->buffer_size = IDETAPE_PC_BUFFER_SIZE;
+	pc->req_xfer = 0;
+	pc->buf = pc->pc_buf;
+	pc->buf_size = IDETAPE_PC_BUFFER_SIZE;
 	pc->bh = NULL;
 	pc->b_data = NULL;
 }
@@ -668,7 +620,7 @@
 static void idetape_analyze_error(ide_drive_t *drive, u8 *sense)
 {
 	idetape_tape_t *tape = drive->driver_data;
-	idetape_pc_t *pc = tape->failed_pc;
+	struct ide_atapi_pc *pc = tape->failed_pc;
 
 	tape->sense_key = sense[2] & 0xF;
 	tape->asc       = sense[12];
@@ -677,9 +629,9 @@
 	debug_log(DBG_ERR, "pc = %x, sense key = %x, asc = %x, ascq = %x\n",
 		 pc->c[0], tape->sense_key, tape->asc, tape->ascq);
 
-	/* Correct pc->actually_transferred by asking the tape.	 */
-	if (test_bit(PC_DMA_ERROR, &pc->flags)) {
-		pc->actually_transferred = pc->request_transfer -
+	/* Correct pc->xferred by asking the tape. */
+	if (pc->flags & PC_FLAG_DMA_ERROR) {
+		pc->xferred = pc->req_xfer -
 			tape->blk_size *
 			be32_to_cpu(get_unaligned((u32 *)&sense[3]));
 		idetape_update_buffers(pc);
@@ -697,27 +649,26 @@
 			/* don't report an error, everything's ok */
 			pc->error = 0;
 			/* don't retry read/write */
-			set_bit(PC_ABORT, &pc->flags);
+			pc->flags |= PC_FLAG_ABORT;
 		}
 	}
 	if (pc->c[0] == READ_6 && (sense[2] & 0x80)) {
 		pc->error = IDETAPE_ERROR_FILEMARK;
-		set_bit(PC_ABORT, &pc->flags);
+		pc->flags |= PC_FLAG_ABORT;
 	}
 	if (pc->c[0] == WRITE_6) {
 		if ((sense[2] & 0x40) || (tape->sense_key == 0xd
 		     && tape->asc == 0x0 && tape->ascq == 0x2)) {
 			pc->error = IDETAPE_ERROR_EOD;
-			set_bit(PC_ABORT, &pc->flags);
+			pc->flags |= PC_FLAG_ABORT;
 		}
 	}
 	if (pc->c[0] == READ_6 || pc->c[0] == WRITE_6) {
 		if (tape->sense_key == 8) {
 			pc->error = IDETAPE_ERROR_EOD;
-			set_bit(PC_ABORT, &pc->flags);
+			pc->flags |= PC_FLAG_ABORT;
 		}
-		if (!test_bit(PC_ABORT, &pc->flags) &&
-		    pc->actually_transferred)
+		if (!(pc->flags & PC_FLAG_ABORT) && pc->xferred)
 			pc->retries = IDETAPE_MAX_PC_RETRIES + 1;
 	}
 }
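
The packet-command flags go the other way: from atomic bitops on an unsigned long to plain mask arithmetic. That is safe because a struct ide_atapi_pc is only ever touched by one context at a time, so no atomic read-modify-write is needed. The translation pattern applied throughout the file:

	/* old: flag is a bit number, atomic helpers */
	set_bit(PC_ABORT, &pc->flags);
	if (test_and_clear_bit(PC_DMA_ERROR, &pc->flags))
		ide_dma_off(drive);

	/* new: flag is a mask, ordinary integer ops */
	pc->flags |= PC_FLAG_ABORT;
	if (pc->flags & PC_FLAG_DMA_ERROR) {
		pc->flags &= ~PC_FLAG_DMA_ERROR;
		ide_dma_off(drive);
	}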
@@ -872,14 +824,16 @@
 		if (rq->cmd[0] & REQ_IDETAPE_WRITE) {
 			remove_stage = 1;
 			if (error) {
-				set_bit(IDETAPE_PIPELINE_ERROR, &tape->flags);
+				set_bit(IDETAPE_FLAG_PIPELINE_ERR,
+					&tape->flags);
 				if (error == IDETAPE_ERROR_EOD)
 					idetape_abort_pipeline(drive,
 								active_stage);
 			}
 		} else if (rq->cmd[0] & REQ_IDETAPE_READ) {
 			if (error == IDETAPE_ERROR_EOD) {
-				set_bit(IDETAPE_PIPELINE_ERROR, &tape->flags);
+				set_bit(IDETAPE_FLAG_PIPELINE_ERR,
+					&tape->flags);
 				idetape_abort_pipeline(drive, active_stage);
 			}
 		}
@@ -912,7 +866,7 @@
 	if (remove_stage)
 		idetape_remove_stage_head(drive);
 	if (tape->active_data_rq == NULL)
-		clear_bit(IDETAPE_PIPELINE_ACTIVE, &tape->flags);
+		clear_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags);
 	spin_unlock_irqrestore(&tape->lock, flags);
 	return 0;
 }
@@ -924,7 +878,7 @@
 	debug_log(DBG_PROCS, "Enter %s\n", __func__);
 
 	if (!tape->pc->error) {
-		idetape_analyze_error(drive, tape->pc->buffer);
+		idetape_analyze_error(drive, tape->pc->buf);
 		idetape_end_request(drive, 1, 0);
 	} else {
 		printk(KERN_ERR "ide-tape: Error in REQUEST SENSE itself - "
@@ -934,13 +888,13 @@
 	return ide_stopped;
 }
 
-static void idetape_create_request_sense_cmd(idetape_pc_t *pc)
+static void idetape_create_request_sense_cmd(struct ide_atapi_pc *pc)
 {
 	idetape_init_pc(pc);
 	pc->c[0] = REQUEST_SENSE;
 	pc->c[4] = 20;
-	pc->request_transfer = 20;
-	pc->callback = &idetape_request_sense_callback;
+	pc->req_xfer = 20;
+	pc->idetape_callback = &idetape_request_sense_callback;
 }
 
 static void idetape_init_rq(struct request *rq, u8 cmd)
@@ -965,7 +919,7 @@
  * handling functions should queue request to the lower level part and wait for
  * their completion using idetape_queue_pc_tail or idetape_queue_rw_tail.
  */
-static void idetape_queue_pc_head(ide_drive_t *drive, idetape_pc_t *pc,
+static void idetape_queue_pc_head(ide_drive_t *drive, struct ide_atapi_pc *pc,
 				  struct request *rq)
 {
 	struct ide_tape_obj *tape = drive->driver_data;
@@ -984,14 +938,14 @@
 static ide_startstop_t idetape_retry_pc (ide_drive_t *drive)
 {
 	idetape_tape_t *tape = drive->driver_data;
-	idetape_pc_t *pc;
+	struct ide_atapi_pc *pc;
 	struct request *rq;
 
 	(void)ide_read_error(drive);
 	pc = idetape_next_pc_storage(drive);
 	rq = idetape_next_rq_storage(drive);
 	idetape_create_request_sense_cmd(pc);
-	set_bit(IDETAPE_IGNORE_DSC, &tape->flags);
+	set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags);
 	idetape_queue_pc_head(drive, pc, rq);
 	return ide_stopped;
 }
@@ -1010,7 +964,7 @@
 	ide_stall_queue(drive, tape->dsc_poll_freq);
 }
 
-typedef void idetape_io_buf(ide_drive_t *, idetape_pc_t *, unsigned int);
+typedef void idetape_io_buf(ide_drive_t *, struct ide_atapi_pc *, unsigned int);
 
 /*
  * This is the usual interrupt handler which will be called during a packet
@@ -1023,7 +977,7 @@
 {
 	ide_hwif_t *hwif = drive->hwif;
 	idetape_tape_t *tape = drive->driver_data;
-	idetape_pc_t *pc = tape->pc;
+	struct ide_atapi_pc *pc = tape->pc;
 	xfer_func_t *xferfunc;
 	idetape_io_buf *iobuf;
 	unsigned int temp;
@@ -1038,7 +992,7 @@
 	/* Clear the interrupt */
 	stat = ide_read_status(drive);
 
-	if (test_bit(PC_DMA_IN_PROGRESS, &pc->flags)) {
+	if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
 		if (hwif->ide_dma_end(drive) || (stat & ERR_STAT)) {
 			/*
 			 * A DMA error is sometimes expected. For example,
@@ -1061,9 +1015,9 @@
 			 * data transfer will occur, but no DMA error.
 			 * (AS, 19 Apr 2001)
 			 */
-			set_bit(PC_DMA_ERROR, &pc->flags);
+			pc->flags |= PC_FLAG_DMA_ERROR;
 		} else {
-			pc->actually_transferred = pc->request_transfer;
+			pc->xferred = pc->req_xfer;
 			idetape_update_buffers(pc);
 		}
 		debug_log(DBG_PROCS, "DMA finished\n");
@@ -1073,9 +1027,9 @@
 	/* No more interrupts */
 	if ((stat & DRQ_STAT) == 0) {
 		debug_log(DBG_SENSE, "Packet command completed, %d bytes"
-				" transferred\n", pc->actually_transferred);
+				" transferred\n", pc->xferred);
 
-		clear_bit(PC_DMA_IN_PROGRESS, &pc->flags);
+		pc->flags &= ~PC_FLAG_DMA_IN_PROGRESS;
 		local_irq_enable();
 
 #if SIMULATE_ERRORS
@@ -1088,7 +1042,7 @@
 #endif
 		if ((stat & ERR_STAT) && pc->c[0] == REQUEST_SENSE)
 			stat &= ~ERR_STAT;
-		if ((stat & ERR_STAT) || test_bit(PC_DMA_ERROR, &pc->flags)) {
+		if ((stat & ERR_STAT) || (pc->flags & PC_FLAG_DMA_ERROR)) {
 			/* Error detected */
 			debug_log(DBG_ERR, "%s: I/O error\n", tape->name);
 
@@ -1104,7 +1058,7 @@
 			return idetape_retry_pc(drive);
 		}
 		pc->error = 0;
-		if (test_bit(PC_WAIT_FOR_DSC, &pc->flags) &&
+		if ((pc->flags & PC_FLAG_WAIT_FOR_DSC) &&
 		    (stat & SEEK_STAT) == 0) {
 			/* Media access command */
 			tape->dsc_polling_start = jiffies;
@@ -1117,9 +1071,11 @@
 		if (tape->failed_pc == pc)
 			tape->failed_pc = NULL;
 		/* Command finished - Call the callback function */
-		return pc->callback(drive);
+		return pc->idetape_callback(drive);
 	}
-	if (test_and_clear_bit(PC_DMA_IN_PROGRESS, &pc->flags)) {
+
+	if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
+		pc->flags &= ~PC_FLAG_DMA_IN_PROGRESS;
 		printk(KERN_ERR "ide-tape: The tape wants to issue more "
 				"interrupts in DMA mode\n");
 		printk(KERN_ERR "ide-tape: DMA disabled, reverting to PIO\n");
@@ -1127,16 +1083,16 @@
 		return ide_do_reset(drive);
 	}
 	/* Get the number of bytes to transfer on this interrupt. */
-	bcount = (hwif->INB(IDE_BCOUNTH_REG) << 8) |
-		  hwif->INB(IDE_BCOUNTL_REG);
+	bcount = (hwif->INB(hwif->io_ports[IDE_BCOUNTH_OFFSET]) << 8) |
+		  hwif->INB(hwif->io_ports[IDE_BCOUNTL_OFFSET]);
 
-	ireason = hwif->INB(IDE_IREASON_REG);
+	ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
 
 	if (ireason & CD) {
 		printk(KERN_ERR "ide-tape: CoD != 0 in %s\n", __func__);
 		return ide_do_reset(drive);
 	}
-	if (((ireason & IO) == IO) == test_bit(PC_WRITING, &pc->flags)) {
+	if (((ireason & IO) == IO) == !!(pc->flags & PC_FLAG_WRITING)) {
 		/* Hopefully, we will never get here */
 		printk(KERN_ERR "ide-tape: We wanted to %s, ",
 				(ireason & IO) ? "Write" : "Read");
@@ -1144,15 +1100,15 @@
 				(ireason & IO) ? "Read" : "Write");
 		return ide_do_reset(drive);
 	}
-	if (!test_bit(PC_WRITING, &pc->flags)) {
+	if (!(pc->flags & PC_FLAG_WRITING)) {
 		/* Reading - Check that we have enough space */
-		temp = pc->actually_transferred + bcount;
-		if (temp > pc->request_transfer) {
-			if (temp > pc->buffer_size) {
+		temp = pc->xferred + bcount;
+		if (temp > pc->req_xfer) {
+			if (temp > pc->buf_size) {
 				printk(KERN_ERR "ide-tape: The tape wants to "
 					"send us more data than expected "
 					"- discarding data\n");
-				idetape_discard_data(drive, bcount);
+				ide_atapi_discard_data(drive, bcount);
 				ide_set_handler(drive, &idetape_pc_intr,
 						IDETAPE_WAIT_CMD, NULL);
 				return ide_started;
@@ -1170,11 +1126,11 @@
 	if (pc->bh)
 		iobuf(drive, pc, bcount);
 	else
-		xferfunc(drive, pc->current_position, bcount);
+		xferfunc(drive, pc->cur_pos, bcount);
 
 	/* Update the current position */
-	pc->actually_transferred += bcount;
-	pc->current_position += bcount;
+	pc->xferred += bcount;
+	pc->cur_pos += bcount;
 
 	debug_log(DBG_SENSE, "[cmd %x] transferred %d bytes on that intr.\n",
 			pc->c[0], bcount);
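
In the PIO branch above the handler pulls the transfer size and direction straight from the task file: the byte count lives in the cylinder-low/high pair (IDE_BCOUNTL/IDE_BCOUNTH) and the interrupt-reason register supplies the ATAPI phase bits, roughly:

	/* ireason bits: CD = 1 means the device wants a command packet,
	 * CD = 0 a data phase; IO = 1 means device-to-host. A data-in
	 * phase is therefore CD=0, IO=1, which is what the CD and
	 * PC_FLAG_WRITING checks above enforce before moving bytes. */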
@@ -1224,7 +1180,7 @@
 {
 	ide_hwif_t *hwif = drive->hwif;
 	idetape_tape_t *tape = drive->driver_data;
-	idetape_pc_t *pc = tape->pc;
+	struct ide_atapi_pc *pc = tape->pc;
 	int retries = 100;
 	ide_startstop_t startstop;
 	u8 ireason;
@@ -1234,12 +1190,12 @@
 				"yet DRQ isn't asserted\n");
 		return startstop;
 	}
-	ireason = hwif->INB(IDE_IREASON_REG);
+	ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
 	while (retries-- && ((ireason & CD) == 0 || (ireason & IO))) {
 		printk(KERN_ERR "ide-tape: (IO,CoD != (0,1) while issuing "
 				"a packet command, retrying\n");
 		udelay(100);
-		ireason = hwif->INB(IDE_IREASON_REG);
+		ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
 		if (retries == 0) {
 			printk(KERN_ERR "ide-tape: (IO,CoD != (0,1) while "
 					"issuing a packet command, ignoring\n");
@@ -1256,7 +1212,7 @@
 	ide_set_handler(drive, &idetape_pc_intr, IDETAPE_WAIT_CMD, NULL);
 #ifdef CONFIG_BLK_DEV_IDEDMA
 	/* Begin DMA, if necessary */
-	if (test_bit(PC_DMA_IN_PROGRESS, &pc->flags))
+	if (pc->flags & PC_FLAG_DMA_IN_PROGRESS)
 		hwif->dma_start(drive);
 #endif
 	/* Send the actual packet */
@@ -1264,7 +1220,8 @@
 	return ide_started;
 }
 
-static ide_startstop_t idetape_issue_pc(ide_drive_t *drive, idetape_pc_t *pc)
+static ide_startstop_t idetape_issue_pc(ide_drive_t *drive,
+		struct ide_atapi_pc *pc)
 {
 	ide_hwif_t *hwif = drive->hwif;
 	idetape_tape_t *tape = drive->driver_data;
@@ -1283,13 +1240,13 @@
 	tape->pc = pc;
 
 	if (pc->retries > IDETAPE_MAX_PC_RETRIES ||
-	    test_bit(PC_ABORT, &pc->flags)) {
+		(pc->flags & PC_FLAG_ABORT)) {
 		/*
 		 * We will "abort" retrying a packet command in case legitimate
 		 * error code was received (crossing a filemark, or end of the
 		 * media, for example).
 		 */
-		if (!test_bit(PC_ABORT, &pc->flags)) {
+		if (!(pc->flags & PC_FLAG_ABORT)) {
 			if (!(pc->c[0] == TEST_UNIT_READY &&
 			      tape->sense_key == 2 && tape->asc == 4 &&
 			     (tape->ascq == 1 || tape->ascq == 8))) {
@@ -1304,36 +1261,38 @@
 			pc->error = IDETAPE_ERROR_GENERAL;
 		}
 		tape->failed_pc = NULL;
-		return pc->callback(drive);
+		return pc->idetape_callback(drive);
 	}
 	debug_log(DBG_SENSE, "Retry #%d, cmd = %02X\n", pc->retries, pc->c[0]);
 
 	pc->retries++;
 	/* We haven't transferred any data yet */
-	pc->actually_transferred = 0;
-	pc->current_position = pc->buffer;
+	pc->xferred = 0;
+	pc->cur_pos = pc->buf;
 	/* Request to transfer the entire buffer at once */
-	bcount = pc->request_transfer;
+	bcount = pc->req_xfer;
 
-	if (test_and_clear_bit(PC_DMA_ERROR, &pc->flags)) {
+	if (pc->flags & PC_FLAG_DMA_ERROR) {
+		pc->flags &= ~PC_FLAG_DMA_ERROR;
 		printk(KERN_WARNING "ide-tape: DMA disabled, "
 				"reverting to PIO\n");
 		ide_dma_off(drive);
 	}
-	if (test_bit(PC_DMA_RECOMMENDED, &pc->flags) && drive->using_dma)
+	if ((pc->flags & PC_FLAG_DMA_RECOMMENDED) && drive->using_dma)
 		dma_ok = !hwif->dma_setup(drive);
 
 	ide_pktcmd_tf_load(drive, IDE_TFLAG_NO_SELECT_MASK |
 			   IDE_TFLAG_OUT_DEVICE, bcount, dma_ok);
 
-	if (dma_ok)			/* Will begin DMA later */
-		set_bit(PC_DMA_IN_PROGRESS, &pc->flags);
-	if (test_bit(IDETAPE_DRQ_INTERRUPT, &tape->flags)) {
+	if (dma_ok)
+		/* Will begin DMA later */
+		pc->flags |= PC_FLAG_DMA_IN_PROGRESS;
+	if (test_bit(IDETAPE_FLAG_DRQ_INTERRUPT, &tape->flags)) {
 		ide_execute_command(drive, WIN_PACKETCMD, &idetape_transfer_pc,
 				    IDETAPE_WAIT_CMD, NULL);
 		return ide_started;
 	} else {
-		hwif->OUTB(WIN_PACKETCMD, IDE_COMMAND_REG);
+		hwif->OUTB(WIN_PACKETCMD, hwif->io_ports[IDE_COMMAND_OFFSET]);
 		return idetape_transfer_pc(drive);
 	}
 }
@@ -1349,7 +1308,7 @@
 }
 
 /* A mode sense command is used to "sense" tape parameters. */
-static void idetape_create_mode_sense_cmd(idetape_pc_t *pc, u8 page_code)
+static void idetape_create_mode_sense_cmd(struct ide_atapi_pc *pc, u8 page_code)
 {
 	idetape_init_pc(pc);
 	pc->c[0] = MODE_SENSE;
@@ -1368,12 +1327,12 @@
 	/* We will just discard data in that case */
 	pc->c[4] = 255;
 	if (page_code == IDETAPE_BLOCK_DESCRIPTOR)
-		pc->request_transfer = 12;
+		pc->req_xfer = 12;
 	else if (page_code == IDETAPE_CAPABILITIES_PAGE)
-		pc->request_transfer = 24;
+		pc->req_xfer = 24;
 	else
-		pc->request_transfer = 50;
-	pc->callback = &idetape_pc_callback;
+		pc->req_xfer = 50;
+	pc->idetape_callback = &idetape_pc_callback;
 }
 
 static void idetape_calculate_speeds(ide_drive_t *drive)
@@ -1442,7 +1401,7 @@
 static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive)
 {
 	idetape_tape_t *tape = drive->driver_data;
-	idetape_pc_t *pc = tape->pc;
+	struct ide_atapi_pc *pc = tape->pc;
 	u8 stat;
 
 	stat = ide_read_status(drive);
@@ -1463,14 +1422,14 @@
 		pc->error = IDETAPE_ERROR_GENERAL;
 		tape->failed_pc = NULL;
 	}
-	return pc->callback(drive);
+	return pc->idetape_callback(drive);
 }
 
 static ide_startstop_t idetape_rw_callback(ide_drive_t *drive)
 {
 	idetape_tape_t *tape = drive->driver_data;
 	struct request *rq = HWGROUP(drive)->rq;
-	int blocks = tape->pc->actually_transferred / tape->blk_size;
+	int blocks = tape->pc->xferred / tape->blk_size;
 
 	tape->avg_size += blocks * tape->blk_size;
 	tape->insert_size += blocks * tape->blk_size;
@@ -1502,47 +1461,49 @@
 	return ide_stopped;
 }
 
-static void idetape_create_read_cmd(idetape_tape_t *tape, idetape_pc_t *pc,
+static void idetape_create_read_cmd(idetape_tape_t *tape,
+		struct ide_atapi_pc *pc,
 		unsigned int length, struct idetape_bh *bh)
 {
 	idetape_init_pc(pc);
 	pc->c[0] = READ_6;
 	put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]);
 	pc->c[1] = 1;
-	pc->callback = &idetape_rw_callback;
+	pc->idetape_callback = &idetape_rw_callback;
 	pc->bh = bh;
 	atomic_set(&bh->b_count, 0);
-	pc->buffer = NULL;
-	pc->buffer_size = length * tape->blk_size;
-	pc->request_transfer = pc->buffer_size;
-	if (pc->request_transfer == tape->stage_size)
-		set_bit(PC_DMA_RECOMMENDED, &pc->flags);
+	pc->buf = NULL;
+	pc->buf_size = length * tape->blk_size;
+	pc->req_xfer = pc->buf_size;
+	if (pc->req_xfer == tape->stage_size)
+		pc->flags |= PC_FLAG_DMA_RECOMMENDED;
 }
 
-static void idetape_create_write_cmd(idetape_tape_t *tape, idetape_pc_t *pc,
+static void idetape_create_write_cmd(idetape_tape_t *tape,
+		struct ide_atapi_pc *pc,
 		unsigned int length, struct idetape_bh *bh)
 {
 	idetape_init_pc(pc);
 	pc->c[0] = WRITE_6;
 	put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]);
 	pc->c[1] = 1;
-	pc->callback = &idetape_rw_callback;
-	set_bit(PC_WRITING, &pc->flags);
+	pc->idetape_callback = &idetape_rw_callback;
+	pc->flags |= PC_FLAG_WRITING;
 	pc->bh = bh;
 	pc->b_data = bh->b_data;
 	pc->b_count = atomic_read(&bh->b_count);
-	pc->buffer = NULL;
-	pc->buffer_size = length * tape->blk_size;
-	pc->request_transfer = pc->buffer_size;
-	if (pc->request_transfer == tape->stage_size)
-		set_bit(PC_DMA_RECOMMENDED, &pc->flags);
+	pc->buf = NULL;
+	pc->buf_size = length * tape->blk_size;
+	pc->req_xfer = pc->buf_size;
+	if (pc->req_xfer == tape->stage_size)
+		pc->flags |= PC_FLAG_DMA_RECOMMENDED;
 }
 
 static ide_startstop_t idetape_do_request(ide_drive_t *drive,
 					  struct request *rq, sector_t block)
 {
 	idetape_tape_t *tape = drive->driver_data;
-	idetape_pc_t *pc = NULL;
+	struct ide_atapi_pc *pc = NULL;
 	struct request *postponed_rq = tape->postponed_rq;
 	u8 stat;
 
@@ -1579,10 +1540,10 @@
 	stat = ide_read_status(drive);
 
 	if (!drive->dsc_overlap && !(rq->cmd[0] & REQ_IDETAPE_PC2))
-		set_bit(IDETAPE_IGNORE_DSC, &tape->flags);
+		set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags);
 
 	if (drive->post_reset == 1) {
-		set_bit(IDETAPE_IGNORE_DSC, &tape->flags);
+		set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags);
 		drive->post_reset = 0;
 	}
 
@@ -1590,7 +1551,7 @@
 		tape->insert_speed = tape->insert_size / 1024 * HZ /
 					(jiffies - tape->insert_time);
 	idetape_calculate_speeds(drive);
-	if (!test_and_clear_bit(IDETAPE_IGNORE_DSC, &tape->flags) &&
+	if (!test_and_clear_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags) &&
 	    (stat & SEEK_STAT) == 0) {
 		if (postponed_rq == NULL) {
 			tape->dsc_polling_start = jiffies;
@@ -1629,7 +1590,7 @@
 		goto out;
 	}
 	if (rq->cmd[0] & REQ_IDETAPE_PC1) {
-		pc = (idetape_pc_t *) rq->buffer;
+		pc = (struct ide_atapi_pc *) rq->buffer;
 		rq->cmd[0] &= ~(REQ_IDETAPE_PC1);
 		rq->cmd[0] |= REQ_IDETAPE_PC2;
 		goto out;
@@ -1648,7 +1609,7 @@
 {
 	int rc1, rc2;
 
-	rc1 = test_bit(IDETAPE_PIPELINE_ACTIVE, &tape->flags);
+	rc1 = test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags);
 	rc2 = (tape->active_data_rq != NULL);
 	return rc1;
 }
@@ -1881,7 +1842,7 @@
 static ide_startstop_t idetape_read_position_callback(ide_drive_t *drive)
 {
 	idetape_tape_t *tape = drive->driver_data;
-	u8 *readpos = tape->pc->buffer;
+	u8 *readpos = tape->pc->buf;
 
 	debug_log(DBG_PROCS, "Enter %s\n", __func__);
 
@@ -1894,7 +1855,7 @@
 		if (readpos[0] & 0x4) {
 			printk(KERN_INFO "ide-tape: Block location is unknown"
 					 "to the tape\n");
-			clear_bit(IDETAPE_ADDRESS_VALID, &tape->flags);
+			clear_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags);
 			idetape_end_request(drive, 0, 0);
 		} else {
 			debug_log(DBG_SENSE, "Block Location - %u\n",
@@ -1903,7 +1864,7 @@
 			tape->partition = readpos[1];
 			tape->first_frame =
 				be32_to_cpu(*(u32 *)&readpos[4]);
-			set_bit(IDETAPE_ADDRESS_VALID, &tape->flags);
+			set_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags);
 			idetape_end_request(drive, 1, 0);
 		}
 	} else {
@@ -1917,20 +1878,20 @@
  * writing a filemark otherwise.
  */
 static void idetape_create_write_filemark_cmd(ide_drive_t *drive,
-		idetape_pc_t *pc, int write_filemark)
+		struct ide_atapi_pc *pc, int write_filemark)
 {
 	idetape_init_pc(pc);
 	pc->c[0] = WRITE_FILEMARKS;
 	pc->c[4] = write_filemark;
-	set_bit(PC_WAIT_FOR_DSC, &pc->flags);
-	pc->callback = &idetape_pc_callback;
+	pc->flags |= PC_FLAG_WAIT_FOR_DSC;
+	pc->idetape_callback = &idetape_pc_callback;
 }
 
-static void idetape_create_test_unit_ready_cmd(idetape_pc_t *pc)
+static void idetape_create_test_unit_ready_cmd(struct ide_atapi_pc *pc)
 {
 	idetape_init_pc(pc);
 	pc->c[0] = TEST_UNIT_READY;
-	pc->callback = &idetape_pc_callback;
+	pc->idetape_callback = &idetape_pc_callback;
 }
 
 /*
@@ -1946,7 +1907,7 @@
  * to the request list without waiting for it to be serviced! In that case, we
  * usually use idetape_queue_pc_head().
  */
-static int __idetape_queue_pc_tail(ide_drive_t *drive, idetape_pc_t *pc)
+static int __idetape_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc)
 {
 	struct ide_tape_obj *tape = drive->driver_data;
 	struct request rq;
@@ -1957,24 +1918,24 @@
 	return ide_do_drive_cmd(drive, &rq, ide_wait);
 }
 
-static void idetape_create_load_unload_cmd(ide_drive_t *drive, idetape_pc_t *pc,
-		int cmd)
+static void idetape_create_load_unload_cmd(ide_drive_t *drive,
+		struct ide_atapi_pc *pc, int cmd)
 {
 	idetape_init_pc(pc);
 	pc->c[0] = START_STOP;
 	pc->c[4] = cmd;
-	set_bit(PC_WAIT_FOR_DSC, &pc->flags);
-	pc->callback = &idetape_pc_callback;
+	pc->flags |= PC_FLAG_WAIT_FOR_DSC;
+	pc->idetape_callback = &idetape_pc_callback;
 }
 
 static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout)
 {
 	idetape_tape_t *tape = drive->driver_data;
-	idetape_pc_t pc;
+	struct ide_atapi_pc pc;
 	int load_attempted = 0;
 
 	/* Wait for the tape to become ready */
-	set_bit(IDETAPE_MEDIUM_PRESENT, &tape->flags);
+	set_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags);
 	timeout += jiffies;
 	while (time_before(jiffies, timeout)) {
 		idetape_create_test_unit_ready_cmd(&pc);
@@ -1998,14 +1959,14 @@
 	return -EIO;
 }
 
-static int idetape_queue_pc_tail(ide_drive_t *drive, idetape_pc_t *pc)
+static int idetape_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc)
 {
 	return __idetape_queue_pc_tail(drive, pc);
 }
 
 static int idetape_flush_tape_buffers(ide_drive_t *drive)
 {
-	idetape_pc_t pc;
+	struct ide_atapi_pc pc;
 	int rc;
 
 	idetape_create_write_filemark_cmd(drive, &pc, 0);
@@ -2016,18 +1977,18 @@
 	return 0;
 }
 
-static void idetape_create_read_position_cmd(idetape_pc_t *pc)
+static void idetape_create_read_position_cmd(struct ide_atapi_pc *pc)
 {
 	idetape_init_pc(pc);
 	pc->c[0] = READ_POSITION;
-	pc->request_transfer = 20;
-	pc->callback = &idetape_read_position_callback;
+	pc->req_xfer = 20;
+	pc->idetape_callback = &idetape_read_position_callback;
 }
 
 static int idetape_read_position(ide_drive_t *drive)
 {
 	idetape_tape_t *tape = drive->driver_data;
-	idetape_pc_t pc;
+	struct ide_atapi_pc pc;
 	int position;
 
 	debug_log(DBG_PROCS, "Enter %s\n", __func__);
@@ -2039,7 +2000,8 @@
 	return position;
 }
 
-static void idetape_create_locate_cmd(ide_drive_t *drive, idetape_pc_t *pc,
+static void idetape_create_locate_cmd(ide_drive_t *drive,
+		struct ide_atapi_pc *pc,
 		unsigned int block, u8 partition, int skip)
 {
 	idetape_init_pc(pc);
@@ -2047,12 +2009,12 @@
 	pc->c[1] = 2;
 	put_unaligned(cpu_to_be32(block), (unsigned int *) &pc->c[3]);
 	pc->c[8] = partition;
-	set_bit(PC_WAIT_FOR_DSC, &pc->flags);
-	pc->callback = &idetape_pc_callback;
+	pc->flags |= PC_FLAG_WAIT_FOR_DSC;
+	pc->idetape_callback = &idetape_pc_callback;
 }
 
-static int idetape_create_prevent_cmd(ide_drive_t *drive, idetape_pc_t *pc,
-				      int prevent)
+static int idetape_create_prevent_cmd(ide_drive_t *drive,
+		struct ide_atapi_pc *pc, int prevent)
 {
 	idetape_tape_t *tape = drive->driver_data;
 
@@ -2063,7 +2025,7 @@
 	idetape_init_pc(pc);
 	pc->c[0] = ALLOW_MEDIUM_REMOVAL;
 	pc->c[4] = prevent;
-	pc->callback = &idetape_pc_callback;
+	pc->idetape_callback = &idetape_pc_callback;
 	return 1;
 }
 
@@ -2078,7 +2040,7 @@
 
 	/* Remove merge stage. */
 	cnt = tape->merge_stage_size / tape->blk_size;
-	if (test_and_clear_bit(IDETAPE_FILEMARK, &tape->flags))
+	if (test_and_clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
 		++cnt;		/* Filemarks count as 1 sector */
 	tape->merge_stage_size = 0;
 	if (tape->merge_stage != NULL) {
@@ -2087,7 +2049,7 @@
 	}
 
 	/* Clear pipeline flags. */
-	clear_bit(IDETAPE_PIPELINE_ERROR, &tape->flags);
+	clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags);
 	tape->chrdev_dir = IDETAPE_DIR_NONE;
 
 	/* Remove pipeline stages. */
@@ -2124,7 +2086,7 @@
 {
 	idetape_tape_t *tape = drive->driver_data;
 	int retval;
-	idetape_pc_t pc;
+	struct ide_atapi_pc pc;
 
 	if (tape->chrdev_dir == IDETAPE_DIR_READ)
 		__idetape_discard_read_pipeline(drive);
@@ -2201,46 +2163,47 @@
 	if (tape->next_stage == NULL)
 		return;
 	if (!idetape_pipeline_active(tape)) {
-		set_bit(IDETAPE_PIPELINE_ACTIVE, &tape->flags);
+		set_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags);
 		idetape_activate_next_stage(drive);
 		(void) ide_do_drive_cmd(drive, tape->active_data_rq, ide_end);
 	}
 }
 
-static void idetape_create_inquiry_cmd(idetape_pc_t *pc)
+static void idetape_create_inquiry_cmd(struct ide_atapi_pc *pc)
 {
 	idetape_init_pc(pc);
 	pc->c[0] = INQUIRY;
 	pc->c[4] = 254;
-	pc->request_transfer = 254;
-	pc->callback = &idetape_pc_callback;
+	pc->req_xfer = 254;
+	pc->idetape_callback = &idetape_pc_callback;
 }
 
-static void idetape_create_rewind_cmd(ide_drive_t *drive, idetape_pc_t *pc)
+static void idetape_create_rewind_cmd(ide_drive_t *drive,
+		struct ide_atapi_pc *pc)
 {
 	idetape_init_pc(pc);
 	pc->c[0] = REZERO_UNIT;
-	set_bit(PC_WAIT_FOR_DSC, &pc->flags);
-	pc->callback = &idetape_pc_callback;
+	pc->flags |= PC_FLAG_WAIT_FOR_DSC;
+	pc->idetape_callback = &idetape_pc_callback;
 }
 
-static void idetape_create_erase_cmd(idetape_pc_t *pc)
+static void idetape_create_erase_cmd(struct ide_atapi_pc *pc)
 {
 	idetape_init_pc(pc);
 	pc->c[0] = ERASE;
 	pc->c[1] = 1;
-	set_bit(PC_WAIT_FOR_DSC, &pc->flags);
-	pc->callback = &idetape_pc_callback;
+	pc->flags |= PC_FLAG_WAIT_FOR_DSC;
+	pc->idetape_callback = &idetape_pc_callback;
 }
 
-static void idetape_create_space_cmd(idetape_pc_t *pc, int count, u8 cmd)
+static void idetape_create_space_cmd(struct ide_atapi_pc *pc, int count, u8 cmd)
 {
 	idetape_init_pc(pc);
 	pc->c[0] = SPACE;
 	put_unaligned(cpu_to_be32(count), (unsigned int *) &pc->c[1]);
 	pc->c[1] = cmd;
-	set_bit(PC_WAIT_FOR_DSC, &pc->flags);
-	pc->callback = &idetape_pc_callback;
+	pc->flags |= PC_FLAG_WAIT_FOR_DSC;
+	pc->idetape_callback = &idetape_pc_callback;
 }
 
 static void idetape_wait_first_stage(ide_drive_t *drive)
@@ -2326,7 +2289,7 @@
 			idetape_plug_pipeline(drive);
 		}
 	}
-	if (test_and_clear_bit(IDETAPE_PIPELINE_ERROR, &tape->flags))
+	if (test_and_clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags))
 		/* Return a deferred error */
 		return -EIO;
 	return blocks;
@@ -2402,7 +2365,7 @@
 		__idetape_kfree_stage(tape->merge_stage);
 		tape->merge_stage = NULL;
 	}
-	clear_bit(IDETAPE_PIPELINE_ERROR, &tape->flags);
+	clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags);
 	tape->chrdev_dir = IDETAPE_DIR_NONE;
 
 	/*
@@ -2490,7 +2453,7 @@
 	rq.sector = tape->first_frame;
 	rq.nr_sectors = blocks;
 	rq.current_nr_sectors = blocks;
-	if (!test_bit(IDETAPE_PIPELINE_ERROR, &tape->flags) &&
+	if (!test_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags) &&
 	    tape->nr_stages < max_stages) {
 		new_stage = idetape_kmalloc_stage(tape);
 		while (new_stage != NULL) {
@@ -2527,13 +2490,13 @@
 	debug_log(DBG_PROCS, "Enter %s, %d blocks\n", __func__, blocks);
 
 	/* If we are at a filemark, return a read length of 0 */
-	if (test_bit(IDETAPE_FILEMARK, &tape->flags))
+	if (test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
 		return 0;
 
 	/* Wait for the next block to reach the head of the pipeline. */
 	idetape_init_read(drive, tape->max_stages);
 	if (tape->first_stage == NULL) {
-		if (test_bit(IDETAPE_PIPELINE_ERROR, &tape->flags))
+		if (test_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags))
 			return 0;
 		return idetape_queue_rw_tail(drive, REQ_IDETAPE_READ, blocks,
 					tape->merge_stage->bh);
@@ -2550,7 +2513,7 @@
 	else {
 		idetape_switch_buffers(tape, tape->first_stage);
 		if (rq_ptr->errors == IDETAPE_ERROR_FILEMARK)
-			set_bit(IDETAPE_FILEMARK, &tape->flags);
+			set_bit(IDETAPE_FLAG_FILEMARK, &tape->flags);
 		spin_lock_irqsave(&tape->lock, flags);
 		idetape_remove_stage_head(drive);
 		spin_unlock_irqrestore(&tape->lock, flags);
@@ -2618,7 +2581,7 @@
 static int idetape_rewind_tape(ide_drive_t *drive)
 {
 	int retval;
-	idetape_pc_t pc;
+	struct ide_atapi_pc pc;
 	idetape_tape_t *tape;
 	tape = drive->driver_data;
 
@@ -2681,7 +2644,7 @@
 					int mt_count)
 {
 	idetape_tape_t *tape = drive->driver_data;
-	idetape_pc_t pc;
+	struct ide_atapi_pc pc;
 	unsigned long flags;
 	int retval, count = 0;
 	int sprev = !!(tape->caps[4] & 0x20);
@@ -2697,12 +2660,13 @@
 	if (tape->chrdev_dir == IDETAPE_DIR_READ) {
 		/* it's a read-ahead buffer, scan it for crossed filemarks. */
 		tape->merge_stage_size = 0;
-		if (test_and_clear_bit(IDETAPE_FILEMARK, &tape->flags))
+		if (test_and_clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
 			++count;
 		while (tape->first_stage != NULL) {
 			if (count == mt_count) {
 				if (mt_op == MTFSFM)
-					set_bit(IDETAPE_FILEMARK, &tape->flags);
+					set_bit(IDETAPE_FLAG_FILEMARK,
+						&tape->flags);
 				return 0;
 			}
 			spin_lock_irqsave(&tape->lock, flags);
@@ -2786,7 +2750,7 @@
 	debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count);
 
 	if (tape->chrdev_dir != IDETAPE_DIR_READ) {
-		if (test_bit(IDETAPE_DETECT_BS, &tape->flags))
+		if (test_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags))
 			if (count > tape->blk_size &&
 			    (count % tape->blk_size) == 0)
 				tape->user_bs_factor = count / tape->blk_size;
@@ -2829,7 +2793,7 @@
 		tape->merge_stage_size = bytes_read-temp;
 	}
 finish:
-	if (!actually_read && test_bit(IDETAPE_FILEMARK, &tape->flags)) {
+	if (!actually_read && test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags)) {
 		debug_log(DBG_SENSE, "%s: spacing over filemark\n", tape->name);
 
 		idetape_space_over_filemarks(drive, MTFSF, 1);
@@ -2938,7 +2902,7 @@
 
 static int idetape_write_filemark(ide_drive_t *drive)
 {
-	idetape_pc_t pc;
+	struct ide_atapi_pc pc;
 
 	/* Write a filemark */
 	idetape_create_write_filemark_cmd(drive, &pc, 1);
@@ -2966,7 +2930,7 @@
 static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
 {
 	idetape_tape_t *tape = drive->driver_data;
-	idetape_pc_t pc;
+	struct ide_atapi_pc pc;
 	int i, retval;
 
 	debug_log(DBG_ERR, "Handling MTIOCTOP ioctl: mt_op=%d, mt_count=%d\n",
@@ -3022,7 +2986,7 @@
 					      !IDETAPE_LU_LOAD_MASK);
 		retval = idetape_queue_pc_tail(drive, &pc);
 		if (!retval)
-			clear_bit(IDETAPE_MEDIUM_PRESENT, &tape->flags);
+			clear_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags);
 		return retval;
 	case MTNOP:
 		idetape_discard_read_pipeline(drive, 0);
@@ -3045,9 +3009,9 @@
 			    mt_count % tape->blk_size)
 				return -EIO;
 			tape->user_bs_factor = mt_count / tape->blk_size;
-			clear_bit(IDETAPE_DETECT_BS, &tape->flags);
+			clear_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags);
 		} else
-			set_bit(IDETAPE_DETECT_BS, &tape->flags);
+			set_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags);
 		return 0;
 	case MTSEEK:
 		idetape_discard_read_pipeline(drive, 0);
@@ -3149,7 +3113,7 @@
 static void ide_tape_get_bsize_from_bdesc(ide_drive_t *drive)
 {
 	idetape_tape_t *tape = drive->driver_data;
-	idetape_pc_t pc;
+	struct ide_atapi_pc pc;
 
 	idetape_create_mode_sense_cmd(&pc, IDETAPE_BLOCK_DESCRIPTOR);
 	if (idetape_queue_pc_tail(drive, &pc)) {
@@ -3161,10 +3125,10 @@
 		}
 		return;
 	}
-	tape->blk_size = (pc.buffer[4 + 5] << 16) +
-				(pc.buffer[4 + 6] << 8)  +
-				 pc.buffer[4 + 7];
-	tape->drv_write_prot = (pc.buffer[2] & 0x80) >> 7;
+	tape->blk_size = (pc.buf[4 + 5] << 16) +
+				(pc.buf[4 + 6] << 8)  +
+				 pc.buf[4 + 7];
+	tape->drv_write_prot = (pc.buf[2] & 0x80) >> 7;
 }
 
 static int idetape_chrdev_open(struct inode *inode, struct file *filp)
@@ -3172,7 +3136,7 @@
 	unsigned int minor = iminor(inode), i = minor & ~0xc0;
 	ide_drive_t *drive;
 	idetape_tape_t *tape;
-	idetape_pc_t pc;
+	struct ide_atapi_pc pc;
 	int retval;
 
 	if (i >= MAX_HWIFS * MAX_DRIVES)
@@ -3195,24 +3159,24 @@
 
 	filp->private_data = tape;
 
-	if (test_and_set_bit(IDETAPE_BUSY, &tape->flags)) {
+	if (test_and_set_bit(IDETAPE_FLAG_BUSY, &tape->flags)) {
 		retval = -EBUSY;
 		goto out_put_tape;
 	}
 
 	retval = idetape_wait_ready(drive, 60 * HZ);
 	if (retval) {
-		clear_bit(IDETAPE_BUSY, &tape->flags);
+		clear_bit(IDETAPE_FLAG_BUSY, &tape->flags);
 		printk(KERN_ERR "ide-tape: %s: drive not ready\n", tape->name);
 		goto out_put_tape;
 	}
 
 	idetape_read_position(drive);
-	if (!test_bit(IDETAPE_ADDRESS_VALID, &tape->flags))
+	if (!test_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags))
 		(void)idetape_rewind_tape(drive);
 
 	if (tape->chrdev_dir != IDETAPE_DIR_READ)
-		clear_bit(IDETAPE_PIPELINE_ERROR, &tape->flags);
+		clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags);
 
 	/* Read block size and write protect status from drive. */
 	ide_tape_get_bsize_from_bdesc(drive);
@@ -3227,7 +3191,7 @@
 	if (tape->write_prot) {
 		if ((filp->f_flags & O_ACCMODE) == O_WRONLY ||
 		    (filp->f_flags & O_ACCMODE) == O_RDWR) {
-			clear_bit(IDETAPE_BUSY, &tape->flags);
+			clear_bit(IDETAPE_FLAG_BUSY, &tape->flags);
 			retval = -EROFS;
 			goto out_put_tape;
 		}
@@ -3272,7 +3236,7 @@
 {
 	struct ide_tape_obj *tape = ide_tape_f(filp);
 	ide_drive_t *drive = tape->drive;
-	idetape_pc_t pc;
+	struct ide_atapi_pc pc;
 	unsigned int minor = iminor(inode);
 
 	lock_kernel();
@@ -3292,7 +3256,7 @@
 		__idetape_kfree_stage(tape->cache_stage);
 		tape->cache_stage = NULL;
 	}
-	if (minor < 128 && test_bit(IDETAPE_MEDIUM_PRESENT, &tape->flags))
+	if (minor < 128 && test_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags))
 		(void) idetape_rewind_tape(drive);
 	if (tape->chrdev_dir == IDETAPE_DIR_NONE) {
 		if (tape->door_locked == DOOR_LOCKED) {
@@ -3302,7 +3266,7 @@
 			}
 		}
 	}
-	clear_bit(IDETAPE_BUSY, &tape->flags);
+	clear_bit(IDETAPE_FLAG_BUSY, &tape->flags);
 	ide_tape_put(tape);
 	unlock_kernel();
 	return 0;
@@ -3350,7 +3314,7 @@
 static void idetape_get_inquiry_results(ide_drive_t *drive)
 {
 	idetape_tape_t *tape = drive->driver_data;
-	idetape_pc_t pc;
+	struct ide_atapi_pc pc;
 	char fw_rev[6], vendor_id[10], product_id[18];
 
 	idetape_create_inquiry_cmd(&pc);
@@ -3359,9 +3323,9 @@
 				tape->name);
 		return;
 	}
-	memcpy(vendor_id, &pc.buffer[8], 8);
-	memcpy(product_id, &pc.buffer[16], 16);
-	memcpy(fw_rev, &pc.buffer[32], 4);
+	memcpy(vendor_id, &pc.buf[8], 8);
+	memcpy(product_id, &pc.buf[16], 16);
+	memcpy(fw_rev, &pc.buf[32], 4);
 
 	ide_fixstring(vendor_id, 10, 0);
 	ide_fixstring(product_id, 18, 0);
@@ -3378,7 +3342,7 @@
 static void idetape_get_mode_sense_results(ide_drive_t *drive)
 {
 	idetape_tape_t *tape = drive->driver_data;
-	idetape_pc_t pc;
+	struct ide_atapi_pc pc;
 	u8 *caps;
 	u8 speed, max_speed;
 
@@ -3392,7 +3356,7 @@
 		put_unaligned(6*52, (u16 *)&tape->caps[16]);
 		return;
 	}
-	caps = pc.buffer + 4 + pc.buffer[3];
+	caps = pc.buf + 4 + pc.buf[3];
 
 	/* convert to host order and save for later use */
 	speed = be16_to_cpu(*(u16 *)&caps[14]);
@@ -3506,7 +3470,7 @@
 
 	/* Command packet DRQ type */
 	if (((gcw[0] & 0x60) >> 5) == 1)
-		set_bit(IDETAPE_DRQ_INTERRUPT, &tape->flags);
+		set_bit(IDETAPE_FLAG_DRQ_INTERRUPT, &tape->flags);
 
 	tape->min_pipeline = 10;
 	tape->max_pipeline = 10;
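
The ide-tape.c hunks above are one mechanical conversion: the driver-private
idetape_pc_t becomes the shared struct ide_atapi_pc (request_transfer ->
req_xfer, buffer -> buf, callback -> idetape_callback), the IDETAPE_* tape
flags gain an IDETAPE_FLAG_ prefix, and the packet-command flags move from
atomic set_bit()/clear_bit() bit numbers to plain PC_FLAG_* masks OR-ed into
pc->flags. A minimal sketch of the flag-style change, with the structure
reduced to the fields these hunks touch (the rest of the layout is assumed):

	/* sketch only; the real struct ide_atapi_pc has more fields */
	struct ide_atapi_pc_sketch {
		u8 c[12];		/* packet command bytes */
		int req_xfer;		/* bytes to transfer */
		u8 *buf;		/* data buffer */
		unsigned long flags;	/* PC_FLAG_* mask, no atomics */
	};

	static void mark_wait_for_dsc(struct ide_atapi_pc_sketch *pc)
	{
		/* old style: set_bit(PC_WAIT_FOR_DSC, &pc->flags); */
		pc->flags |= PC_FLAG_WAIT_FOR_DSC;	/* new style */
	}

The plain OR is presumably safe because a packet command is built by a single
thread before it is queued, so the atomic read-modify-write bought nothing.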
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index 4c86a8d..155cc90 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -59,32 +59,34 @@
 		SELECT_MASK(drive, 0);
 
 	if (task->tf_flags & IDE_TFLAG_OUT_DATA)
-		hwif->OUTW((tf->hob_data << 8) | tf->data, IDE_DATA_REG);
+		hwif->OUTW((tf->hob_data << 8) | tf->data,
+			   hwif->io_ports[IDE_DATA_OFFSET]);
 
 	if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
-		hwif->OUTB(tf->hob_feature, IDE_FEATURE_REG);
+		hwif->OUTB(tf->hob_feature, hwif->io_ports[IDE_FEATURE_OFFSET]);
 	if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
-		hwif->OUTB(tf->hob_nsect, IDE_NSECTOR_REG);
+		hwif->OUTB(tf->hob_nsect, hwif->io_ports[IDE_NSECTOR_OFFSET]);
 	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
-		hwif->OUTB(tf->hob_lbal, IDE_SECTOR_REG);
+		hwif->OUTB(tf->hob_lbal, hwif->io_ports[IDE_SECTOR_OFFSET]);
 	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
-		hwif->OUTB(tf->hob_lbam, IDE_LCYL_REG);
+		hwif->OUTB(tf->hob_lbam, hwif->io_ports[IDE_LCYL_OFFSET]);
 	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
-		hwif->OUTB(tf->hob_lbah, IDE_HCYL_REG);
+		hwif->OUTB(tf->hob_lbah, hwif->io_ports[IDE_HCYL_OFFSET]);
 
 	if (task->tf_flags & IDE_TFLAG_OUT_FEATURE)
-		hwif->OUTB(tf->feature, IDE_FEATURE_REG);
+		hwif->OUTB(tf->feature, hwif->io_ports[IDE_FEATURE_OFFSET]);
 	if (task->tf_flags & IDE_TFLAG_OUT_NSECT)
-		hwif->OUTB(tf->nsect, IDE_NSECTOR_REG);
+		hwif->OUTB(tf->nsect, hwif->io_ports[IDE_NSECTOR_OFFSET]);
 	if (task->tf_flags & IDE_TFLAG_OUT_LBAL)
-		hwif->OUTB(tf->lbal, IDE_SECTOR_REG);
+		hwif->OUTB(tf->lbal, hwif->io_ports[IDE_SECTOR_OFFSET]);
 	if (task->tf_flags & IDE_TFLAG_OUT_LBAM)
-		hwif->OUTB(tf->lbam, IDE_LCYL_REG);
+		hwif->OUTB(tf->lbam, hwif->io_ports[IDE_LCYL_OFFSET]);
 	if (task->tf_flags & IDE_TFLAG_OUT_LBAH)
-		hwif->OUTB(tf->lbah, IDE_HCYL_REG);
+		hwif->OUTB(tf->lbah, hwif->io_ports[IDE_HCYL_OFFSET]);
 
 	if (task->tf_flags & IDE_TFLAG_OUT_DEVICE)
-		hwif->OUTB((tf->device & HIHI) | drive->select.all, IDE_SELECT_REG);
+		hwif->OUTB((tf->device & HIHI) | drive->select.all,
+			   hwif->io_ports[IDE_SELECT_OFFSET]);
 }
 
 int taskfile_lib_get_identify (ide_drive_t *drive, u8 *buf)
@@ -152,7 +154,8 @@
 	switch (task->data_phase) {
 	case TASKFILE_MULTI_OUT:
 	case TASKFILE_OUT:
-		hwif->OUTBSYNC(drive, tf->command, IDE_COMMAND_REG);
+		hwif->OUTBSYNC(drive, tf->command,
+			       hwif->io_ports[IDE_COMMAND_OFFSET]);
 		ndelay(400);	/* FIXME */
 		return pre_task_out_intr(drive, task->rq);
 	case TASKFILE_MULTI_IN:
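
Same story in ide-taskfile.c: the IDE_*_REG macros, which expanded to the
current hwif's ports implicitly, give way to explicit hwif->io_ports[]
indexing. A sketch of the resulting addressing style -- the function name is
hypothetical, the hooks and offsets are the ones used in the hunks above:

	static void tf_out_lba_sketch(ide_hwif_t *hwif, u8 nsect, u8 lbal,
				      u8 lbam, u8 lbah)
	{
		hwif->OUTB(nsect, hwif->io_ports[IDE_NSECTOR_OFFSET]);
		hwif->OUTB(lbal, hwif->io_ports[IDE_SECTOR_OFFSET]);
		hwif->OUTB(lbam, hwif->io_ports[IDE_LCYL_OFFSET]);
		hwif->OUTB(lbah, hwif->io_ports[IDE_HCYL_OFFSET]);
	}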
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index fc69fe2..917c72d 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -78,6 +78,8 @@
 /* default maximum number of failures */
 #define IDE_DEFAULT_MAX_FAILURES 	1
 
+struct class *ide_port_class;
+
 static const u8 ide_hwif_to_major[] = { IDE0_MAJOR, IDE1_MAJOR,
 					IDE2_MAJOR, IDE3_MAJOR,
 					IDE4_MAJOR, IDE5_MAJOR,
@@ -90,10 +92,6 @@
 DEFINE_MUTEX(ide_cfg_mtx);
  __cacheline_aligned_in_smp DEFINE_SPINLOCK(ide_lock);
 
-#ifdef CONFIG_IDEPCI_PCIBUS_ORDER
-int ide_scan_direction; /* THIS was formerly 2.2.x pci=reverse */
-#endif
-
 int noautodma = 0;
 
 #ifdef CONFIG_BLK_DEV_IDEACPI
@@ -109,13 +107,13 @@
 
 EXPORT_SYMBOL(ide_hwifs);
 
+static void ide_port_init_devices_data(ide_hwif_t *);
+
 /*
  * Do not even *think* about calling this!
  */
 void ide_init_port_data(ide_hwif_t *hwif, unsigned int index)
 {
-	unsigned int unit;
-
 	/* bulk initialize hwif & drive info with zeros */
 	memset(hwif, 0, sizeof(ide_hwif_t));
 
@@ -134,8 +132,20 @@
 
 	default_hwif_iops(hwif);
 	default_hwif_transport(hwif);
+
+	ide_port_init_devices_data(hwif);
+}
+EXPORT_SYMBOL_GPL(ide_init_port_data);
+
+static void ide_port_init_devices_data(ide_hwif_t *hwif)
+{
+	int unit;
+
 	for (unit = 0; unit < MAX_DRIVES; ++unit) {
 		ide_drive_t *drive = &hwif->drives[unit];
+		u8 j = (hwif->index * MAX_DRIVES) + unit;
+
+		memset(drive, 0, sizeof(*drive));
 
 		drive->media			= ide_disk;
 		drive->select.all		= (unit<<4)|0xa0;
@@ -147,32 +157,13 @@
 		drive->special.b.set_geometry	= 1;
 		drive->name[0]			= 'h';
 		drive->name[1]			= 'd';
-		drive->name[2]			= 'a' + (index * MAX_DRIVES) + unit;
+		drive->name[2]			= 'a' + j;
 		drive->max_failures		= IDE_DEFAULT_MAX_FAILURES;
-		drive->using_dma		= 0;
-		drive->vdma			= 0;
+
 		INIT_LIST_HEAD(&drive->list);
 		init_completion(&drive->gendev_rel_comp);
 	}
 }
-EXPORT_SYMBOL_GPL(ide_init_port_data);
-
-static void init_hwif_default(ide_hwif_t *hwif, unsigned int index)
-{
-	hw_regs_t hw;
-
-	memset(&hw, 0, sizeof(hw_regs_t));
-
-	ide_init_hwif_ports(&hw, ide_default_io_base(index), 0, &hwif->irq);
-
-	memcpy(hwif->io_ports, hw.io_ports, sizeof(hw.io_ports));
-
-	hwif->noprobe = !hwif->io_ports[IDE_DATA_OFFSET];
-#ifdef CONFIG_BLK_DEV_HD
-	if (hwif->io_ports[IDE_DATA_OFFSET] == HD_DATA)
-		hwif->noprobe = 1;	/* may be overridden by ide_setup() */
-#endif
-}
 
 /*
  * init_ide_data() sets reasonable default values into all fields
@@ -194,7 +185,6 @@
 #define MAGIC_COOKIE 0x12345678
 static void __init init_ide_data (void)
 {
-	ide_hwif_t *hwif;
 	unsigned int index;
 	static unsigned long magic_cookie = MAGIC_COOKIE;
 
@@ -204,13 +194,9 @@
 
 	/* Initialise all interface structures */
 	for (index = 0; index < MAX_HWIFS; ++index) {
-		hwif = &ide_hwifs[index];
+		ide_hwif_t *hwif = &ide_hwifs[index];
+
 		ide_init_port_data(hwif, index);
-		init_hwif_default(hwif, index);
-#if !defined(CONFIG_PPC32) || !defined(CONFIG_PCI)
-		hwif->irq =
-			ide_init_default_irq(hwif->io_ports[IDE_DATA_OFFSET]);
-#endif
 	}
 }
 
@@ -259,7 +245,7 @@
 
 	for (i = 0; i < MAX_HWIFS; i++) {
 		hwif = &ide_hwifs[i];
-		if (hwif->io_ports[IDE_DATA_OFFSET] == 0)
+		if (hwif->chipset == ide_unknown)
 			goto found;
 	}
 
@@ -357,108 +343,6 @@
 			release_region(hwif->io_ports[i], 1);
 }
 
-/**
- *	ide_hwif_restore	-	restore hwif to template
- *	@hwif: hwif to update
- *	@tmp_hwif: template
- *
- *	Restore hwif to a previous state by copying most settings
- *	from the template.
- */
-
-static void ide_hwif_restore(ide_hwif_t *hwif, ide_hwif_t *tmp_hwif)
-{
-	hwif->hwgroup			= tmp_hwif->hwgroup;
-
-	hwif->gendev.parent		= tmp_hwif->gendev.parent;
-
-	hwif->proc			= tmp_hwif->proc;
-
-	hwif->major			= tmp_hwif->major;
-	hwif->straight8			= tmp_hwif->straight8;
-	hwif->bus_state			= tmp_hwif->bus_state;
-
-	hwif->host_flags		= tmp_hwif->host_flags;
-
-	hwif->pio_mask			= tmp_hwif->pio_mask;
-
-	hwif->ultra_mask		= tmp_hwif->ultra_mask;
-	hwif->mwdma_mask		= tmp_hwif->mwdma_mask;
-	hwif->swdma_mask		= tmp_hwif->swdma_mask;
-
-	hwif->cbl			= tmp_hwif->cbl;
-
-	hwif->chipset			= tmp_hwif->chipset;
-	hwif->hold			= tmp_hwif->hold;
-
-	hwif->dev			= tmp_hwif->dev;
-
-#ifdef CONFIG_BLK_DEV_IDEPCI
-	hwif->cds			= tmp_hwif->cds;
-#endif
-
-	hwif->set_pio_mode		= tmp_hwif->set_pio_mode;
-	hwif->set_dma_mode		= tmp_hwif->set_dma_mode;
-	hwif->mdma_filter		= tmp_hwif->mdma_filter;
-	hwif->udma_filter		= tmp_hwif->udma_filter;
-	hwif->selectproc		= tmp_hwif->selectproc;
-	hwif->reset_poll		= tmp_hwif->reset_poll;
-	hwif->pre_reset			= tmp_hwif->pre_reset;
-	hwif->resetproc			= tmp_hwif->resetproc;
-	hwif->maskproc			= tmp_hwif->maskproc;
-	hwif->quirkproc			= tmp_hwif->quirkproc;
-	hwif->busproc			= tmp_hwif->busproc;
-
-	hwif->ata_input_data		= tmp_hwif->ata_input_data;
-	hwif->ata_output_data		= tmp_hwif->ata_output_data;
-	hwif->atapi_input_bytes		= tmp_hwif->atapi_input_bytes;
-	hwif->atapi_output_bytes	= tmp_hwif->atapi_output_bytes;
-
-	hwif->dma_host_set		= tmp_hwif->dma_host_set;
-	hwif->dma_setup			= tmp_hwif->dma_setup;
-	hwif->dma_exec_cmd		= tmp_hwif->dma_exec_cmd;
-	hwif->dma_start			= tmp_hwif->dma_start;
-	hwif->ide_dma_end		= tmp_hwif->ide_dma_end;
-	hwif->ide_dma_test_irq		= tmp_hwif->ide_dma_test_irq;
-	hwif->ide_dma_clear_irq		= tmp_hwif->ide_dma_clear_irq;
-	hwif->dma_lost_irq		= tmp_hwif->dma_lost_irq;
-	hwif->dma_timeout		= tmp_hwif->dma_timeout;
-
-	hwif->OUTB			= tmp_hwif->OUTB;
-	hwif->OUTBSYNC			= tmp_hwif->OUTBSYNC;
-	hwif->OUTW			= tmp_hwif->OUTW;
-	hwif->OUTSW			= tmp_hwif->OUTSW;
-	hwif->OUTSL			= tmp_hwif->OUTSL;
-
-	hwif->INB			= tmp_hwif->INB;
-	hwif->INW			= tmp_hwif->INW;
-	hwif->INSW			= tmp_hwif->INSW;
-	hwif->INSL			= tmp_hwif->INSL;
-
-	hwif->sg_max_nents		= tmp_hwif->sg_max_nents;
-
-	hwif->mmio			= tmp_hwif->mmio;
-	hwif->rqsize			= tmp_hwif->rqsize;
-
-#ifndef CONFIG_BLK_DEV_IDECS
-	hwif->irq			= tmp_hwif->irq;
-#endif
-
-	hwif->dma_base			= tmp_hwif->dma_base;
-	hwif->dma_command		= tmp_hwif->dma_command;
-	hwif->dma_vendor1		= tmp_hwif->dma_vendor1;
-	hwif->dma_status		= tmp_hwif->dma_status;
-	hwif->dma_vendor3		= tmp_hwif->dma_vendor3;
-	hwif->dma_prdtable		= tmp_hwif->dma_prdtable;
-
-	hwif->config_data		= tmp_hwif->config_data;
-	hwif->select_data		= tmp_hwif->select_data;
-	hwif->extra_base		= tmp_hwif->extra_base;
-	hwif->extra_ports		= tmp_hwif->extra_ports;
-
-	hwif->hwif_data			= tmp_hwif->hwif_data;
-}
-
 void ide_remove_port_from_hwgroup(ide_hwif_t *hwif)
 {
 	ide_hwgroup_t *hwgroup = hwif->hwgroup;
@@ -494,11 +378,38 @@
 	spin_unlock_irq(&ide_lock);
 }
 
+/* Called with ide_lock held. */
+static void __ide_port_unregister_devices(ide_hwif_t *hwif)
+{
+	int i;
+
+	for (i = 0; i < MAX_DRIVES; i++) {
+		ide_drive_t *drive = &hwif->drives[i];
+
+		if (drive->present) {
+			spin_unlock_irq(&ide_lock);
+			device_unregister(&drive->gendev);
+			wait_for_completion(&drive->gendev_rel_comp);
+			spin_lock_irq(&ide_lock);
+		}
+	}
+}
+
+void ide_port_unregister_devices(ide_hwif_t *hwif)
+{
+	mutex_lock(&ide_cfg_mtx);
+	spin_lock_irq(&ide_lock);
+	__ide_port_unregister_devices(hwif);
+	hwif->present = 0;
+	ide_port_init_devices_data(hwif);
+	spin_unlock_irq(&ide_lock);
+	mutex_unlock(&ide_cfg_mtx);
+}
+EXPORT_SYMBOL_GPL(ide_port_unregister_devices);
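
Note on ide_port_unregister_devices(): ide_lock is dropped across
device_unregister() because tearing a device out of the driver model can
sleep (sysfs removal, release methods), which is not allowed under a
spinlock; waiting on gendev_rel_comp then guarantees the device is fully
released before the scan resumes under the re-taken lock. The same
unlock/relock dance previously lived inline in ide_unregister() and is
replaced by a call to this helper in the next hunk.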
+
 /**
  *	ide_unregister		-	free an IDE interface
  *	@index: index of interface (will change soon to a pointer)
- *	@init_default: init default hwif flag
- *	@restore: restore hwif flag
  *
  *	Perform the final unregister of an IDE interface. At the moment
  *	we don't refcount interfaces so this will also get split up.
@@ -518,13 +429,11 @@
  *	This is raving bonkers.
  */
 
-void ide_unregister(unsigned int index, int init_default, int restore)
+void ide_unregister(unsigned int index)
 {
-	ide_drive_t *drive;
 	ide_hwif_t *hwif, *g;
-	static ide_hwif_t tmp_hwif; /* protected by ide_cfg_mtx */
 	ide_hwgroup_t *hwgroup;
-	int irq_count = 0, unit;
+	int irq_count = 0;
 
 	BUG_ON(index >= MAX_HWIFS);
 
@@ -535,15 +444,7 @@
 	hwif = &ide_hwifs[index];
 	if (!hwif->present)
 		goto abort;
-	for (unit = 0; unit < MAX_DRIVES; ++unit) {
-		drive = &hwif->drives[unit];
-		if (!drive->present)
-			continue;
-		spin_unlock_irq(&ide_lock);
-		device_unregister(&drive->gendev);
-		wait_for_completion(&drive->gendev_rel_comp);
-		spin_lock_irq(&ide_lock);
-	}
+	__ide_port_unregister_devices(hwif);
 	hwif->present = 0;
 
 	spin_unlock_irq(&ide_lock);
@@ -565,6 +466,7 @@
 
 	ide_remove_port_from_hwgroup(hwif);
 
+	device_unregister(hwif->portdev);
 	device_unregister(&hwif->gendev);
 	wait_for_completion(&hwif->gendev_rel_comp);
 
@@ -576,34 +478,14 @@
 	unregister_blkdev(hwif->major, hwif->name);
 	spin_lock_irq(&ide_lock);
 
-	if (hwif->dma_base) {
-		(void) ide_release_dma(hwif);
-
-		hwif->dma_base = 0;
-		hwif->dma_command = 0;
-		hwif->dma_vendor1 = 0;
-		hwif->dma_status = 0;
-		hwif->dma_vendor3 = 0;
-		hwif->dma_prdtable = 0;
-
-		hwif->extra_base  = 0;
-		hwif->extra_ports = 0;
-	}
+	if (hwif->dma_base)
+		(void)ide_release_dma(hwif);
 
 	ide_hwif_release_regions(hwif);
 
-	/* copy original settings */
-	tmp_hwif = *hwif;
-
 	/* restore hwif data to pristine status */
 	ide_init_port_data(hwif, index);
 
-	if (init_default)
-		init_hwif_default(hwif, index);
-
-	if (restore)
-		ide_hwif_restore(hwif, &tmp_hwif);
-
 abort:
 	spin_unlock_irq(&ide_lock);
 	mutex_unlock(&ide_cfg_mtx);
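
With the restore machinery gone, ide_unregister() shrinks to a bare index,
and every caller converted later in this diff follows suit:

	/* before */
	ide_unregister(hwif->index, 0, 0);	/* init_default, restore */
	/* after */
	ide_unregister(hwif->index);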
@@ -622,79 +504,6 @@
 }
 EXPORT_SYMBOL_GPL(ide_init_port_hw);
 
-ide_hwif_t *ide_deprecated_find_port(unsigned long base)
-{
-	ide_hwif_t *hwif;
-	int i;
-
-	for (i = 0; i < MAX_HWIFS; i++) {
-		hwif = &ide_hwifs[i];
-		if (hwif->io_ports[IDE_DATA_OFFSET] == base)
-			goto found;
-	}
-
-	for (i = 0; i < MAX_HWIFS; i++) {
-		hwif = &ide_hwifs[i];
-		if (hwif->hold)
-			continue;
-		if (!hwif->present && hwif->mate == NULL)
-			goto found;
-	}
-
-	hwif = NULL;
-found:
-	return hwif;
-}
-EXPORT_SYMBOL_GPL(ide_deprecated_find_port);
-
-/**
- *	ide_register_hw		-	register IDE interface
- *	@hw: hardware registers
- *	@quirkproc: quirkproc function
- *	@hwifp: pointer to returned hwif
- *
- *	Register an IDE interface, specifying exactly the registers etc.
- *
- *	Returns -1 on error.
- */
-
-int ide_register_hw(hw_regs_t *hw, void (*quirkproc)(ide_drive_t *),
-		    ide_hwif_t **hwifp)
-{
-	int index, retry = 1;
-	ide_hwif_t *hwif;
-	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
-
-	do {
-		hwif = ide_deprecated_find_port(hw->io_ports[IDE_DATA_OFFSET]);
-		if (hwif)
-			goto found;
-		for (index = 0; index < MAX_HWIFS; index++)
-			ide_unregister(index, 1, 1);
-	} while (retry--);
-	return -1;
-found:
-	index = hwif->index;
-	if (hwif->present)
-		ide_unregister(index, 0, 1);
-	else if (!hwif->hold)
-		ide_init_port_data(hwif, index);
-
-	ide_init_port_hw(hwif, hw);
-	hwif->quirkproc = quirkproc;
-
-	idx[0] = index;
-
-	ide_device_add(idx, NULL);
-
-	if (hwifp)
-		*hwifp = hwif;
-
-	return hwif->present ? index : -1;
-}
-
-EXPORT_SYMBOL(ide_register_hw);
-
 /*
  *	Locks for IDE setting functionality
  */
@@ -997,27 +806,6 @@
 			if (!capable(CAP_SYS_RAWIO))
 				return -EACCES;
 			return ide_task_ioctl(drive, cmd, arg);
-
-		case HDIO_SCAN_HWIF:
-		{
-			hw_regs_t hw;
-			int args[3];
-			if (!capable(CAP_SYS_RAWIO)) return -EACCES;
-			if (copy_from_user(args, p, 3 * sizeof(int)))
-				return -EFAULT;
-			memset(&hw, 0, sizeof(hw));
-			ide_init_hwif_ports(&hw, (unsigned long) args[0],
-					    (unsigned long) args[1], NULL);
-			hw.irq = args[2];
-			if (ide_register_hw(&hw, NULL, NULL) == -1)
-				return -EIO;
-			return 0;
-		}
-	        case HDIO_UNREGISTER_HWIF:
-			if (!capable(CAP_SYS_RAWIO)) return -EACCES;
-			/* (arg > MAX_HWIFS) checked in function */
-			ide_unregister(arg, 1, 1);
-			return 0;
 		case HDIO_SET_NICE:
 			if (!capable(CAP_SYS_ADMIN)) return -EACCES;
 			if (arg != (arg & ((1 << IDE_NICE_DSC_OVERLAP) | (1 << IDE_NICE_1))))
@@ -1071,8 +859,6 @@
 		case HDIO_SET_BUSSTATE:
 			if (!capable(CAP_SYS_ADMIN))
 				return -EACCES;
-			if (HWIF(drive)->busproc)
-				return HWIF(drive)->busproc(drive, (int)arg);
 			return -EOPNOTSUPP;
 		default:
 			return -EINVAL;
@@ -1173,8 +959,9 @@
 extern int probe_ht6560b;
 extern int probe_qd65xx;
 extern int cmd640_vlb;
+extern int probe_4drives;
 
-static int __initdata is_chipset_set[MAX_HWIFS];
+static int __initdata is_chipset_set;
 
 /*
  * ide_setup() gets called VERY EARLY during initialization,
@@ -1217,14 +1004,6 @@
 		goto obsolete_option;
 	}
 
-#ifdef CONFIG_IDEPCI_PCIBUS_ORDER
-	if (!strcmp(s, "ide=reverse")) {
-		ide_scan_direction = 1;
-		printk(" : Enabled support for IDE inverse scan order.\n");
-		goto obsolete_option;
-	}
-#endif
-
 #ifdef CONFIG_BLK_DEV_IDEACPI
 	if (!strcmp(s, "ide=noacpi")) {
 		//printk(" : Disable IDE ACPI support.\n");
@@ -1335,13 +1114,11 @@
 		 * (-8, -9, -10) are reserved to ease the hardcoding.
 		 */
 		static const char *ide_words[] = {
-			"noprobe", "serialize", "minus3", "minus4",
+			"minus1", "serialize", "minus3", "minus4",
 			"reset", "minus6", "ata66", "minus8", "minus9",
 			"minus10", "four", "qd65xx", "ht6560b", "cmd640_vlb",
 			"dtc2278", "umc8672", "ali14xx", NULL };
 
-		hw_regs_t hwregs;
-
 		hw = s[3] - '0';
 		hwif = &ide_hwifs[hw];
 		i = match_parm(&s[4], ide_words, vals, 3);
@@ -1350,19 +1127,14 @@
 		 * Cryptic check to ensure chipset not already set for hwif.
 		 * Note: we can't depend on hwif->chipset here.
 		 */
-		if ((i >= -18 && i <= -11) || (i > 0 && i <= 3)) {
+		if (i >= -18 && i <= -11) {
 			/* chipset already specified */
-			if (is_chipset_set[hw])
+			if (is_chipset_set)
 				goto bad_option;
-			if (i > -18 && i <= -11) {
-				/* these drivers are for "ide0=" only */
-				if (hw != 0)
-					goto bad_hwif;
-				/* chipset already specified for 2nd port */
-				if (is_chipset_set[hw+1])
-					goto bad_option;
-			}
-			is_chipset_set[hw] = 1;
+			/* these drivers are for "ide0=" only */
+			if (hw != 0)
+				goto bad_hwif;
+			is_chipset_set = 1;
 			printk("\n");
 		}
 
@@ -1399,19 +1171,9 @@
 #endif
 #ifdef CONFIG_BLK_DEV_4DRIVES
 			case -11: /* "four" drives on one set of ports */
-			{
-				ide_hwif_t *mate = &ide_hwifs[hw^1];
-				mate->drives[0].select.all ^= 0x20;
-				mate->drives[1].select.all ^= 0x20;
-				hwif->chipset = mate->chipset = ide_4drives;
-				mate->irq = hwif->irq;
-				memcpy(mate->io_ports, hwif->io_ports, sizeof(hwif->io_ports));
-				hwif->mate = mate;
-				mate->mate = hwif;
-				hwif->serialized = mate->serialized = 1;
+				probe_4drives = 1;
 				goto obsolete_option;
-			}
-#endif /* CONFIG_BLK_DEV_4DRIVES */
+#endif
 			case -10: /* minus10 */
 			case -9: /* minus9 */
 			case -8: /* minus8 */
@@ -1439,24 +1201,12 @@
 				hwif->serialized = hwif->mate->serialized = 1;
 				goto obsolete_option;
 
-			case -1: /* "noprobe" */
-				hwif->noprobe = 1;
-				goto obsolete_option;
-
-			case 1:	/* base */
-				vals[1] = vals[0] + 0x206; /* default ctl */
-			case 2: /* base,ctl */
-				vals[2] = 0;	/* default irq = probe for it */
-			case 3: /* base,ctl,irq */
-				memset(&hwregs, 0, sizeof(hwregs));
-				ide_init_hwif_ports(&hwregs, vals[0], vals[1], &hwif->irq);
-				memcpy(hwif->io_ports, hwregs.io_ports, sizeof(hwif->io_ports));
-				hwif->irq      = vals[2];
-				hwif->noprobe  = 0;
-				hwif->chipset  = ide_forced;
-				goto obsolete_option;
-
-			case 0: goto bad_option;
+			case -1:
+			case 0:
+			case 1:
+			case 2:
+			case 3:
+				goto bad_option;
 			default:
 				printk(" -- SUPPORT NOT CONFIGURED IN THIS KERNEL\n");
 				return 1;
@@ -1601,6 +1351,13 @@
 
 EXPORT_SYMBOL_GPL(ide_bus_type);
 
+static void ide_port_class_release(struct device *portdev)
+{
+	ide_hwif_t *hwif = dev_get_drvdata(portdev);
+
+	put_device(&hwif->gendev);
+}
+
 /*
  * This gets invoked once during initialization, to set *everything* up
  */
@@ -1621,11 +1378,23 @@
 		return ret;
 	}
 
+	ide_port_class = class_create(THIS_MODULE, "ide_port");
+	if (IS_ERR(ide_port_class)) {
+		ret = PTR_ERR(ide_port_class);
+		goto out_port_class;
+	}
+	ide_port_class->dev_release = ide_port_class_release;
+
 	init_ide_data();
 
 	proc_ide_create();
 
 	return 0;
+
+out_port_class:
+	bus_unregister(&ide_bus_type);
+
+	return ret;
 }
 
 #ifdef MODULE
@@ -1658,10 +1427,12 @@
 	int index;
 
 	for (index = 0; index < MAX_HWIFS; ++index)
-		ide_unregister(index, 0, 0);
+		ide_unregister(index);
 
 	proc_ide_destroy();
 
+	class_destroy(ide_port_class);
+
 	bus_unregister(&ide_bus_type);
 }
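
The /sys/class/ide_port pieces in this file form a complete lifecycle: the
class is created during init with a dev_release hook, each port's class
device is torn down via device_unregister(hwif->portdev) in ide_unregister(),
and cleanup_module() destroys the class. The registration side is not part of
these hunks; a sketch of what it presumably pairs with -- every call below is
an assumption inferred from the release hook above:

	/* assumed registration-side pairing (not in this diff) */
	static int ide_register_portdev_sketch(ide_hwif_t *hwif)
	{
		struct device *portdev;

		portdev = device_create(ide_port_class, &hwif->gendev,
					MKDEV(0, 0), hwif->name);
		if (IS_ERR(portdev))
			return PTR_ERR(portdev);

		dev_set_drvdata(portdev, hwif);
		get_device(&hwif->gendev);	/* dropped in ->dev_release */
		hwif->portdev = portdev;
		return 0;
	}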
 
diff --git a/drivers/ide/legacy/Makefile b/drivers/ide/legacy/Makefile
index 7043ec7..6939329 100644
--- a/drivers/ide/legacy/Makefile
+++ b/drivers/ide/legacy/Makefile
@@ -6,6 +6,7 @@
 obj-$(CONFIG_BLK_DEV_DTC2278)		+= dtc2278.o
 obj-$(CONFIG_BLK_DEV_HT6560B)		+= ht6560b.o
 obj-$(CONFIG_BLK_DEV_QD65XX)		+= qd65xx.o
+obj-$(CONFIG_BLK_DEV_4DRIVES)		+= ide-4drives.o
 
 obj-$(CONFIG_BLK_DEV_GAYLE)		+= gayle.o
 obj-$(CONFIG_BLK_DEV_FALCON_IDE)	+= falconide.o
diff --git a/drivers/ide/legacy/ali14xx.c b/drivers/ide/legacy/ali14xx.c
index d4d1a6b..bc8b1f8 100644
--- a/drivers/ide/legacy/ali14xx.c
+++ b/drivers/ide/legacy/ali14xx.c
@@ -200,6 +200,7 @@
 static int __init ali14xx_probe(void)
 {
 	static u8 idx[4] = { 0, 1, 0xff, 0xff };
+	hw_regs_t hw[2];
 
 	printk(KERN_DEBUG "ali14xx: base=0x%03x, regOn=0x%02x.\n",
 			  basePort, regOn);
@@ -210,6 +211,17 @@
 		return 1;
 	}
 
+	memset(&hw, 0, sizeof(hw));
+
+	ide_std_init_ports(&hw[0], 0x1f0, 0x3f6);
+	hw[0].irq = 14;
+
+	ide_std_init_ports(&hw[1], 0x170, 0x376);
+	hw[1].irq = 15;
+
+	ide_init_port_hw(&ide_hwifs[0], &hw[0]);
+	ide_init_port_hw(&ide_hwifs[1], &hw[1]);
+
 	ide_hwifs[0].set_pio_mode = &ali14xx_set_pio_mode;
 	ide_hwifs[1].set_pio_mode = &ali14xx_set_pio_mode;
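
The identical port setup recurs in the dtc2278, ht6560b, umc8672 and cmd640
hunks below (qd65xx picks one of the two by unit); the shared shape, exactly
as added here -- 0x1f0/0x3f6 with IRQ 14 and 0x170/0x376 with IRQ 15 being
the standard ISA primary/secondary decodes these VLB/ISA chips hard-wire:

	/* common legacy-port setup, lifted from the hunks (sketch) */
	static void legacy_init_ports_sketch(ide_hwif_t *hwif0,
					     ide_hwif_t *hwif1)
	{
		hw_regs_t hw[2];

		memset(&hw, 0, sizeof(hw));

		ide_std_init_ports(&hw[0], 0x1f0, 0x3f6);	/* primary */
		hw[0].irq = 14;

		ide_std_init_ports(&hw[1], 0x170, 0x376);	/* secondary */
		hw[1].irq = 15;

		ide_init_port_hw(hwif0, &hw[0]);
		ide_init_port_hw(hwif1, &hw[1]);
	}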
 
diff --git a/drivers/ide/legacy/dtc2278.c b/drivers/ide/legacy/dtc2278.c
index 73396f7..5f69cd2 100644
--- a/drivers/ide/legacy/dtc2278.c
+++ b/drivers/ide/legacy/dtc2278.c
@@ -103,6 +103,7 @@
 	unsigned long flags;
 	ide_hwif_t *hwif, *mate;
 	static u8 idx[4] = { 0, 1, 0xff, 0xff };
+	hw_regs_t hw[2];
 
 	hwif = &ide_hwifs[0];
 	mate = &ide_hwifs[1];
@@ -128,6 +129,17 @@
 #endif
 	local_irq_restore(flags);
 
+	memset(&hw, 0, sizeof(hw));
+
+	ide_std_init_ports(&hw[0], 0x1f0, 0x3f6);
+	hw[0].irq = 14;
+
+	ide_std_init_ports(&hw[1], 0x170, 0x376);
+	hw[1].irq = 15;
+
+	ide_init_port_hw(hwif, &hw[0]);
+	ide_init_port_hw(mate, &hw[1]);
+
 	hwif->set_pio_mode = &dtc2278_set_pio_mode;
 
 	ide_device_add(idx, &dtc2278_port_info);
diff --git a/drivers/ide/legacy/ht6560b.c b/drivers/ide/legacy/ht6560b.c
index 78ca68e..88fe907 100644
--- a/drivers/ide/legacy/ht6560b.c
+++ b/drivers/ide/legacy/ht6560b.c
@@ -82,7 +82,7 @@
  * out how they set up those cycle time interfacing values, as they call
  * them at Holtek. The IDESETUP.COM utility supplied with the drivers
  * figures out optimal values and feeds them to the drivers. I found out that
- * they use IDE_SELECT_REG to fetch timings to the ide board right after
+ * they use the Select register to fetch timings to the ide board right after
  * interface switching. After that it was quite easy to add code to
  * ht6560b.c.
  *
@@ -127,6 +127,7 @@
  */
 static void ht6560b_selectproc (ide_drive_t *drive)
 {
+	ide_hwif_t *hwif = drive->hwif;
 	unsigned long flags;
 	static u8 current_select = 0;
 	static u8 current_timing = 0;
@@ -155,8 +156,8 @@
 		/*
 		 * Set timing for this drive:
 		 */
-		outb(timing, IDE_SELECT_REG);
-		(void)inb(IDE_STATUS_REG);
+		outb(timing, hwif->io_ports[IDE_SELECT_OFFSET]);
+		(void)inb(hwif->io_ports[IDE_STATUS_OFFSET]);
 #ifdef DEBUG
 		printk("ht6560b: %s: select=%#x timing=%#x\n",
 			drive->name, select, timing);
@@ -193,9 +194,9 @@
 	 * Ht6560b autodetected
 	 */
 	outb(HT_CONFIG_DEFAULT, HT_CONFIG_PORT);
-	outb(HT_TIMING_DEFAULT, 0x1f6);  /* IDE_SELECT_REG */
-	(void) inb(0x1f7);               /* IDE_STATUS_REG */
-	
+	outb(HT_TIMING_DEFAULT, 0x1f6);	/* Select register */
+	(void)inb(0x1f7);		/* Status register */
+
 	printk("ht6560b " HT6560B_VERSION
 	       ": chipset detected and initialized"
 #ifdef DEBUG
@@ -339,6 +340,7 @@
 {
 	ide_hwif_t *hwif, *mate;
 	static u8 idx[4] = { 0, 1, 0xff, 0xff };
+	hw_regs_t hw[2];
 
 	if (probe_ht6560b == 0)
 		return -ENODEV;
@@ -357,6 +359,17 @@
 		goto release_region;
 	}
 
+	memset(&hw, 0, sizeof(hw));
+
+	ide_std_init_ports(&hw[0], 0x1f0, 0x3f6);
+	hw[0].irq = 14;
+
+	ide_std_init_ports(&hw[1], 0x170, 0x376);
+	hw[1].irq = 15;
+
+	ide_init_port_hw(hwif, &hw[0]);
+	ide_init_port_hw(mate, &hw[1]);
+
 	hwif->selectproc = &ht6560b_selectproc;
 	hwif->set_pio_mode = &ht6560b_set_pio_mode;
 
diff --git a/drivers/ide/legacy/ide-4drives.c b/drivers/ide/legacy/ide-4drives.c
new file mode 100644
index 0000000..ecd7f35
--- /dev/null
+++ b/drivers/ide/legacy/ide-4drives.c
@@ -0,0 +1,50 @@
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/ide.h>
+
+int probe_4drives = 0;
+
+module_param_named(probe, probe_4drives, bool, 0);
+MODULE_PARM_DESC(probe, "probe for generic IDE chipset with 4 drives/port");
+
+static int __init ide_4drives_init(void)
+{
+	ide_hwif_t *hwif, *mate;
+	u8 idx[4] = { 0, 1, 0xff, 0xff };
+	hw_regs_t hw;
+
+	if (probe_4drives == 0)
+		return -ENODEV;
+
+	hwif = &ide_hwifs[0];
+	mate = &ide_hwifs[1];
+
+	memset(&hw, 0, sizeof(hw));
+
+	ide_std_init_ports(&hw, 0x1f0, 0x3f6);
+	hw.irq = 14;
+	hw.chipset = ide_4drives;
+
+	ide_init_port_hw(hwif, &hw);
+	ide_init_port_hw(mate, &hw);
+
+	mate->drives[0].select.all ^= 0x20;
+	mate->drives[1].select.all ^= 0x20;
+
+	hwif->mate = mate;
+	mate->mate = hwif;
+
+	hwif->serialized = mate->serialized = 1;
+
+	ide_device_add(idx, NULL);
+
+	return 0;
+}
+
+module_init(ide_4drives_init);
+
+MODULE_AUTHOR("Bartlomiej Zolnierkiewicz");
+MODULE_DESCRIPTION("generic IDE chipset with 4 drives/port support");
+MODULE_LICENSE("GPL");
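
This new file extracts the old "ide0=four" special case from ide_setup()
(see the ide.c hunk above, where option -11 now merely sets probe_4drives)
into a standalone legacy host driver that shares one set of ports between two
hwifs. Given the module_param_named() above, probing must be requested
explicitly -- something like "modprobe ide-4drives probe=1" when built as a
module, or the equivalent built-in parameter on the kernel command line.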
diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/legacy/ide-cs.c
index 15ccf69..9a23b94 100644
--- a/drivers/ide/legacy/ide-cs.c
+++ b/drivers/ide/legacy/ide-cs.c
@@ -156,15 +156,15 @@
     hw.chipset = ide_pci;
     hw.dev = &handle->dev;
 
-    hwif = ide_deprecated_find_port(hw.io_ports[IDE_DATA_OFFSET]);
+    hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
     if (hwif == NULL)
 	return -1;
 
     i = hwif->index;
 
     if (hwif->present)
-	ide_unregister(i, 0, 0);
-    else if (!hwif->hold)
+	ide_unregister(i);
+    else
 	ide_init_port_data(hwif, i);
 
     ide_init_port_hw(hwif, &hw);
@@ -360,7 +360,7 @@
     if (info->ndev) {
 	/* FIXME: if this fails we need to queue the cleanup somehow
 	   -- need to investigate the required PCMCIA magic */
-	ide_unregister(info->hd, 0, 0);
+	ide_unregister(info->hd);
     }
     info->ndev = 0;
 
diff --git a/drivers/ide/legacy/ide_platform.c b/drivers/ide/legacy/ide_platform.c
index 688fcae..249651e 100644
--- a/drivers/ide/legacy/ide_platform.c
+++ b/drivers/ide/legacy/ide_platform.c
@@ -122,7 +122,7 @@
 {
 	ide_hwif_t *hwif = pdev->dev.driver_data;
 
-	ide_unregister(hwif->index, 0, 0);
+	ide_unregister(hwif->index);
 
 	return 0;
 }
diff --git a/drivers/ide/legacy/qd65xx.c b/drivers/ide/legacy/qd65xx.c
index 2f4f47a..7016bdf 100644
--- a/drivers/ide/legacy/qd65xx.c
+++ b/drivers/ide/legacy/qd65xx.c
@@ -352,9 +352,9 @@
 static int __init qd_probe(int base)
 {
 	ide_hwif_t *hwif;
+	u8 config, unit;
 	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
-	u8 config;
-	u8 unit;
+	hw_regs_t hw[2];
 
 	config = inb(QD_CONFIG_PORT);
 
@@ -363,6 +363,14 @@
 
 	unit = ! (config & QD_CONFIG_IDE_BASEPORT);
 
+	memset(&hw, 0, sizeof(hw));
+
+	ide_std_init_ports(&hw[0], 0x1f0, 0x3f6);
+	hw[0].irq = 14;
+
+	ide_std_init_ports(&hw[1], 0x170, 0x376);
+	hw[1].irq = 15;
+
 	if ((config & 0xf0) == QD_CONFIG_QD6500) {
 
 		if (qd_testreg(base)) return 1;		/* bad register */
@@ -379,6 +387,8 @@
 			return 1;
 		}
 
+		ide_init_port_hw(hwif, &hw[unit]);
+
 		qd_setup(hwif, base, config);
 
 		hwif->port_init_devs = qd6500_port_init_devs;
@@ -416,6 +426,8 @@
 			printk(KERN_INFO "%s: qd6580: single IDE board\n",
 					 hwif->name);
 
+			ide_init_port_hw(hwif, &hw[unit]);
+
 			qd_setup(hwif, base, config | (control << 8));
 
 			hwif->port_init_devs = qd6580_port_init_devs;
@@ -435,6 +447,9 @@
 			printk(KERN_INFO "%s&%s: qd6580: dual IDE board\n",
 					hwif->name, mate->name);
 
+			ide_init_port_hw(hwif, &hw[0]);
+			ide_init_port_hw(mate, &hw[1]);
+
 			qd_setup(hwif, base, config | (control << 8));
 
 			hwif->port_init_devs = qd6580_port_init_devs;
diff --git a/drivers/ide/legacy/umc8672.c b/drivers/ide/legacy/umc8672.c
index 5696ba0..bc19448 100644
--- a/drivers/ide/legacy/umc8672.c
+++ b/drivers/ide/legacy/umc8672.c
@@ -130,6 +130,7 @@
 {
 	unsigned long flags;
 	static u8 idx[4] = { 0, 1, 0xff, 0xff };
+	hw_regs_t hw[2];
 
 	if (!request_region(0x108, 2, "umc8672")) {
 		printk(KERN_ERR "umc8672: ports 0x108-0x109 already in use.\n");
@@ -148,6 +149,17 @@
 	umc_set_speeds (current_speeds);
 	local_irq_restore(flags);
 
+	memset(&hw, 0, sizeof(hw));
+
+	ide_std_init_ports(&hw[0], 0x1f0, 0x3f6);
+	hw[0].irq = 14;
+
+	ide_std_init_ports(&hw[1], 0x170, 0x376);
+	hw[1].irq = 15;
+
+	ide_init_port_hw(&ide_hwifs[0], &hw[0]);
+	ide_init_port_hw(&ide_hwifs[1], &hw[1]);
+
 	ide_hwifs[0].set_pio_mode = &umc_set_pio_mode;
 	ide_hwifs[1].set_pio_mode = &umc_set_pio_mode;
 
diff --git a/drivers/ide/mips/au1xxx-ide.c b/drivers/ide/mips/au1xxx-ide.c
index 85c016b..9b62824 100644
--- a/drivers/ide/mips/au1xxx-ide.c
+++ b/drivers/ide/mips/au1xxx-ide.c
@@ -613,9 +613,6 @@
 
 	hwif->dev = dev;
 
-	/* hold should be on in all cases */
-	hwif->hold                      = 1;
-
 	hwif->mmio  = 1;
 
 	/* If the user has selected DDMA assisted copies,
@@ -673,7 +670,7 @@
 	ide_hwif_t *hwif = dev_get_drvdata(dev);
 	_auide_hwif *ahwif = &auide_hwif;
 
-	ide_unregister(hwif->index, 0, 0);
+	ide_unregister(hwif->index);
 
 	iounmap((void *)ahwif->regbase);
 
diff --git a/drivers/ide/pci/cmd640.c b/drivers/ide/pci/cmd640.c
index 29fbc5e..a1cfe03 100644
--- a/drivers/ide/pci/cmd640.c
+++ b/drivers/ide/pci/cmd640.c
@@ -409,19 +409,9 @@
  */
 static void __init setup_device_ptrs (void)
 {
-	unsigned int i;
+	cmd_hwif0 = &ide_hwifs[0];
+	cmd_hwif1 = &ide_hwifs[1];
 
-	cmd_hwif0 = &ide_hwifs[0]; /* default, if not found below */
-	cmd_hwif1 = &ide_hwifs[1]; /* default, if not found below */
-	for (i = 0; i < MAX_HWIFS; i++) {
-		ide_hwif_t *hwif = &ide_hwifs[i];
-		if (hwif->chipset == ide_unknown || hwif->chipset == ide_forced) {
-			if (hwif->io_ports[IDE_DATA_OFFSET] == 0x1f0)
-				cmd_hwif0 = hwif;
-			else if (hwif->io_ports[IDE_DATA_OFFSET] == 0x170)
-				cmd_hwif1 = hwif;
-		}
-	}
 	cmd_drives[0] = &cmd_hwif0->drives[0];
 	cmd_drives[1] = &cmd_hwif0->drives[1];
 	cmd_drives[2] = &cmd_hwif1->drives[0];
@@ -724,6 +714,7 @@
 	unsigned int index;
 	u8 b, cfr;
 	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+	hw_regs_t hw[2];
 
 	if (cmd640_vlb && probe_for_cmd640_vlb()) {
 		bus_type = "VLB";
@@ -762,12 +753,23 @@
 		return 0;
 	}
 
+	memset(&hw, 0, sizeof(hw));
+
+	ide_std_init_ports(&hw[0], 0x1f0, 0x3f6);
+	hw[0].irq = 14;
+
+	ide_std_init_ports(&hw[1], 0x170, 0x376);
+	hw[1].irq = 15;
+
+	printk(KERN_INFO "cmd640: buggy cmd640%c interface on %s, config=0x%02x"
+			 "\n", 'a' + cmd640_chip_version - 1, bus_type, cfr);
+
 	/*
 	 * Initialize data for primary port
 	 */
 	setup_device_ptrs ();
-	printk("%s: buggy cmd640%c interface on %s, config=0x%02x\n",
-	       cmd_hwif0->name, 'a' + cmd640_chip_version - 1, bus_type, cfr);
+
+	ide_init_port_hw(cmd_hwif0, &hw[0]);
 #ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
 	cmd_hwif0->set_pio_mode = &cmd640_set_pio_mode;
 #endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
@@ -787,8 +789,7 @@
 	/*
 	 * Try to enable the secondary interface, if not already enabled
 	 */
-	if (cmd_hwif1->noprobe ||
-	    (cmd_hwif1->drives[0].noprobe && cmd_hwif1->drives[1].noprobe)) {
+	if (cmd_hwif1->drives[0].noprobe && cmd_hwif1->drives[1].noprobe) {
 		port2 = "not probed";
 	} else {
 		b = get_cmd640_reg(CNTRL);
@@ -820,6 +821,7 @@
 	 * Initialize data for secondary cmd640 port, if enabled
 	 */
 	if (second_port_cmd640) {
+		ide_init_port_hw(cmd_hwif1, &hw[1]);
 #ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
 		cmd_hwif1->set_pio_mode = &cmd640_set_pio_mode;
 #endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
diff --git a/drivers/ide/pci/delkin_cb.c b/drivers/ide/pci/delkin_cb.c
index 3f9cd64..961698d 100644
--- a/drivers/ide/pci/delkin_cb.c
+++ b/drivers/ide/pci/delkin_cb.c
@@ -78,15 +78,15 @@
 	hw.irq = dev->irq;
 	hw.chipset = ide_pci;		/* this enables IRQ sharing */
 
-	hwif = ide_deprecated_find_port(hw.io_ports[IDE_DATA_OFFSET]);
+	hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
 	if (hwif == NULL)
 		goto out_disable;
 
 	i = hwif->index;
 
 	if (hwif->present)
-		ide_unregister(i, 0, 0);
-	else if (!hwif->hold)
+		ide_unregister(i);
+	else
 		ide_init_port_data(hwif, i);
 
 	ide_init_port_hw(hwif, &hw);
@@ -120,7 +120,7 @@
 	ide_hwif_t *hwif = pci_get_drvdata(dev);
 
 	if (hwif)
-		ide_unregister(hwif->index, 0, 0);
+		ide_unregister(hwif->index);
 
 	pci_disable_device(dev);
 }
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c
index 6357bb6..82d0e31 100644
--- a/drivers/ide/pci/hpt366.c
+++ b/drivers/ide/pci/hpt366.c
@@ -760,7 +760,7 @@
 		}
 	} else
 		outb(mask ? (drive->ctl | 2) : (drive->ctl & ~2),
-		     IDE_CONTROL_REG);
+		     hwif->io_ports[IDE_CONTROL_OFFSET]);
 }
 
 /*
@@ -929,64 +929,6 @@
 	hpt3xxn_set_clock(HWIF(drive), rq_data_dir(rq) ? 0x23 : 0x21);
 }
 
-/* 
- * Set/get power state for a drive.
- * NOTE: affects both drives on each channel.
- *
- * When we turn the power back on, we need to re-initialize things.
- */
-#define TRISTATE_BIT  0x8000
-
-static int hpt3xx_busproc(ide_drive_t *drive, int state)
-{
-	ide_hwif_t *hwif	= HWIF(drive);
-	struct pci_dev *dev	= to_pci_dev(hwif->dev);
-	u8  mcr_addr		= hwif->select_data + 2;
-	u8  resetmask		= hwif->channel ? 0x80 : 0x40;
-	u8  bsr2		= 0;
-	u16 mcr			= 0;
-
-	hwif->bus_state = state;
-
-	/* Grab the status. */
-	pci_read_config_word(dev, mcr_addr, &mcr);
-	pci_read_config_byte(dev, 0x59, &bsr2);
-
-	/*
-	 * Set the state. We don't set it if we don't need to do so.
-	 * Make sure that the drive knows that it has failed if it's off.
-	 */
-	switch (state) {
-	case BUSSTATE_ON:
-		if (!(bsr2 & resetmask))
-			return 0;
-		hwif->drives[0].failures = hwif->drives[1].failures = 0;
-
-		pci_write_config_byte(dev, 0x59, bsr2 & ~resetmask);
-		pci_write_config_word(dev, mcr_addr, mcr & ~TRISTATE_BIT);
-		return 0;
-	case BUSSTATE_OFF:
-		if ((bsr2 & resetmask) && !(mcr & TRISTATE_BIT))
-			return 0;
-		mcr &= ~TRISTATE_BIT;
-		break;
-	case BUSSTATE_TRISTATE:
-		if ((bsr2 & resetmask) &&  (mcr & TRISTATE_BIT))
-			return 0;
-		mcr |= TRISTATE_BIT;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	hwif->drives[0].failures = hwif->drives[0].max_failures + 1;
-	hwif->drives[1].failures = hwif->drives[1].max_failures + 1;
-
-	pci_write_config_word(dev, mcr_addr, mcr);
-	pci_write_config_byte(dev, 0x59, bsr2 | resetmask);
-	return 0;
-}
-
 /**
  *	hpt37x_calibrate_dpll	-	calibrate the DPLL
  *	@dev: PCI device
@@ -1334,7 +1276,6 @@
 
 	hwif->quirkproc		= &hpt3xx_quirkproc;
 	hwif->maskproc		= &hpt3xx_maskproc;
-	hwif->busproc		= &hpt3xx_busproc;
 
 	hwif->udma_filter	= &hpt3xx_udma_filter;
 	hwif->mdma_filter	= &hpt3xx_mdma_filter;
diff --git a/drivers/ide/pci/ns87415.c b/drivers/ide/pci/ns87415.c
index bf0d3b2..7551332 100644
--- a/drivers/ide/pci/ns87415.c
+++ b/drivers/ide/pci/ns87415.c
@@ -181,6 +181,10 @@
 	return 1;
 }
 
+#ifndef ide_default_irq
+#define ide_default_irq(irq) 0
+#endif
+
 static void __devinit init_hwif_ns87415 (ide_hwif_t *hwif)
 {
 	struct pci_dev *dev = to_pci_dev(hwif->dev);
diff --git a/drivers/ide/pci/scc_pata.c b/drivers/ide/pci/scc_pata.c
index 238e3e1..ef07c7a 100644
--- a/drivers/ide/pci/scc_pata.c
+++ b/drivers/ide/pci/scc_pata.c
@@ -334,7 +334,8 @@
 
 	/* errata A308 workaround: Step5 (check data loss) */
 	/* We don't check non ide_disk because it is limited to UDMA4 */
-	if (!(in_be32((void __iomem *)IDE_ALTSTATUS_REG) & ERR_STAT) &&
+	if (!(in_be32((void __iomem *)hwif->io_ports[IDE_ALTSTATUS_OFFSET])
+	      & ERR_STAT) &&
 	    drive->media == ide_disk && drive->current_speed > XFER_UDMA_4) {
 		reg = in_be32((void __iomem *)intsts_port);
 		if (!(reg & INTSTS_ACTEINT)) {
@@ -437,7 +438,8 @@
 	u32 int_stat = in_be32((void __iomem *)hwif->dma_base + 0x014);
 
 	/* SCC errata A252,A308 workaround: Step4 */
-	if ((in_be32((void __iomem *)IDE_ALTSTATUS_REG) & ERR_STAT) &&
+	if ((in_be32((void __iomem *)hwif->io_ports[IDE_ALTSTATUS_OFFSET])
+	     & ERR_STAT) &&
 	    (int_stat & INTSTS_INTRQ))
 		return 1;
 
@@ -523,6 +525,43 @@
 	return -ENOMEM;
 }
 
+static int scc_ide_setup_pci_device(struct pci_dev *dev,
+				    const struct ide_port_info *d)
+{
+	struct scc_ports *ports = pci_get_drvdata(dev);
+	ide_hwif_t *hwif = NULL;
+	hw_regs_t hw;
+	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+	int i;
+
+	for (i = 0; i < MAX_HWIFS; i++) {
+		hwif = &ide_hwifs[i];
+		if (hwif->chipset == ide_unknown)
+			break; /* pick an unused entry */
+	}
+	if (i == MAX_HWIFS) {
+		printk(KERN_ERR "%s: too many IDE interfaces, "
+				"no room in table\n", SCC_PATA_NAME);
+		return -ENOMEM;
+	}
+
+	memset(&hw, 0, sizeof(hw));
+	for (i = IDE_DATA_OFFSET; i <= IDE_CONTROL_OFFSET; i++)
+		hw.io_ports[i] = ports->dma + 0x20 + i * 4;
+	hw.irq = dev->irq;
+	hw.dev = &dev->dev;
+	hw.chipset = ide_pci;
+	ide_init_port_hw(hwif, &hw);
+	hwif->dev = &dev->dev;
+	hwif->cds = d;
+
+	idx[0] = hwif->index;
+
+	ide_device_add(idx, d);
+
+	return 0;
+}
+
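
The io_ports loop above reproduces the explicit register table removed from
init_hwif_scc() below: the data register sits at dma_base + 0x20 and each
subsequent taskfile register lies 4 bytes further on. Spelled out for the
first and last entries (sketch, assuming IDE_CONTROL_OFFSET is index 8,
consistent with the deleted table ending at +0x40):

	hw.io_ports[IDE_DATA_OFFSET]    = ports->dma + 0x20;	/* i = 0 */
	hw.io_ports[IDE_ERROR_OFFSET]   = ports->dma + 0x24;	/* i = 1 */
	/* ... */
	hw.io_ports[IDE_CONTROL_OFFSET] = ports->dma + 0x40;	/* i = 8 */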
 /**
  *	init_setup_scc	-	set up an SCC PATA Controller
  *	@dev: PCI device
@@ -545,10 +584,13 @@
 	struct scc_ports *ports;
 	int rc;
 
+	rc = pci_enable_device(dev);
+	if (rc)
+		goto end;
+
 	rc = setup_mmio_scc(dev, d->name);
-	if (rc < 0) {
-		return rc;
-	}
+	if (rc < 0)
+		goto end;
 
 	ports = pci_get_drvdata(dev);
 	ctl_base = ports->ctl;
@@ -583,7 +625,10 @@
 	out_be32((void*)mode_port, MODE_JCUSFEN);
 	out_be32((void*)intmask_port, INTMASK_MSK);
 
-	return ide_setup_pci_device(dev, d);
+	rc = scc_ide_setup_pci_device(dev, d);
+
+ end:
+	return rc;
 }
 
 /**
@@ -610,17 +655,6 @@
 	hwif->OUTSW = scc_ide_outsw;
 	hwif->OUTSL = scc_ide_outsl;
 
-	hwif->io_ports[IDE_DATA_OFFSET] = dma_base + 0x20;
-	hwif->io_ports[IDE_ERROR_OFFSET] = dma_base + 0x24;
-	hwif->io_ports[IDE_NSECTOR_OFFSET] = dma_base + 0x28;
-	hwif->io_ports[IDE_SECTOR_OFFSET] = dma_base + 0x2c;
-	hwif->io_ports[IDE_LCYL_OFFSET] = dma_base + 0x30;
-	hwif->io_ports[IDE_HCYL_OFFSET] = dma_base + 0x34;
-	hwif->io_ports[IDE_SELECT_OFFSET] = dma_base + 0x38;
-	hwif->io_ports[IDE_STATUS_OFFSET] = dma_base + 0x3c;
-	hwif->io_ports[IDE_CONTROL_OFFSET] = dma_base + 0x40;
-
-	hwif->irq = dev->irq;
 	hwif->dma_base = dma_base;
 	hwif->config_data = ports->ctl;
 	hwif->mmio = 1;
@@ -736,7 +770,7 @@
 		hwif->dmatable_cpu = NULL;
 	}
 
-	ide_unregister(hwif->index, 0, 0);
+	ide_unregister(hwif->index);
 
 	hwif->chipset = ide_unknown;
 	iounmap((void*)ports->dma);
diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/pci/sgiioc4.c
index 0546264..9d1a303 100644
--- a/drivers/ide/pci/sgiioc4.c
+++ b/drivers/ide/pci/sgiioc4.c
@@ -112,10 +112,9 @@
 sgiioc4_maskproc(ide_drive_t * drive, int mask)
 {
 	writeb(mask ? (drive->ctl | 2) : (drive->ctl & ~2),
-	       (void __iomem *)IDE_CONTROL_REG);
+	       (void __iomem *)drive->hwif->io_ports[IDE_CONTROL_OFFSET]);
 }
 
-
 static int
 sgiioc4_checkirq(ide_hwif_t * hwif)
 {
@@ -142,18 +141,18 @@
 	intr_reg = readl((void __iomem *)other_ir);
 	if (intr_reg & 0x03) { /* Valid IOC4-IDE interrupt */
 		/*
-		 * Using sgiioc4_INB to read the IDE_STATUS_REG has a side effect
-		 * of clearing the interrupt.  The first read should clear it
-		 * if it is set.  The second read should return a "clear" status
-		 * if it got cleared.  If not, then spin for a bit trying to
-		 * clear it.
+		 * Using sgiioc4_INB to read the Status register has a side
+		 * effect of clearing the interrupt.  The first read should
+		 * clear it if it is set.  The second read should return
+		 * a "clear" status if it got cleared.  If not, then spin
+		 * for a bit trying to clear it.
 		 */
-		u8 stat = sgiioc4_INB(IDE_STATUS_REG);
+		u8 stat = sgiioc4_INB(hwif->io_ports[IDE_STATUS_OFFSET]);
 		int count = 0;
-		stat = sgiioc4_INB(IDE_STATUS_REG);
+		stat = sgiioc4_INB(hwif->io_ports[IDE_STATUS_OFFSET]);
 		while ((stat & 0x80) && (count++ < 100)) {
 			udelay(1);
-			stat = sgiioc4_INB(IDE_STATUS_REG);
+			stat = sgiioc4_INB(hwif->io_ports[IDE_STATUS_OFFSET]);
 		}
 
 		if (intr_reg & 0x02) {
@@ -562,7 +561,6 @@
 						clear interrupts */
 	hwif->maskproc = &sgiioc4_maskproc;	/* Mask on/off NIEN register */
 	hwif->quirkproc = NULL;
-	hwif->busproc = NULL;
 
 	hwif->INB = &sgiioc4_INB;
 
diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c
index 8d624af..b6be1b4 100644
--- a/drivers/ide/pci/siimage.c
+++ b/drivers/ide/pci/siimage.c
@@ -370,48 +370,6 @@
 }
 
 /**
- *	sil_sata_busproc	-	bus isolation IOCTL
- *	@drive: drive to isolate/restore
- *	@state: bus state to set
- *
- *	Used by the SII3112 to handle bus isolation. As this is a 
- *	SATA controller the work required is quite limited, we 
- *	just have to clean up the statistics
- */
-
-static int sil_sata_busproc(ide_drive_t * drive, int state)
-{
-	ide_hwif_t *hwif	= HWIF(drive);
-	struct pci_dev *dev	= to_pci_dev(hwif->dev);
-	u32 stat_config		= 0;
-	unsigned long addr	= siimage_selreg(hwif, 0);
-
-	if (hwif->mmio)
-		stat_config = readl((void __iomem *)addr);
-	else
-		pci_read_config_dword(dev, addr, &stat_config);
-
-	switch (state) {
-		case BUSSTATE_ON:
-			hwif->drives[0].failures = 0;
-			hwif->drives[1].failures = 0;
-			break;
-		case BUSSTATE_OFF:
-			hwif->drives[0].failures = hwif->drives[0].max_failures + 1;
-			hwif->drives[1].failures = hwif->drives[1].max_failures + 1;
-			break;
-		case BUSSTATE_TRISTATE:
-			hwif->drives[0].failures = hwif->drives[0].max_failures + 1;
-			hwif->drives[1].failures = hwif->drives[1].max_failures + 1;
-			break;
-		default:
-			return -EINVAL;
-	}
-	hwif->bus_state = state;
-	return 0;
-}
-
-/**
  *	sil_sata_reset_poll	-	wait for SATA reset
  *	@drive: drive we are resetting
  *
@@ -818,7 +776,6 @@
 	if (sata) {
 		static int first = 1;
 
-		hwif->busproc = &sil_sata_busproc;
 		hwif->reset_poll = &sil_sata_reset_poll;
 		hwif->pre_reset = &sil_sata_pre_reset;
 		hwif->udma_filter = &sil_sata_udma_filter;
diff --git a/drivers/ide/pci/sl82c105.c b/drivers/ide/pci/sl82c105.c
index ee261ae..1f00251 100644
--- a/drivers/ide/pci/sl82c105.c
+++ b/drivers/ide/pci/sl82c105.c
@@ -328,6 +328,10 @@
 	.enablebits	= {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
 	.host_flags	= IDE_HFLAG_IO_32BIT |
 			  IDE_HFLAG_UNMASK_IRQS |
+/* FIXME: check for Compatibility mode in generic IDE PCI code */
+#if defined(CONFIG_LOPEC) || defined(CONFIG_SANDPOINT)
+			  IDE_HFLAG_FORCE_LEGACY_IRQS |
+#endif
 			  IDE_HFLAG_NO_AUTODMA |
 			  IDE_HFLAG_BOOTABLE,
 	.pio_mask	= ATA_PIO5,
diff --git a/drivers/ide/pci/tc86c001.c b/drivers/ide/pci/tc86c001.c
index 2ef2ed2..1e4a626 100644
--- a/drivers/ide/pci/tc86c001.c
+++ b/drivers/ide/pci/tc86c001.c
@@ -126,40 +126,6 @@
 	ide_dma_start(drive);
 }
 
-static int tc86c001_busproc(ide_drive_t *drive, int state)
-{
-	ide_hwif_t *hwif	= HWIF(drive);
-	unsigned long sc_base	= hwif->config_data;
-	u16 scr1;
-
-	/* System Control 1 Register bit 11 (ATA Hard Reset) read */
-	scr1 = inw(sc_base + 0x00);
-
-	switch (state) {
-		case BUSSTATE_ON:
-			if (!(scr1 & 0x0800))
-				return 0;
-			scr1 &= ~0x0800;
-
-			hwif->drives[0].failures = hwif->drives[1].failures = 0;
-			break;
-		case BUSSTATE_OFF:
-			if (scr1 & 0x0800)
-				return 0;
-			scr1 |= 0x0800;
-
-			hwif->drives[0].failures = hwif->drives[0].max_failures + 1;
-			hwif->drives[1].failures = hwif->drives[1].max_failures + 1;
-			break;
-		default:
-			return -EINVAL;
-	}
-
-	/* System Control 1 Register bit 11 (ATA Hard Reset) write */
-	outw(scr1, sc_base + 0x00);
-	return 0;
-}
-
 static u8 __devinit tc86c001_cable_detect(ide_hwif_t *hwif)
 {
 	struct pci_dev *dev = to_pci_dev(hwif->dev);
@@ -194,8 +160,6 @@
 	hwif->set_pio_mode = &tc86c001_set_pio_mode;
 	hwif->set_dma_mode = &tc86c001_set_mode;
 
-	hwif->busproc	= &tc86c001_busproc;
-
 	hwif->cable_detect = tc86c001_cable_detect;
 
 	if (!hwif->dma_base)
diff --git a/drivers/ide/ppc/mpc8xx.c b/drivers/ide/ppc/mpc8xx.c
index ebaba01..a784a97 100644
--- a/drivers/ide/ppc/mpc8xx.c
+++ b/drivers/ide/ppc/mpc8xx.c
@@ -99,32 +99,6 @@
 /* Make clock cycles and always round up */
 #define PCMCIA_MK_CLKS( t, T ) (( (t) * ((T)/1000000) + 999U ) / 1000U )
 
-
-
-/*
- * IDE stuff.
- */
-static int
-m8xx_ide_default_irq(unsigned long base)
-{
-#ifdef CONFIG_BLK_DEV_MPC8xx_IDE
-	if (base >= MAX_HWIFS)
-		return 0;
-
-	printk("[%d] m8xx_ide_default_irq %d\n",__LINE__,ioport_dsc[base].irq);
-	
-	return (ioport_dsc[base].irq);
-#else
-        return 9;
-#endif
-}
-
-static unsigned long
-m8xx_ide_default_io_base(int index)
-{
-        return index;
-}
-
 #define M8XX_PCMCIA_CD2(slot)      (0x10000000 >> (slot << 4))
 #define M8XX_PCMCIA_CD1(slot)      (0x08000000 >> (slot << 4))
 
@@ -149,12 +123,11 @@
  */
 
 /*
- * m8xx_ide_init_hwif_ports for a direct IDE interface _using_
+ * m8xx_ide_init_ports() for a direct IDE interface _using_
+ * MPC8xx's internal PCMCIA interface
  */
 #if defined(CONFIG_IDE_8xx_PCCARD) || defined(CONFIG_IDE_8xx_DIRECT)
-static void
-m8xx_ide_init_hwif_ports(hw_regs_t *hw, unsigned long data_port, 
-		unsigned long ctrl_port, int *irq)
+static void __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port)
 {
 	unsigned long *p = hw->io_ports;
 	int i;
@@ -173,8 +146,6 @@
 	unsigned long base;
 
 	*p = 0;
-	if (irq)
-		*irq = 0;
 
 	pcmp = (pcmconf8xx_t *)(&(((immap_t *)IMAP_ADDR)->im_pcmcia));
 
@@ -248,9 +219,6 @@
 		}
 	}
 
-	if (data_port >= MAX_HWIFS)
-		return;
-
 	if (_slot_ == -1) {
 		printk ("PCMCIA slot has not been defined! Using A as default\n");
 		_slot_ = 0;
@@ -292,11 +260,13 @@
 	 	*p++ = base + ioport_dsc[data_port].reg_off[i];
 	}
 
-	if (irq) {
+	hw->irq = ioport_dsc[data_port].irq;
+	hw->ack_intr = (ide_ack_intr_t *)ide_interrupt_ack;
+
 #ifdef CONFIG_IDE_8xx_PCCARD
+	{
 		unsigned int reg;
 
-		*irq = ioport_dsc[data_port].irq;
 		if (_slot_)
 			pgcrx = &((immap_t *) IMAP_ADDR)->im_pcmcia.pcmc_pgcrb;
 		else
@@ -306,14 +276,11 @@
 		reg |= mk_int_int_mask (pcmcia_schlvl) << 24;
 		reg |= mk_int_int_mask (pcmcia_schlvl) << 16;
 		*pgcrx = reg;
-#else	/* direct connected IDE drive, i.e. external IRQ, not the PCMCIA irq */
-		*irq = ioport_dsc[data_port].irq;
-#endif	/* CONFIG_IDE_8xx_PCCARD */
 	}
+#endif	/* CONFIG_IDE_8xx_PCCARD */
 
 	ide_hwifs[data_port].pio_mask = ATA_PIO4;
 	ide_hwifs[data_port].set_pio_mode = m8xx_ide_set_pio_mode;
-	ide_hwifs[data_port].ack_intr = (ide_ack_intr_t *)ide_interrupt_ack;
 
 	/* Enable Harddisk Interrupt,
 	 * and make it edge sensitive
@@ -329,16 +296,15 @@
 	/* Enable falling edge irq */
 	pcmp->pcmc_per = 0x100000 >> (16 * _slot_);
 #endif	/* CONFIG_IDE_8xx_PCCARD */
-}	/* m8xx_ide_init_hwif_ports() using 8xx internal PCMCIA interface */
+}
 #endif /* CONFIG_IDE_8xx_PCCARD || CONFIG_IDE_8xx_DIRECT */
 
 /*
- * m8xx_ide_init_hwif_ports for a direct IDE interface _not_ using
+ * m8xx_ide_init_ports() for a direct IDE interface _not_ using
  * MPC8xx's internal PCMCIA interface
  */
 #if defined(CONFIG_IDE_EXT_DIRECT)
-void m8xx_ide_init_hwif_ports (hw_regs_t *hw,
-	unsigned long data_port, unsigned long ctrl_port, int *irq)
+static void __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port)
 {
 	unsigned long *p = hw->io_ports;
 	int i;
@@ -349,8 +315,6 @@
 	unsigned long base;
 
 	*p = 0;
-	if (irq)
-		*irq = 0;
 
 	if (!ide_base) {
 
@@ -372,9 +336,6 @@
 #endif
 	}
 
-	if (data_port >= MAX_HWIFS)
-		return;
-
 	base = ide_base + ioport_dsc[data_port].base_off;
 #ifdef DEBUG
 	printk ("base: %08x + %08x = %08x\n",
@@ -392,14 +353,12 @@
 	 	*p++ = base + ioport_dsc[data_port].reg_off[i];
 	}
 
-	if (irq) {
-		/* direct connected IDE drive, i.e. external IRQ */
-		*irq = ioport_dsc[data_port].irq;
-	}
+	/* directly connected IDE drive, i.e. external IRQ */
+	hw->irq = ioport_dsc[data_port].irq;
+	hw->ack_intr = (ide_ack_intr_t *)ide_interrupt_ack;
 
 	ide_hwifs[data_port].pio_mask = ATA_PIO4;
 	ide_hwifs[data_port].set_pio_mode = m8xx_ide_set_pio_mode;
-	ide_hwifs[data_port].ack_intr = (ide_ack_intr_t *)ide_interrupt_ack;
 
 	/* Enable Harddisk Interrupt,
 	 * and make it edge sensitive
@@ -407,8 +366,7 @@
 	/* (11-18) Set edge detect for irq, no wakeup from low power mode */
 	((immap_t *) IMAP_ADDR)->im_siu_conf.sc_siel |=
 			(0x80000000 >> ioport_dsc[data_port].irq);
-}	/* m8xx_ide_init_hwif_ports() for CONFIG_IDE_8xx_DIRECT */ 
-
+}
 #endif	/* CONFIG_IDE_8xx_DIRECT */
 
 
@@ -829,20 +787,20 @@
 	return (0);	/* don't know */
 }
 
-void m8xx_ide_init(void)
-{
-	ppc_ide_md.default_irq          = m8xx_ide_default_irq;
-	ppc_ide_md.default_io_base      = m8xx_ide_default_io_base;
-	ppc_ide_md.ide_init_hwif        = m8xx_ide_init_hwif_ports;
-}
-
 static int __init mpc8xx_ide_probe(void)
 {
+	hw_regs_t hw;
 	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
 
 #ifdef IDE0_BASE_OFFSET
+	memset(&hw, 0, sizeof(hw));
+	m8xx_ide_init_ports(&hw, 0);
+	ide_init_port_hw(&ide_hwifs[0], &hw);
 	idx[0] = 0;
 #ifdef IDE1_BASE_OFFSET
+	memset(&hw, 0, sizeof(hw));
+	m8xx_ide_init_ports(&hw, 1);
+	ide_init_port_hw(&ide_hwifs[1], &hw);
 	idx[1] = 1;
 #endif
 #endif
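
The mpc8xx probe above now follows the IDE core's hw_regs_t convention: zero a stack descriptor, let the port-init helper fill in io_ports[], hw->irq and hw->ack_intr, then hand it to ide_init_port_hw() to copy into the hwif. A minimal sketch of that flow; my_fill_ports() is a hypothetical stand-in for m8xx_ide_init_ports():

    /* Sketch of the hw_regs_t probe pattern; my_fill_ports() is a
     * hypothetical stand-in for the board-specific port setup. */
    static int __init my_ide_probe(void)
    {
            hw_regs_t hw;

            memset(&hw, 0, sizeof(hw));     /* start from a clean descriptor */
            my_fill_ports(&hw, 0);          /* fill io_ports[], hw.irq */
            ide_init_port_hw(&ide_hwifs[0], &hw);
            return 0;
    }
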
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c
index d9ca52e..88619b5 100644
--- a/drivers/ide/ppc/pmac.c
+++ b/drivers/ide/ppc/pmac.c
@@ -80,7 +80,6 @@
 } pmac_ide_hwif_t;
 
 static pmac_ide_hwif_t pmac_ide[MAX_HWIFS];
-static int pmac_ide_count;
 
 enum {
 	controller_ohare,	/* OHare based */
@@ -419,38 +418,8 @@
 
 #endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
 
-/*
- * N.B. this can't be an initfunc, because the media-bay task can
- * call ide_[un]register at any time.
- */
-void
-pmac_ide_init_hwif_ports(hw_regs_t *hw,
-			      unsigned long data_port, unsigned long ctrl_port,
-			      int *irq)
-{
-	int i, ix;
-
-	if (data_port == 0)
-		return;
-
-	for (ix = 0; ix < MAX_HWIFS; ++ix)
-		if (data_port == pmac_ide[ix].regbase)
-			break;
-
-	if (ix >= MAX_HWIFS)
-		return;		/* not an IDE PMAC interface */
-
-	for (i = 0; i < 8; ++i)
-		hw->io_ports[i] = data_port + i * 0x10;
-	hw->io_ports[8] = data_port + 0x160;
-
-	if (irq != NULL)
-		*irq = pmac_ide[ix].irq;
-
-	hw->dev = &pmac_ide[ix].mdev->ofdev.dev;
-}
-
-#define PMAC_IDE_REG(x) ((void __iomem *)(IDE_DATA_REG+(x)))
+#define PMAC_IDE_REG(x) \
+	((void __iomem *)((drive)->hwif->io_ports[IDE_DATA_OFFSET] + (x)))
 
 /*
  * Apply the timings of the proper unit (master/slave) to the shared
@@ -886,58 +855,6 @@
 	pmif->timings[2] = pmif->timings[3] = value2;
 }
 
-unsigned long
-pmac_ide_get_base(int index)
-{
-	return pmac_ide[index].regbase;
-}
-
-int
-pmac_ide_check_base(unsigned long base)
-{
-	int ix;
-	
- 	for (ix = 0; ix < MAX_HWIFS; ++ix)
-		if (base == pmac_ide[ix].regbase)
-			return ix;
-	return -1;
-}
-
-int
-pmac_ide_get_irq(unsigned long base)
-{
-	int ix;
-
-	for (ix = 0; ix < MAX_HWIFS; ++ix)
-		if (base == pmac_ide[ix].regbase)
-			return pmac_ide[ix].irq;
-	return 0;
-}
-
-static int ide_majors[] = { 3, 22, 33, 34, 56, 57 };
-
-dev_t __init
-pmac_find_ide_boot(char *bootdevice, int n)
-{
-	int i;
-	
-	/*
-	 * Look through the list of IDE interfaces for this one.
-	 */
-	for (i = 0; i < pmac_ide_count; ++i) {
-		char *name;
-		if (!pmac_ide[i].node || !pmac_ide[i].node->full_name)
-			continue;
-		name = pmac_ide[i].node->full_name;
-		if (memcmp(name, bootdevice, n) == 0 && name[n] == 0) {
-			/* XXX should cope with the 2nd drive as well... */
-			return MKDEV(ide_majors[i], 0);
-		}
-	}
-
-	return 0;
-}
-
 /* Suspend call back, should be called after the child devices
  * have actually been suspended
  */
@@ -1088,7 +1005,8 @@
 	if (np->parent && np->parent->name
 	    && strcasecmp(np->parent->name, "media-bay") == 0) {
 #ifdef CONFIG_PMAC_MEDIABAY
-		media_bay_set_ide_infos(np->parent, pmif->regbase, pmif->irq, hwif->index);
+		media_bay_set_ide_infos(np->parent, pmif->regbase, pmif->irq,
+					hwif);
 #endif /* CONFIG_PMAC_MEDIABAY */
 		pmif->mediabay = 1;
 		if (!bidp)
@@ -1119,7 +1037,6 @@
 	hwif->hwif_data = pmif;
 	ide_init_port_hw(hwif, hw);
 	hwif->noprobe = pmif->mediabay;
-	hwif->hold = pmif->mediabay;
 	hwif->cbl = pmif->cable_80 ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
 	hwif->set_pio_mode = pmac_ide_set_pio_mode;
 	if (pmif->kind == controller_un_ata6
@@ -1154,6 +1071,15 @@
 	return 0;
 }
 
+static void __devinit pmac_ide_init_ports(hw_regs_t *hw, unsigned long base)
+{
+	int i;
+
+	for (i = 0; i < 8; ++i)
+		hw->io_ports[i] = base + i * 0x10;
+	hw->io_ports[8] = base + 0x160;
+}
+
 /*
  * Attach to a macio probed interface
  */
@@ -1227,7 +1153,7 @@
 	dev_set_drvdata(&mdev->ofdev.dev, hwif);
 
 	memset(&hw, 0, sizeof(hw));
-	pmac_ide_init_hwif_ports(&hw, pmif->regbase, 0, NULL);
+	pmac_ide_init_ports(&hw, pmif->regbase);
 	hw.irq = irq;
 	hw.dev = &mdev->ofdev.dev;
 
@@ -1341,7 +1267,7 @@
 	pci_set_drvdata(pdev, hwif);
 
 	memset(&hw, 0, sizeof(hw));
-	pmac_ide_init_hwif_ports(&hw, pmif->regbase, 0, NULL);
+	pmac_ide_init_ports(&hw, pmif->regbase);
 	hw.irq = pdev->irq;
 	hw.dev = &pdev->dev;
 
diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
index 634e3f6..f7ede0e 100644
--- a/drivers/ide/setup-pci.c
+++ b/drivers/ide/setup-pci.c
@@ -41,17 +41,6 @@
 	ide_hwif_t *hwif;
 
 	/*
-	 * Look for a hwif with matching io_base specified using
-	 * parameters to ide_setup().
-	 */
-	for (h = 0; h < MAX_HWIFS; ++h) {
-		hwif = &ide_hwifs[h];
-		if (hwif->io_ports[IDE_DATA_OFFSET] == io_base) {
-			if (hwif->chipset == ide_forced)
-				return hwif; /* a perfect match */
-		}
-	}
-	/*
 	 * Look for a hwif with matching io_base default value.
 	 * If chipset is "ide_unknown", then claim that hwif slot.
 	 * Otherwise, some other chipset has already claimed it..  :(
@@ -356,7 +345,6 @@
 	unsigned long ctl = 0, base = 0;
 	ide_hwif_t *hwif;
 	u8 bootable = (d->host_flags & IDE_HFLAG_BOOTABLE) ? 1 : 0;
-	u8 oldnoprobe = 0;
 	struct hw_regs_s hw;
 
 	if ((d->host_flags & IDE_HFLAG_ISA_PORTS) == 0) {
@@ -382,19 +370,13 @@
 		return NULL;	/* no room in ide_hwifs[] */
 
 	memset(&hw, 0, sizeof(hw));
-	hw.irq = hwif->irq ? hwif->irq : irq;
+	hw.irq = irq;
 	hw.dev = &dev->dev;
 	hw.chipset = d->chipset ? d->chipset : ide_pci;
 	ide_std_init_ports(&hw, base, ctl | 2);
 
-	if (hwif->io_ports[IDE_DATA_OFFSET] == base &&
-	    hwif->io_ports[IDE_CONTROL_OFFSET] == (ctl | 2))
-		oldnoprobe = hwif->noprobe;
-
 	ide_init_port_hw(hwif, &hw);
 
-	hwif->noprobe = oldnoprobe;
-
 	hwif->dev = &dev->dev;
 	hwif->cds = d;
 
diff --git a/drivers/ieee1394/csr.c b/drivers/ieee1394/csr.c
index 52ac83e..c90be40 100644
--- a/drivers/ieee1394/csr.c
+++ b/drivers/ieee1394/csr.c
@@ -133,8 +133,7 @@
                 host->csr.state &= ~0x100;
         }
 
-        host->csr.topology_map[1] =
-                cpu_to_be32(be32_to_cpu(host->csr.topology_map[1]) + 1);
+	be32_add_cpu(&host->csr.topology_map[1], 1);
         host->csr.topology_map[2] = cpu_to_be32(host->node_count << 16
                                                 | host->selfid_count);
         host->csr.topology_map[0] =
@@ -142,8 +141,7 @@
                             | csr_crc16(host->csr.topology_map + 1,
                                         host->selfid_count + 2));
 
-        host->csr.speed_map[1] =
-                cpu_to_be32(be32_to_cpu(host->csr.speed_map[1]) + 1);
+	be32_add_cpu(&host->csr.speed_map[1], 1);
         host->csr.speed_map[0] = cpu_to_be32(0x3f1 << 16
                                              | csr_crc16(host->csr.speed_map+1,
                                                          0x3f1));
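
be32_add_cpu() folds the cpu_to_be32(be32_to_cpu(x) + n) round trip into one call; assuming it matches the generic byteorder helper, it is equivalent to:

    /* Open-coded equivalent of be32_add_cpu() (sketch). */
    static inline void my_be32_add_cpu(__be32 *var, u32 val)
    {
            *var = cpu_to_be32(be32_to_cpu(*var) + val);
    }
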
diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
index 6572211..6228fad 100644
--- a/drivers/ieee1394/dv1394.c
+++ b/drivers/ieee1394/dv1394.c
@@ -2179,8 +2179,7 @@
 MODULE_DEVICE_TABLE(ieee1394, dv1394_id_table);
 
 static struct hpsb_protocol_driver dv1394_driver = {
-	.name		= "dv1394",
-	.id_table	= dv1394_id_table,
+	.name = "dv1394",
 };
 
 
@@ -2568,7 +2567,6 @@
 
 	cdev_init(&dv1394_cdev, &dv1394_fops);
 	dv1394_cdev.owner = THIS_MODULE;
-	kobject_set_name(&dv1394_cdev.kobj, "dv1394");
 	ret = cdev_add(&dv1394_cdev, IEEE1394_DV1394_DEV, 16);
 	if (ret) {
 		printk(KERN_ERR "dv1394: unable to register character device\n");
diff --git a/drivers/ieee1394/highlevel.c b/drivers/ieee1394/highlevel.c
index b642546..fa2bfec 100644
--- a/drivers/ieee1394/highlevel.c
+++ b/drivers/ieee1394/highlevel.c
@@ -339,7 +339,7 @@
 	if ((alignment & 3) || (alignment > 0x800000000000ULL) ||
 	    (hweight64(alignment) != 1)) {
 		HPSB_ERR("%s called with invalid alignment: 0x%048llx",
-			 __FUNCTION__, (unsigned long long)alignment);
+			 __func__, (unsigned long long)alignment);
 		return retval;
 	}
 
@@ -354,7 +354,7 @@
 	if (((start|end) & ~align_mask) || (start >= end) ||
 	    (end > CSR1212_ALL_SPACE_END)) {
 		HPSB_ERR("%s called with invalid addresses "
-			 "(start = %012Lx  end = %012Lx)", __FUNCTION__,
+			 "(start = %012Lx  end = %012Lx)", __func__,
 			 (unsigned long long)start,(unsigned long long)end);
 		return retval;
 	}
@@ -422,7 +422,7 @@
 
 	if (((start|end) & 3) || (start >= end) ||
 	    (end > CSR1212_ALL_SPACE_END)) {
-		HPSB_ERR("%s called with invalid addresses", __FUNCTION__);
+		HPSB_ERR("%s called with invalid addresses", __func__);
 		return 0;
 	}
 
diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c
index 36c747b..dcdb71a 100644
--- a/drivers/ieee1394/ieee1394_core.c
+++ b/drivers/ieee1394/ieee1394_core.c
@@ -242,7 +242,7 @@
 {
 	if (host->in_bus_reset) {
 		HPSB_NOTICE("%s called while bus reset already in progress",
-			    __FUNCTION__);
+			    __func__);
 		return 1;
 	}
 
@@ -373,6 +373,8 @@
 			if (sid->port2 == SELFID_PORT_CHILD) cldcnt[n]++;
 
 			speedcap[n] = sid->speed;
+			if (speedcap[n] > host->csr.lnk_spd)
+				speedcap[n] = host->csr.lnk_spd;
 			n--;
 		}
 	}
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index 511e432..70afa37 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -701,7 +701,11 @@
 		return 0;
 
 	driver = container_of(drv, struct hpsb_protocol_driver, driver);
-	for (id = driver->id_table; id->match_flags != 0; id++) {
+	id = driver->id_table;
+	if (!id)
+		return 0;
+
+	for (; id->match_flags != 0; id++) {
 		if ((id->match_flags & IEEE1394_MATCH_VENDOR_ID) &&
 		    id->vendor_id != ud->vendor_id)
 			continue;
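
Several ieee1394 protocol drivers in this series (dv1394, raw1394, video1394) now register without an id_table, so the match routine must bail out before walking a NULL table, as above. The shape in isolation, with the field comparisons elided:

    /* Sketch: a match loop that tolerates a missing ID table. */
    static int my_match(const struct ieee1394_device_id *id)
    {
            if (!id)
                    return 0;       /* no table: never matches */
            for (; id->match_flags != 0; id++) {
                    /* compare vendor/model/specifier fields here */
            }
            return 0;
    }
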
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
index 969de2a..0690469 100644
--- a/drivers/ieee1394/ohci1394.c
+++ b/drivers/ieee1394/ohci1394.c
@@ -149,7 +149,7 @@
 /* Module Parameters */
 static int phys_dma = 1;
 module_param(phys_dma, int, 0444);
-MODULE_PARM_DESC(phys_dma, "Enable physical dma (default = 1).");
+MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
 
 static void dma_trm_tasklet(unsigned long data);
 static void dma_trm_reset(struct dma_trm_ctx *d);
@@ -708,7 +708,7 @@
                                 /* FIXME: do something about it */
                                 PRINT(KERN_ERR,
                                       "%s: packet data addr: %p size %Zd bytes "
-                                      "cross page boundary", __FUNCTION__,
+				      "cross page boundary", __func__,
                                       packet->data, packet->data_size);
                         }
 #endif
@@ -2089,10 +2089,8 @@
 
 	spin_lock_irqsave(&d->lock, flags);
 
-	list_splice(&d->fifo_list, &packet_list);
-	list_splice(&d->pending_list, &packet_list);
-	INIT_LIST_HEAD(&d->fifo_list);
-	INIT_LIST_HEAD(&d->pending_list);
+	list_splice_init(&d->fifo_list, &packet_list);
+	list_splice_init(&d->pending_list, &packet_list);
 
 	d->branchAddrPtr = NULL;
 	d->sent_ind = d->prg_ind;
@@ -2787,7 +2785,7 @@
 	d->buf_bus = kzalloc(d->num_desc * sizeof(*d->buf_bus), GFP_ATOMIC);
 
 	if (d->buf_cpu == NULL || d->buf_bus == NULL) {
-		PRINT(KERN_ERR, "Failed to allocate dma buffer");
+		PRINT(KERN_ERR, "Failed to allocate %s", "DMA buffer");
 		free_dma_rcv_ctx(d);
 		return -ENOMEM;
 	}
@@ -2796,7 +2794,7 @@
 	d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_ATOMIC);
 
 	if (d->prg_cpu == NULL || d->prg_bus == NULL) {
-		PRINT(KERN_ERR, "Failed to allocate dma prg");
+		PRINT(KERN_ERR, "Failed to allocate %s", "DMA prg");
 		free_dma_rcv_ctx(d);
 		return -ENOMEM;
 	}
@@ -2804,7 +2802,7 @@
 	d->spb = kmalloc(d->split_buf_size, GFP_ATOMIC);
 
 	if (d->spb == NULL) {
-		PRINT(KERN_ERR, "Failed to allocate split buffer");
+		PRINT(KERN_ERR, "Failed to allocate %s", "split buffer");
 		free_dma_rcv_ctx(d);
 		return -ENOMEM;
 	}
@@ -2830,7 +2828,7 @@
 			memset(d->buf_cpu[i], 0, d->buf_size);
 		} else {
 			PRINT(KERN_ERR,
-			      "Failed to allocate dma buffer");
+			      "Failed to allocate %s", "DMA buffer");
 			free_dma_rcv_ctx(d);
 			return -ENOMEM;
 		}
@@ -2841,7 +2839,7 @@
                         memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd));
 		} else {
 			PRINT(KERN_ERR,
-			      "Failed to allocate dma prg");
+			      "Failed to allocate %s", "DMA prg");
 			free_dma_rcv_ctx(d);
 			return -ENOMEM;
 		}
@@ -2902,7 +2900,7 @@
 	d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_KERNEL);
 
 	if (d->prg_cpu == NULL || d->prg_bus == NULL) {
-		PRINT(KERN_ERR, "Failed to allocate at dma prg");
+		PRINT(KERN_ERR, "Failed to allocate %s", "AT DMA prg");
 		free_dma_trm_ctx(d);
 		return -ENOMEM;
 	}
@@ -2925,7 +2923,7 @@
                         memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg));
 		} else {
 			PRINT(KERN_ERR,
-			      "Failed to allocate at dma prg");
+			      "Failed to allocate %s", "AT DMA prg");
 			free_dma_trm_ctx(d);
 			return -ENOMEM;
 		}
@@ -2986,22 +2984,9 @@
  * PCI Driver Interface functions  *
  ***********************************/
 
-#define FAIL(err, fmt, args...)			\
-do {						\
-	PRINT_G(KERN_ERR, fmt , ## args);	\
-        ohci1394_pci_remove(dev);               \
-	return err;				\
-} while (0)
-
-static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
-					const struct pci_device_id *ent)
-{
-	struct hpsb_host *host;
-	struct ti_ohci *ohci;	/* shortcut to currently handled device */
-	resource_size_t ohci_base;
-
 #ifdef CONFIG_PPC_PMAC
-	/* Necessary on some machines if ohci1394 was loaded/ unloaded before */
+static void ohci1394_pmac_on(struct pci_dev *dev)
+{
 	if (machine_is(powermac)) {
 		struct device_node *ofn = pci_device_to_OF_node(dev);
 
@@ -3010,15 +2995,45 @@
 			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
 		}
 	}
+}
+
+static void ohci1394_pmac_off(struct pci_dev *dev)
+{
+	if (machine_is(powermac)) {
+		struct device_node *ofn = pci_device_to_OF_node(dev);
+
+		if (ofn) {
+			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
+			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
+		}
+	}
+}
+#else
+#define ohci1394_pmac_on(dev)
+#define ohci1394_pmac_off(dev)
 #endif /* CONFIG_PPC_PMAC */
 
-        if (pci_enable_device(dev))
-		FAIL(-ENXIO, "Failed to enable OHCI hardware");
+static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
+					const struct pci_device_id *ent)
+{
+	struct hpsb_host *host;
+	struct ti_ohci *ohci;	/* shortcut to currently handled device */
+	resource_size_t ohci_base;
+	int err = -ENOMEM;
+
+	ohci1394_pmac_on(dev);
+	if (pci_enable_device(dev)) {
+		PRINT_G(KERN_ERR, "Failed to enable OHCI hardware");
+		err = -ENXIO;
+		goto err;
+	}
         pci_set_master(dev);
 
 	host = hpsb_alloc_host(&ohci1394_driver, sizeof(struct ti_ohci), &dev->dev);
-	if (!host) FAIL(-ENOMEM, "Failed to allocate host structure");
-
+	if (!host) {
+		PRINT_G(KERN_ERR, "Failed to allocate %s", "host structure");
+		goto err;
+	}
 	ohci = host->hostdata;
 	ohci->dev = dev;
 	ohci->host = host;
@@ -3067,15 +3082,20 @@
 		      (unsigned long long)pci_resource_len(dev, 0));
 
 	if (!request_mem_region(ohci_base, OHCI1394_REGISTER_SIZE,
-				OHCI1394_DRIVER_NAME))
-		FAIL(-ENOMEM, "MMIO resource (0x%llx - 0x%llx) unavailable",
+				OHCI1394_DRIVER_NAME)) {
+		PRINT_G(KERN_ERR, "MMIO resource (0x%llx - 0x%llx) unavailable",
 			(unsigned long long)ohci_base,
 			(unsigned long long)ohci_base + OHCI1394_REGISTER_SIZE);
+		goto err;
+	}
 	ohci->init_state = OHCI_INIT_HAVE_MEM_REGION;
 
 	ohci->registers = ioremap(ohci_base, OHCI1394_REGISTER_SIZE);
-	if (ohci->registers == NULL)
-		FAIL(-ENXIO, "Failed to remap registers - card not accessible");
+	if (ohci->registers == NULL) {
+		PRINT_G(KERN_ERR, "Failed to remap registers");
+		err = -ENXIO;
+		goto err;
+	}
 	ohci->init_state = OHCI_INIT_HAVE_IOMAPPING;
 	DBGMSG("Remapped memory spaces reg 0x%p", ohci->registers);
 
@@ -3083,16 +3103,20 @@
 	ohci->csr_config_rom_cpu =
 		pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
 				     &ohci->csr_config_rom_bus);
-	if (ohci->csr_config_rom_cpu == NULL)
-		FAIL(-ENOMEM, "Failed to allocate buffer config rom");
+	if (ohci->csr_config_rom_cpu == NULL) {
+		PRINT_G(KERN_ERR, "Failed to allocate %s", "buffer config rom");
+		goto err;
+	}
 	ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER;
 
 	/* self-id dma buffer allocation */
 	ohci->selfid_buf_cpu =
 		pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
                       &ohci->selfid_buf_bus);
-	if (ohci->selfid_buf_cpu == NULL)
-		FAIL(-ENOMEM, "Failed to allocate DMA buffer for self-id packets");
+	if (ohci->selfid_buf_cpu == NULL) {
+		PRINT_G(KERN_ERR, "Failed to allocate %s", "self-ID buffer");
+		goto err;
+	}
 	ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER;
 
 	if ((unsigned long)ohci->selfid_buf_cpu & 0x1fff)
@@ -3108,28 +3132,32 @@
 	if (alloc_dma_rcv_ctx(ohci, &ohci->ar_req_context,
 			      DMA_CTX_ASYNC_REQ, 0, AR_REQ_NUM_DESC,
 			      AR_REQ_BUF_SIZE, AR_REQ_SPLIT_BUF_SIZE,
-			      OHCI1394_AsReqRcvContextBase) < 0)
-		FAIL(-ENOMEM, "Failed to allocate AR Req context");
-
+			      OHCI1394_AsReqRcvContextBase) < 0) {
+		PRINT_G(KERN_ERR, "Failed to allocate %s", "AR Req context");
+		goto err;
+	}
 	/* AR DMA response context allocation */
 	if (alloc_dma_rcv_ctx(ohci, &ohci->ar_resp_context,
 			      DMA_CTX_ASYNC_RESP, 0, AR_RESP_NUM_DESC,
 			      AR_RESP_BUF_SIZE, AR_RESP_SPLIT_BUF_SIZE,
-			      OHCI1394_AsRspRcvContextBase) < 0)
-		FAIL(-ENOMEM, "Failed to allocate AR Resp context");
-
+			      OHCI1394_AsRspRcvContextBase) < 0) {
+		PRINT_G(KERN_ERR, "Failed to allocate %s", "AR Resp context");
+		goto err;
+	}
 	/* AT DMA request context */
 	if (alloc_dma_trm_ctx(ohci, &ohci->at_req_context,
 			      DMA_CTX_ASYNC_REQ, 0, AT_REQ_NUM_DESC,
-			      OHCI1394_AsReqTrContextBase) < 0)
-		FAIL(-ENOMEM, "Failed to allocate AT Req context");
-
+			      OHCI1394_AsReqTrContextBase) < 0) {
+		PRINT_G(KERN_ERR, "Failed to allocate %s", "AT Req context");
+		goto err;
+	}
 	/* AT DMA response context */
 	if (alloc_dma_trm_ctx(ohci, &ohci->at_resp_context,
 			      DMA_CTX_ASYNC_RESP, 1, AT_RESP_NUM_DESC,
-			      OHCI1394_AsRspTrContextBase) < 0)
-		FAIL(-ENOMEM, "Failed to allocate AT Resp context");
-
+			      OHCI1394_AsRspTrContextBase) < 0) {
+		PRINT_G(KERN_ERR, "Failed to allocate %s", "AT Resp context");
+		goto err;
+	}
 	/* Start off with a soft reset, to clear everything to a sane
 	 * state. */
 	ohci_soft_reset(ohci);
@@ -3172,9 +3200,10 @@
 	 * by that point.
 	 */
 	if (request_irq(dev->irq, ohci_irq_handler, IRQF_SHARED,
-			 OHCI1394_DRIVER_NAME, ohci))
-		FAIL(-ENOMEM, "Failed to allocate shared interrupt %d", dev->irq);
-
+			 OHCI1394_DRIVER_NAME, ohci)) {
+		PRINT_G(KERN_ERR, "Failed to allocate interrupt %d", dev->irq);
+		goto err;
+	}
 	ohci->init_state = OHCI_INIT_HAVE_IRQ;
 	ohci_initialize(ohci);
 
@@ -3194,25 +3223,28 @@
 	host->middle_addr_space = OHCI1394_MIDDLE_ADDRESS_SPACE;
 
 	/* Tell the highlevel this host is ready */
-	if (hpsb_add_host(host))
-		FAIL(-ENOMEM, "Failed to register host with highlevel");
-
+	if (hpsb_add_host(host)) {
+		PRINT_G(KERN_ERR, "Failed to register host with highlevel");
+		goto err;
+	}
 	ohci->init_state = OHCI_INIT_DONE;
 
 	return 0;
-#undef FAIL
+err:
+	ohci1394_pci_remove(dev);
+	return err;
 }
 
-static void ohci1394_pci_remove(struct pci_dev *pdev)
+static void ohci1394_pci_remove(struct pci_dev *dev)
 {
 	struct ti_ohci *ohci;
-	struct device *dev;
+	struct device *device;
 
-	ohci = pci_get_drvdata(pdev);
+	ohci = pci_get_drvdata(dev);
 	if (!ohci)
-		return;
+		goto out;
 
-	dev = get_device(&ohci->host->device);
+	device = get_device(&ohci->host->device);
 
 	switch (ohci->init_state) {
 	case OHCI_INIT_DONE:
@@ -3246,7 +3278,7 @@
 		/* Soft reset before we start - this disables
 		 * interrupts and clears linkEnable and LPS. */
 		ohci_soft_reset(ohci);
-		free_irq(ohci->dev->irq, ohci);
+		free_irq(dev->irq, ohci);
 
 	case OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE:
 		/* The ohci_soft_reset() stops all DMA contexts, so we
@@ -3257,12 +3289,12 @@
 		free_dma_trm_ctx(&ohci->at_resp_context);
 
 	case OHCI_INIT_HAVE_SELFID_BUFFER:
-		pci_free_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
+		pci_free_consistent(dev, OHCI1394_SI_DMA_BUF_SIZE,
 				    ohci->selfid_buf_cpu,
 				    ohci->selfid_buf_bus);
 
 	case OHCI_INIT_HAVE_CONFIG_ROM_BUFFER:
-		pci_free_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
+		pci_free_consistent(dev, OHCI_CONFIG_ROM_LEN,
 				    ohci->csr_config_rom_cpu,
 				    ohci->csr_config_rom_bus);
 
@@ -3270,35 +3302,24 @@
 		iounmap(ohci->registers);
 
 	case OHCI_INIT_HAVE_MEM_REGION:
-		release_mem_region(pci_resource_start(ohci->dev, 0),
+		release_mem_region(pci_resource_start(dev, 0),
 				   OHCI1394_REGISTER_SIZE);
 
-#ifdef CONFIG_PPC_PMAC
-	/* On UniNorth, power down the cable and turn off the chip clock
-	 * to save power on laptops */
-	if (machine_is(powermac)) {
-		struct device_node* ofn = pci_device_to_OF_node(ohci->dev);
-
-		if (ofn) {
-			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
-			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
-		}
-	}
-#endif /* CONFIG_PPC_PMAC */
-
 	case OHCI_INIT_ALLOC_HOST:
-		pci_set_drvdata(ohci->dev, NULL);
+		pci_set_drvdata(dev, NULL);
 	}
 
-	if (dev)
-		put_device(dev);
+	if (device)
+		put_device(device);
+out:
+	ohci1394_pmac_off(dev);
 }
 
 #ifdef CONFIG_PM
-static int ohci1394_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int ohci1394_pci_suspend(struct pci_dev *dev, pm_message_t state)
 {
 	int err;
-	struct ti_ohci *ohci = pci_get_drvdata(pdev);
+	struct ti_ohci *ohci = pci_get_drvdata(dev);
 
 	if (!ohci) {
 		printk(KERN_ERR "%s: tried to suspend nonexisting host\n",
@@ -3326,32 +3347,23 @@
 	ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
 	ohci_soft_reset(ohci);
 
-	err = pci_save_state(pdev);
+	err = pci_save_state(dev);
 	if (err) {
 		PRINT(KERN_ERR, "pci_save_state failed with %d", err);
 		return err;
 	}
-	err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
+	err = pci_set_power_state(dev, pci_choose_state(dev, state));
 	if (err)
 		DBGMSG("pci_set_power_state failed with %d", err);
-
-/* PowerMac suspend code comes last */
-#ifdef CONFIG_PPC_PMAC
-	if (machine_is(powermac)) {
-		struct device_node *ofn = pci_device_to_OF_node(pdev);
-
-		if (ofn)
-			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
-	}
-#endif /* CONFIG_PPC_PMAC */
+	ohci1394_pmac_off(dev);
 
 	return 0;
 }
 
-static int ohci1394_pci_resume(struct pci_dev *pdev)
+static int ohci1394_pci_resume(struct pci_dev *dev)
 {
 	int err;
-	struct ti_ohci *ohci = pci_get_drvdata(pdev);
+	struct ti_ohci *ohci = pci_get_drvdata(dev);
 
 	if (!ohci) {
 		printk(KERN_ERR "%s: tried to resume nonexisting host\n",
@@ -3360,19 +3372,10 @@
 	}
 	DBGMSG("resume called");
 
-/* PowerMac resume code comes first */
-#ifdef CONFIG_PPC_PMAC
-	if (machine_is(powermac)) {
-		struct device_node *ofn = pci_device_to_OF_node(pdev);
-
-		if (ofn)
-			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
-	}
-#endif /* CONFIG_PPC_PMAC */
-
-	pci_set_power_state(pdev, PCI_D0);
-	pci_restore_state(pdev);
-	err = pci_enable_device(pdev);
+	ohci1394_pmac_on(dev);
+	pci_set_power_state(dev, PCI_D0);
+	pci_restore_state(dev);
+	err = pci_enable_device(dev);
 	if (err) {
 		PRINT(KERN_ERR, "pci_enable_device failed with %d", err);
 		return err;
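
The probe rewrite trades the FAIL() macro, which hid both a print and a return, for the kernel's conventional goto unwind, and brackets everything with the new ohci1394_pmac_on()/_off() helpers so the PowerMac cell is powered for the whole probe and powered down on any exit path. Reduced to its skeleton; my_probe() is illustrative:

    /* Skeleton of the goto-unwind probe adopted above (sketch). */
    static int my_probe(struct pci_dev *dev)
    {
            int err = -ENOMEM;

            ohci1394_pmac_on(dev);          /* power up first */
            if (pci_enable_device(dev)) {
                    err = -ENXIO;
                    goto err;
            }
            /* ... each later allocation does "goto err" on failure ... */
            return 0;
    err:
            ohci1394_pci_remove(dev);       /* also calls _pmac_off() */
            return err;
    }
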
diff --git a/drivers/ieee1394/pcilynx.c b/drivers/ieee1394/pcilynx.c
index 8af01ab..7aee1ac 100644
--- a/drivers/ieee1394/pcilynx.c
+++ b/drivers/ieee1394/pcilynx.c
@@ -226,7 +226,7 @@
         if (addr > 15) {
                 PRINT(KERN_ERR, lynx->id,
                       "%s: PHY register address %d out of range",
-		      __FUNCTION__, addr);
+		      __func__, addr);
                 return -1;
         }
 
@@ -238,7 +238,7 @@
 
                 if (i > 10000) {
                         PRINT(KERN_ERR, lynx->id, "%s: runaway loop, aborting",
-			      __FUNCTION__);
+			      __func__);
                         retval = -1;
                         break;
                 }
@@ -261,13 +261,13 @@
 
         if (addr > 15) {
                 PRINT(KERN_ERR, lynx->id,
-                      "%s: PHY register address %d out of range", __FUNCTION__, addr);
+		      "%s: PHY register address %d out of range", __func__, addr);
                 return -1;
         }
 
         if (val > 0xff) {
                 PRINT(KERN_ERR, lynx->id,
-                      "%s: PHY register value %d out of range", __FUNCTION__, val);
+		      "%s: PHY register value %d out of range", __func__, val);
                 return -1;
         }
 
@@ -287,7 +287,7 @@
 
         if (page > 7) {
                 PRINT(KERN_ERR, lynx->id,
-                      "%s: PHY page %d out of range", __FUNCTION__, page);
+		      "%s: PHY page %d out of range", __func__, page);
                 return -1;
         }
 
@@ -309,7 +309,7 @@
 
         if (port > 15) {
                 PRINT(KERN_ERR, lynx->id,
-                      "%s: PHY port %d out of range", __FUNCTION__, port);
+		      "%s: PHY port %d out of range", __func__, port);
                 return -1;
         }
 
@@ -738,8 +738,7 @@
                 spin_lock_irqsave(&lynx->async.queue_lock, flags);
 
                 reg_write(lynx, DMA_CHAN_CTRL(CHANNEL_ASYNC_SEND), 0);
-		list_splice(&lynx->async.queue, &packet_list);
-		INIT_LIST_HEAD(&lynx->async.queue);
+		list_splice_init(&lynx->async.queue, &packet_list);
 
                 if (list_empty(&lynx->async.pcl_queue)) {
                         spin_unlock_irqrestore(&lynx->async.queue_lock, flags);
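
list_splice_init(), used here and in the ohci1394 and fmr_pool hunks above, is exactly the two-step sequence it replaces: splice the entries onto the destination, then reinitialise the source head so it is empty rather than stale. Open-coded:

    /* Open-coded equivalent of list_splice_init() (sketch). */
    static inline void my_splice_init(struct list_head *src,
                                      struct list_head *dst)
    {
            list_splice(src, dst);  /* move every entry onto dst */
            INIT_LIST_HEAD(src);    /* leave src empty and valid */
    }
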
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index 37e7e10..04e96ba 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -2959,7 +2959,6 @@
 
 static struct hpsb_protocol_driver raw1394_driver = {
 	.name = "raw1394",
-	.id_table = raw1394_id_table,
 };
 
 /******************************************************************************/
@@ -3004,7 +3003,6 @@
 
 	cdev_init(&raw1394_cdev, &raw1394_fops);
 	raw1394_cdev.owner = THIS_MODULE;
-	kobject_set_name(&raw1394_cdev.kobj, RAW1394_DEVICE_NAME);
 	ret = cdev_add(&raw1394_cdev, IEEE1394_RAW1394_DEV, 1);
 	if (ret) {
 		HPSB_ERR("raw1394 failed to register minor device block");
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index f53f72d..16b9d0a 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -615,7 +615,7 @@
 		cmd->Current_SCpnt = Current_SCpnt;
 		list_add_tail(&cmd->list, &lu->cmd_orb_inuse);
 	} else
-		SBP2_ERR("%s: no orbs available", __FUNCTION__);
+		SBP2_ERR("%s: no orbs available", __func__);
 	spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
 	return cmd;
 }
@@ -1294,7 +1294,7 @@
 
 	data = cpu_to_be32(SBP2_BUSY_TIMEOUT_VALUE);
 	if (hpsb_node_write(lu->ne, SBP2_BUSY_TIMEOUT_ADDRESS, &data, 4))
-		SBP2_ERR("%s error", __FUNCTION__);
+		SBP2_ERR("%s error", __func__);
 	return 0;
 }
 
@@ -1985,11 +1985,8 @@
 	lu->sdev = sdev;
 	sdev->allow_restart = 1;
 
-	/*
-	 * Update the dma alignment (minimum alignment requirements for
-	 * start and end of DMA transfers) to be a sector
-	 */
-	blk_queue_update_dma_alignment(sdev->request_queue, 511);
+	/* SBP-2 requires quadlet alignment of the data buffers. */
+	blk_queue_update_dma_alignment(sdev->request_queue, 4 - 1);
 
 	if (lu->workarounds & SBP2_WORKAROUND_INQUIRY_36)
 		sdev->inquiry_len = 36;
diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c
index bd28adf..e03024e 100644
--- a/drivers/ieee1394/video1394.c
+++ b/drivers/ieee1394/video1394.c
@@ -1315,8 +1315,7 @@
 MODULE_DEVICE_TABLE(ieee1394, video1394_id_table);
 
 static struct hpsb_protocol_driver video1394_driver = {
-	.name		= VIDEO1394_DRIVER_NAME,
-	.id_table	= video1394_id_table,
+	.name = VIDEO1394_DRIVER_NAME,
 };
 
 
@@ -1504,7 +1503,6 @@
 
 	cdev_init(&video1394_cdev, &video1394_fops);
 	video1394_cdev.owner = THIS_MODULE;
-	kobject_set_name(&video1394_cdev.kobj, VIDEO1394_DRIVER_NAME);
 	ret = cdev_add(&video1394_cdev, IEEE1394_VIDEO1394_DEV, 16);
 	if (ret) {
 		PRINT_G(KERN_ERR, "video1394: unable to get minor device block");
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index faa7ce3..a47fe64 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -467,6 +467,31 @@
 	return memcmp(src, dst_data->data, IB_CM_COMPARE_SIZE);
 }
 
+/*
+ * Trivial helpers to strip endian annotation and compare; the
+ * endianness doesn't actually matter since we just need a stable
+ * order for the RB tree.
+ */
+static int be32_lt(__be32 a, __be32 b)
+{
+	return (__force u32) a < (__force u32) b;
+}
+
+static int be32_gt(__be32 a, __be32 b)
+{
+	return (__force u32) a > (__force u32) b;
+}
+
+static int be64_lt(__be64 a, __be64 b)
+{
+	return (__force u64) a < (__force u64) b;
+}
+
+static int be64_gt(__be64 a, __be64 b)
+{
+	return (__force u64) a > (__force u64) b;
+}
+
 static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
 {
 	struct rb_node **link = &cm.listen_service_table.rb_node;
@@ -492,9 +517,9 @@
 			link = &(*link)->rb_left;
 		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
 			link = &(*link)->rb_right;
-		else if (service_id < cur_cm_id_priv->id.service_id)
+		else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
 			link = &(*link)->rb_left;
-		else if (service_id > cur_cm_id_priv->id.service_id)
+		else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
 			link = &(*link)->rb_right;
 		else if (data_cmp < 0)
 			link = &(*link)->rb_left;
@@ -527,9 +552,9 @@
 			node = node->rb_left;
 		else if (device > cm_id_priv->id.device)
 			node = node->rb_right;
-		else if (service_id < cm_id_priv->id.service_id)
+		else if (be64_lt(service_id, cm_id_priv->id.service_id))
 			node = node->rb_left;
-		else if (service_id > cm_id_priv->id.service_id)
+		else if (be64_gt(service_id, cm_id_priv->id.service_id))
 			node = node->rb_right;
 		else if (data_cmp < 0)
 			node = node->rb_left;
@@ -552,13 +577,13 @@
 		parent = *link;
 		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
 					     remote_id_node);
-		if (remote_id < cur_timewait_info->work.remote_id)
+		if (be32_lt(remote_id, cur_timewait_info->work.remote_id))
 			link = &(*link)->rb_left;
-		else if (remote_id > cur_timewait_info->work.remote_id)
+		else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))
 			link = &(*link)->rb_right;
-		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
+		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
 			link = &(*link)->rb_left;
-		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
+		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
 			link = &(*link)->rb_right;
 		else
 			return cur_timewait_info;
@@ -578,13 +603,13 @@
 	while (node) {
 		timewait_info = rb_entry(node, struct cm_timewait_info,
 					 remote_id_node);
-		if (remote_id < timewait_info->work.remote_id)
+		if (be32_lt(remote_id, timewait_info->work.remote_id))
 			node = node->rb_left;
-		else if (remote_id > timewait_info->work.remote_id)
+		else if (be32_gt(remote_id, timewait_info->work.remote_id))
 			node = node->rb_right;
-		else if (remote_ca_guid < timewait_info->remote_ca_guid)
+		else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid))
 			node = node->rb_left;
-		else if (remote_ca_guid > timewait_info->remote_ca_guid)
+		else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
 			node = node->rb_right;
 		else
 			return timewait_info;
@@ -605,13 +630,13 @@
 		parent = *link;
 		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
 					     remote_qp_node);
-		if (remote_qpn < cur_timewait_info->remote_qpn)
+		if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn))
 			link = &(*link)->rb_left;
-		else if (remote_qpn > cur_timewait_info->remote_qpn)
+		else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn))
 			link = &(*link)->rb_right;
-		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
+		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
 			link = &(*link)->rb_left;
-		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
+		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
 			link = &(*link)->rb_right;
 		else
 			return cur_timewait_info;
@@ -635,9 +660,9 @@
 		parent = *link;
 		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
 					  sidr_id_node);
-		if (remote_id < cur_cm_id_priv->id.remote_id)
+		if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id))
 			link = &(*link)->rb_left;
-		else if (remote_id > cur_cm_id_priv->id.remote_id)
+		else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id))
 			link = &(*link)->rb_right;
 		else {
 			int cmp;
@@ -2848,7 +2873,7 @@
 	cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
 			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
 	sidr_req_msg->request_id = cm_id_priv->id.local_id;
-	sidr_req_msg->pkey = cpu_to_be16(param->path->pkey);
+	sidr_req_msg->pkey = param->path->pkey;
 	sidr_req_msg->service_id = param->service_id;
 
 	if (param->private_data && param->private_data_len)
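
Comparing __be32/__be64 values with bare < and > both trips sparse and yields a host-endian order; since these RB trees only need some stable total order, the helpers compare the raw bit patterns via __force. A self-contained three-way variant of the same idea; be32_key_cmp() is illustrative:

    /* Illustrative three-way comparator on raw big-endian bits;
     * any stable order is a valid RB-tree key order. */
    static int be32_key_cmp(__be32 a, __be32 b)
    {
            u32 x = (__force u32) a, y = (__force u32) b;

            return x < y ? -1 : (x > y ? 1 : 0);
    }
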
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index d81c156..671f137 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1289,7 +1289,7 @@
 	new_cm_id = rdma_create_id(listen_id->id.event_handler,
 				   listen_id->id.context,
 				   RDMA_PS_TCP);
-	if (!new_cm_id) {
+	if (IS_ERR(new_cm_id)) {
 		ret = -ENOMEM;
 		goto out;
 	}
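
rdma_create_id() reports failure with an ERR_PTR()-encoded pointer, never NULL, so the test must be IS_ERR(); checking against NULL would let an error pointer escape. The convention, in a hypothetical caller:

    /* Sketch of the ERR_PTR convention (hypothetical caller). */
    static int my_create(rdma_cm_event_handler handler, void *ctx)
    {
            struct rdma_cm_id *id;

            id = rdma_create_id(handler, ctx, RDMA_PS_TCP);
            if (IS_ERR(id))
                    return PTR_ERR(id);     /* recover the errno */
            /* ... use id ... */
            return 0;
    }
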
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index 06d502c..1286dc1 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -158,8 +158,7 @@
 #endif
 	}
 
-	list_splice(&pool->dirty_list, &unmap_list);
-	INIT_LIST_HEAD(&pool->dirty_list);
+	list_splice_init(&pool->dirty_list, &unmap_list);
 	pool->dirty_len = 0;
 
 	spin_unlock_irq(&pool->pool_lock);
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 15937eb..ca4cf3a 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -614,7 +614,7 @@
 	if (!ctx->cm_id->device)
 		goto out;
 
-	resp.node_guid = ctx->cm_id->device->node_guid;
+	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
 	resp.port_num = ctx->cm_id->port_num;
 	switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
 	case RDMA_TRANSPORT_IB:
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index c75eb6c..2cad8b4 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -81,13 +81,13 @@
 
 struct ib_uverbs_event_file {
 	struct kref				ref;
-	struct file			       *file;
 	struct ib_uverbs_file		       *uverbs_file;
 	spinlock_t				lock;
-	int					is_async;
 	wait_queue_head_t			poll_wait;
 	struct fasync_struct		       *async_queue;
 	struct list_head			event_list;
+	int					is_async;
+	int					is_closed;
 };
 
 struct ib_uverbs_file {
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 495c803..2c3bff5 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1065,6 +1065,7 @@
 	attr.srq           = srq;
 	attr.sq_sig_type   = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
 	attr.qp_type       = cmd.qp_type;
+	attr.create_flags  = 0;
 
 	attr.cap.max_send_wr     = cmd.max_send_wr;
 	attr.cap.max_recv_wr     = cmd.max_recv_wr;
@@ -1462,7 +1463,6 @@
 		next->num_sge    = user_wr->num_sge;
 		next->opcode     = user_wr->opcode;
 		next->send_flags = user_wr->send_flags;
-		next->imm_data   = (__be32 __force) user_wr->imm_data;
 
 		if (is_ud) {
 			next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
@@ -1475,14 +1475,24 @@
 			next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
 		} else {
 			switch (next->opcode) {
-			case IB_WR_RDMA_WRITE:
 			case IB_WR_RDMA_WRITE_WITH_IMM:
+				next->ex.imm_data =
+					(__be32 __force) user_wr->ex.imm_data;
+			case IB_WR_RDMA_WRITE:
 			case IB_WR_RDMA_READ:
 				next->wr.rdma.remote_addr =
 					user_wr->wr.rdma.remote_addr;
 				next->wr.rdma.rkey        =
 					user_wr->wr.rdma.rkey;
 				break;
+			case IB_WR_SEND_WITH_IMM:
+				next->ex.imm_data =
+					(__be32 __force) user_wr->ex.imm_data;
+				break;
+			case IB_WR_SEND_WITH_INV:
+				next->ex.invalidate_rkey =
+					user_wr->ex.invalidate_rkey;
+				break;
 			case IB_WR_ATOMIC_CMP_AND_SWP:
 			case IB_WR_ATOMIC_FETCH_AND_ADD:
 				next->wr.atomic.remote_addr =
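
ib_send_wr now keeps immediate data and the invalidate rkey in a union (ex.imm_data / ex.invalidate_rkey), so the formerly unconditional imm_data copy becomes per-opcode; note the deliberate fall-through from RDMA_WRITE_WITH_IMM into the shared rdma address/rkey copy. In outline; fill_ex() is illustrative:

    /* Outline of the per-opcode union copy (sketch). */
    static void fill_ex(struct ib_send_wr *wr, __be32 imm, u32 rkey)
    {
            switch (wr->opcode) {
            case IB_WR_RDMA_WRITE_WITH_IMM:
                    wr->ex.imm_data = imm;
                    /* fall through: also needs the rdma fields */
            case IB_WR_RDMA_WRITE:
            case IB_WR_RDMA_READ:
                    /* copy wr.rdma.remote_addr and wr.rdma.rkey */
                    break;
            case IB_WR_SEND_WITH_IMM:
                    wr->ex.imm_data = imm;
                    break;
            case IB_WR_SEND_WITH_INV:
                    wr->ex.invalidate_rkey = rkey;
                    break;
            default:
                    break;
            }
    }
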
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 7c2ac39..f49f946 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -352,7 +352,7 @@
 	struct ib_uverbs_event *entry, *tmp;
 
 	spin_lock_irq(&file->lock);
-	file->file = NULL;
+	file->is_closed = 1;
 	list_for_each_entry_safe(entry, tmp, &file->event_list, list) {
 		if (entry->counter)
 			list_del(&entry->obj_list);
@@ -390,7 +390,7 @@
 		return;
 
 	spin_lock_irqsave(&file->lock, flags);
-	if (!file->file) {
+	if (file->is_closed) {
 		spin_unlock_irqrestore(&file->lock, flags);
 		return;
 	}
@@ -423,7 +423,7 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&file->async_file->lock, flags);
-	if (!file->async_file->file) {
+	if (file->async_file->is_closed) {
 		spin_unlock_irqrestore(&file->async_file->lock, flags);
 		return;
 	}
@@ -509,6 +509,7 @@
 	ev_file->uverbs_file = uverbs_file;
 	ev_file->async_queue = NULL;
 	ev_file->is_async    = is_async;
+	ev_file->is_closed   = 0;
 
 	*fd = get_unused_fd();
 	if (*fd < 0) {
@@ -516,25 +517,18 @@
 		goto err;
 	}
 
-	filp = get_empty_filp();
-	if (!filp) {
-		ret = -ENFILE;
-		goto err_fd;
-	}
-
-	ev_file->file      = filp;
-
 	/*
 	 * fops_get() can't fail here, because we're coming from a
 	 * system call on a uverbs file, which will already have a
 	 * module reference.
 	 */
-	filp->f_op 	   = fops_get(&uverbs_event_fops);
-	filp->f_path.mnt 	   = mntget(uverbs_event_mnt);
-	filp->f_path.dentry 	   = dget(uverbs_event_mnt->mnt_root);
-	filp->f_mapping    = filp->f_path.dentry->d_inode->i_mapping;
-	filp->f_flags      = O_RDONLY;
-	filp->f_mode       = FMODE_READ;
+	filp = alloc_file(uverbs_event_mnt, dget(uverbs_event_mnt->mnt_root),
+			  FMODE_READ, fops_get(&uverbs_event_fops));
+	if (!filp) {
+		ret = -ENFILE;
+		goto err_fd;
+	}
+
 	filp->private_data = ev_file;
 
 	return filp;
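
alloc_file() bundles the get_empty_filp() plus manual f_op/f_path/f_mode setup into a single call; as the hunk shows, its signature here is (mnt, dentry, mode, fop). The replacement pattern, reduced; my_mnt and my_fops are hypothetical stand-ins:

    /* Sketch: alloc_file() in place of manual filp assembly.
     * my_mnt and my_fops are hypothetical stand-ins. */
    static struct file *my_event_file(void *priv)
    {
            struct file *filp;

            filp = alloc_file(my_mnt, dget(my_mnt->mnt_root),
                              FMODE_READ, fops_get(&my_fops));
            if (!filp)
                    return NULL;    /* caller unwinds the fd */

            filp->private_data = priv;
            return filp;
    }
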
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 86ed8af..0504208 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -248,7 +248,9 @@
 		  struct ib_srq_attr *srq_attr,
 		  enum ib_srq_attr_mask srq_attr_mask)
 {
-	return srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL);
+	return srq->device->modify_srq ?
+		srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
+		-ENOSYS;
 }
 EXPORT_SYMBOL(ib_modify_srq);
 
@@ -628,6 +630,13 @@
 }
 EXPORT_SYMBOL(ib_create_cq);
 
+int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
+{
+	return cq->device->modify_cq ?
+		cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
+}
+EXPORT_SYMBOL(ib_modify_cq);
+
 int ib_destroy_cq(struct ib_cq *cq)
 {
 	if (atomic_read(&cq->usecnt))
@@ -672,6 +681,9 @@
 {
 	struct ib_mr *mr;
 
+	if (!pd->device->reg_phys_mr)
+		return ERR_PTR(-ENOSYS);
+
 	mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
 				     mr_access_flags, iova_start);
 
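modify_srq, modify_cq and reg_phys_mr are optional device methods, so the core now tests the function pointer and returns -ENOSYS when a driver leaves it NULL instead of jumping through it. The guard generalises to any optional verb; some_verb is a hypothetical member:

    /* Optional-method guard (sketch; some_verb is hypothetical). */
    static int ib_call_optional(struct ib_device *dev, int arg)
    {
            if (!dev->some_verb)
                    return -ENOSYS; /* driver does not implement it */
            return dev->some_verb(dev, arg);
    }
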
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c
index f283a9f..113f3c0 100644
--- a/drivers/infiniband/hw/amso1100/c2.c
+++ b/drivers/infiniband/hw/amso1100/c2.c
@@ -130,10 +130,10 @@
 		tx_desc->status = 0;
 
 		/* Set TXP_HTXD_UNINIT */
-		__raw_writeq(cpu_to_be64(0x1122334455667788ULL),
+		__raw_writeq((__force u64) cpu_to_be64(0x1122334455667788ULL),
 			     (void __iomem *) txp_desc + C2_TXP_ADDR);
 		__raw_writew(0, (void __iomem *) txp_desc + C2_TXP_LEN);
-		__raw_writew(cpu_to_be16(TXP_HTXD_UNINIT),
+		__raw_writew((__force u16) cpu_to_be16(TXP_HTXD_UNINIT),
 			     (void __iomem *) txp_desc + C2_TXP_FLAGS);
 
 		elem->skb = NULL;
@@ -179,13 +179,13 @@
 		rx_desc->status = 0;
 
 		/* Set RXP_HRXD_UNINIT */
-		__raw_writew(cpu_to_be16(RXP_HRXD_OK),
+		__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_OK),
 		       (void __iomem *) rxp_desc + C2_RXP_STATUS);
 		__raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_COUNT);
 		__raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_LEN);
-		__raw_writeq(cpu_to_be64(0x99aabbccddeeffULL),
+		__raw_writeq((__force u64) cpu_to_be64(0x99aabbccddeeffULL),
 			     (void __iomem *) rxp_desc + C2_RXP_ADDR);
-		__raw_writew(cpu_to_be16(RXP_HRXD_UNINIT),
+		__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_UNINIT),
 			     (void __iomem *) rxp_desc + C2_RXP_FLAGS);
 
 		elem->skb = NULL;
@@ -239,10 +239,11 @@
 	rxp_hdr->flags = RXP_HRXD_READY;
 
 	__raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
-	__raw_writew(cpu_to_be16((u16) maplen - sizeof(*rxp_hdr)),
+	__raw_writew((__force u16) cpu_to_be16((u16) maplen - sizeof(*rxp_hdr)),
 		     elem->hw_desc + C2_RXP_LEN);
-	__raw_writeq(cpu_to_be64(mapaddr), elem->hw_desc + C2_RXP_ADDR);
-	__raw_writew(cpu_to_be16(RXP_HRXD_READY), elem->hw_desc + C2_RXP_FLAGS);
+	__raw_writeq((__force u64) cpu_to_be64(mapaddr), elem->hw_desc + C2_RXP_ADDR);
+	__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
+		     elem->hw_desc + C2_RXP_FLAGS);
 
 	elem->skb = skb;
 	elem->mapaddr = mapaddr;
@@ -290,9 +291,9 @@
 		__raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
 		__raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
 		__raw_writew(0, elem->hw_desc + C2_RXP_LEN);
-		__raw_writeq(cpu_to_be64(0x99aabbccddeeffULL),
+		__raw_writeq((__force u64) cpu_to_be64(0x99aabbccddeeffULL),
 			     elem->hw_desc + C2_RXP_ADDR);
-		__raw_writew(cpu_to_be16(RXP_HRXD_UNINIT),
+		__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_UNINIT),
 			     elem->hw_desc + C2_RXP_FLAGS);
 
 		if (elem->skb) {
@@ -346,16 +347,16 @@
 					     elem->hw_desc + C2_TXP_LEN);
 				__raw_writeq(0,
 					     elem->hw_desc + C2_TXP_ADDR);
-				__raw_writew(cpu_to_be16(TXP_HTXD_DONE),
+				__raw_writew((__force u16) cpu_to_be16(TXP_HTXD_DONE),
 					     elem->hw_desc + C2_TXP_FLAGS);
 				c2_port->netstats.tx_dropped++;
 				break;
 			} else {
 				__raw_writew(0,
 					     elem->hw_desc + C2_TXP_LEN);
-				__raw_writeq(cpu_to_be64(0x1122334455667788ULL),
+				__raw_writeq((__force u64) cpu_to_be64(0x1122334455667788ULL),
 					     elem->hw_desc + C2_TXP_ADDR);
-				__raw_writew(cpu_to_be16(TXP_HTXD_UNINIT),
+				__raw_writew((__force u16) cpu_to_be16(TXP_HTXD_UNINIT),
 					     elem->hw_desc + C2_TXP_FLAGS);
 			}
 
@@ -390,7 +391,7 @@
 	for (elem = tx_ring->to_clean; elem != tx_ring->to_use;
 	     elem = elem->next) {
 		txp_htxd.flags =
-		    be16_to_cpu(readw(elem->hw_desc + C2_TXP_FLAGS));
+		    be16_to_cpu((__force __be16) readw(elem->hw_desc + C2_TXP_FLAGS));
 
 		if (txp_htxd.flags != TXP_HTXD_DONE)
 			break;
@@ -398,7 +399,7 @@
 		if (netif_msg_tx_done(c2_port)) {
 			/* PCI reads are expensive in fast path */
 			txp_htxd.len =
-			    be16_to_cpu(readw(elem->hw_desc + C2_TXP_LEN));
+			    be16_to_cpu((__force __be16) readw(elem->hw_desc + C2_TXP_LEN));
 			pr_debug("%s: tx done slot %3Zu status 0x%x len "
 				"%5u bytes\n",
 				netdev->name, elem - tx_ring->start,
@@ -448,10 +449,12 @@
 	/* Write the descriptor to the adapter's rx ring */
 	__raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
 	__raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
-	__raw_writew(cpu_to_be16((u16) elem->maplen - sizeof(*rxp_hdr)),
+	__raw_writew((__force u16) cpu_to_be16((u16) elem->maplen - sizeof(*rxp_hdr)),
 		     elem->hw_desc + C2_RXP_LEN);
-	__raw_writeq(cpu_to_be64(elem->mapaddr), elem->hw_desc + C2_RXP_ADDR);
-	__raw_writew(cpu_to_be16(RXP_HRXD_READY), elem->hw_desc + C2_RXP_FLAGS);
+	__raw_writeq((__force u64) cpu_to_be64(elem->mapaddr),
+		     elem->hw_desc + C2_RXP_ADDR);
+	__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
+		     elem->hw_desc + C2_RXP_FLAGS);
 
 	pr_debug("packet dropped\n");
 	c2_port->netstats.rx_dropped++;
@@ -653,7 +656,7 @@
 	     i++, elem++) {
 		rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;
 		rxp_hdr->flags = 0;
-		__raw_writew(cpu_to_be16(RXP_HRXD_READY),
+		__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
 			     elem->hw_desc + C2_RXP_FLAGS);
 	}
 
@@ -787,9 +790,12 @@
 	elem->maplen = maplen;
 
 	/* Tell HW to xmit */
-	__raw_writeq(cpu_to_be64(mapaddr), elem->hw_desc + C2_TXP_ADDR);
-	__raw_writew(cpu_to_be16(maplen), elem->hw_desc + C2_TXP_LEN);
-	__raw_writew(cpu_to_be16(TXP_HTXD_READY), elem->hw_desc + C2_TXP_FLAGS);
+	__raw_writeq((__force u64) cpu_to_be64(mapaddr),
+		     elem->hw_desc + C2_TXP_ADDR);
+	__raw_writew((__force u16) cpu_to_be16(maplen),
+		     elem->hw_desc + C2_TXP_LEN);
+	__raw_writew((__force u16) cpu_to_be16(TXP_HTXD_READY),
+		     elem->hw_desc + C2_TXP_FLAGS);
 
 	c2_port->netstats.tx_packets++;
 	c2_port->netstats.tx_bytes += maplen;
@@ -810,11 +816,11 @@
 			elem->maplen = maplen;
 
 			/* Tell HW to xmit */
-			__raw_writeq(cpu_to_be64(mapaddr),
+			__raw_writeq((__force u64) cpu_to_be64(mapaddr),
 				     elem->hw_desc + C2_TXP_ADDR);
-			__raw_writew(cpu_to_be16(maplen),
+			__raw_writew((__force u16) cpu_to_be16(maplen),
 				     elem->hw_desc + C2_TXP_LEN);
-			__raw_writew(cpu_to_be16(TXP_HTXD_READY),
+			__raw_writew((__force u16) cpu_to_be16(TXP_HTXD_READY),
 				     elem->hw_desc + C2_TXP_FLAGS);
 
 			c2_port->netstats.tx_packets++;
@@ -1005,7 +1011,7 @@
 	/* Remap the adapter PCI registers in BAR4 */
 	mmio_regs = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
 				    sizeof(struct c2_adapter_pci_regs));
-	if (mmio_regs == 0UL) {
+	if (!mmio_regs) {
 		printk(KERN_ERR PFX
 			"Unable to remap adapter PCI registers in BAR4\n");
 		ret = -EIO;
@@ -1029,10 +1035,10 @@
 	}
 
 	/* Validate the adapter version */
-	if (be32_to_cpu(readl(mmio_regs + C2_REGS_VERS)) != C2_VERSION) {
+	if (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_VERS)) != C2_VERSION) {
 		printk(KERN_ERR PFX "Version mismatch "
 			"[fw=%u, c2=%u], Adapter not claimed\n",
-			be32_to_cpu(readl(mmio_regs + C2_REGS_VERS)),
+			be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_VERS)),
 			C2_VERSION);
 		ret = -EINVAL;
 		iounmap(mmio_regs);
@@ -1040,12 +1046,12 @@
 	}
 
 	/* Validate the adapter IVN */
-	if (be32_to_cpu(readl(mmio_regs + C2_REGS_IVN)) != C2_IVN) {
+	if (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_IVN)) != C2_IVN) {
 		printk(KERN_ERR PFX "Downlevel firmware. You should be using "
 		       "the OpenIB device support kit. "
 		       "[fw=0x%x, c2=0x%x], Adapter not claimed\n",
-			be32_to_cpu(readl(mmio_regs + C2_REGS_IVN)),
-			C2_IVN);
+		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_IVN)),
+		       C2_IVN);
 		ret = -EINVAL;
 		iounmap(mmio_regs);
 		goto bail2;
@@ -1068,7 +1074,7 @@
 
 	/* Get the last RX index */
 	c2dev->cur_rx =
-	    (be32_to_cpu(readl(mmio_regs + C2_REGS_HRX_CUR)) -
+	    (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_HRX_CUR)) -
 	     0xffffc000) / sizeof(struct c2_rxp_desc);
 
 	/* Request an interrupt line for the driver */
@@ -1090,7 +1096,7 @@
 	}
 
 	/* Save off the actual size prior to unmapping mmio_regs */
-	kva_map_size = be32_to_cpu(readl(mmio_regs + C2_REGS_PCI_WINSIZE));
+	kva_map_size = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_PCI_WINSIZE));
 
 	/* Unmap the adapter PCI registers in BAR4 */
 	iounmap(mmio_regs);
@@ -1109,7 +1115,7 @@
 	/* Remap the adapter HRXDQ PA space to kernel VA space */
 	c2dev->mmio_rxp_ring = ioremap_nocache(reg4_start + C2_RXP_HRXDQ_OFFSET,
 					       C2_RXP_HRXDQ_SIZE);
-	if (c2dev->mmio_rxp_ring == 0UL) {
+	if (!c2dev->mmio_rxp_ring) {
 		printk(KERN_ERR PFX "Unable to remap MMIO HRXDQ region\n");
 		ret = -EIO;
 		goto bail6;
@@ -1118,7 +1124,7 @@
 	/* Remap the adapter HTXDQ PA space to kernel VA space */
 	c2dev->mmio_txp_ring = ioremap_nocache(reg4_start + C2_TXP_HTXDQ_OFFSET,
 					       C2_TXP_HTXDQ_SIZE);
-	if (c2dev->mmio_txp_ring == 0UL) {
+	if (!c2dev->mmio_txp_ring) {
 		printk(KERN_ERR PFX "Unable to remap MMIO HTXDQ region\n");
 		ret = -EIO;
 		goto bail7;
@@ -1129,7 +1135,7 @@
 
 	/* Remap the PCI registers in adapter BAR0 to kernel VA space */
 	c2dev->regs = ioremap_nocache(reg0_start, reg0_len);
-	if (c2dev->regs == 0UL) {
+	if (!c2dev->regs) {
 		printk(KERN_ERR PFX "Unable to remap BAR0\n");
 		ret = -EIO;
 		goto bail8;
@@ -1139,7 +1145,7 @@
 	c2dev->pa = reg4_start + C2_PCI_REGS_OFFSET;
 	c2dev->kva = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
 				     kva_map_size);
-	if (c2dev->kva == 0UL) {
+	if (!c2dev->kva) {
 		printk(KERN_ERR PFX "Unable to remap BAR4\n");
 		ret = -EIO;
 		goto bail9;
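
The __raw_writew()/readw() accessors traffic in plain CPU-order integers while the amso1100 ring descriptors are big-endian, so every conversion gains a __force cast; the casts only silence sparse's endian checking and change no generated code. The resulting idiom:

    /* Sparse-clean MMIO of big-endian fields (sketch): the __force
     * casts strip annotations only; the byte order is unchanged. */
    static void write_be16(void __iomem *reg, u16 cpu_val)
    {
            __raw_writew((__force u16) cpu_to_be16(cpu_val), reg);
    }

    static u16 read_be16(void __iomem *reg)
    {
            return be16_to_cpu((__force __be16) readw(reg));
    }
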
diff --git a/drivers/infiniband/hw/amso1100/c2.h b/drivers/infiniband/hw/amso1100/c2.h
index fa58200..ed38ab8 100644
--- a/drivers/infiniband/hw/amso1100/c2.h
+++ b/drivers/infiniband/hw/amso1100/c2.h
@@ -346,7 +346,7 @@
 	//	spinlock_t aeq_lock;
 	//	spinlock_t rnic_lock;
 
-	u16 *hint_count;
+	__be16 *hint_count;
 	dma_addr_t hint_count_dma;
 	u16 hints_read;
 
@@ -425,10 +425,10 @@
 #endif
 
 #define C2_SET_CUR_RX(c2dev, cur_rx) \
-	__raw_writel(cpu_to_be32(cur_rx), c2dev->mmio_txp_ring + 4092)
+	__raw_writel((__force u32) cpu_to_be32(cur_rx), c2dev->mmio_txp_ring + 4092)
 
 #define C2_GET_CUR_RX(c2dev) \
-	be32_to_cpu(readl(c2dev->mmio_txp_ring + 4092))
+	be32_to_cpu((__force __be32) readl(c2dev->mmio_txp_ring + 4092))
 
 static inline struct c2_dev *to_c2dev(struct ib_device *ibdev)
 {
@@ -485,8 +485,8 @@
 extern int c2_rnic_init(struct c2_dev *c2dev);
 extern void c2_rnic_term(struct c2_dev *c2dev);
 extern void c2_rnic_interrupt(struct c2_dev *c2dev);
-extern int c2_del_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask);
-extern int c2_add_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask);
+extern int c2_del_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask);
+extern int c2_add_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask);
 
 /* QPs */
 extern int c2_alloc_qp(struct c2_dev *c2dev, struct c2_pd *pd,
@@ -545,7 +545,7 @@
 extern int c2_init_mqsp_pool(struct c2_dev *c2dev, gfp_t gfp_mask,
 			     struct sp_chunk **root);
 extern void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root);
-extern u16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
-			  dma_addr_t *dma_addr, gfp_t gfp_mask);
-extern void c2_free_mqsp(u16 * mqsp);
+extern __be16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
+			     dma_addr_t *dma_addr, gfp_t gfp_mask);
+extern void c2_free_mqsp(__be16* mqsp);
 #endif
diff --git a/drivers/infiniband/hw/amso1100/c2_ae.c b/drivers/infiniband/hw/amso1100/c2_ae.c
index a31439b..62af742 100644
--- a/drivers/infiniband/hw/amso1100/c2_ae.c
+++ b/drivers/infiniband/hw/amso1100/c2_ae.c
@@ -61,7 +61,7 @@
 	default:
 		printk(KERN_ERR PFX
 		       "%s - Unable to convert CM status: %d\n",
-		       __FUNCTION__, c2_status);
+		       __func__, c2_status);
 		return -EIO;
 	}
 }
@@ -193,9 +193,9 @@
 		pr_debug("%s: event = %s, user_context=%llx, "
 			"resource_type=%x, "
 			"resource=%x, qp_state=%s\n",
-			__FUNCTION__,
+			__func__,
 			to_event_str(event_id),
-			(unsigned long long) be64_to_cpu(wr->ae.ae_generic.user_context),
+			(unsigned long long) wr->ae.ae_generic.user_context,
 			be32_to_cpu(wr->ae.ae_generic.resource_type),
 			be32_to_cpu(wr->ae.ae_generic.resource),
 			to_qp_state_str(be32_to_cpu(wr->ae.ae_generic.qp_state)));
@@ -259,7 +259,7 @@
 			BUG_ON(1);
 			pr_debug("%s:%d Unexpected event_id=%d on QP=%p, "
 				"CM_ID=%p\n",
-				__FUNCTION__, __LINE__,
+				__func__, __LINE__,
 				event_id, qp, cm_id);
 			break;
 		}
@@ -276,7 +276,7 @@
 		pr_debug("C2_RES_IND_EP event_id=%d\n", event_id);
 		if (event_id != CCAE_CONNECTION_REQUEST) {
 			pr_debug("%s: Invalid event_id: %d\n",
-				__FUNCTION__, event_id);
+				__func__, event_id);
 			break;
 		}
 		cm_event.event = IW_CM_EVENT_CONNECT_REQUEST;
diff --git a/drivers/infiniband/hw/amso1100/c2_alloc.c b/drivers/infiniband/hw/amso1100/c2_alloc.c
index 0315f99..e911016 100644
--- a/drivers/infiniband/hw/amso1100/c2_alloc.c
+++ b/drivers/infiniband/hw/amso1100/c2_alloc.c
@@ -87,8 +87,8 @@
 	}
 }
 
-u16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
-		   dma_addr_t *dma_addr, gfp_t gfp_mask)
+__be16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
+		      dma_addr_t *dma_addr, gfp_t gfp_mask)
 {
 	u16 mqsp;
 
@@ -113,14 +113,14 @@
 		*dma_addr = head->dma_addr +
 			    ((unsigned long) &(head->shared_ptr[mqsp]) -
 			     (unsigned long) head);
-		pr_debug("%s addr %p dma_addr %llx\n", __FUNCTION__,
+		pr_debug("%s addr %p dma_addr %llx\n", __func__,
 			 &(head->shared_ptr[mqsp]), (unsigned long long) *dma_addr);
-		return &(head->shared_ptr[mqsp]);
+		return (__force __be16 *) &(head->shared_ptr[mqsp]);
 	}
 	return NULL;
 }
 
-void c2_free_mqsp(u16 * mqsp)
+void c2_free_mqsp(__be16 *mqsp)
 {
 	struct sp_chunk *head;
 	u16 idx;
@@ -129,7 +129,7 @@
 	head = (struct sp_chunk *) ((unsigned long) mqsp & PAGE_MASK);
 
 	/* Link head to new mqsp */
-	*mqsp = head->head;
+	*mqsp = (__force __be16) head->head;
 
 	/* Compute the shared_ptr index */
 	idx = ((unsigned long) mqsp & ~PAGE_MASK) >> 1;
diff --git a/drivers/infiniband/hw/amso1100/c2_cq.c b/drivers/infiniband/hw/amso1100/c2_cq.c
index d2b3366..bb17cce 100644
--- a/drivers/infiniband/hw/amso1100/c2_cq.c
+++ b/drivers/infiniband/hw/amso1100/c2_cq.c
@@ -422,8 +422,8 @@
 		goto bail1;
 
 	reply = (struct c2wr_cq_destroy_rep *) (unsigned long) (vq_req->reply_msg);
-
-	vq_repbuf_free(c2dev, reply);
+	if (reply)
+		vq_repbuf_free(c2dev, reply);
       bail1:
 	vq_req_free(c2dev, vq_req);
       bail0:
diff --git a/drivers/infiniband/hw/amso1100/c2_intr.c b/drivers/infiniband/hw/amso1100/c2_intr.c
index 0d0bc33..3b50954 100644
--- a/drivers/infiniband/hw/amso1100/c2_intr.c
+++ b/drivers/infiniband/hw/amso1100/c2_intr.c
@@ -174,7 +174,11 @@
 		return;
 	}
 
-	err = c2_errno(reply_msg);
+	if (reply_msg)
+		err = c2_errno(reply_msg);
+	else
+		err = -ENOMEM;
+
 	if (!err) switch (req->event) {
 	case IW_CM_EVENT_ESTABLISHED:
 		c2_set_qp_state(req->qp,
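
A verbs-queue request can complete without a reply buffer, so reply_msg may be zero; this hunk and the c2_cq.c one above now treat that as an allocation failure instead of dereferencing it. Condensed, assuming c2_errno() takes the reply pointer as shown:

    /* Sketch: tolerate a missing verbs-queue reply. */
    static int reply_errno(void *reply_msg)
    {
            return reply_msg ? c2_errno(reply_msg) : -ENOMEM;
    }
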
diff --git a/drivers/infiniband/hw/amso1100/c2_mm.c b/drivers/infiniband/hw/amso1100/c2_mm.c
index 1e4f464..b506fe2 100644
--- a/drivers/infiniband/hw/amso1100/c2_mm.c
+++ b/drivers/infiniband/hw/amso1100/c2_mm.c
@@ -45,7 +45,7 @@
  *	  Reply buffer _is_ freed by this function.
  */
 static int
-send_pbl_messages(struct c2_dev *c2dev, u32 stag_index,
+send_pbl_messages(struct c2_dev *c2dev, __be32 stag_index,
 		  unsigned long va, u32 pbl_depth,
 		  struct c2_vq_req *vq_req, int pbl_type)
 {
diff --git a/drivers/infiniband/hw/amso1100/c2_mq.c b/drivers/infiniband/hw/amso1100/c2_mq.c
index b88a755..0cddc49 100644
--- a/drivers/infiniband/hw/amso1100/c2_mq.c
+++ b/drivers/infiniband/hw/amso1100/c2_mq.c
@@ -64,7 +64,7 @@
 		q->priv = (q->priv + 1) % q->q_size;
 		q->hint_count++;
 		/* Update peer's offset. */
-		__raw_writew(cpu_to_be16(q->priv), &q->peer->shared);
+		__raw_writew((__force u16) cpu_to_be16(q->priv), &q->peer->shared);
 	}
 }
 
@@ -105,7 +105,7 @@
 #endif
 		q->priv = (q->priv + 1) % q->q_size;
 		/* Update peer's offset. */
-		__raw_writew(cpu_to_be16(q->priv), &q->peer->shared);
+		__raw_writew((__force u16) cpu_to_be16(q->priv), &q->peer->shared);
 	}
 }
 
diff --git a/drivers/infiniband/hw/amso1100/c2_mq.h b/drivers/infiniband/hw/amso1100/c2_mq.h
index 9185bbb..acede00 100644
--- a/drivers/infiniband/hw/amso1100/c2_mq.h
+++ b/drivers/infiniband/hw/amso1100/c2_mq.h
@@ -75,7 +75,7 @@
 	u16 hint_count;
 	u16 priv;
 	struct c2_mq_shared __iomem *peer;
-	u16 *shared;
+	__be16 *shared;
 	dma_addr_t shared_dma;
 	u32 q_size;
 	u32 msg_size;
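
The c2_mq changes follow from the same byte-order audit: the peer's shared offset is big-endian on the wire, so q->shared becomes __be16 *, and the __raw_writew() calls gain a __force cast because the raw accessor takes a plain u16 and performs no swapping of its own; the swap already happened in cpu_to_be16(). A userspace sketch of that division of labor, with htons() standing in for cpu_to_be16() and fake_reg for the device register:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* stand-in for __raw_writew(): stores the 16-bit value verbatim */
static void raw_writew(uint16_t val, volatile uint16_t *reg)
{
	*reg = val;
}

int main(void)
{
	volatile uint16_t fake_reg;	/* pretend device register */
	uint16_t priv = 0x0102;		/* CPU-order ring index */

	/* swap explicitly, then store raw: the device always sees the
	 * bytes 01 02, whatever the host byte order */
	raw_writew(htons(priv), &fake_reg);
	printf("stored 0x%04x\n", (unsigned) fake_reg);
	return 0;
}
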
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index 7a6cece..e10d27a 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -67,7 +67,7 @@
 {
 	struct c2_dev *c2dev = to_c2dev(ibdev);
 
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 
 	*props = c2dev->props;
 	return 0;
@@ -76,7 +76,7 @@
 static int c2_query_port(struct ib_device *ibdev,
 			 u8 port, struct ib_port_attr *props)
 {
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 
 	props->max_mtu = IB_MTU_4096;
 	props->lid = 0;
@@ -102,14 +102,14 @@
 			  u8 port, int port_modify_mask,
 			  struct ib_port_modify *props)
 {
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	return 0;
 }
 
 static int c2_query_pkey(struct ib_device *ibdev,
 			 u8 port, u16 index, u16 * pkey)
 {
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	*pkey = 0;
 	return 0;
 }
@@ -119,7 +119,7 @@
 {
 	struct c2_dev *c2dev = to_c2dev(ibdev);
 
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
 	memcpy(&(gid->raw[0]), c2dev->pseudo_netdev->dev_addr, 6);
 
@@ -134,7 +134,7 @@
 {
 	struct c2_ucontext *context;
 
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	context = kmalloc(sizeof(*context), GFP_KERNEL);
 	if (!context)
 		return ERR_PTR(-ENOMEM);
@@ -144,14 +144,14 @@
 
 static int c2_dealloc_ucontext(struct ib_ucontext *context)
 {
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	kfree(context);
 	return 0;
 }
 
 static int c2_mmap_uar(struct ib_ucontext *context, struct vm_area_struct *vma)
 {
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	return -ENOSYS;
 }
 
@@ -162,7 +162,7 @@
 	struct c2_pd *pd;
 	int err;
 
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 
 	pd = kmalloc(sizeof(*pd), GFP_KERNEL);
 	if (!pd)
@@ -187,7 +187,7 @@
 
 static int c2_dealloc_pd(struct ib_pd *pd)
 {
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	c2_pd_free(to_c2dev(pd->device), to_c2pd(pd));
 	kfree(pd);
 
@@ -196,13 +196,13 @@
 
 static struct ib_ah *c2_ah_create(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
 {
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	return ERR_PTR(-ENOSYS);
 }
 
 static int c2_ah_destroy(struct ib_ah *ah)
 {
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	return -ENOSYS;
 }
 
@@ -230,7 +230,7 @@
 
 	qp = c2_find_qpn(c2dev, qpn);
 	pr_debug("%s Returning QP=%p for QPN=%d, device=%p, refcount=%d\n",
-		__FUNCTION__, qp, qpn, device,
+		__func__, qp, qpn, device,
 		(qp?atomic_read(&qp->refcount):0));
 
 	return (qp?&qp->ibqp:NULL);
@@ -243,13 +243,16 @@
 	struct c2_qp *qp;
 	int err;
 
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
+
+	if (init_attr->create_flags)
+		return ERR_PTR(-EINVAL);
 
 	switch (init_attr->qp_type) {
 	case IB_QPT_RC:
 		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
 		if (!qp) {
-			pr_debug("%s: Unable to allocate QP\n", __FUNCTION__);
+			pr_debug("%s: Unable to allocate QP\n", __func__);
 			return ERR_PTR(-ENOMEM);
 		}
 		spin_lock_init(&qp->lock);
@@ -266,7 +269,7 @@
 
 		break;
 	default:
-		pr_debug("%s: Invalid QP type: %d\n", __FUNCTION__,
+		pr_debug("%s: Invalid QP type: %d\n", __func__,
 			init_attr->qp_type);
 		return ERR_PTR(-EINVAL);
 		break;
@@ -285,7 +288,7 @@
 	struct c2_qp *qp = to_c2qp(ib_qp);
 
 	pr_debug("%s:%u qp=%p,qp->state=%d\n",
-		__FUNCTION__, __LINE__,ib_qp,qp->state);
+		__func__, __LINE__, ib_qp, qp->state);
 	c2_free_qp(to_c2dev(ib_qp->device), qp);
 	kfree(qp);
 	return 0;
@@ -300,13 +303,13 @@
 
 	cq = kmalloc(sizeof(*cq), GFP_KERNEL);
 	if (!cq) {
-		pr_debug("%s: Unable to allocate CQ\n", __FUNCTION__);
+		pr_debug("%s: Unable to allocate CQ\n", __func__);
 		return ERR_PTR(-ENOMEM);
 	}
 
 	err = c2_init_cq(to_c2dev(ibdev), entries, NULL, cq);
 	if (err) {
-		pr_debug("%s: error initializing CQ\n", __FUNCTION__);
+		pr_debug("%s: error initializing CQ\n", __func__);
 		kfree(cq);
 		return ERR_PTR(err);
 	}
@@ -318,7 +321,7 @@
 {
 	struct c2_cq *cq = to_c2cq(ib_cq);
 
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 
 	c2_free_cq(to_c2dev(ib_cq->device), cq);
 	kfree(cq);
@@ -400,7 +403,7 @@
 	mr->umem = NULL;
 	pr_debug("%s - page shift %d, pbl_depth %d, total_len %u, "
 		"*iova_start %llx, first pa %llx, last pa %llx\n",
-		__FUNCTION__, page_shift, pbl_depth, total_len,
+		__func__, page_shift, pbl_depth, total_len,
 		(unsigned long long) *iova_start,
 	       	(unsigned long long) page_list[0],
 	       	(unsigned long long) page_list[pbl_depth-1]);
@@ -422,7 +425,7 @@
 	struct ib_phys_buf bl;
 	u64 kva = 0;
 
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 
 	/* AMSO1100 limit */
 	bl.size = 0xffffffff;
@@ -442,7 +445,7 @@
 	struct c2_pd *c2pd = to_c2pd(pd);
 	struct c2_mr *c2mr;
 
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 
 	c2mr = kmalloc(sizeof(*c2mr), GFP_KERNEL);
 	if (!c2mr)
@@ -506,7 +509,7 @@
 	struct c2_mr *mr = to_c2mr(ib_mr);
 	int err;
 
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 
 	err = c2_stag_dealloc(to_c2dev(ib_mr->device), ib_mr->lkey);
 	if (err)
@@ -523,14 +526,14 @@
 static ssize_t show_rev(struct class_device *cdev, char *buf)
 {
 	struct c2_dev *dev = container_of(cdev, struct c2_dev, ibdev.class_dev);
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	return sprintf(buf, "%x\n", dev->props.hw_ver);
 }
 
 static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
 {
 	struct c2_dev *dev = container_of(cdev, struct c2_dev, ibdev.class_dev);
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	return sprintf(buf, "%x.%x.%x\n",
 		       (int) (dev->props.fw_ver >> 32),
 		       (int) (dev->props.fw_ver >> 16) & 0xffff,
@@ -539,13 +542,13 @@
 
 static ssize_t show_hca(struct class_device *cdev, char *buf)
 {
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	return sprintf(buf, "AMSO1100\n");
 }
 
 static ssize_t show_board(struct class_device *cdev, char *buf)
 {
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	return sprintf(buf, "%.*s\n", 32, "AMSO1100 Board ID");
 }
 
@@ -575,13 +578,13 @@
 
 static int c2_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 {
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	return -ENOSYS;
 }
 
 static int c2_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 {
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	return -ENOSYS;
 }
 
@@ -592,13 +595,13 @@
 			  struct ib_grh *in_grh,
 			  struct ib_mad *in_mad, struct ib_mad *out_mad)
 {
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	return -ENOSYS;
 }
 
 static int c2_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
 {
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 
 	/* Request a connection */
 	return c2_llp_connect(cm_id, iw_param);
@@ -606,7 +609,7 @@
 
 static int c2_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
 {
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 
 	/* Accept the new connection */
 	return c2_llp_accept(cm_id, iw_param);
@@ -616,7 +619,7 @@
 {
 	int err;
 
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 
 	err = c2_llp_reject(cm_id, pdata, pdata_len);
 	return err;
@@ -626,10 +629,10 @@
 {
 	int err;
 
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	err = c2_llp_service_create(cm_id, backlog);
 	pr_debug("%s:%u err=%d\n",
-		__FUNCTION__, __LINE__,
+		__func__, __LINE__,
 		err);
 	return err;
 }
@@ -637,7 +640,7 @@
 static int c2_service_destroy(struct iw_cm_id *cm_id)
 {
 	int err;
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 
 	err = c2_llp_service_destroy(cm_id);
 
@@ -743,7 +746,7 @@
 	netdev = alloc_netdev(sizeof(*netdev), name, setup);
 	if (!netdev) {
 		printk(KERN_ERR PFX "%s -  etherdev alloc failed",
-			__FUNCTION__);
+			__func__);
 		return NULL;
 	}
 
@@ -780,7 +783,7 @@
 	if (ret)
 		goto out2;
 
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	strlcpy(dev->ibdev.name, "amso%d", IB_DEVICE_NAME_MAX);
 	dev->ibdev.owner = THIS_MODULE;
 	dev->ibdev.uverbs_cmd_mask =
@@ -873,13 +876,13 @@
 out2:
 	free_netdev(dev->pseudo_netdev);
 out3:
-	pr_debug("%s:%u ret=%d\n", __FUNCTION__, __LINE__, ret);
+	pr_debug("%s:%u ret=%d\n", __func__, __LINE__, ret);
 	return ret;
 }
 
 void c2_unregister_device(struct c2_dev *dev)
 {
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	unregister_netdev(dev->pseudo_netdev);
 	free_netdev(dev->pseudo_netdev);
 	ib_unregister_device(&dev->ibdev);
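
Most of c2_provider.c's churn is the tree-wide __FUNCTION__ to __func__ conversion that runs through this merge: __FUNCTION__ is a GCC extension, while __func__ has been a standard predefined identifier since C99, so the cleanup drops the extension with no change in output. For example:

#include <stdio.h>

/* a debug helper in the style of the pr_debug() calls above */
#define dbg_trace()	printf("%s:%u\n", __func__, __LINE__)

static void example(void)
{
	dbg_trace();		/* prints "example:<line>" */
}

int main(void)
{
	example();
	return 0;
}

One caveat: __func__ is a predefined identifier (effectively a static const char array), not a macro, so unlike __FUNCTION__ under very old GCC it cannot be pasted into adjacent string literals at compile time.
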
diff --git a/drivers/infiniband/hw/amso1100/c2_qp.c b/drivers/infiniband/hw/amso1100/c2_qp.c
index 01d0786..a6d8944 100644
--- a/drivers/infiniband/hw/amso1100/c2_qp.c
+++ b/drivers/infiniband/hw/amso1100/c2_qp.c
@@ -121,7 +121,7 @@
 	int new_state = to_ib_state(c2_state);
 
 	pr_debug("%s: qp[%p] state modify %s --> %s\n",
-	       __FUNCTION__,
+	       __func__,
 		qp,
 		to_ib_state_str(qp->state),
 		to_ib_state_str(new_state));
@@ -141,7 +141,7 @@
 	int err;
 
 	pr_debug("%s:%d qp=%p, %s --> %s\n",
-		__FUNCTION__, __LINE__,
+		__func__, __LINE__,
 		qp,
 		to_ib_state_str(qp->state),
 		to_ib_state_str(attr->qp_state));
@@ -224,7 +224,7 @@
 		qp->state = next_state;
 #ifdef DEBUG
 	else
-		pr_debug("%s: c2_errno=%d\n", __FUNCTION__, err);
+		pr_debug("%s: c2_errno=%d\n", __func__, err);
 #endif
 	/*
 	 * If we're going to error and generating the event here, then
@@ -243,7 +243,7 @@
 	vq_req_free(c2dev, vq_req);
 
 	pr_debug("%s:%d qp=%p, cur_state=%s\n",
-		__FUNCTION__, __LINE__,
+		__func__, __LINE__,
 		qp,
 		to_ib_state_str(qp->state));
 	return err;
@@ -811,16 +811,24 @@
 
 		switch (ib_wr->opcode) {
 		case IB_WR_SEND:
-			if (ib_wr->send_flags & IB_SEND_SOLICITED) {
-				c2_wr_set_id(&wr, C2_WR_TYPE_SEND_SE);
-				msg_size = sizeof(struct c2wr_send_req);
+		case IB_WR_SEND_WITH_INV:
+			if (ib_wr->opcode == IB_WR_SEND) {
+				if (ib_wr->send_flags & IB_SEND_SOLICITED)
+					c2_wr_set_id(&wr, C2_WR_TYPE_SEND_SE);
+				else
+					c2_wr_set_id(&wr, C2_WR_TYPE_SEND);
+				wr.sqwr.send.remote_stag = 0;
 			} else {
-				c2_wr_set_id(&wr, C2_WR_TYPE_SEND);
-				msg_size = sizeof(struct c2wr_send_req);
+				if (ib_wr->send_flags & IB_SEND_SOLICITED)
+					c2_wr_set_id(&wr, C2_WR_TYPE_SEND_SE_INV);
+				else
+					c2_wr_set_id(&wr, C2_WR_TYPE_SEND_INV);
+				wr.sqwr.send.remote_stag =
+					cpu_to_be32(ib_wr->ex.invalidate_rkey);
 			}
 
-			wr.sqwr.send.remote_stag = 0;
-			msg_size += sizeof(struct c2_data_addr) * ib_wr->num_sge;
+			msg_size = sizeof(struct c2wr_send_req) +
+				sizeof(struct c2_data_addr) * ib_wr->num_sge;
 			if (ib_wr->num_sge > qp->send_sgl_depth) {
 				err = -EINVAL;
 				break;
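
The c2_qp.c hunk above adds IB_WR_SEND_WITH_INV support to the post-send path: the solicited flag still selects the *_SE work-request type, the new opcode selects the *_INV variant, the rkey to invalidate travels in remote_stag (zeroed for plain sends), and the msg_size computation is factored out of the branches. A sketch of the resulting four-way mapping; the enum and function are illustrative, not the driver's:

enum wr_type { SEND, SEND_SE, SEND_INV, SEND_SE_INV };

/* map (send-with-invalidate?, solicited?) to the adapter WR type */
static enum wr_type classify(int with_inv, int solicited)
{
	if (!with_inv)
		return solicited ? SEND_SE : SEND;
	return solicited ? SEND_SE_INV : SEND_INV;
}

int main(void)
{
	return classify(1, 0) == SEND_INV ? 0 : 1;
}
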
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index 1687c51..9a054c6 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -208,7 +208,7 @@
 /*
  * Add an IP address to the RNIC interface
  */
-int c2_add_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask)
+int c2_add_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask)
 {
 	struct c2_vq_req *vq_req;
 	struct c2wr_rnic_setconfig_req *wr;
@@ -270,7 +270,7 @@
 /*
  * Delete an IP address from the RNIC interface
  */
-int c2_del_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask)
+int c2_del_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask)
 {
 	struct c2_vq_req *vq_req;
 	struct c2wr_rnic_setconfig_req *wr;
@@ -455,7 +455,8 @@
 	     IB_DEVICE_CURR_QP_STATE_MOD |
 	     IB_DEVICE_SYS_IMAGE_GUID |
 	     IB_DEVICE_ZERO_STAG |
-	     IB_DEVICE_SEND_W_INV | IB_DEVICE_MEM_WINDOW);
+	     IB_DEVICE_MEM_WINDOW |
+	     IB_DEVICE_SEND_W_INV);
 
 	/* Allocate the qptr_array */
 	c2dev->qptr_array = vmalloc(C2_MAX_CQS * sizeof(void *));
@@ -506,17 +507,17 @@
 	mmio_regs = c2dev->kva;
 	/* Initialize the Verbs Request Queue */
 	c2_mq_req_init(&c2dev->req_vq, 0,
-		       be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_QSIZE)),
-		       be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_MSGSIZE)),
+		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_QSIZE)),
+		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_MSGSIZE)),
 		       mmio_regs +
-		       be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_POOLSTART)),
+		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_POOLSTART)),
 		       mmio_regs +
-		       be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_SHARED)),
+		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_SHARED)),
 		       C2_MQ_ADAPTER_TARGET);
 
 	/* Initialize the Verbs Reply Queue */
-	qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_QSIZE));
-	msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_MSGSIZE));
+	qsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_QSIZE));
+	msgsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_MSGSIZE));
 	q1_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
 				      &c2dev->rep_vq.host_dma, GFP_KERNEL);
 	if (!q1_pages) {
@@ -524,7 +525,7 @@
 		goto bail1;
 	}
 	pci_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma);
-	pr_debug("%s rep_vq va %p dma %llx\n", __FUNCTION__, q1_pages,
+	pr_debug("%s rep_vq va %p dma %llx\n", __func__, q1_pages,
 		 (unsigned long long) c2dev->rep_vq.host_dma);
 	c2_mq_rep_init(&c2dev->rep_vq,
 		   1,
@@ -532,12 +533,12 @@
 		   msgsize,
 		   q1_pages,
 		   mmio_regs +
-		   be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_SHARED)),
+		   be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_SHARED)),
 		   C2_MQ_HOST_TARGET);
 
 	/* Initialize the Asynchronous Event Queue */
-	qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_QSIZE));
-	msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_MSGSIZE));
+	qsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_QSIZE));
+	msgsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_MSGSIZE));
 	q2_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
 				      &c2dev->aeq.host_dma, GFP_KERNEL);
 	if (!q2_pages) {
@@ -545,7 +546,7 @@
 		goto bail2;
 	}
 	pci_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma);
-	pr_debug("%s aeq va %p dma %llx\n", __FUNCTION__, q2_pages,
+	pr_debug("%s aeq va %p dma %llx\n", __func__, q2_pages,
 		 (unsigned long long) c2dev->aeq.host_dma);
 	c2_mq_rep_init(&c2dev->aeq,
 		       2,
@@ -553,7 +554,7 @@
 		       msgsize,
 		       q2_pages,
 		       mmio_regs +
-		       be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_SHARED)),
+		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_SHARED)),
 		       C2_MQ_HOST_TARGET);
 
 	/* Initialize the verbs request allocator */
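
The __force __be32 casts around readl() deserve a note: readl() is defined to return a CPU-order u32, but the AMSO1100 keeps these queue-descriptor registers big-endian, so the driver immediately reinterprets the raw value as __be32 and swaps it with be32_to_cpu(). A userspace analogue of the double conversion, assuming a little-endian host and using ntohl() in place of be32_to_cpu():

#include <arpa/inet.h>
#include <stdint.h>

/* what readl() would hand back: the register bytes in host order */
static uint32_t fake_readl(const volatile uint32_t *reg)
{
	return *reg;
}

static uint32_t read_be_reg(const volatile uint32_t *reg)
{
	/* be32_to_cpu((__force __be32) readl(reg)) in the hunks above */
	return ntohl(fake_readl(reg));
}

int main(void)
{
	volatile uint32_t reg = htonl(0x40);	/* device stores big-endian */

	return read_be_reg(&reg) == 0x40 ? 0 : 1;
}
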
diff --git a/drivers/infiniband/hw/amso1100/c2_vq.c b/drivers/infiniband/hw/amso1100/c2_vq.c
index cfdacb1..9ce7819b 100644
--- a/drivers/infiniband/hw/amso1100/c2_vq.c
+++ b/drivers/infiniband/hw/amso1100/c2_vq.c
@@ -197,7 +197,7 @@
 	 */
 	while (msg == NULL) {
 		pr_debug("%s:%d no available msg in VQ, waiting...\n",
-		       __FUNCTION__, __LINE__);
+		       __func__, __LINE__);
 		init_waitqueue_entry(&__wait, current);
 		add_wait_queue(&c2dev->req_vq_wo, &__wait);
 		spin_unlock(&c2dev->vqlock);
diff --git a/drivers/infiniband/hw/amso1100/c2_wr.h b/drivers/infiniband/hw/amso1100/c2_wr.h
index 3ec6c43..c65fbdd 100644
--- a/drivers/infiniband/hw/amso1100/c2_wr.h
+++ b/drivers/infiniband/hw/amso1100/c2_wr.h
@@ -180,8 +180,8 @@
 };
 
 struct c2_netaddr {
-	u32 ip_addr;
-	u32 netmask;
+	__be32 ip_addr;
+	__be32 netmask;
 	u32 mtu;
 };
 
@@ -199,9 +199,9 @@
  * A Scatter Gather Entry.
  */
 struct c2_data_addr {
-	u32 stag;
-	u32 length;
-	u64 to;
+	__be32 stag;
+	__be32 length;
+	__be64 to;
 };
 
 /*
@@ -274,7 +274,7 @@
 	 * from the host to adapter by libccil, but we copy it anyway
 	 * to make the memcpy to the adapter better aligned.
 	 */
-	u32 wqe_count;
+	__be32 wqe_count;
 
 	/* Put these fields next so that later 32- and 64-bit
 	 * quantities are naturally aligned.
@@ -316,8 +316,8 @@
 struct c2wr_rnic_open_req {
 	struct c2wr_hdr hdr;
 	u64 user_context;
-	u16 flags;		/* See enum c2_rnic_flags */
-	u16 port_num;
+	__be16 flags;		/* See enum c2_rnic_flags */
+	__be16 port_num;
 } __attribute__((packed));
 
 struct c2wr_rnic_open_rep {
@@ -341,30 +341,30 @@
 struct c2wr_rnic_query_rep {
 	struct c2wr_hdr hdr;
 	u64 user_context;
-	u32 vendor_id;
-	u32 part_number;
-	u32 hw_version;
-	u32 fw_ver_major;
-	u32 fw_ver_minor;
-	u32 fw_ver_patch;
+	__be32 vendor_id;
+	__be32 part_number;
+	__be32 hw_version;
+	__be32 fw_ver_major;
+	__be32 fw_ver_minor;
+	__be32 fw_ver_patch;
 	char fw_ver_build_str[WR_BUILD_STR_LEN];
-	u32 max_qps;
-	u32 max_qp_depth;
+	__be32 max_qps;
+	__be32 max_qp_depth;
 	u32 max_srq_depth;
 	u32 max_send_sgl_depth;
 	u32 max_rdma_sgl_depth;
-	u32 max_cqs;
-	u32 max_cq_depth;
+	__be32 max_cqs;
+	__be32 max_cq_depth;
 	u32 max_cq_event_handlers;
-	u32 max_mrs;
+	__be32 max_mrs;
 	u32 max_pbl_depth;
-	u32 max_pds;
-	u32 max_global_ird;
+	__be32 max_pds;
+	__be32 max_global_ird;
 	u32 max_global_ord;
-	u32 max_qp_ird;
-	u32 max_qp_ord;
+	__be32 max_qp_ird;
+	__be32 max_qp_ord;
 	u32 flags;
-	u32 max_mws;
+	__be32 max_mws;
 	u32 pbe_range_low;
 	u32 pbe_range_high;
 	u32 max_srqs;
@@ -405,7 +405,7 @@
 struct c2wr_rnic_setconfig_req {
 	struct c2wr_hdr hdr;
 	u32 rnic_handle;
-	u32 option;		/* See c2_setconfig_cmd_t */
+	__be32 option;		/* See c2_setconfig_cmd_t */
 	/* variable data and pad. See c2_netaddr and c2_route */
 	u8 data[0];
 } __attribute__((packed)) ;
@@ -441,18 +441,18 @@
  */
 struct c2wr_cq_create_req {
 	struct c2wr_hdr hdr;
-	u64 shared_ht;
+	__be64 shared_ht;
 	u64 user_context;
-	u64 msg_pool;
+	__be64 msg_pool;
 	u32 rnic_handle;
-	u32 msg_size;
-	u32 depth;
+	__be32 msg_size;
+	__be32 depth;
 } __attribute__((packed)) ;
 
 struct c2wr_cq_create_rep {
 	struct c2wr_hdr hdr;
-	u32 mq_index;
-	u32 adapter_shared;
+	__be32 mq_index;
+	__be32 adapter_shared;
 	u32 cq_handle;
 } __attribute__((packed)) ;
 
@@ -585,40 +585,40 @@
 
 struct c2wr_qp_create_req {
 	struct c2wr_hdr hdr;
-	u64 shared_sq_ht;
-	u64 shared_rq_ht;
+	__be64 shared_sq_ht;
+	__be64 shared_rq_ht;
 	u64 user_context;
 	u32 rnic_handle;
 	u32 sq_cq_handle;
 	u32 rq_cq_handle;
-	u32 sq_depth;
-	u32 rq_depth;
+	__be32 sq_depth;
+	__be32 rq_depth;
 	u32 srq_handle;
 	u32 srq_limit;
-	u32 flags;		/* see enum c2wr_qp_flags */
-	u32 send_sgl_depth;
-	u32 recv_sgl_depth;
-	u32 rdma_write_sgl_depth;
-	u32 ord;
-	u32 ird;
+	__be32 flags;		/* see enum c2wr_qp_flags */
+	__be32 send_sgl_depth;
+	__be32 recv_sgl_depth;
+	__be32 rdma_write_sgl_depth;
+	__be32 ord;
+	__be32 ird;
 	u32 pd_id;
 } __attribute__((packed)) ;
 
 struct c2wr_qp_create_rep {
 	struct c2wr_hdr hdr;
-	u32 sq_depth;
-	u32 rq_depth;
+	__be32 sq_depth;
+	__be32 rq_depth;
 	u32 send_sgl_depth;
 	u32 recv_sgl_depth;
 	u32 rdma_write_sgl_depth;
 	u32 ord;
 	u32 ird;
-	u32 sq_msg_size;
-	u32 sq_mq_index;
-	u32 sq_mq_start;
-	u32 rq_msg_size;
-	u32 rq_mq_index;
-	u32 rq_mq_start;
+	__be32 sq_msg_size;
+	__be32 sq_mq_index;
+	__be32 sq_mq_start;
+	__be32 rq_msg_size;
+	__be32 rq_mq_index;
+	__be32 rq_mq_start;
 	u32 qp_handle;
 } __attribute__((packed)) ;
 
@@ -667,11 +667,11 @@
 	u32 stream_msg_length;
 	u32 rnic_handle;
 	u32 qp_handle;
-	u32 next_qp_state;
-	u32 ord;
-	u32 ird;
-	u32 sq_depth;
-	u32 rq_depth;
+	__be32 next_qp_state;
+	__be32 ord;
+	__be32 ird;
+	__be32 sq_depth;
+	__be32 rq_depth;
 	u32 llp_ep_handle;
 } __attribute__((packed)) ;
 
@@ -721,10 +721,10 @@
 	struct c2wr_hdr hdr;
 	u32 rnic_handle;
 	u32 qp_handle;
-	u32 remote_addr;
-	u16 remote_port;
+	__be32 remote_addr;
+	__be16 remote_port;
 	u16 pad;
-	u32 private_data_length;
+	__be32 private_data_length;
 	u8 private_data[0];	/* Private data in-line. */
 } __attribute__((packed)) ;
 
@@ -759,25 +759,25 @@
 
 struct c2wr_nsmr_register_req {
 	struct c2wr_hdr hdr;
-	u64 va;
+	__be64 va;
 	u32 rnic_handle;
-	u16 flags;
+	__be16 flags;
 	u8 stag_key;
 	u8 pad;
 	u32 pd_id;
-	u32 pbl_depth;
-	u32 pbe_size;
-	u32 fbo;
-	u32 length;
-	u32 addrs_length;
+	__be32 pbl_depth;
+	__be32 pbe_size;
+	__be32 fbo;
+	__be32 length;
+	__be32 addrs_length;
 	/* array of paddrs (must be aligned on a 64bit boundary) */
-	u64 paddrs[0];
+	__be64 paddrs[0];
 } __attribute__((packed)) ;
 
 struct c2wr_nsmr_register_rep {
 	struct c2wr_hdr hdr;
 	u32 pbl_depth;
-	u32 stag_index;
+	__be32 stag_index;
 } __attribute__((packed)) ;
 
 union c2wr_nsmr_register {
@@ -788,11 +788,11 @@
 struct c2wr_nsmr_pbl_req {
 	struct c2wr_hdr hdr;
 	u32 rnic_handle;
-	u32 flags;
-	u32 stag_index;
-	u32 addrs_length;
+	__be32 flags;
+	__be32 stag_index;
+	__be32 addrs_length;
 	/* array of paddrs (must be aligned on a 64bit boundary) */
-	u64 paddrs[0];
+	__be64 paddrs[0];
 } __attribute__((packed)) ;
 
 struct c2wr_nsmr_pbl_rep {
@@ -847,7 +847,7 @@
 struct c2wr_stag_dealloc_req {
 	struct c2wr_hdr hdr;
 	u32 rnic_handle;
-	u32 stag_index;
+	__be32 stag_index;
 } __attribute__((packed)) ;
 
 struct c2wr_stag_dealloc_rep {
@@ -949,7 +949,7 @@
 	u64 qp_user_context;	/* c2_user_qp_t * */
 	u32 qp_state;		/* Current QP State */
 	u32 handle;		/* QPID or EP Handle */
-	u32 bytes_rcvd;		/* valid for RECV WCs */
+	__be32 bytes_rcvd;		/* valid for RECV WCs */
 	u32 stag;
 } __attribute__((packed)) ;
 
@@ -984,8 +984,8 @@
  */
 struct c2wr_send_req {
 	struct c2_sq_hdr sq_hdr;
-	u32 sge_len;
-	u32 remote_stag;
+	__be32 sge_len;
+	__be32 remote_stag;
 	u8 data[0];		/* SGE array */
 } __attribute__((packed));
 
@@ -996,9 +996,9 @@
 
 struct c2wr_rdma_write_req {
 	struct c2_sq_hdr sq_hdr;
-	u64 remote_to;
-	u32 remote_stag;
-	u32 sge_len;
+	__be64 remote_to;
+	__be32 remote_stag;
+	__be32 sge_len;
 	u8 data[0];		/* SGE array */
 } __attribute__((packed));
 
@@ -1009,11 +1009,11 @@
 
 struct c2wr_rdma_read_req {
 	struct c2_sq_hdr sq_hdr;
-	u64 local_to;
-	u64 remote_to;
-	u32 local_stag;
-	u32 remote_stag;
-	u32 length;
+	__be64 local_to;
+	__be64 remote_to;
+	__be32 local_stag;
+	__be32 remote_stag;
+	__be32 length;
 } __attribute__((packed));
 
 union c2wr_rdma_read {
@@ -1113,9 +1113,9 @@
 struct c2wr_ae_hdr {
 	struct c2wr_hdr hdr;
 	u64 user_context;	/* user context for this res. */
-	u32 resource_type;	/* see enum c2_resource_indicator */
-	u32 resource;		/* handle for resource */
-	u32 qp_state;		/* current QP State */
+	__be32 resource_type;	/* see enum c2_resource_indicator */
+	__be32 resource;	/* handle for resource */
+	__be32 qp_state;	/* current QP State */
 } __attribute__((packed));
 
 /*
@@ -1124,11 +1124,11 @@
  */
 struct c2wr_ae_active_connect_results {
 	struct c2wr_ae_hdr ae_hdr;
-	u32 laddr;
-	u32 raddr;
-	u16 lport;
-	u16 rport;
-	u32 private_data_length;
+	__be32 laddr;
+	__be32 raddr;
+	__be16 lport;
+	__be16 rport;
+	__be32 private_data_length;
 	u8 private_data[0];	/* data is in-line in the msg. */
 } __attribute__((packed));
 
@@ -1142,11 +1142,11 @@
 struct c2wr_ae_connection_request {
 	struct c2wr_ae_hdr ae_hdr;
 	u32 cr_handle;		/* connreq handle (sock ptr) */
-	u32 laddr;
-	u32 raddr;
-	u16 lport;
-	u16 rport;
-	u32 private_data_length;
+	__be32 laddr;
+	__be32 raddr;
+	__be16 lport;
+	__be16 rport;
+	__be32 private_data_length;
 	u8 private_data[0];	/* data is in-line in the msg. */
 } __attribute__((packed));
 
@@ -1158,12 +1158,12 @@
 
 struct c2wr_init_req {
 	struct c2wr_hdr hdr;
-	u64 hint_count;
-	u64 q0_host_shared;
-	u64 q1_host_shared;
-	u64 q1_host_msg_pool;
-	u64 q2_host_shared;
-	u64 q2_host_msg_pool;
+	__be64 hint_count;
+	__be64 q0_host_shared;
+	__be64 q1_host_shared;
+	__be64 q1_host_msg_pool;
+	__be64 q2_host_shared;
+	__be64 q2_host_msg_pool;
 } __attribute__((packed));
 
 struct c2wr_init_rep {
@@ -1276,10 +1276,10 @@
 	struct c2wr_hdr hdr;
 	u64 user_context;	/* returned in AEs. */
 	u32 rnic_handle;
-	u32 local_addr;		/* local addr, or 0  */
-	u16 local_port;		/* 0 means "pick one" */
+	__be32 local_addr;		/* local addr, or 0  */
+	__be16 local_port;		/* 0 means "pick one" */
 	u16 pad;
-	u32 backlog;		/* tradional tcp listen bl */
+	__be32 backlog;		/* traditional tcp listen backlog */
 } __attribute__((packed));
 
 struct c2wr_ep_listen_create_rep {
@@ -1340,7 +1340,7 @@
 	u32 rnic_handle;
 	u32 qp_handle;		/* QP to bind to this LLP conn */
 	u32 ep_handle;		/* LLP  handle to accept */
-	u32 private_data_length;
+	__be32 private_data_length;
 	u8 private_data[0];	/* data in-line in msg. */
 } __attribute__((packed));
 
@@ -1508,7 +1508,7 @@
 {
 	((struct c2wr_hdr *) wr)->sge_count = sge_count;
 }
-static __inline__ u32 c2_wr_get_wqe_count(void *wr)
+static __inline__ __be32 c2_wr_get_wqe_count(void *wr)
 {
 	return ((struct c2wr_hdr *) wr)->wqe_count;
 }
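
c2_wr.h is the wire-format header, which is why nearly every field the adapter parses gains a __be16/__be32/__be64 annotation while fields the firmware treats as opaque cookies (user_context, the various handles) stay plain. With the types annotated, sparse can verify that every store goes through cpu_to_beNN() and every load through beNN_to_cpu(). An illustrative example of filling such a packed request; the struct below is hypothetical, not one of the driver's, and htonl()/htons() stand in for the kernel helpers:

#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

struct demo_req {
	uint64_t user_context;		/* opaque cookie, never swapped */
	uint32_t remote_addr;		/* big-endian on the wire */
	uint16_t remote_port;		/* big-endian on the wire */
	uint16_t pad;
} __attribute__((packed));

static void fill_req(struct demo_req *req, uint32_t addr, uint16_t port)
{
	memset(req, 0, sizeof(*req));
	req->user_context = 0xdeadbeefULL;	/* left in host order */
	req->remote_addr = htonl(addr);		/* swapped at the boundary */
	req->remote_port = htons(port);
}

int main(void)
{
	struct demo_req req;

	fill_req(&req, 0x0a000001, 80);
	return req.pad;			/* zeroed by memset */
}
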
diff --git a/drivers/infiniband/hw/cxgb3/cxio_dbg.c b/drivers/infiniband/hw/cxgb3/cxio_dbg.c
index 75f7b16..a8d24d5 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_dbg.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_dbg.c
@@ -45,16 +45,16 @@
 
 	m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
 	if (!m) {
-		PDBG("%s couldn't allocate memory.\n", __FUNCTION__);
+		PDBG("%s couldn't allocate memory.\n", __func__);
 		return;
 	}
 	m->mem_id = MEM_PMRX;
 	m->addr = (stag>>8) * 32 + rdev->rnic_info.tpt_base;
 	m->len = size;
-	PDBG("%s TPT addr 0x%x len %d\n", __FUNCTION__, m->addr, m->len);
+	PDBG("%s TPT addr 0x%x len %d\n", __func__, m->addr, m->len);
 	rc = rdev->t3cdev_p->ctl(rdev->t3cdev_p, RDMA_GET_MEM, m);
 	if (rc) {
-		PDBG("%s toectl returned error %d\n", __FUNCTION__, rc);
+		PDBG("%s toectl returned error %d\n", __func__, rc);
 		kfree(m);
 		return;
 	}
@@ -82,17 +82,17 @@
 
 	m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
 	if (!m) {
-		PDBG("%s couldn't allocate memory.\n", __FUNCTION__);
+		PDBG("%s couldn't allocate memory.\n", __func__);
 		return;
 	}
 	m->mem_id = MEM_PMRX;
 	m->addr = pbl_addr;
 	m->len = size;
 	PDBG("%s PBL addr 0x%x len %d depth %d\n",
-		__FUNCTION__, m->addr, m->len, npages);
+		__func__, m->addr, m->len, npages);
 	rc = rdev->t3cdev_p->ctl(rdev->t3cdev_p, RDMA_GET_MEM, m);
 	if (rc) {
-		PDBG("%s toectl returned error %d\n", __FUNCTION__, rc);
+		PDBG("%s toectl returned error %d\n", __func__, rc);
 		kfree(m);
 		return;
 	}
@@ -144,16 +144,16 @@
 
 	m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
 	if (!m) {
-		PDBG("%s couldn't allocate memory.\n", __FUNCTION__);
+		PDBG("%s couldn't allocate memory.\n", __func__);
 		return;
 	}
 	m->mem_id = MEM_PMRX;
 	m->addr = ((hwtid)<<10) + rdev->rnic_info.rqt_base;
 	m->len = size;
-	PDBG("%s RQT addr 0x%x len %d\n", __FUNCTION__, m->addr, m->len);
+	PDBG("%s RQT addr 0x%x len %d\n", __func__, m->addr, m->len);
 	rc = rdev->t3cdev_p->ctl(rdev->t3cdev_p, RDMA_GET_MEM, m);
 	if (rc) {
-		PDBG("%s toectl returned error %d\n", __FUNCTION__, rc);
+		PDBG("%s toectl returned error %d\n", __func__, rc);
 		kfree(m);
 		return;
 	}
@@ -177,16 +177,16 @@
 
 	m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
 	if (!m) {
-		PDBG("%s couldn't allocate memory.\n", __FUNCTION__);
+		PDBG("%s couldn't allocate memory.\n", __func__);
 		return;
 	}
 	m->mem_id = MEM_CM;
 	m->addr = hwtid * size;
 	m->len = size;
-	PDBG("%s TCB %d len %d\n", __FUNCTION__, m->addr, m->len);
+	PDBG("%s TCB %d len %d\n", __func__, m->addr, m->len);
 	rc = rdev->t3cdev_p->ctl(rdev->t3cdev_p, RDMA_GET_MEM, m);
 	if (rc) {
-		PDBG("%s toectl returned error %d\n", __FUNCTION__, rc);
+		PDBG("%s toectl returned error %d\n", __func__, rc);
 		kfree(m);
 		return;
 	}
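
Aside from the __func__ conversion, these cxio_dbg.c helpers all share one idiom: the dump header and its payload come from a single kmalloc(sizeof(*m) + size, GFP_ATOMIC), so the RDMA_GET_MEM control call can fill the trailing bytes in place. A userspace sketch of the idiom; the field names are illustrative, and the sketch uses a C99 flexible array member where kernel code of this era typically wrote a zero-length array:

#include <stdint.h>
#include <stdlib.h>

struct mem_dump {
	uint32_t mem_id;
	uint32_t addr;
	uint32_t len;
	uint8_t  buf[];			/* payload follows the header */
};

static struct mem_dump *mem_dump_alloc(uint32_t addr, uint32_t len)
{
	/* one allocation covers header and payload, as in the hunks above */
	struct mem_dump *m = malloc(sizeof(*m) + len);

	if (!m)
		return NULL;
	m->mem_id = 0;
	m->addr = addr;
	m->len = len;
	return m;
}

int main(void)
{
	struct mem_dump *m = mem_dump_alloc(0x1000, 64);

	free(m);
	return 0;
}
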
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 03c5ff6..66eb703 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -140,7 +140,7 @@
 	struct t3_modify_qp_wr *wqe;
 	struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
 	if (!skb) {
-		PDBG("%s alloc_skb failed\n", __FUNCTION__);
+		PDBG("%s alloc_skb failed\n", __func__);
 		return -ENOMEM;
 	}
 	wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe));
@@ -225,7 +225,7 @@
 	}
 out:
 	mutex_unlock(&uctx->lock);
-	PDBG("%s qpid 0x%x\n", __FUNCTION__, qpid);
+	PDBG("%s qpid 0x%x\n", __func__, qpid);
 	return qpid;
 }
 
@@ -237,7 +237,7 @@
 	entry = kmalloc(sizeof *entry, GFP_KERNEL);
 	if (!entry)
 		return;
-	PDBG("%s qpid 0x%x\n", __FUNCTION__, qpid);
+	PDBG("%s qpid 0x%x\n", __func__, qpid);
 	entry->qpid = qpid;
 	mutex_lock(&uctx->lock);
 	list_add_tail(&entry->entry, &uctx->qpids);
@@ -300,7 +300,7 @@
 	if (!kernel_domain)
 		wq->udb = (u64)rdev_p->rnic_info.udbell_physbase +
 					(wq->qpid << rdev_p->qpshift);
-	PDBG("%s qpid 0x%x doorbell 0x%p udb 0x%llx\n", __FUNCTION__,
+	PDBG("%s qpid 0x%x doorbell 0x%p udb 0x%llx\n", __func__,
 	     wq->qpid, wq->doorbell, (unsigned long long) wq->udb);
 	return 0;
 err4:
@@ -345,7 +345,7 @@
 {
 	struct t3_cqe cqe;
 
-	PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __FUNCTION__,
+	PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __func__,
 	     wq, cq, cq->sw_rptr, cq->sw_wptr);
 	memset(&cqe, 0, sizeof(cqe));
 	cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |
@@ -363,10 +363,10 @@
 {
 	u32 ptr;
 
-	PDBG("%s wq %p cq %p\n", __FUNCTION__, wq, cq);
+	PDBG("%s wq %p cq %p\n", __func__, wq, cq);
 
 	/* flush RQ */
-	PDBG("%s rq_rptr %u rq_wptr %u skip count %u\n", __FUNCTION__,
+	PDBG("%s rq_rptr %u rq_wptr %u skip count %u\n", __func__,
 	    wq->rq_rptr, wq->rq_wptr, count);
 	ptr = wq->rq_rptr + count;
 	while (ptr++ != wq->rq_wptr)
@@ -378,7 +378,7 @@
 {
 	struct t3_cqe cqe;
 
-	PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __FUNCTION__,
+	PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __func__,
 	     wq, cq, cq->sw_rptr, cq->sw_wptr);
 	memset(&cqe, 0, sizeof(cqe));
 	cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |
@@ -415,11 +415,11 @@
 {
 	struct t3_cqe *cqe, *swcqe;
 
-	PDBG("%s cq %p cqid 0x%x\n", __FUNCTION__, cq, cq->cqid);
+	PDBG("%s cq %p cqid 0x%x\n", __func__, cq, cq->cqid);
 	cqe = cxio_next_hw_cqe(cq);
 	while (cqe) {
 		PDBG("%s flushing hwcq rptr 0x%x to swcq wptr 0x%x\n",
-		     __FUNCTION__, cq->rptr, cq->sw_wptr);
+		     __func__, cq->rptr, cq->sw_wptr);
 		swcqe = cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2);
 		*swcqe = *cqe;
 		swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
@@ -461,7 +461,7 @@
 			(*count)++;
 		ptr++;
 	}
-	PDBG("%s cq %p count %d\n", __FUNCTION__, cq, *count);
+	PDBG("%s cq %p count %d\n", __func__, cq, *count);
 }
 
 void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
@@ -470,7 +470,7 @@
 	u32 ptr;
 
 	*count = 0;
-	PDBG("%s count zero %d\n", __FUNCTION__, *count);
+	PDBG("%s count zero %d\n", __func__, *count);
 	ptr = cq->sw_rptr;
 	while (!Q_EMPTY(ptr, cq->sw_wptr)) {
 		cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2));
@@ -479,7 +479,7 @@
 			(*count)++;
 		ptr++;
 	}
-	PDBG("%s cq %p count %d\n", __FUNCTION__, cq, *count);
+	PDBG("%s cq %p count %d\n", __func__, cq, *count);
 }
 
 static int cxio_hal_init_ctrl_cq(struct cxio_rdev *rdev_p)
@@ -506,12 +506,12 @@
 
 	skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
 	if (!skb) {
-		PDBG("%s alloc_skb failed\n", __FUNCTION__);
+		PDBG("%s alloc_skb failed\n", __func__);
 		return -ENOMEM;
 	}
 	err = cxio_hal_init_ctrl_cq(rdev_p);
 	if (err) {
-		PDBG("%s err %d initializing ctrl_cq\n", __FUNCTION__, err);
+		PDBG("%s err %d initializing ctrl_cq\n", __func__, err);
 		goto err;
 	}
 	rdev_p->ctrl_qp.workq = dma_alloc_coherent(
@@ -521,7 +521,7 @@
 					&(rdev_p->ctrl_qp.dma_addr),
 					GFP_KERNEL);
 	if (!rdev_p->ctrl_qp.workq) {
-		PDBG("%s dma_alloc_coherent failed\n", __FUNCTION__);
+		PDBG("%s dma_alloc_coherent failed\n", __func__);
 		err = -ENOMEM;
 		goto err;
 	}
@@ -591,25 +591,25 @@
 	addr &= 0x7FFFFFF;
 	nr_wqe = len % 96 ? len / 96 + 1 : len / 96;	/* 96B max per WQE */
 	PDBG("%s wptr 0x%x rptr 0x%x len %d, nr_wqe %d data %p addr 0x%0x\n",
-	     __FUNCTION__, rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, len,
+	     __func__, rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, len,
 	     nr_wqe, data, addr);
 	utx_len = 3;		/* in 32B unit */
 	for (i = 0; i < nr_wqe; i++) {
 		if (Q_FULL(rdev_p->ctrl_qp.rptr, rdev_p->ctrl_qp.wptr,
 		           T3_CTRL_QP_SIZE_LOG2)) {
 			PDBG("%s ctrl_qp full wptr 0x%0x rptr 0x%0x, "
-			     "wait for more space i %d\n", __FUNCTION__,
+			     "wait for more space i %d\n", __func__,
 			     rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, i);
 			if (wait_event_interruptible(rdev_p->ctrl_qp.waitq,
 					     !Q_FULL(rdev_p->ctrl_qp.rptr,
 						     rdev_p->ctrl_qp.wptr,
 						     T3_CTRL_QP_SIZE_LOG2))) {
 				PDBG("%s ctrl_qp workq interrupted\n",
-				     __FUNCTION__);
+				     __func__);
 				return -ERESTARTSYS;
 			}
 			PDBG("%s ctrl_qp wakeup, continue posting work request "
-			     "i %d\n", __FUNCTION__, i);
+			     "i %d\n", __func__, i);
 		}
 		wqe = (__be64 *)(rdev_p->ctrl_qp.workq + (rdev_p->ctrl_qp.wptr %
 						(1 << T3_CTRL_QP_SIZE_LOG2)));
@@ -630,7 +630,7 @@
 		if ((i != 0) &&
 		    (i % (((1 << T3_CTRL_QP_SIZE_LOG2)) >> 1) == 0)) {
 			flag = T3_COMPLETION_FLAG;
-			PDBG("%s force completion at i %d\n", __FUNCTION__, i);
+			PDBG("%s force completion at i %d\n", __func__, i);
 		}
 
 		/* build the utx mem command */
@@ -701,7 +701,7 @@
 		*stag = (stag_idx << 8) | ((*stag) & 0xFF);
 	}
 	PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
-	     __FUNCTION__, stag_state, type, pdid, stag_idx);
+	     __func__, stag_state, type, pdid, stag_idx);
 
 	if (reset_tpt_entry)
 		cxio_hal_pblpool_free(rdev_p, *pbl_addr, *pbl_size << 3);
@@ -718,7 +718,7 @@
 	if (pbl) {
 
 		PDBG("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
-		     __FUNCTION__, *pbl_addr, rdev_p->rnic_info.pbl_base,
+		     __func__, *pbl_addr, rdev_p->rnic_info.pbl_base,
 		     *pbl_size);
 		err = cxio_hal_ctrl_qp_write_mem(rdev_p,
 				(*pbl_addr >> 5),
@@ -814,7 +814,7 @@
 	struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_ATOMIC);
 	if (!skb)
 		return -ENOMEM;
-	PDBG("%s rdev_p %p\n", __FUNCTION__, rdev_p);
+	PDBG("%s rdev_p %p\n", __func__, rdev_p);
 	wqe = (struct t3_rdma_init_wr *) __skb_put(skb, sizeof(*wqe));
 	wqe->wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_INIT));
 	wqe->wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(attr->tid) |
@@ -856,7 +856,7 @@
 	struct respQ_msg_t *rsp_msg = (struct respQ_msg_t *) skb->data;
 	PDBG("%d: %s cq_id 0x%x cq_ptr 0x%x genbit %0x overflow %0x an %0x"
 	     " se %0x notify %0x cqbranch %0x creditth %0x\n",
-	     cnt, __FUNCTION__, RSPQ_CQID(rsp_msg), RSPQ_CQPTR(rsp_msg),
+	     cnt, __func__, RSPQ_CQID(rsp_msg), RSPQ_CQPTR(rsp_msg),
 	     RSPQ_GENBIT(rsp_msg), RSPQ_OVERFLOW(rsp_msg), RSPQ_AN(rsp_msg),
 	     RSPQ_SE(rsp_msg), RSPQ_NOTIFY(rsp_msg), RSPQ_CQBRANCH(rsp_msg),
 	     RSPQ_CREDIT_THRESH(rsp_msg));
@@ -868,7 +868,7 @@
 	     CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
 	rdev_p = (struct cxio_rdev *)t3cdev_p->ulp;
 	if (!rdev_p) {
-		PDBG("%s called by t3cdev %p with null ulp\n", __FUNCTION__,
+		PDBG("%s called by t3cdev %p with null ulp\n", __func__,
 		     t3cdev_p);
 		return 0;
 	}
@@ -908,13 +908,13 @@
 		strncpy(rdev_p->dev_name, rdev_p->t3cdev_p->name,
 			T3_MAX_DEV_NAME_LEN);
 	} else {
-		PDBG("%s t3cdev_p or dev_name must be set\n", __FUNCTION__);
+		PDBG("%s t3cdev_p or dev_name must be set\n", __func__);
 		return -EINVAL;
 	}
 
 	list_add_tail(&rdev_p->entry, &rdev_list);
 
-	PDBG("%s opening rnic dev %s\n", __FUNCTION__, rdev_p->dev_name);
+	PDBG("%s opening rnic dev %s\n", __func__, rdev_p->dev_name);
 	memset(&rdev_p->ctrl_qp, 0, sizeof(rdev_p->ctrl_qp));
 	if (!rdev_p->t3cdev_p)
 		rdev_p->t3cdev_p = dev2t3cdev(netdev_p);
@@ -923,14 +923,14 @@
 					 &(rdev_p->rnic_info));
 	if (err) {
 		printk(KERN_ERR "%s t3cdev_p(%p)->ctl returned error %d.\n",
-		     __FUNCTION__, rdev_p->t3cdev_p, err);
+		     __func__, rdev_p->t3cdev_p, err);
 		goto err1;
 	}
 	err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, GET_PORTS,
 				    &(rdev_p->port_info));
 	if (err) {
 		printk(KERN_ERR "%s t3cdev_p(%p)->ctl returned error %d.\n",
-		     __FUNCTION__, rdev_p->t3cdev_p, err);
+		     __func__, rdev_p->t3cdev_p, err);
 		goto err1;
 	}
 
@@ -947,7 +947,7 @@
 	rdev_p->qpmask = (65536 >> ilog2(rdev_p->qpnr)) - 1;
 	PDBG("%s rnic %s info: tpt_base 0x%0x tpt_top 0x%0x num stags %d "
 	     "pbl_base 0x%0x pbl_top 0x%0x rqt_base 0x%0x, rqt_top 0x%0x\n",
-	     __FUNCTION__, rdev_p->dev_name, rdev_p->rnic_info.tpt_base,
+	     __func__, rdev_p->dev_name, rdev_p->rnic_info.tpt_base,
 	     rdev_p->rnic_info.tpt_top, cxio_num_stags(rdev_p),
 	     rdev_p->rnic_info.pbl_base,
 	     rdev_p->rnic_info.pbl_top, rdev_p->rnic_info.rqt_base,
@@ -961,7 +961,7 @@
 	err = cxio_hal_init_ctrl_qp(rdev_p);
 	if (err) {
 		printk(KERN_ERR "%s error %d initializing ctrl_qp.\n",
-		       __FUNCTION__, err);
+		       __func__, err);
 		goto err1;
 	}
 	err = cxio_hal_init_resource(rdev_p, cxio_num_stags(rdev_p), 0,
@@ -969,19 +969,19 @@
 				     T3_MAX_NUM_PD);
 	if (err) {
 		printk(KERN_ERR "%s error %d initializing hal resources.\n",
-		       __FUNCTION__, err);
+		       __func__, err);
 		goto err2;
 	}
 	err = cxio_hal_pblpool_create(rdev_p);
 	if (err) {
 		printk(KERN_ERR "%s error %d initializing pbl mem pool.\n",
-		       __FUNCTION__, err);
+		       __func__, err);
 		goto err3;
 	}
 	err = cxio_hal_rqtpool_create(rdev_p);
 	if (err) {
 		printk(KERN_ERR "%s error %d initializing rqt mem pool.\n",
-		       __FUNCTION__, err);
+		       __func__, err);
 		goto err4;
 	}
 	return 0;
@@ -1043,7 +1043,7 @@
 			 * Insert this completed cqe into the swcq.
 			 */
 			PDBG("%s moving cqe into swcq sq idx %ld cq idx %ld\n",
-			     __FUNCTION__, Q_PTR2IDX(ptr,  wq->sq_size_log2),
+			     __func__, Q_PTR2IDX(ptr,  wq->sq_size_log2),
 			     Q_PTR2IDX(cq->sw_wptr, cq->size_log2));
 			sqp->cqe.header |= htonl(V_CQE_SWCQE(1));
 			*(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2))
@@ -1112,7 +1112,7 @@
 
 	PDBG("%s CQE OOO %d qpid 0x%0x genbit %d type %d status 0x%0x"
 	     " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
-	     __FUNCTION__, CQE_OOO(*hw_cqe), CQE_QPID(*hw_cqe),
+	     __func__, CQE_OOO(*hw_cqe), CQE_QPID(*hw_cqe),
 	     CQE_GENBIT(*hw_cqe), CQE_TYPE(*hw_cqe), CQE_STATUS(*hw_cqe),
 	     CQE_OPCODE(*hw_cqe), CQE_LEN(*hw_cqe), CQE_WRID_HI(*hw_cqe),
 	     CQE_WRID_LOW(*hw_cqe));
@@ -1215,7 +1215,7 @@
 		struct t3_swsq *sqp;
 
 		PDBG("%s out of order completion going in swsq at idx %ld\n",
-		     __FUNCTION__,
+		     __func__,
 		     Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe), wq->sq_size_log2));
 		sqp = wq->sq +
 		      Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe), wq->sq_size_log2);
@@ -1234,13 +1234,13 @@
 	 */
 	if (SQ_TYPE(*hw_cqe)) {
 		wq->sq_rptr = CQE_WRID_SQ_WPTR(*hw_cqe);
-		PDBG("%s completing sq idx %ld\n", __FUNCTION__,
+		PDBG("%s completing sq idx %ld\n", __func__,
 		     Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2));
 		*cookie = (wq->sq +
 			   Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2))->wr_id;
 		wq->sq_rptr++;
 	} else {
-		PDBG("%s completing rq idx %ld\n", __FUNCTION__,
+		PDBG("%s completing rq idx %ld\n", __func__,
 		     Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2));
 		*cookie = *(wq->rq + Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2));
 		wq->rq_rptr++;
@@ -1255,11 +1255,11 @@
 skip_cqe:
 	if (SW_CQE(*hw_cqe)) {
 		PDBG("%s cq %p cqid 0x%x skip sw cqe sw_rptr 0x%x\n",
-		     __FUNCTION__, cq, cq->cqid, cq->sw_rptr);
+		     __func__, cq, cq->cqid, cq->sw_rptr);
 		++cq->sw_rptr;
 	} else {
 		PDBG("%s cq %p cqid 0x%x skip hw cqe rptr 0x%x\n",
-		     __FUNCTION__, cq, cq->cqid, cq->rptr);
+		     __func__, cq, cq->cqid, cq->rptr);
 		++cq->rptr;
 
 		/*
diff --git a/drivers/infiniband/hw/cxgb3/cxio_resource.c b/drivers/infiniband/hw/cxgb3/cxio_resource.c
index d3095ae..45ed4f2 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_resource.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_resource.c
@@ -206,13 +206,13 @@
 u32 cxio_hal_get_qpid(struct cxio_hal_resource *rscp)
 {
 	u32 qpid = cxio_hal_get_resource(rscp->qpid_fifo);
-	PDBG("%s qpid 0x%x\n", __FUNCTION__, qpid);
+	PDBG("%s qpid 0x%x\n", __func__, qpid);
 	return qpid;
 }
 
 void cxio_hal_put_qpid(struct cxio_hal_resource *rscp, u32 qpid)
 {
-	PDBG("%s qpid 0x%x\n", __FUNCTION__, qpid);
+	PDBG("%s qpid 0x%x\n", __func__, qpid);
 	cxio_hal_put_resource(rscp->qpid_fifo, qpid);
 }
 
@@ -255,13 +255,13 @@
 u32 cxio_hal_pblpool_alloc(struct cxio_rdev *rdev_p, int size)
 {
 	unsigned long addr = gen_pool_alloc(rdev_p->pbl_pool, size);
-	PDBG("%s addr 0x%x size %d\n", __FUNCTION__, (u32)addr, size);
+	PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
 	return (u32)addr;
 }
 
 void cxio_hal_pblpool_free(struct cxio_rdev *rdev_p, u32 addr, int size)
 {
-	PDBG("%s addr 0x%x size %d\n", __FUNCTION__, addr, size);
+	PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
 	gen_pool_free(rdev_p->pbl_pool, (unsigned long)addr, size);
 }
 
@@ -292,13 +292,13 @@
 u32 cxio_hal_rqtpool_alloc(struct cxio_rdev *rdev_p, int size)
 {
 	unsigned long addr = gen_pool_alloc(rdev_p->rqt_pool, size << 6);
-	PDBG("%s addr 0x%x size %d\n", __FUNCTION__, (u32)addr, size << 6);
+	PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
 	return (u32)addr;
 }
 
 void cxio_hal_rqtpool_free(struct cxio_rdev *rdev_p, u32 addr, int size)
 {
-	PDBG("%s addr 0x%x size %d\n", __FUNCTION__, addr, size << 6);
+	PDBG("%s addr 0x%x size %d\n", __func__, addr, size << 6);
 	gen_pool_free(rdev_p->rqt_pool, (unsigned long)addr, size << 6);
 }
 
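
The PBL and RQT pools being exercised here sit on top of the kernel's genalloc allocator; the RQT pool hands out 64-byte units, hence the size << 6 scaling on both the alloc and the free. Roughly how such a pool is created and seeded, sketched against the genalloc API as it stood in this era (the order constant and function name are illustrative):

#include <linux/genalloc.h>

#define RQT_MIN_ORDER	6	/* 64-byte minimum allocation, assumed */

static struct gen_pool *rqt_pool_create(unsigned long base,
					unsigned long size)
{
	/* one pool per rdev; callers then use gen_pool_alloc() and
	 * gen_pool_free() with byte sizes, as in the hunk above */
	struct gen_pool *pool = gen_pool_create(RQT_MIN_ORDER, -1);

	if (!pool)
		return NULL;
	if (gen_pool_add(pool, base, size, -1)) {
		gen_pool_destroy(pool);
		return NULL;
	}
	return pool;
}
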
diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c
index 0315c9d..6ba4138 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.c
+++ b/drivers/infiniband/hw/cxgb3/iwch.c
@@ -65,7 +65,7 @@
 
 static void rnic_init(struct iwch_dev *rnicp)
 {
-	PDBG("%s iwch_dev %p\n", __FUNCTION__,  rnicp);
+	PDBG("%s iwch_dev %p\n", __func__,  rnicp);
 	idr_init(&rnicp->cqidr);
 	idr_init(&rnicp->qpidr);
 	idr_init(&rnicp->mmidr);
@@ -106,7 +106,7 @@
 	struct iwch_dev *rnicp;
 	static int vers_printed;
 
-	PDBG("%s t3cdev %p\n", __FUNCTION__,  tdev);
+	PDBG("%s t3cdev %p\n", __func__,  tdev);
 	if (!vers_printed++)
 		printk(KERN_INFO MOD "Chelsio T3 RDMA Driver - version %s\n",
 		       DRV_VERSION);
@@ -144,7 +144,7 @@
 static void close_rnic_dev(struct t3cdev *tdev)
 {
 	struct iwch_dev *dev, *tmp;
-	PDBG("%s t3cdev %p\n", __FUNCTION__,  tdev);
+	PDBG("%s t3cdev %p\n", __func__,  tdev);
 	mutex_lock(&dev_mutex);
 	list_for_each_entry_safe(dev, tmp, &dev_list, entry) {
 		if (dev->rdev.t3cdev_p == tdev) {
diff --git a/drivers/infiniband/hw/cxgb3/iwch.h b/drivers/infiniband/hw/cxgb3/iwch.h
index caf4e60..9ad9b1e 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.h
+++ b/drivers/infiniband/hw/cxgb3/iwch.h
@@ -147,7 +147,7 @@
 				void *handle, u32 id)
 {
 	int ret;
-	u32 newid;
+	int newid;
 
 	do {
 		if (!idr_pre_get(idr, GFP_KERNEL)) {
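
The u32-to-int change in iwch.h is a type-correctness fix: the old idr API returns new IDs through an int pointer, so the local it lands in should be int as well. For reference, the classic pre-allocation loop the surrounding inline helper follows, sketched from the era's API with illustrative names:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/types.h>

/* hedged sketch of the legacy idr_pre_get()/idr_get_new_above() loop;
 * the names here are illustrative */
static int demo_insert(struct idr *idr, void *handle, u32 id)
{
	int ret, newid;

	do {
		if (!idr_pre_get(idr, GFP_KERNEL))
			return -ENOMEM;
		ret = idr_get_new_above(idr, handle, id, &newid);
	} while (ret == -EAGAIN);

	return ret;
}
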
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 99f2f2a..72ca360 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -110,9 +110,9 @@
 
 static void start_ep_timer(struct iwch_ep *ep)
 {
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	if (timer_pending(&ep->timer)) {
-		PDBG("%s stopped / restarted timer ep %p\n", __FUNCTION__, ep);
+		PDBG("%s stopped / restarted timer ep %p\n", __func__, ep);
 		del_timer_sync(&ep->timer);
 	} else
 		get_ep(&ep->com);
@@ -124,7 +124,7 @@
 
 static void stop_ep_timer(struct iwch_ep *ep)
 {
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	del_timer_sync(&ep->timer);
 	put_ep(&ep->com);
 }
@@ -190,7 +190,7 @@
 
 static void set_emss(struct iwch_ep *ep, u16 opt)
 {
-	PDBG("%s ep %p opt %u\n", __FUNCTION__, ep, opt);
+	PDBG("%s ep %p opt %u\n", __func__, ep, opt);
 	ep->emss = T3C_DATA(ep->com.tdev)->mtus[G_TCPOPT_MSS(opt)] - 40;
 	if (G_TCPOPT_TSTAMP(opt))
 		ep->emss -= 12;
@@ -220,7 +220,7 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&epc->lock, flags);
-	PDBG("%s - %s -> %s\n", __FUNCTION__, states[epc->state], states[new]);
+	PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
 	__state_set(epc, new);
 	spin_unlock_irqrestore(&epc->lock, flags);
 	return;
@@ -236,7 +236,7 @@
 		spin_lock_init(&epc->lock);
 		init_waitqueue_head(&epc->waitq);
 	}
-	PDBG("%s alloc ep %p\n", __FUNCTION__, epc);
+	PDBG("%s alloc ep %p\n", __func__, epc);
 	return epc;
 }
 
@@ -244,13 +244,13 @@
 {
 	struct iwch_ep_common *epc;
 	epc = container_of(kref, struct iwch_ep_common, kref);
-	PDBG("%s ep %p state %s\n", __FUNCTION__, epc, states[state_read(epc)]);
+	PDBG("%s ep %p state %s\n", __func__, epc, states[state_read(epc)]);
 	kfree(epc);
 }
 
 static void release_ep_resources(struct iwch_ep *ep)
 {
-	PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, ep->hwtid);
+	PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
 	cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
 	dst_release(ep->dst);
 	l2t_release(L2DATA(ep->com.tdev), ep->l2t);
@@ -349,7 +349,7 @@
 
 static void arp_failure_discard(struct t3cdev *dev, struct sk_buff *skb)
 {
-	PDBG("%s t3cdev %p\n", __FUNCTION__, dev);
+	PDBG("%s t3cdev %p\n", __func__, dev);
 	kfree_skb(skb);
 }
 
@@ -370,7 +370,7 @@
 {
 	struct cpl_abort_req *req = cplhdr(skb);
 
-	PDBG("%s t3cdev %p\n", __FUNCTION__, dev);
+	PDBG("%s t3cdev %p\n", __func__, dev);
 	req->cmd = CPL_ABORT_NO_RST;
 	cxgb3_ofld_send(dev, skb);
 }
@@ -380,10 +380,10 @@
 	struct cpl_close_con_req *req;
 	struct sk_buff *skb;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	skb = get_skb(NULL, sizeof(*req), gfp);
 	if (!skb) {
-		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __FUNCTION__);
+		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
 		return -ENOMEM;
 	}
 	skb->priority = CPL_PRIORITY_DATA;
@@ -400,11 +400,11 @@
 {
 	struct cpl_abort_req *req;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	skb = get_skb(skb, sizeof(*req), gfp);
 	if (!skb) {
 		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
-		       __FUNCTION__);
+		       __func__);
 		return -ENOMEM;
 	}
 	skb->priority = CPL_PRIORITY_DATA;
@@ -426,12 +426,12 @@
 	unsigned int mtu_idx;
 	int wscale;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 
 	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
 	if (!skb) {
 		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
-		       __FUNCTION__);
+		       __func__);
 		return -ENOMEM;
 	}
 	mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
@@ -470,7 +470,7 @@
 	struct mpa_message *mpa;
 	int len;
 
-	PDBG("%s ep %p pd_len %d\n", __FUNCTION__, ep, ep->plen);
+	PDBG("%s ep %p pd_len %d\n", __func__, ep, ep->plen);
 
 	BUG_ON(skb_cloned(skb));
 
@@ -530,13 +530,13 @@
 	struct mpa_message *mpa;
 	struct sk_buff *skb;
 
-	PDBG("%s ep %p plen %d\n", __FUNCTION__, ep, plen);
+	PDBG("%s ep %p plen %d\n", __func__, ep, plen);
 
 	mpalen = sizeof(*mpa) + plen;
 
 	skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
 	if (!skb) {
-		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __FUNCTION__);
+		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
 		return -ENOMEM;
 	}
 	skb_reserve(skb, sizeof(*req));
@@ -580,13 +580,13 @@
 	int len;
 	struct sk_buff *skb;
 
-	PDBG("%s ep %p plen %d\n", __FUNCTION__, ep, plen);
+	PDBG("%s ep %p plen %d\n", __func__, ep, plen);
 
 	mpalen = sizeof(*mpa) + plen;
 
 	skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
 	if (!skb) {
-		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __FUNCTION__);
+		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
 		return -ENOMEM;
 	}
 	skb->priority = CPL_PRIORITY_DATA;
@@ -630,7 +630,7 @@
 	struct cpl_act_establish *req = cplhdr(skb);
 	unsigned int tid = GET_TID(req);
 
-	PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, tid);
+	PDBG("%s ep %p tid %d\n", __func__, ep, tid);
 
 	dst_confirm(ep->dst);
 
@@ -663,7 +663,7 @@
 {
 	struct iw_cm_event event;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	memset(&event, 0, sizeof(event));
 	event.event = IW_CM_EVENT_CLOSE;
 	if (ep->com.cm_id) {
@@ -680,7 +680,7 @@
 {
 	struct iw_cm_event event;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	memset(&event, 0, sizeof(event));
 	event.event = IW_CM_EVENT_DISCONNECT;
 	if (ep->com.cm_id) {
@@ -694,7 +694,7 @@
 {
 	struct iw_cm_event event;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	memset(&event, 0, sizeof(event));
 	event.event = IW_CM_EVENT_CLOSE;
 	event.status = -ECONNRESET;
@@ -712,7 +712,7 @@
 {
 	struct iw_cm_event event;
 
-	PDBG("%s ep %p status %d\n", __FUNCTION__, ep, status);
+	PDBG("%s ep %p status %d\n", __func__, ep, status);
 	memset(&event, 0, sizeof(event));
 	event.event = IW_CM_EVENT_CONNECT_REPLY;
 	event.status = status;
@@ -724,7 +724,7 @@
 		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
 	}
 	if (ep->com.cm_id) {
-		PDBG("%s ep %p tid %d status %d\n", __FUNCTION__, ep,
+		PDBG("%s ep %p tid %d status %d\n", __func__, ep,
 		     ep->hwtid, status);
 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
 	}
@@ -739,7 +739,7 @@
 {
 	struct iw_cm_event event;
 
-	PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, ep->hwtid);
+	PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
 	memset(&event, 0, sizeof(event));
 	event.event = IW_CM_EVENT_CONNECT_REQUEST;
 	event.local_addr = ep->com.local_addr;
@@ -759,11 +759,11 @@
 {
 	struct iw_cm_event event;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	memset(&event, 0, sizeof(event));
 	event.event = IW_CM_EVENT_ESTABLISHED;
 	if (ep->com.cm_id) {
-		PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, ep->hwtid);
+		PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
 	}
 }
@@ -773,7 +773,7 @@
 	struct cpl_rx_data_ack *req;
 	struct sk_buff *skb;
 
-	PDBG("%s ep %p credits %u\n", __FUNCTION__, ep, credits);
+	PDBG("%s ep %p credits %u\n", __func__, ep, credits);
 	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
 	if (!skb) {
 		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
@@ -797,7 +797,7 @@
 	enum iwch_qp_attr_mask mask;
 	int err;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 
 	/*
 	 * Stop mpa timer.  If it expired, then the state has
@@ -884,7 +884,7 @@
 	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
 	ep->mpa_attr.version = mpa_rev;
 	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
-	     "xmit_marker_enabled=%d, version=%d\n", __FUNCTION__,
+	     "xmit_marker_enabled=%d, version=%d\n", __func__,
 	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
 	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);
 
@@ -915,7 +915,7 @@
 	struct mpa_message *mpa;
 	u16 plen;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 
 	/*
 	 * Stop mpa timer.  If it expired, then the state has
@@ -935,7 +935,7 @@
 		return;
 	}
 
-	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
 
 	/*
 	 * Copy the new data into our accumulation buffer.
@@ -950,7 +950,7 @@
 	 */
 	if (ep->mpa_pkt_len < sizeof(*mpa))
 		return;
-	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
 	mpa = (struct mpa_message *) ep->mpa_pkt;
 
 	/*
@@ -1000,7 +1000,7 @@
 	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
 	ep->mpa_attr.version = mpa_rev;
 	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
-	     "xmit_marker_enabled=%d, version=%d\n", __FUNCTION__,
+	     "xmit_marker_enabled=%d, version=%d\n", __func__,
 	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
 	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);
 
@@ -1017,7 +1017,7 @@
 	struct cpl_rx_data *hdr = cplhdr(skb);
 	unsigned int dlen = ntohs(hdr->len);
 
-	PDBG("%s ep %p dlen %u\n", __FUNCTION__, ep, dlen);
+	PDBG("%s ep %p dlen %u\n", __func__, ep, dlen);
 
 	skb_pull(skb, sizeof(*hdr));
 	skb_trim(skb, dlen);
@@ -1037,7 +1037,7 @@
 	default:
 		printk(KERN_ERR MOD "%s Unexpected streaming data."
 		       " ep %p state %d tid %d\n",
-		       __FUNCTION__, ep, state_read(&ep->com), ep->hwtid);
+		       __func__, ep, state_read(&ep->com), ep->hwtid);
 
 		/*
 		 * The ep will timeout and inform the ULP of the failure.
@@ -1063,7 +1063,7 @@
 	struct cpl_wr_ack *hdr = cplhdr(skb);
 	unsigned int credits = ntohs(hdr->credits);
 
-	PDBG("%s ep %p credits %u\n", __FUNCTION__, ep, credits);
+	PDBG("%s ep %p credits %u\n", __func__, ep, credits);
 
 	if (credits == 0)
 		return CPL_RET_BUF_DONE;
@@ -1084,7 +1084,7 @@
 {
 	struct iwch_ep *ep = ctx;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 
 	/*
 	 * We get 2 abort replies from the HW.  The first one must
@@ -1115,7 +1115,7 @@
 	struct iwch_ep *ep = ctx;
 	struct cpl_act_open_rpl *rpl = cplhdr(skb);
 
-	PDBG("%s ep %p status %u errno %d\n", __FUNCTION__, ep, rpl->status,
+	PDBG("%s ep %p status %u errno %d\n", __func__, ep, rpl->status,
 	     status2errno(rpl->status));
 	connect_reply_upcall(ep, status2errno(rpl->status));
 	state_set(&ep->com, DEAD);
@@ -1133,7 +1133,7 @@
 	struct sk_buff *skb;
 	struct cpl_pass_open_req *req;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
 	if (!skb) {
 		printk(KERN_ERR MOD "t3c_listen_start failed to alloc skb!\n");
@@ -1162,7 +1162,7 @@
 	struct iwch_listen_ep *ep = ctx;
 	struct cpl_pass_open_rpl *rpl = cplhdr(skb);
 
-	PDBG("%s ep %p status %d error %d\n", __FUNCTION__, ep,
+	PDBG("%s ep %p status %d error %d\n", __func__, ep,
 	     rpl->status, status2errno(rpl->status));
 	ep->com.rpl_err = status2errno(rpl->status);
 	ep->com.rpl_done = 1;
@@ -1176,10 +1176,10 @@
 	struct sk_buff *skb;
 	struct cpl_close_listserv_req *req;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
 	if (!skb) {
-		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __FUNCTION__);
+		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
 		return -ENOMEM;
 	}
 	req = (struct cpl_close_listserv_req *) skb_put(skb, sizeof(*req));
@@ -1197,7 +1197,7 @@
 	struct iwch_listen_ep *ep = ctx;
 	struct cpl_close_listserv_rpl *rpl = cplhdr(skb);
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	ep->com.rpl_err = status2errno(rpl->status);
 	ep->com.rpl_done = 1;
 	wake_up(&ep->com.waitq);
@@ -1211,7 +1211,7 @@
 	u32 opt0h, opt0l, opt2;
 	int wscale;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	BUG_ON(skb_cloned(skb));
 	skb_trim(skb, sizeof(*rpl));
 	skb_get(skb);
@@ -1244,7 +1244,7 @@
 static void reject_cr(struct t3cdev *tdev, u32 hwtid, __be32 peer_ip,
 		      struct sk_buff *skb)
 {
-	PDBG("%s t3cdev %p tid %u peer_ip %x\n", __FUNCTION__, tdev, hwtid,
+	PDBG("%s t3cdev %p tid %u peer_ip %x\n", __func__, tdev, hwtid,
 	     peer_ip);
 	BUG_ON(skb_cloned(skb));
 	skb_trim(skb, sizeof(struct cpl_tid_release));
@@ -1279,11 +1279,11 @@
 	struct rtable *rt;
 	struct iff_mac tim;
 
-	PDBG("%s parent ep %p tid %u\n", __FUNCTION__, parent_ep, hwtid);
+	PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);
 
 	if (state_read(&parent_ep->com) != LISTEN) {
 		printk(KERN_ERR "%s - listening ep not in LISTEN\n",
-		       __FUNCTION__);
+		       __func__);
 		goto reject;
 	}
 
@@ -1295,7 +1295,7 @@
 	if (tdev->ctl(tdev, GET_IFF_FROM_MAC, &tim) < 0 || !tim.dev) {
 		printk(KERN_ERR
 			"%s bad dst mac %02x %02x %02x %02x %02x %02x\n",
-			__FUNCTION__,
+			__func__,
 			req->dst_mac[0],
 			req->dst_mac[1],
 			req->dst_mac[2],
@@ -1313,21 +1313,21 @@
 			req->peer_port, G_PASS_OPEN_TOS(ntohl(req->tos_tid)));
 	if (!rt) {
 		printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
-		       __FUNCTION__);
+		       __func__);
 		goto reject;
 	}
 	dst = &rt->u.dst;
 	l2t = t3_l2t_get(tdev, dst->neighbour, dst->neighbour->dev);
 	if (!l2t) {
 		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
-		       __FUNCTION__);
+		       __func__);
 		dst_release(dst);
 		goto reject;
 	}
 	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
 	if (!child_ep) {
 		printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
-		       __FUNCTION__);
+		       __func__);
 		l2t_release(L2DATA(tdev), l2t);
 		dst_release(dst);
 		goto reject;
@@ -1362,7 +1362,7 @@
 	struct iwch_ep *ep = ctx;
 	struct cpl_pass_establish *req = cplhdr(skb);
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	ep->snd_seq = ntohl(req->snd_isn);
 	ep->rcv_seq = ntohl(req->rcv_isn);
 
@@ -1383,7 +1383,7 @@
 	int disconnect = 1;
 	int release = 0;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	dst_confirm(ep->dst);
 
 	spin_lock_irqsave(&ep->com.lock, flags);
@@ -1473,7 +1473,7 @@
 	int state;
 
 	if (is_neg_adv_abort(req->status)) {
-		PDBG("%s neg_adv_abort ep %p tid %d\n", __FUNCTION__, ep,
+		PDBG("%s neg_adv_abort ep %p tid %d\n", __func__, ep,
 		     ep->hwtid);
 		t3_l2t_send_event(ep->com.tdev, ep->l2t);
 		return CPL_RET_BUF_DONE;
@@ -1489,7 +1489,7 @@
 	}
 
 	state = state_read(&ep->com);
-	PDBG("%s ep %p state %u\n", __FUNCTION__, ep, state);
+	PDBG("%s ep %p state %u\n", __func__, ep, state);
 	switch (state) {
 	case CONNECTING:
 		break;
@@ -1528,14 +1528,14 @@
 			if (ret)
 				printk(KERN_ERR MOD
 				       "%s - qp <- error failed!\n",
-				       __FUNCTION__);
+				       __func__);
 		}
 		peer_abort_upcall(ep);
 		break;
 	case ABORTING:
 		break;
 	case DEAD:
-		PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __FUNCTION__);
+		PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
 		return CPL_RET_BUF_DONE;
 	default:
 		BUG_ON(1);
@@ -1546,7 +1546,7 @@
 	rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
 	if (!rpl_skb) {
 		printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
-		       __FUNCTION__);
+		       __func__);
 		dst_release(ep->dst);
 		l2t_release(L2DATA(ep->com.tdev), ep->l2t);
 		put_ep(&ep->com);
@@ -1573,7 +1573,7 @@
 	unsigned long flags;
 	int release = 0;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	BUG_ON(!ep);
 
 	/* The cm_id may be null if we failed to connect */
@@ -1624,9 +1624,9 @@
 {
 	struct iwch_ep *ep = ctx;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	skb_pull(skb, sizeof(struct cpl_rdma_terminate));
-	PDBG("%s saving %d bytes of term msg\n", __FUNCTION__, skb->len);
+	PDBG("%s saving %d bytes of term msg\n", __func__, skb->len);
 	skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
 				  skb->len);
 	ep->com.qp->attr.terminate_msg_len = skb->len;
@@ -1639,13 +1639,13 @@
 	struct cpl_rdma_ec_status *rep = cplhdr(skb);
 	struct iwch_ep *ep = ctx;
 
-	PDBG("%s ep %p tid %u status %d\n", __FUNCTION__, ep, ep->hwtid,
+	PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid,
 	     rep->status);
 	if (rep->status) {
 		struct iwch_qp_attributes attrs;
 
 		printk(KERN_ERR MOD "%s BAD CLOSE - Aborting tid %u\n",
-		       __FUNCTION__, ep->hwtid);
+		       __func__, ep->hwtid);
 		stop_ep_timer(ep);
 		attrs.next_state = IWCH_QP_STATE_ERROR;
 		iwch_modify_qp(ep->com.qp->rhp,
@@ -1663,7 +1663,7 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&ep->com.lock, flags);
-	PDBG("%s ep %p tid %u state %d\n", __FUNCTION__, ep, ep->hwtid,
+	PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
 	     ep->com.state);
 	switch (ep->com.state) {
 	case MPA_REQ_SENT:
@@ -1693,7 +1693,7 @@
 {
 	int err;
 	struct iwch_ep *ep = to_ep(cm_id);
-	PDBG("%s ep %p tid %u\n", __FUNCTION__, ep, ep->hwtid);
+	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 
 	if (state_read(&ep->com) == DEAD) {
 		put_ep(&ep->com);
@@ -1718,7 +1718,7 @@
 	struct iwch_dev *h = to_iwch_dev(cm_id->device);
 	struct iwch_qp *qp = get_qhp(h, conn_param->qpn);
 
-	PDBG("%s ep %p tid %u\n", __FUNCTION__, ep, ep->hwtid);
+	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 	if (state_read(&ep->com) == DEAD)
 		return -ECONNRESET;
 
@@ -1739,7 +1739,7 @@
 	ep->com.rpl_err = 0;
 	ep->ird = conn_param->ird;
 	ep->ord = conn_param->ord;
-	PDBG("%s %d ird %d ord %d\n", __FUNCTION__, __LINE__, ep->ird, ep->ord);
+	PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
 
 	get_ep(&ep->com);
 
@@ -1810,7 +1810,7 @@
 
 	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
 	if (!ep) {
-		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __FUNCTION__);
+		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
 		err = -ENOMEM;
 		goto out;
 	}
@@ -1827,7 +1827,7 @@
 	ep->com.cm_id = cm_id;
 	ep->com.qp = get_qhp(h, conn_param->qpn);
 	BUG_ON(!ep->com.qp);
-	PDBG("%s qpn 0x%x qp %p cm_id %p\n", __FUNCTION__, conn_param->qpn,
+	PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
 	     ep->com.qp, cm_id);
 
 	/*
@@ -1835,7 +1835,7 @@
 	 */
 	ep->atid = cxgb3_alloc_atid(h->rdev.t3cdev_p, &t3c_client, ep);
 	if (ep->atid == -1) {
-		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __FUNCTION__);
+		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
 		err = -ENOMEM;
 		goto fail2;
 	}
@@ -1847,7 +1847,7 @@
 			cm_id->local_addr.sin_port,
 			cm_id->remote_addr.sin_port, IPTOS_LOWDELAY);
 	if (!rt) {
-		printk(KERN_ERR MOD "%s - cannot find route.\n", __FUNCTION__);
+		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
 		err = -EHOSTUNREACH;
 		goto fail3;
 	}
@@ -1857,7 +1857,7 @@
 	ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst->neighbour,
 			     ep->dst->neighbour->dev);
 	if (!ep->l2t) {
-		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __FUNCTION__);
+		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
 		err = -ENOMEM;
 		goto fail4;
 	}
@@ -1894,11 +1894,11 @@
 
 	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
 	if (!ep) {
-		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __FUNCTION__);
+		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
 		err = -ENOMEM;
 		goto fail1;
 	}
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	ep->com.tdev = h->rdev.t3cdev_p;
 	cm_id->add_ref(cm_id);
 	ep->com.cm_id = cm_id;
@@ -1910,7 +1910,7 @@
 	 */
 	ep->stid = cxgb3_alloc_stid(h->rdev.t3cdev_p, &t3c_client, ep);
 	if (ep->stid == -1) {
-		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __FUNCTION__);
+		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
 		err = -ENOMEM;
 		goto fail2;
 	}
@@ -1942,7 +1942,7 @@
 	int err;
 	struct iwch_listen_ep *ep = to_listen_ep(cm_id);
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 
 	might_sleep();
 	state_set(&ep->com, DEAD);
@@ -1965,11 +1965,11 @@
 
 	spin_lock_irqsave(&ep->com.lock, flags);
 
-	PDBG("%s ep %p state %s, abrupt %d\n", __FUNCTION__, ep,
+	PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
 	     states[ep->com.state], abrupt);
 
 	if (ep->com.state == DEAD) {
-		PDBG("%s already dead ep %p\n", __FUNCTION__, ep);
+		PDBG("%s already dead ep %p\n", __func__, ep);
 		goto out;
 	}
 
@@ -2020,7 +2020,7 @@
 	if (ep->dst != old)
 		return 0;
 
-	PDBG("%s ep %p redirect to dst %p l2t %p\n", __FUNCTION__, ep, new,
+	PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
 	     l2t);
 	dst_hold(new);
 	l2t_release(L2DATA(ep->com.tdev), ep->l2t);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.h b/drivers/infiniband/hw/cxgb3/iwch_cm.h
index 6107e7c..2bb7fbd 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.h
@@ -54,13 +54,13 @@
 #define MPA_FLAGS_MASK		0xE0
 
 #define put_ep(ep) { \
-	PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __FUNCTION__, __LINE__,  \
+	PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __func__, __LINE__,  \
 	     ep, atomic_read(&((ep)->kref.refcount))); \
 	kref_put(&((ep)->kref), __free_ep); \
 }
 
 #define get_ep(ep) { \
-	PDBG("get_ep (via %s:%u) ep %p, refcnt %d\n", __FUNCTION__, __LINE__, \
+	PDBG("get_ep (via %s:%u) ep %p, refcnt %d\n", __func__, __LINE__, \
 	     ep, atomic_read(&((ep)->kref.refcount))); \
 	kref_get(&((ep)->kref));  \
 }
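
Because get_ep() and put_ep() are macros rather than functions, __func__ and
__LINE__ expand at each call site, so the PDBG trace names the caller of the
macro, not the header that defines it. A minimal userspace sketch of the same
pattern (names hypothetical, not part of the driver):

	#include <stdio.h>

	/* __func__/__LINE__ resolve where the macro is used, not where defined */
	#define trace_ref(op, obj) \
		printf("%s (via %s:%u) obj %p\n", #op, __func__, __LINE__, \
		       (void *)(obj))

	static void connect_path(void)
	{
		int ep;
		trace_ref(get, &ep);	/* prints "get (via connect_path:NN) ..." */
	}

	int main(void)
	{
		connect_path();
		return 0;
	}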
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cq.c b/drivers/infiniband/hw/cxgb3/iwch_cq.c
index d7624c1..4ee8ccd 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cq.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cq.c
@@ -67,7 +67,7 @@
 	ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie,
 				   &credit);
 	if (t3a_device(chp->rhp) && credit) {
-		PDBG("%s updating %d cq credits on id %d\n", __FUNCTION__,
+		PDBG("%s updating %d cq credits on id %d\n", __func__,
 		     credit, chp->cq.cqid);
 		cxio_hal_cq_op(&rhp->rdev, &chp->cq, CQ_CREDIT_UPDATE, credit);
 	}
@@ -83,7 +83,7 @@
 	wc->vendor_err = CQE_STATUS(cqe);
 
 	PDBG("%s qpid 0x%x type %d opcode %d status 0x%x wrid hi 0x%x "
-	     "lo 0x%x cookie 0x%llx\n", __FUNCTION__,
+	     "lo 0x%x cookie 0x%llx\n", __func__,
 	     CQE_QPID(cqe), CQE_TYPE(cqe),
 	     CQE_OPCODE(cqe), CQE_STATUS(cqe), CQE_WRID_HI(cqe),
 	     CQE_WRID_LOW(cqe), (unsigned long long) cookie);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_ev.c b/drivers/infiniband/hw/cxgb3/iwch_ev.c
index b406766..7b67a67 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_ev.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_ev.c
@@ -52,7 +52,7 @@
 
 	if (!qhp) {
 		printk(KERN_ERR "%s unaffiliated error 0x%x qpid 0x%x\n",
-		       __FUNCTION__, CQE_STATUS(rsp_msg->cqe),
+		       __func__, CQE_STATUS(rsp_msg->cqe),
 		       CQE_QPID(rsp_msg->cqe));
 		spin_unlock(&rnicp->lock);
 		return;
@@ -61,14 +61,14 @@
 	if ((qhp->attr.state == IWCH_QP_STATE_ERROR) ||
 	    (qhp->attr.state == IWCH_QP_STATE_TERMINATE)) {
 		PDBG("%s AE received after RTS - "
-		     "qp state %d qpid 0x%x status 0x%x\n", __FUNCTION__,
+		     "qp state %d qpid 0x%x status 0x%x\n", __func__,
 		     qhp->attr.state, qhp->wq.qpid, CQE_STATUS(rsp_msg->cqe));
 		spin_unlock(&rnicp->lock);
 		return;
 	}
 
 	printk(KERN_ERR "%s - AE qpid 0x%x opcode %d status 0x%x "
-	       "type %d wrid.hi 0x%x wrid.lo 0x%x \n", __FUNCTION__,
+	       "type %d wrid.hi 0x%x wrid.lo 0x%x \n", __func__,
 	       CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
 	       CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
 	       CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
@@ -132,10 +132,10 @@
 	    (CQE_STATUS(rsp_msg->cqe) == 0)) {
 		if (SQ_TYPE(rsp_msg->cqe)) {
 			PDBG("%s QPID 0x%x ep %p disconnecting\n",
-			     __FUNCTION__, qhp->wq.qpid, qhp->ep);
+			     __func__, qhp->wq.qpid, qhp->ep);
 			iwch_ep_disconnect(qhp->ep, 0, GFP_ATOMIC);
 		} else {
-			PDBG("%s post REQ_ERR AE QPID 0x%x\n", __FUNCTION__,
+			PDBG("%s post REQ_ERR AE QPID 0x%x\n", __func__,
 			     qhp->wq.qpid);
 			post_qp_event(rnicp, chp, rsp_msg,
 				      IB_EVENT_QP_REQ_ERR, 0);
@@ -180,7 +180,7 @@
 	case TPT_ERR_INVALIDATE_SHARED_MR:
 	case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
 		printk(KERN_ERR "%s - CQE Err qpid 0x%x opcode %d status 0x%x "
-		       "type %d wrid.hi 0x%x wrid.lo 0x%x \n", __FUNCTION__,
+		       "type %d wrid.hi 0x%x wrid.lo 0x%x \n", __func__,
 		       CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
 		       CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
 		       CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
diff --git a/drivers/infiniband/hw/cxgb3/iwch_mem.c b/drivers/infiniband/hw/cxgb3/iwch_mem.c
index b8797c6..58c3d61 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_mem.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_mem.c
@@ -62,7 +62,7 @@
 	mmid = stag >> 8;
 	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
 	insert_handle(rhp, &rhp->mmidr, mhp, mmid);
-	PDBG("%s mmid 0x%x mhp %p\n", __FUNCTION__, mmid, mhp);
+	PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
 	return 0;
 }
 
@@ -96,7 +96,7 @@
 	mmid = stag >> 8;
 	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
 	insert_handle(rhp, &rhp->mmidr, mhp, mmid);
-	PDBG("%s mmid 0x%x mhp %p\n", __FUNCTION__, mmid, mhp);
+	PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
 	return 0;
 }
 
@@ -163,7 +163,7 @@
 			    ((u64) j << *shift));
 
 	PDBG("%s va 0x%llx mask 0x%llx shift %d len %lld pbl_size %d\n",
-	     __FUNCTION__, (unsigned long long) *iova_start,
+	     __func__, (unsigned long long) *iova_start,
 	     (unsigned long long) mask, *shift, (unsigned long long) *total_size,
 	     *npages);
 
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index b2ea921..ca72654 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -101,7 +101,7 @@
 	struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
 	struct iwch_mm_entry *mm, *tmp;
 
-	PDBG("%s context %p\n", __FUNCTION__, context);
+	PDBG("%s context %p\n", __func__, context);
 	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
 		kfree(mm);
 	cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
@@ -115,7 +115,7 @@
 	struct iwch_ucontext *context;
 	struct iwch_dev *rhp = to_iwch_dev(ibdev);
 
-	PDBG("%s ibdev %p\n", __FUNCTION__, ibdev);
+	PDBG("%s ibdev %p\n", __func__, ibdev);
 	context = kzalloc(sizeof(*context), GFP_KERNEL);
 	if (!context)
 		return ERR_PTR(-ENOMEM);
@@ -129,7 +129,7 @@
 {
 	struct iwch_cq *chp;
 
-	PDBG("%s ib_cq %p\n", __FUNCTION__, ib_cq);
+	PDBG("%s ib_cq %p\n", __func__, ib_cq);
 	chp = to_iwch_cq(ib_cq);
 
 	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
@@ -151,7 +151,7 @@
 	struct iwch_create_cq_req ureq;
 	struct iwch_ucontext *ucontext = NULL;
 
-	PDBG("%s ib_dev %p entries %d\n", __FUNCTION__, ibdev, entries);
+	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
 	rhp = to_iwch_dev(ibdev);
 	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
 	if (!chp)
@@ -233,7 +233,7 @@
 	struct t3_cq oldcq, newcq;
 	int ret;
 
-	PDBG("%s ib_cq %p cqe %d\n", __FUNCTION__, cq, cqe);
+	PDBG("%s ib_cq %p cqe %d\n", __func__, cq, cqe);
 
 	/* We don't downsize... */
 	if (cqe <= cq->cqe)
@@ -281,7 +281,7 @@
 	ret = cxio_destroy_cq(&chp->rhp->rdev, &oldcq);
 	if (ret) {
 		printk(KERN_ERR MOD "%s - cxio_destroy_cq failed %d\n",
-			__FUNCTION__, ret);
+			__func__, ret);
 	}
 
 	/* add user hooks here */
@@ -316,7 +316,7 @@
 		chp->cq.rptr = rptr;
 	} else
 		spin_lock_irqsave(&chp->lock, flag);
-	PDBG("%s rptr 0x%x\n", __FUNCTION__, chp->cq.rptr);
+	PDBG("%s rptr 0x%x\n", __func__, chp->cq.rptr);
 	err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
 	spin_unlock_irqrestore(&chp->lock, flag);
 	if (err < 0)
@@ -337,7 +337,7 @@
 	struct iwch_ucontext *ucontext;
 	u64 addr;
 
-	PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __FUNCTION__, vma->vm_pgoff,
+	PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
 	     key, len);
 
 	if (vma->vm_start & (PAGE_SIZE-1)) {
@@ -390,7 +390,7 @@
 
 	php = to_iwch_pd(pd);
 	rhp = php->rhp;
-	PDBG("%s ibpd %p pdid 0x%x\n", __FUNCTION__, pd, php->pdid);
+	PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
 	cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
 	kfree(php);
 	return 0;
@@ -404,7 +404,7 @@
 	u32 pdid;
 	struct iwch_dev *rhp;
 
-	PDBG("%s ibdev %p\n", __FUNCTION__, ibdev);
+	PDBG("%s ibdev %p\n", __func__, ibdev);
 	rhp = (struct iwch_dev *) ibdev;
 	pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
 	if (!pdid)
@@ -422,7 +422,7 @@
 			return ERR_PTR(-EFAULT);
 		}
 	}
-	PDBG("%s pdid 0x%0x ptr 0x%p\n", __FUNCTION__, pdid, php);
+	PDBG("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
 	return &php->ibpd;
 }
 
@@ -432,7 +432,7 @@
 	struct iwch_mr *mhp;
 	u32 mmid;
 
-	PDBG("%s ib_mr %p\n", __FUNCTION__, ib_mr);
+	PDBG("%s ib_mr %p\n", __func__, ib_mr);
 	/* There can be no memory windows */
 	if (atomic_read(&ib_mr->usecnt))
 		return -EINVAL;
@@ -447,7 +447,7 @@
 		kfree((void *) (unsigned long) mhp->kva);
 	if (mhp->umem)
 		ib_umem_release(mhp->umem);
-	PDBG("%s mmid 0x%x ptr %p\n", __FUNCTION__, mmid, mhp);
+	PDBG("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
 	kfree(mhp);
 	return 0;
 }
@@ -467,7 +467,7 @@
 	struct iwch_mr *mhp;
 	int ret;
 
-	PDBG("%s ib_pd %p\n", __FUNCTION__, pd);
+	PDBG("%s ib_pd %p\n", __func__, pd);
 	php = to_iwch_pd(pd);
 	rhp = php->rhp;
 
@@ -531,7 +531,7 @@
 	int npages;
 	int ret;
 
-	PDBG("%s ib_mr %p ib_pd %p\n", __FUNCTION__, mr, pd);
+	PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd);
 
 	/* There can be no memory windows */
 	if (atomic_read(&mr->usecnt))
@@ -594,7 +594,7 @@
 	struct iwch_mr *mhp;
 	struct iwch_reg_user_mr_resp uresp;
 
-	PDBG("%s ib_pd %p\n", __FUNCTION__, pd);
+	PDBG("%s ib_pd %p\n", __func__, pd);
 
 	php = to_iwch_pd(pd);
 	rhp = php->rhp;
@@ -649,7 +649,7 @@
 	if (udata && !t3a_device(rhp)) {
 		uresp.pbl_addr = (mhp->attr.pbl_addr -
 	                         rhp->rdev.rnic_info.pbl_base) >> 3;
-		PDBG("%s user resp pbl_addr 0x%x\n", __FUNCTION__,
+		PDBG("%s user resp pbl_addr 0x%x\n", __func__,
 		     uresp.pbl_addr);
 
 		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
@@ -673,7 +673,7 @@
 	u64 kva;
 	struct ib_mr *ibmr;
 
-	PDBG("%s ib_pd %p\n", __FUNCTION__, pd);
+	PDBG("%s ib_pd %p\n", __func__, pd);
 
 	/*
 	 * T3 only supports 32 bits of size.
@@ -710,7 +710,7 @@
 	mhp->attr.stag = stag;
 	mmid = (stag) >> 8;
 	insert_handle(rhp, &rhp->mmidr, mhp, mmid);
-	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __FUNCTION__, mmid, mhp, stag);
+	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
 	return &(mhp->ibmw);
 }
 
@@ -726,7 +726,7 @@
 	cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
 	remove_handle(rhp, &rhp->mmidr, mmid);
 	kfree(mhp);
-	PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __FUNCTION__, mw, mmid, mhp);
+	PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
 	return 0;
 }
 
@@ -754,7 +754,7 @@
 	cxio_destroy_qp(&rhp->rdev, &qhp->wq,
 			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
 
-	PDBG("%s ib_qp %p qpid 0x%0x qhp %p\n", __FUNCTION__,
+	PDBG("%s ib_qp %p qpid 0x%0x qhp %p\n", __func__,
 	     ib_qp, qhp->wq.qpid, qhp);
 	kfree(qhp);
 	return 0;
@@ -773,7 +773,7 @@
 	int wqsize, sqsize, rqsize;
 	struct iwch_ucontext *ucontext;
 
-	PDBG("%s ib_pd %p\n", __FUNCTION__, pd);
+	PDBG("%s ib_pd %p\n", __func__, pd);
 	if (attrs->qp_type != IB_QPT_RC)
 		return ERR_PTR(-EINVAL);
 	php = to_iwch_pd(pd);
@@ -805,7 +805,7 @@
 	 */
 	sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
 	wqsize = roundup_pow_of_two(rqsize + sqsize);
-	PDBG("%s wqsize %d sqsize %d rqsize %d\n", __FUNCTION__,
+	PDBG("%s wqsize %d sqsize %d rqsize %d\n", __func__,
 	     wqsize, sqsize, rqsize);
 	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
 	if (!qhp)
@@ -898,7 +898,7 @@
 	init_timer(&(qhp->timer));
 	PDBG("%s sq_num_entries %d, rq_num_entries %d "
 	     "qpid 0x%0x qhp %p dma_addr 0x%llx size %d\n",
-	     __FUNCTION__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
+	     __func__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
 	     qhp->wq.qpid, qhp, (unsigned long long) qhp->wq.dma_addr,
 	     1 << qhp->wq.size_log2);
 	return &qhp->ibqp;
@@ -912,7 +912,7 @@
 	enum iwch_qp_attr_mask mask = 0;
 	struct iwch_qp_attributes attrs;
 
-	PDBG("%s ib_qp %p\n", __FUNCTION__, ibqp);
+	PDBG("%s ib_qp %p\n", __func__, ibqp);
 
 	/* iwarp does not support the RTR state */
 	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
@@ -945,20 +945,20 @@
 
 void iwch_qp_add_ref(struct ib_qp *qp)
 {
-	PDBG("%s ib_qp %p\n", __FUNCTION__, qp);
+	PDBG("%s ib_qp %p\n", __func__, qp);
 	atomic_inc(&(to_iwch_qp(qp)->refcnt));
 }
 
 void iwch_qp_rem_ref(struct ib_qp *qp)
 {
-	PDBG("%s ib_qp %p\n", __FUNCTION__, qp);
+	PDBG("%s ib_qp %p\n", __func__, qp);
 	if (atomic_dec_and_test(&(to_iwch_qp(qp)->refcnt)))
 	        wake_up(&(to_iwch_qp(qp)->wait));
 }
 
 static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
 {
-	PDBG("%s ib_dev %p qpn 0x%x\n", __FUNCTION__, dev, qpn);
+	PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
 	return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
 }
 
@@ -966,7 +966,7 @@
 static int iwch_query_pkey(struct ib_device *ibdev,
 			   u8 port, u16 index, u16 * pkey)
 {
-	PDBG("%s ibdev %p\n", __FUNCTION__, ibdev);
+	PDBG("%s ibdev %p\n", __func__, ibdev);
 	*pkey = 0;
 	return 0;
 }
@@ -977,7 +977,7 @@
 	struct iwch_dev *dev;
 
 	PDBG("%s ibdev %p, port %d, index %d, gid %p\n",
-	       __FUNCTION__, ibdev, port, index, gid);
+	       __func__, ibdev, port, index, gid);
 	dev = to_iwch_dev(ibdev);
 	BUG_ON(port == 0 || port > 2);
 	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
@@ -990,7 +990,7 @@
 {
 
 	struct iwch_dev *dev;
-	PDBG("%s ibdev %p\n", __FUNCTION__, ibdev);
+	PDBG("%s ibdev %p\n", __func__, ibdev);
 
 	dev = to_iwch_dev(ibdev);
 	memset(props, 0, sizeof *props);
@@ -1017,7 +1017,7 @@
 static int iwch_query_port(struct ib_device *ibdev,
 			   u8 port, struct ib_port_attr *props)
 {
-	PDBG("%s ibdev %p\n", __FUNCTION__, ibdev);
+	PDBG("%s ibdev %p\n", __func__, ibdev);
 	props->max_mtu = IB_MTU_4096;
 	props->lid = 0;
 	props->lmc = 0;
@@ -1045,7 +1045,7 @@
 {
 	struct iwch_dev *dev = container_of(cdev, struct iwch_dev,
 					    ibdev.class_dev);
-	PDBG("%s class dev 0x%p\n", __FUNCTION__, cdev);
+	PDBG("%s class dev 0x%p\n", __func__, cdev);
 	return sprintf(buf, "%d\n", dev->rdev.t3cdev_p->type);
 }
 
@@ -1056,7 +1056,7 @@
 	struct ethtool_drvinfo info;
 	struct net_device *lldev = dev->rdev.t3cdev_p->lldev;
 
-	PDBG("%s class dev 0x%p\n", __FUNCTION__, cdev);
+	PDBG("%s class dev 0x%p\n", __func__, cdev);
 	rtnl_lock();
 	lldev->ethtool_ops->get_drvinfo(lldev, &info);
 	rtnl_unlock();
@@ -1070,7 +1070,7 @@
 	struct ethtool_drvinfo info;
 	struct net_device *lldev = dev->rdev.t3cdev_p->lldev;
 
-	PDBG("%s class dev 0x%p\n", __FUNCTION__, cdev);
+	PDBG("%s class dev 0x%p\n", __func__, cdev);
 	rtnl_lock();
 	lldev->ethtool_ops->get_drvinfo(lldev, &info);
 	rtnl_unlock();
@@ -1081,7 +1081,7 @@
 {
 	struct iwch_dev *dev = container_of(cdev, struct iwch_dev,
 					    ibdev.class_dev);
-	PDBG("%s class dev 0x%p\n", __FUNCTION__, dev);
+	PDBG("%s class dev 0x%p\n", __func__, dev);
 	return sprintf(buf, "%x.%x\n", dev->rdev.rnic_info.pdev->vendor,
 		                       dev->rdev.rnic_info.pdev->device);
 }
@@ -1103,14 +1103,13 @@
 	int ret;
 	int i;
 
-	PDBG("%s iwch_dev %p\n", __FUNCTION__, dev);
+	PDBG("%s iwch_dev %p\n", __func__, dev);
 	strlcpy(dev->ibdev.name, "cxgb3_%d", IB_DEVICE_NAME_MAX);
 	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
 	memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
 	dev->ibdev.owner = THIS_MODULE;
 	dev->device_cap_flags =
-	    (IB_DEVICE_ZERO_STAG |
-	     IB_DEVICE_SEND_W_INV | IB_DEVICE_MEM_WINDOW);
+	    (IB_DEVICE_ZERO_STAG | IB_DEVICE_MEM_WINDOW);
 
 	dev->ibdev.uverbs_cmd_mask =
 	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
@@ -1207,7 +1206,7 @@
 {
 	int i;
 
-	PDBG("%s iwch_dev %p\n", __FUNCTION__, dev);
+	PDBG("%s iwch_dev %p\n", __func__, dev);
 	for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i)
 		class_device_remove_file(&dev->ibdev.class_dev,
 					 iwch_class_attributes[i]);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
index 48833f3..61356f9 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h
@@ -213,7 +213,7 @@
 		if (mm->key == key && mm->len == len) {
 			list_del_init(&mm->entry);
 			spin_unlock(&ucontext->mmap_lock);
-			PDBG("%s key 0x%x addr 0x%llx len %d\n", __FUNCTION__,
+			PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
 			     key, (unsigned long long) mm->addr, mm->len);
 			return mm;
 		}
@@ -226,7 +226,7 @@
 			       struct iwch_mm_entry *mm)
 {
 	spin_lock(&ucontext->mmap_lock);
-	PDBG("%s key 0x%x addr 0x%llx len %d\n", __FUNCTION__,
+	PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
 	     mm->key, (unsigned long long) mm->addr, mm->len);
 	list_add_tail(&mm->entry, &ucontext->mmaps);
 	spin_unlock(&ucontext->mmap_lock);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index ea2cdd7..8891c3b 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -72,7 +72,7 @@
 	wqe->send.reserved[2] = 0;
 	if (wr->opcode == IB_WR_SEND_WITH_IMM) {
 		plen = 4;
-		wqe->send.sgl[0].stag = wr->imm_data;
+		wqe->send.sgl[0].stag = wr->ex.imm_data;
 		wqe->send.sgl[0].len = __constant_cpu_to_be32(0);
 		wqe->send.num_sgle = __constant_cpu_to_be32(0);
 		*flit_cnt = 5;
@@ -112,7 +112,7 @@
 
 	if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
 		plen = 4;
-		wqe->write.sgl[0].stag = wr->imm_data;
+		wqe->write.sgl[0].stag = wr->ex.imm_data;
 		wqe->write.sgl[0].len = __constant_cpu_to_be32(0);
 		wqe->write.num_sgle = __constant_cpu_to_be32(0);
 		*flit_cnt = 6;
@@ -168,30 +168,30 @@
 
 		mhp = get_mhp(rhp, (sg_list[i].lkey) >> 8);
 		if (!mhp) {
-			PDBG("%s %d\n", __FUNCTION__, __LINE__);
+			PDBG("%s %d\n", __func__, __LINE__);
 			return -EIO;
 		}
 		if (!mhp->attr.state) {
-			PDBG("%s %d\n", __FUNCTION__, __LINE__);
+			PDBG("%s %d\n", __func__, __LINE__);
 			return -EIO;
 		}
 		if (mhp->attr.zbva) {
-			PDBG("%s %d\n", __FUNCTION__, __LINE__);
+			PDBG("%s %d\n", __func__, __LINE__);
 			return -EIO;
 		}
 
 		if (sg_list[i].addr < mhp->attr.va_fbo) {
-			PDBG("%s %d\n", __FUNCTION__, __LINE__);
+			PDBG("%s %d\n", __func__, __LINE__);
 			return -EINVAL;
 		}
 		if (sg_list[i].addr + ((u64) sg_list[i].length) <
 		    sg_list[i].addr) {
-			PDBG("%s %d\n", __FUNCTION__, __LINE__);
+			PDBG("%s %d\n", __func__, __LINE__);
 			return -EINVAL;
 		}
 		if (sg_list[i].addr + ((u64) sg_list[i].length) >
 		    mhp->attr.va_fbo + ((u64) mhp->attr.len)) {
-			PDBG("%s %d\n", __FUNCTION__, __LINE__);
+			PDBG("%s %d\n", __func__, __LINE__);
 			return -EINVAL;
 		}
 		offset = sg_list[i].addr - mhp->attr.va_fbo;
@@ -290,7 +290,7 @@
 				qhp->wq.oldest_read = sqp;
 			break;
 		default:
-			PDBG("%s post of type=%d TBD!\n", __FUNCTION__,
+			PDBG("%s post of type=%d TBD!\n", __func__,
 			     wr->opcode);
 			err = -EINVAL;
 		}
@@ -309,7 +309,7 @@
 			       Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
 			       0, t3_wr_flit_cnt);
 		PDBG("%s cookie 0x%llx wq idx 0x%x swsq idx %ld opcode %d\n",
-		     __FUNCTION__, (unsigned long long) wr->wr_id, idx,
+		     __func__, (unsigned long long) wr->wr_id, idx,
 		     Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2),
 		     sqp->opcode);
 		wr = wr->next;
@@ -361,7 +361,7 @@
 			       Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
 			       0, sizeof(struct t3_receive_wr) >> 3);
 		PDBG("%s cookie 0x%llx idx 0x%x rq_wptr 0x%x rw_rptr 0x%x "
-		     "wqe %p \n", __FUNCTION__, (unsigned long long) wr->wr_id,
+		     "wqe %p \n", __func__, (unsigned long long) wr->wr_id,
 		     idx, qhp->wq.rq_wptr, qhp->wq.rq_rptr, wqe);
 		++(qhp->wq.rq_wptr);
 		++(qhp->wq.wptr);
@@ -407,7 +407,7 @@
 		return -ENOMEM;
 	}
 	idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
-	PDBG("%s: idx 0x%0x, mw 0x%p, mw_bind 0x%p\n", __FUNCTION__, idx,
+	PDBG("%s: idx 0x%0x, mw 0x%p, mw_bind 0x%p\n", __func__, idx,
 	     mw, mw_bind);
 	wqe = (union t3_wr *) (qhp->wq.queue + idx);
 
@@ -595,10 +595,10 @@
 	struct terminate_message *term;
 	struct sk_buff *skb;
 
-	PDBG("%s %d\n", __FUNCTION__, __LINE__);
+	PDBG("%s %d\n", __func__, __LINE__);
 	skb = alloc_skb(40, GFP_ATOMIC);
 	if (!skb) {
-		printk(KERN_ERR "%s cannot send TERMINATE!\n", __FUNCTION__);
+		printk(KERN_ERR "%s cannot send TERMINATE!\n", __func__);
 		return -ENOMEM;
 	}
 	wqe = (union t3_wr *)skb_put(skb, 40);
@@ -629,7 +629,7 @@
 	rchp = get_chp(qhp->rhp, qhp->attr.rcq);
 	schp = get_chp(qhp->rhp, qhp->attr.scq);
 
-	PDBG("%s qhp %p rchp %p schp %p\n", __FUNCTION__, qhp, rchp, schp);
+	PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
 	/* take a ref on the qhp since we must release the lock */
 	atomic_inc(&qhp->refcnt);
 	spin_unlock_irqrestore(&qhp->lock, *flag);
@@ -720,11 +720,11 @@
 	init_attr.flags |= capable(CAP_NET_BIND_SERVICE) ? PRIV_QP : 0;
 	init_attr.irs = qhp->ep->rcv_seq;
 	PDBG("%s init_attr.rq_addr 0x%x init_attr.rq_size = %d "
-	     "flags 0x%x qpcaps 0x%x\n", __FUNCTION__,
+	     "flags 0x%x qpcaps 0x%x\n", __func__,
 	     init_attr.rq_addr, init_attr.rq_size,
 	     init_attr.flags, init_attr.qpcaps);
 	ret = cxio_rdma_init(&rhp->rdev, &init_attr);
-	PDBG("%s ret %d\n", __FUNCTION__, ret);
+	PDBG("%s ret %d\n", __func__, ret);
 	return ret;
 }
 
@@ -742,7 +742,7 @@
 	int free = 0;
 	struct iwch_ep *ep = NULL;
 
-	PDBG("%s qhp %p qpid 0x%x ep %p state %d -> %d\n", __FUNCTION__,
+	PDBG("%s qhp %p qpid 0x%x ep %p state %d -> %d\n", __func__,
 	     qhp, qhp->wq.qpid, qhp->ep, qhp->attr.state,
 	     (mask & IWCH_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
 
@@ -899,14 +899,14 @@
 		break;
 	default:
 		printk(KERN_ERR "%s in a bad state %d\n",
-		       __FUNCTION__, qhp->attr.state);
+		       __func__, qhp->attr.state);
 		ret = -EINVAL;
 		goto err;
 		break;
 	}
 	goto out;
 err:
-	PDBG("%s disassociating ep %p qpid 0x%x\n", __FUNCTION__, qhp->ep,
+	PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
 	     qhp->wq.qpid);
 
 	/* disassociate the LLP connection */
@@ -939,7 +939,7 @@
 	if (free)
 		put_ep(&ep->com);
 
-	PDBG("%s exit state %d\n", __FUNCTION__, qhp->attr.state);
+	PDBG("%s exit state %d\n", __func__, qhp->attr.state);
 	return ret;
 }
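
The bulk of the cxgb3 hunks above are a mechanical rename: __FUNCTION__ is a
GCC extension predating standard C, while __func__ is the predefined identifier
C99 guarantees inside every function body, which is the form the kernel has
converged on. Per C99 6.4.2.2, __func__ behaves as if each function opened with

	static const char __func__[] = "function-name";

so the two print identically; only portability changes. A minimal standalone
illustration:

	#include <stdio.h>

	static void report(void)
	{
		printf("in %s\n", __func__);	/* prints "in report" */
	}

	int main(void)
	{
		report();
		return 0;
	}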
 
diff --git a/drivers/infiniband/hw/ehca/ehca_av.c b/drivers/infiniband/hw/ehca/ehca_av.c
index 194c1c3..56735ea 100644
--- a/drivers/infiniband/hw/ehca/ehca_av.c
+++ b/drivers/infiniband/hw/ehca/ehca_av.c
@@ -41,9 +41,6 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-
-#include <asm/current.h>
-
 #include "ehca_tools.h"
 #include "ehca_iverbs.h"
 #include "hcp_if.h"
@@ -170,17 +167,8 @@
 {
 	struct ehca_av *av;
 	struct ehca_ud_av new_ehca_av;
-	struct ehca_pd *my_pd = container_of(ah->pd, struct ehca_pd, ib_pd);
 	struct ehca_shca *shca = container_of(ah->pd->device, struct ehca_shca,
 					      ib_device);
-	u32 cur_pid = current->tgid;
-
-	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
-	    my_pd->ownpid != cur_pid) {
-		ehca_err(ah->device, "Invalid caller pid=%x ownpid=%x",
-			 cur_pid, my_pd->ownpid);
-		return -EINVAL;
-	}
 
 	memset(&new_ehca_av, 0, sizeof(new_ehca_av));
 	new_ehca_av.sl = ah_attr->sl;
@@ -242,15 +230,6 @@
 int ehca_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
 {
 	struct ehca_av *av = container_of(ah, struct ehca_av, ib_ah);
-	struct ehca_pd *my_pd = container_of(ah->pd, struct ehca_pd, ib_pd);
-	u32 cur_pid = current->tgid;
-
-	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
-	    my_pd->ownpid != cur_pid) {
-		ehca_err(ah->device, "Invalid caller pid=%x ownpid=%x",
-			 cur_pid, my_pd->ownpid);
-		return -EINVAL;
-	}
 
 	memcpy(&ah_attr->grh.dgid, &av->av.grh.word_3,
 	       sizeof(ah_attr->grh.dgid));
@@ -273,16 +252,6 @@
 
 int ehca_destroy_ah(struct ib_ah *ah)
 {
-	struct ehca_pd *my_pd = container_of(ah->pd, struct ehca_pd, ib_pd);
-	u32 cur_pid = current->tgid;
-
-	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
-	    my_pd->ownpid != cur_pid) {
-		ehca_err(ah->device, "Invalid caller pid=%x ownpid=%x",
-			 cur_pid, my_pd->ownpid);
-		return -EINVAL;
-	}
-
 	kmem_cache_free(av_cache, container_of(ah, struct ehca_av, ib_ah));
 
 	return 0;
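
The guard deleted here is deleted the same way from every other ehca entry
point below (ehca_cq.c, ehca_mrmw.c, ehca_pd.c, ehca_qp.c, ehca_uverbs.c):
each verb compared the calling process's tgid against the pid recorded when
the object was created and rejected mismatches. Condensed, the removed pattern
was:

	struct ehca_pd *my_pd = container_of(ah->pd, struct ehca_pd, ib_pd);
	u32 cur_pid = current->tgid;

	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
	    my_pd->ownpid != cur_pid) {
		ehca_err(ah->device, "Invalid caller pid=%x ownpid=%x",
			 cur_pid, my_pd->ownpid);
		return -EINVAL;
	}

With the ownpid fields dropped from struct ehca_pd and struct ehca_cq in the
ehca_classes.h hunk below, per-pid ownership is no longer tracked at all; the
remaining uobject/context checks are what continue to gate user-space access.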
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 92cce8a..0d13fe0 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -132,7 +132,6 @@
 struct ehca_pd {
 	struct ib_pd ib_pd;
 	struct ipz_pd fw_pd;
-	u32 ownpid;
 	/* small queue mgmt */
 	struct mutex lock;
 	struct list_head free[2];
@@ -215,7 +214,6 @@
 	atomic_t nr_events; /* #events seen */
 	wait_queue_head_t wait_completion;
 	spinlock_t task_lock;
-	u32 ownpid;
 	/* mmap counter for resources mapped into user space */
 	u32 mm_count_queue;
 	u32 mm_count_galpa;
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 0467c15..ec0cfcf 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -43,8 +43,6 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include <asm/current.h>
-
 #include "ehca_iverbs.h"
 #include "ehca_classes.h"
 #include "ehca_irq.h"
@@ -148,7 +146,6 @@
 	spin_lock_init(&my_cq->task_lock);
 	atomic_set(&my_cq->nr_events, 0);
 	init_waitqueue_head(&my_cq->wait_completion);
-	my_cq->ownpid = current->tgid;
 
 	cq = &my_cq->ib_cq;
 
@@ -320,7 +317,6 @@
 	struct ehca_shca *shca = container_of(device, struct ehca_shca,
 					      ib_device);
 	struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
-	u32 cur_pid = current->tgid;
 	unsigned long flags;
 
 	if (cq->uobject) {
@@ -329,12 +325,6 @@
 				 "user space cq_num=%x", my_cq->cq_number);
 			return -EINVAL;
 		}
-		if (my_cq->ownpid != cur_pid) {
-			ehca_err(device, "Invalid caller pid=%x ownpid=%x "
-				 "cq_num=%x",
-				 cur_pid, my_cq->ownpid, my_cq->cq_number);
-			return -EINVAL;
-		}
 	}
 
 	/*
@@ -374,15 +364,6 @@
 
 int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
 {
-	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
-	u32 cur_pid = current->tgid;
-
-	if (cq->uobject && my_cq->ownpid != cur_pid) {
-		ehca_err(cq->device, "Invalid caller pid=%x ownpid=%x",
-			 cur_pid, my_cq->ownpid);
-		return -EINVAL;
-	}
-
 	/* TODO: proper resize needs to be done */
 	ehca_err(cq->device, "not implemented yet");
 
diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c
index 5bd7b59..2515cbd 100644
--- a/drivers/infiniband/hw/ehca/ehca_hca.c
+++ b/drivers/infiniband/hw/ehca/ehca_hca.c
@@ -43,6 +43,11 @@
 #include "ehca_iverbs.h"
 #include "hcp_if.h"
 
+static unsigned int limit_uint(unsigned int value)
+{
+	return min_t(unsigned int, value, INT_MAX);
+}
+
 int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
 {
 	int i, ret = 0;
@@ -83,37 +88,40 @@
 	props->vendor_id       = rblock->vendor_id >> 8;
 	props->vendor_part_id  = rblock->vendor_part_id >> 16;
 	props->hw_ver          = rblock->hw_ver;
-	props->max_qp          = min_t(unsigned, rblock->max_qp, INT_MAX);
-	props->max_qp_wr       = min_t(unsigned, rblock->max_wqes_wq, INT_MAX);
-	props->max_sge         = min_t(unsigned, rblock->max_sge, INT_MAX);
-	props->max_sge_rd      = min_t(unsigned, rblock->max_sge_rd, INT_MAX);
-	props->max_cq          = min_t(unsigned, rblock->max_cq, INT_MAX);
-	props->max_cqe         = min_t(unsigned, rblock->max_cqe, INT_MAX);
-	props->max_mr          = min_t(unsigned, rblock->max_mr, INT_MAX);
-	props->max_mw          = min_t(unsigned, rblock->max_mw, INT_MAX);
-	props->max_pd          = min_t(unsigned, rblock->max_pd, INT_MAX);
-	props->max_ah          = min_t(unsigned, rblock->max_ah, INT_MAX);
-	props->max_fmr         = min_t(unsigned, rblock->max_mr, INT_MAX);
+	props->max_qp          = limit_uint(rblock->max_qp);
+	props->max_qp_wr       = limit_uint(rblock->max_wqes_wq);
+	props->max_sge         = limit_uint(rblock->max_sge);
+	props->max_sge_rd      = limit_uint(rblock->max_sge_rd);
+	props->max_cq          = limit_uint(rblock->max_cq);
+	props->max_cqe         = limit_uint(rblock->max_cqe);
+	props->max_mr          = limit_uint(rblock->max_mr);
+	props->max_mw          = limit_uint(rblock->max_mw);
+	props->max_pd          = limit_uint(rblock->max_pd);
+	props->max_ah          = limit_uint(rblock->max_ah);
+	props->max_ee          = limit_uint(rblock->max_rd_ee_context);
+	props->max_rdd         = limit_uint(rblock->max_rd_domain);
+	props->max_fmr         = limit_uint(rblock->max_mr);
+	props->local_ca_ack_delay  = limit_uint(rblock->local_ca_ack_delay);
+	props->max_qp_rd_atom  = limit_uint(rblock->max_rr_qp);
+	props->max_ee_rd_atom  = limit_uint(rblock->max_rr_ee_context);
+	props->max_res_rd_atom = limit_uint(rblock->max_rr_hca);
+	props->max_qp_init_rd_atom = limit_uint(rblock->max_act_wqs_qp);
+	props->max_ee_init_rd_atom = limit_uint(rblock->max_act_wqs_ee_context);
 
 	if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
-		props->max_srq         = props->max_qp;
-		props->max_srq_wr      = props->max_qp_wr;
+		props->max_srq         = limit_uint(props->max_qp);
+		props->max_srq_wr      = limit_uint(props->max_qp_wr);
 		props->max_srq_sge     = 3;
 	}
 
-	props->max_pkeys       = 16;
-	props->local_ca_ack_delay
-		= rblock->local_ca_ack_delay;
-	props->max_raw_ipv6_qp
-		= min_t(unsigned, rblock->max_raw_ipv6_qp, INT_MAX);
-	props->max_raw_ethy_qp
-		= min_t(unsigned, rblock->max_raw_ethy_qp, INT_MAX);
-	props->max_mcast_grp
-		= min_t(unsigned, rblock->max_mcast_grp, INT_MAX);
-	props->max_mcast_qp_attach
-		= min_t(unsigned, rblock->max_mcast_qp_attach, INT_MAX);
+	props->max_pkeys           = 16;
+	props->local_ca_ack_delay  = limit_uint(rblock->local_ca_ack_delay);
+	props->max_raw_ipv6_qp     = limit_uint(rblock->max_raw_ipv6_qp);
+	props->max_raw_ethy_qp     = limit_uint(rblock->max_raw_ethy_qp);
+	props->max_mcast_grp       = limit_uint(rblock->max_mcast_grp);
+	props->max_mcast_qp_attach = limit_uint(rblock->max_mcast_qp_attach);
 	props->max_total_mcast_qp_attach
-		= min_t(unsigned, rblock->max_total_mcast_qp_attach, INT_MAX);
+		= limit_uint(rblock->max_total_mcast_qp_attach);
 
 	/* translate device capabilities */
 	props->device_cap_flags = IB_DEVICE_SYS_IMAGE_GUID |
@@ -128,6 +136,46 @@
 	return ret;
 }
 
+static int map_mtu(struct ehca_shca *shca, u32 fw_mtu)
+{
+	switch (fw_mtu) {
+	case 0x1:
+		return IB_MTU_256;
+	case 0x2:
+		return IB_MTU_512;
+	case 0x3:
+		return IB_MTU_1024;
+	case 0x4:
+		return IB_MTU_2048;
+	case 0x5:
+		return IB_MTU_4096;
+	default:
+		ehca_err(&shca->ib_device, "Unknown MTU size: %x.",
+			 fw_mtu);
+		return 0;
+	}
+}
+
+static int map_number_of_vls(struct ehca_shca *shca, u32 vl_cap)
+{
+	switch (vl_cap) {
+	case 0x1:
+		return 1;
+	case 0x2:
+		return 2;
+	case 0x3:
+		return 4;
+	case 0x4:
+		return 8;
+	case 0x5:
+		return 15;
+	default:
+		ehca_err(&shca->ib_device, "invalid VL capability: %x.",
+			 vl_cap);
+		return 0;
+	}
+}
+
 int ehca_query_port(struct ib_device *ibdev,
 		    u8 port, struct ib_port_attr *props)
 {
@@ -152,31 +200,13 @@
 
 	memset(props, 0, sizeof(struct ib_port_attr));
 
-	switch (rblock->max_mtu) {
-	case 0x1:
-		props->active_mtu = props->max_mtu = IB_MTU_256;
-		break;
-	case 0x2:
-		props->active_mtu = props->max_mtu = IB_MTU_512;
-		break;
-	case 0x3:
-		props->active_mtu = props->max_mtu = IB_MTU_1024;
-		break;
-	case 0x4:
-		props->active_mtu = props->max_mtu = IB_MTU_2048;
-		break;
-	case 0x5:
-		props->active_mtu = props->max_mtu = IB_MTU_4096;
-		break;
-	default:
-		ehca_err(&shca->ib_device, "Unknown MTU size: %x.",
-			 rblock->max_mtu);
-		break;
-	}
-
+	props->active_mtu = props->max_mtu = map_mtu(shca, rblock->max_mtu);
 	props->port_cap_flags  = rblock->capability_mask;
 	props->gid_tbl_len     = rblock->gid_tbl_len;
-	props->max_msg_sz      = rblock->max_msg_sz;
+	if (rblock->max_msg_sz)
+		props->max_msg_sz      = rblock->max_msg_sz;
+	else
+		props->max_msg_sz      = 0x1 << 31;
 	props->bad_pkey_cntr   = rblock->bad_pkey_cntr;
 	props->qkey_viol_cntr  = rblock->qkey_viol_cntr;
 	props->pkey_tbl_len    = rblock->pkey_tbl_len;
@@ -186,6 +216,7 @@
 	props->sm_sl           = rblock->sm_sl;
 	props->subnet_timeout  = rblock->subnet_timeout;
 	props->init_type_reply = rblock->init_type_reply;
+	props->max_vl_num      = map_number_of_vls(shca, rblock->vl_cap);
 
 	if (rblock->state && rblock->phys_width) {
 		props->phys_state      = rblock->phys_pstate;
@@ -314,7 +345,7 @@
 	return ret;
 }
 
-const u32 allowed_port_caps = (
+static const u32 allowed_port_caps = (
 	IB_PORT_SM | IB_PORT_LED_INFO_SUP | IB_PORT_CM_SUP |
 	IB_PORT_SNMP_TUNNEL_SUP | IB_PORT_DEVICE_MGMT_SUP |
 	IB_PORT_VENDOR_CLASS_SUP);
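
limit_uint() folds the repeated min_t(unsigned, ..., INT_MAX) expressions above
into one helper. The firmware reports these limits as unsigned 32-bit counts,
while most struct ib_device_attr fields are plain int, so each value is clamped
to INT_MAX before the implicit narrowing; without the clamp, a count above
INT_MAX would convert to a negative int. Standalone, the helper reduces to:

	#include <limits.h>

	static unsigned int limit_uint(unsigned int value)
	{
		/* equivalent to min_t(unsigned int, value, INT_MAX) */
		return value > INT_MAX ? INT_MAX : value;
	}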
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index a86ebcc..65b3362 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -57,16 +57,17 @@
 MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
 MODULE_VERSION(HCAD_VERSION);
 
-int ehca_open_aqp1     = 0;
+static int ehca_open_aqp1     = 0;
+static int ehca_hw_level      = 0;
+static int ehca_poll_all_eqs  = 1;
+static int ehca_mr_largepage  = 1;
+
 int ehca_debug_level   = 0;
-int ehca_hw_level      = 0;
 int ehca_nr_ports      = 2;
 int ehca_use_hp_mr     = 0;
 int ehca_port_act_time = 30;
-int ehca_poll_all_eqs  = 1;
 int ehca_static_rate   = -1;
 int ehca_scaling_code  = 0;
-int ehca_mr_largepage  = 1;
 int ehca_lock_hcalls   = -1;
 
 module_param_named(open_aqp1,     ehca_open_aqp1,     int, S_IRUGO);
@@ -396,7 +397,7 @@
 	return ret;
 }
 
-int ehca_init_device(struct ehca_shca *shca)
+static int ehca_init_device(struct ehca_shca *shca)
 {
 	int ret;
 
@@ -579,8 +580,8 @@
 	return 1;
 }
 
-DRIVER_ATTR(debug_level, S_IRUSR | S_IWUSR,
-	    ehca_show_debug_level, ehca_store_debug_level);
+static DRIVER_ATTR(debug_level, S_IRUSR | S_IWUSR,
+		   ehca_show_debug_level, ehca_store_debug_level);
 
 static struct attribute *ehca_drv_attrs[] = {
 	&driver_attr_debug_level.attr,
@@ -941,7 +942,7 @@
 	spin_unlock(&shca_list_lock);
 }
 
-int __init ehca_module_init(void)
+static int __init ehca_module_init(void)
 {
 	int ret;
 
@@ -988,7 +989,7 @@
 	return ret;
 };
 
-void __exit ehca_module_exit(void)
+static void __exit ehca_module_exit(void)
 {
 	if (ehca_poll_all_eqs == 1)
 		del_timer_sync(&poll_eqs_timer);
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index e239bbf..f26997f 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -40,8 +40,6 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include <asm/current.h>
-
 #include <rdma/ib_umem.h>
 
 #include "ehca_iverbs.h"
@@ -419,7 +417,6 @@
 	struct ehca_shca *shca =
 		container_of(mr->device, struct ehca_shca, ib_device);
 	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
-	struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
 	u64 new_size;
 	u64 *new_start;
 	u32 new_acl;
@@ -429,15 +426,6 @@
 	u32 num_kpages = 0;
 	u32 num_hwpages = 0;
 	struct ehca_mr_pginfo pginfo;
-	u32 cur_pid = current->tgid;
-
-	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
-	    (my_pd->ownpid != cur_pid)) {
-		ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
-			 cur_pid, my_pd->ownpid);
-		ret = -EINVAL;
-		goto rereg_phys_mr_exit0;
-	}
 
 	if (!(mr_rereg_mask & IB_MR_REREG_TRANS)) {
 		/* TODO not supported, because PHYP rereg hCall needs pages */
@@ -577,19 +565,9 @@
 	struct ehca_shca *shca =
 		container_of(mr->device, struct ehca_shca, ib_device);
 	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
-	struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
-	u32 cur_pid = current->tgid;
 	unsigned long sl_flags;
 	struct ehca_mr_hipzout_parms hipzout;
 
-	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
-	    (my_pd->ownpid != cur_pid)) {
-		ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
-			 cur_pid, my_pd->ownpid);
-		ret = -EINVAL;
-		goto query_mr_exit0;
-	}
-
 	if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
 		ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
 			 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
@@ -634,16 +612,6 @@
 	struct ehca_shca *shca =
 		container_of(mr->device, struct ehca_shca, ib_device);
 	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
-	struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
-	u32 cur_pid = current->tgid;
-
-	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
-	    (my_pd->ownpid != cur_pid)) {
-		ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
-			 cur_pid, my_pd->ownpid);
-		ret = -EINVAL;
-		goto dereg_mr_exit0;
-	}
 
 	if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
 		ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
@@ -1952,9 +1920,8 @@
 	return ret;
 }
 
-int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
-			  u32 number,
-			  u64 *kpage)
+static int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
+				 u32 number, u64 *kpage)
 {
 	int ret = 0;
 	struct ib_phys_buf *pbuf;
@@ -2012,9 +1979,8 @@
 	return ret;
 }
 
-int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo,
-			 u32 number,
-			 u64 *kpage)
+static int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo,
+				u32 number, u64 *kpage)
 {
 	int ret = 0;
 	u64 *fmrlist;
diff --git a/drivers/infiniband/hw/ehca/ehca_pd.c b/drivers/infiniband/hw/ehca/ehca_pd.c
index 43bcf08..2fe5548 100644
--- a/drivers/infiniband/hw/ehca/ehca_pd.c
+++ b/drivers/infiniband/hw/ehca/ehca_pd.c
@@ -38,8 +38,6 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include <asm/current.h>
-
 #include "ehca_tools.h"
 #include "ehca_iverbs.h"
 
@@ -58,7 +56,6 @@
 		return ERR_PTR(-ENOMEM);
 	}
 
-	pd->ownpid = current->tgid;
 	for (i = 0; i < 2; i++) {
 		INIT_LIST_HEAD(&pd->free[i]);
 		INIT_LIST_HEAD(&pd->full[i]);
@@ -85,18 +82,10 @@
 
 int ehca_dealloc_pd(struct ib_pd *pd)
 {
-	u32 cur_pid = current->tgid;
 	struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
 	int i, leftovers = 0;
 	struct ipz_small_queue_page *page, *tmp;
 
-	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
-	    my_pd->ownpid != cur_pid) {
-		ehca_err(pd->device, "Invalid caller pid=%x ownpid=%x",
-			 cur_pid, my_pd->ownpid);
-		return -EINVAL;
-	}
-
 	for (i = 0; i < 2; i++) {
 		list_splice(&my_pd->full[i], &my_pd->free[i]);
 		list_for_each_entry_safe(page, tmp, &my_pd->free[i], list) {
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 1012f15..3eb14a5 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -43,9 +43,6 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-
-#include <asm/current.h>
-
 #include "ehca_classes.h"
 #include "ehca_tools.h"
 #include "ehca_qes.h"
@@ -424,6 +421,9 @@
 	u32 swqe_size = 0, rwqe_size = 0, ib_qp_num;
 	unsigned long flags;
 
+	if (init_attr->create_flags)
+		return ERR_PTR(-EINVAL);
+
 	memset(&parms, 0, sizeof(parms));
 	qp_type = init_attr->qp_type;
 
@@ -1526,16 +1526,6 @@
 	struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
 					      ib_device);
 	struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
-	struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
-					     ib_pd);
-	u32 cur_pid = current->tgid;
-
-	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
-	    my_pd->ownpid != cur_pid) {
-		ehca_err(ibqp->pd->device, "Invalid caller pid=%x ownpid=%x",
-			 cur_pid, my_pd->ownpid);
-		return -EINVAL;
-	}
 
 	/* The if-block below caches qp_attr to be modified for GSI and SMI
 	 * qps during the initialization by ib_mad. When the respective port
@@ -1636,23 +1626,13 @@
 		  int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
 {
 	struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
-	struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
-					     ib_pd);
 	struct ehca_shca *shca = container_of(qp->device, struct ehca_shca,
 					      ib_device);
 	struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
 	struct hcp_modify_qp_control_block *qpcb;
-	u32 cur_pid = current->tgid;
 	int cnt, ret = 0;
 	u64 h_ret;
 
-	if (my_pd->ib_pd.uobject  && my_pd->ib_pd.uobject->context  &&
-	    my_pd->ownpid != cur_pid) {
-		ehca_err(qp->device, "Invalid caller pid=%x ownpid=%x",
-			 cur_pid, my_pd->ownpid);
-		return -EINVAL;
-	}
-
 	if (qp_attr_mask & QP_ATTR_QUERY_NOT_SUPPORTED) {
 		ehca_err(qp->device, "Invalid attribute mask "
 			 "ehca_qp=%p qp_num=%x qp_attr_mask=%x ",
@@ -1797,8 +1777,6 @@
 {
 	struct ehca_qp *my_qp =
 		container_of(ibsrq, struct ehca_qp, ib_srq);
-	struct ehca_pd *my_pd =
-		container_of(ibsrq->pd, struct ehca_pd, ib_pd);
 	struct ehca_shca *shca =
 		container_of(ibsrq->pd->device, struct ehca_shca, ib_device);
 	struct hcp_modify_qp_control_block *mqpcb;
@@ -1806,14 +1784,6 @@
 	u64 h_ret;
 	int ret = 0;
 
-	u32 cur_pid = current->tgid;
-	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
-	    my_pd->ownpid != cur_pid) {
-		ehca_err(ibsrq->pd->device, "Invalid caller pid=%x ownpid=%x",
-			 cur_pid, my_pd->ownpid);
-		return -EINVAL;
-	}
-
 	mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
 	if (!mqpcb) {
 		ehca_err(ibsrq->device, "Could not get zeroed page for mqpcb "
@@ -1864,22 +1834,13 @@
 int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr)
 {
 	struct ehca_qp *my_qp = container_of(srq, struct ehca_qp, ib_srq);
-	struct ehca_pd *my_pd = container_of(srq->pd, struct ehca_pd, ib_pd);
 	struct ehca_shca *shca = container_of(srq->device, struct ehca_shca,
 					      ib_device);
 	struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
 	struct hcp_modify_qp_control_block *qpcb;
-	u32 cur_pid = current->tgid;
 	int ret = 0;
 	u64 h_ret;
 
-	if (my_pd->ib_pd.uobject  && my_pd->ib_pd.uobject->context  &&
-	    my_pd->ownpid != cur_pid) {
-		ehca_err(srq->device, "Invalid caller pid=%x ownpid=%x",
-			 cur_pid, my_pd->ownpid);
-		return -EINVAL;
-	}
-
 	qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
 	if (!qpcb) {
 		ehca_err(srq->device, "Out of memory for qpcb "
@@ -1919,7 +1880,6 @@
 	struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
 					     ib_pd);
 	struct ehca_sport *sport = &shca->sport[my_qp->init_attr.port_num - 1];
-	u32 cur_pid = current->tgid;
 	u32 qp_num = my_qp->real_qp_num;
 	int ret;
 	u64 h_ret;
@@ -1934,11 +1894,6 @@
 				 "user space qp_num=%x", qp_num);
 			return -EINVAL;
 		}
-		if (my_pd->ownpid != cur_pid) {
-			ehca_err(dev, "Invalid caller pid=%x ownpid=%x",
-				 cur_pid, my_pd->ownpid);
-			return -EINVAL;
-		}
 	}
 
 	if (my_qp->send_cq) {
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index 2ce8cff..a20bbf4 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -188,7 +188,7 @@
 	if (send_wr->opcode == IB_WR_SEND_WITH_IMM ||
 	    send_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
 		/* this might not work as long as HW does not support it */
-		wqe_p->immediate_data = be32_to_cpu(send_wr->imm_data);
+		wqe_p->immediate_data = be32_to_cpu(send_wr->ex.imm_data);
 		wqe_p->wr_flag |= WQE_WRFLAG_IMM_DATA_PRESENT;
 	}
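
The imm_data changes in both the cxgb3 and ehca hunks pick up the same core API
move from this development cycle: the immediate-data word of struct ib_send_wr
was folded into a union so the slot can also carry the rkey for the new
send-with-invalidate work request. A sketch of the relevant shape, as found in
the 2.6.26-era <rdma/ib_verbs.h> (surrounding fields omitted):

	struct ib_send_wr {
		/* ... */
		union {
			__be32	imm_data;	 /* *_WITH_IMM opcodes */
			u32	invalidate_rkey; /* IB_WR_SEND_WITH_INV */
		} ex;
		/* ... */
	};

Hence wr->imm_data becomes wr->ex.imm_data with no behavioral change for the
immediate-data opcodes; the earlier iwch_provider.c hunk dropping
IB_DEVICE_SEND_W_INV from device_cap_flags is presumably part of the same
rework, since that flag's meaning now follows the new request format.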
 
diff --git a/drivers/infiniband/hw/ehca/ehca_tools.h b/drivers/infiniband/hw/ehca/ehca_tools.h
index 4a8346a..ec950bf 100644
--- a/drivers/infiniband/hw/ehca/ehca_tools.h
+++ b/drivers/infiniband/hw/ehca/ehca_tools.h
@@ -73,37 +73,37 @@
 		if (unlikely(ehca_debug_level)) \
 			dev_printk(KERN_DEBUG, (ib_dev)->dma_device, \
 				   "PU%04x EHCA_DBG:%s " format "\n", \
-				   raw_smp_processor_id(), __FUNCTION__, \
+				   raw_smp_processor_id(), __func__, \
 				   ## arg); \
 	} while (0)
 
 #define ehca_info(ib_dev, format, arg...) \
 	dev_info((ib_dev)->dma_device, "PU%04x EHCA_INFO:%s " format "\n", \
-		 raw_smp_processor_id(), __FUNCTION__, ## arg)
+		 raw_smp_processor_id(), __func__, ## arg)
 
 #define ehca_warn(ib_dev, format, arg...) \
 	dev_warn((ib_dev)->dma_device, "PU%04x EHCA_WARN:%s " format "\n", \
-		 raw_smp_processor_id(), __FUNCTION__, ## arg)
+		 raw_smp_processor_id(), __func__, ## arg)
 
 #define ehca_err(ib_dev, format, arg...) \
 	dev_err((ib_dev)->dma_device, "PU%04x EHCA_ERR:%s " format "\n", \
-		raw_smp_processor_id(), __FUNCTION__, ## arg)
+		raw_smp_processor_id(), __func__, ## arg)
 
 /* use this one only if no ib_dev available */
 #define ehca_gen_dbg(format, arg...) \
 	do { \
 		if (unlikely(ehca_debug_level)) \
 			printk(KERN_DEBUG "PU%04x EHCA_DBG:%s " format "\n", \
-			       raw_smp_processor_id(), __FUNCTION__, ## arg); \
+			       raw_smp_processor_id(), __func__, ## arg); \
 	} while (0)
 
 #define ehca_gen_warn(format, arg...) \
 	printk(KERN_INFO "PU%04x EHCA_WARN:%s " format "\n", \
-	       raw_smp_processor_id(), __FUNCTION__, ## arg)
+	       raw_smp_processor_id(), __func__, ## arg)
 
 #define ehca_gen_err(format, arg...) \
 	printk(KERN_ERR "PU%04x EHCA_ERR:%s " format "\n", \
-	       raw_smp_processor_id(), __FUNCTION__, ## arg)
+	       raw_smp_processor_id(), __func__, ## arg)
 
 /**
  * ehca_dmp - printk a memory block, whose length is n*8 bytes.
@@ -118,7 +118,7 @@
 		for (x = 0; x < l; x += 16) { \
 			printk(KERN_INFO "EHCA_DMP:%s " format \
 			       " adr=%p ofs=%04x %016lx %016lx\n", \
-			       __FUNCTION__, ##args, deb, x, \
+			       __func__, ##args, deb, x, \
 			       *((u64 *)&deb[0]), *((u64 *)&deb[8])); \
 			deb += 16; \
 		} \
diff --git a/drivers/infiniband/hw/ehca/ehca_uverbs.c b/drivers/infiniband/hw/ehca/ehca_uverbs.c
index 5234d6c..1b07f2b 100644
--- a/drivers/infiniband/hw/ehca/ehca_uverbs.c
+++ b/drivers/infiniband/hw/ehca/ehca_uverbs.c
@@ -40,8 +40,6 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include <asm/current.h>
-
 #include "ehca_classes.h"
 #include "ehca_iverbs.h"
 #include "ehca_mrmw.h"
@@ -253,11 +251,9 @@
 	u32 idr_handle = fileoffset & 0x1FFFFFF;
 	u32 q_type = (fileoffset >> 27) & 0x1;	  /* CQ, QP,...        */
 	u32 rsrc_type = (fileoffset >> 25) & 0x3; /* sq,rq,cmnd_window */
-	u32 cur_pid = current->tgid;
 	u32 ret;
 	struct ehca_cq *cq;
 	struct ehca_qp *qp;
-	struct ehca_pd *pd;
 	struct ib_uobject *uobject;
 
 	switch (q_type) {
@@ -270,13 +266,6 @@
 		if (!cq)
 			return -EINVAL;
 
-		if (cq->ownpid != cur_pid) {
-			ehca_err(cq->ib_cq.device,
-				 "Invalid caller pid=%x ownpid=%x",
-				 cur_pid, cq->ownpid);
-			return -ENOMEM;
-		}
-
 		if (!cq->ib_cq.uobject || cq->ib_cq.uobject->context != context)
 			return -EINVAL;
 
@@ -298,14 +287,6 @@
 		if (!qp)
 			return -EINVAL;
 
-		pd = container_of(qp->ib_qp.pd, struct ehca_pd, ib_pd);
-		if (pd->ownpid != cur_pid) {
-			ehca_err(qp->ib_qp.device,
-				 "Invalid caller pid=%x ownpid=%x",
-				 cur_pid, pd->ownpid);
-			return -ENOMEM;
-		}
-
 		uobject = IS_SRQ(qp) ? qp->ib_srq.uobject : qp->ib_qp.uobject;
 		if (!uobject || uobject->context != context)
 			return -EINVAL;
diff --git a/drivers/infiniband/hw/ipath/Makefile b/drivers/infiniband/hw/ipath/Makefile
index fe67388..75a6c91 100644
--- a/drivers/infiniband/hw/ipath/Makefile
+++ b/drivers/infiniband/hw/ipath/Makefile
@@ -20,17 +20,20 @@
 	ipath_qp.o \
 	ipath_rc.o \
 	ipath_ruc.o \
+	ipath_sdma.o \
 	ipath_srq.o \
 	ipath_stats.o \
 	ipath_sysfs.o \
 	ipath_uc.o \
 	ipath_ud.o \
 	ipath_user_pages.o \
+	ipath_user_sdma.o \
 	ipath_verbs_mcast.o \
 	ipath_verbs.o
 
 ib_ipath-$(CONFIG_HT_IRQ) += ipath_iba6110.o
 ib_ipath-$(CONFIG_PCI_MSI) += ipath_iba6120.o
+ib_ipath-$(CONFIG_PCI_MSI) += ipath_iba7220.o ipath_sd7220.o ipath_sd7220_img.o
 
 ib_ipath-$(CONFIG_X86_64) += ipath_wc_x86_64.o
 ib_ipath-$(CONFIG_PPC64) += ipath_wc_ppc64.o
diff --git a/drivers/infiniband/hw/ipath/ipath_7220.h b/drivers/infiniband/hw/ipath/ipath_7220.h
new file mode 100644
index 0000000..74fa5cc
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_7220.h
@@ -0,0 +1,57 @@
+#ifndef _IPATH_7220_H
+#define _IPATH_7220_H
+/*
+ * Copyright (c) 2007 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * This header file provides the declarations and common definitions
+ * for (mostly) manipulation of the SerDes blocks within the IBA7220.
+ * The functions declared should only be called from within other
+ * 7220-related files such as ipath_iba7220.c or ipath_sd7220.c.
+ */
+int ipath_sd7220_presets(struct ipath_devdata *dd);
+int ipath_sd7220_init(struct ipath_devdata *dd, int was_reset);
+int ipath_sd7220_prog_ld(struct ipath_devdata *dd, int sdnum, u8 *img,
+	int len, int offset);
+int ipath_sd7220_prog_vfy(struct ipath_devdata *dd, int sdnum, const u8 *img,
+	int len, int offset);
+/*
+ * Used below for the sdnum parameter: selects one of the two SerDes
+ * sections used for PCIe, or the single SerDes used for IB, which is
+ * the only one currently used.
+ */
+#define IB_7220_SERDES 2
+
+int ipath_sd7220_ib_load(struct ipath_devdata *dd);
+int ipath_sd7220_ib_vfy(struct ipath_devdata *dd);
+
+#endif /* _IPATH_7220_H */
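
The header only declares the SerDes interface; the definitions land in ipath_sd7220.c and the firmware image in ipath_sd7220_img.c (see the Makefile hunk above). A hedged sketch of how a 7220 bringup path might use it; the real call sequence lives in ipath_iba7220.c (not shown here) and may differ:

	/* Hypothetical caller: init the IB SerDes after chip reset handling. */
	static int sd7220_bringup_sketch(struct ipath_devdata *dd, int was_reset)
	{
		int ret;

		ret = ipath_sd7220_init(dd, was_reset);	/* load/verify uC image */
		if (!ret)
			ret = ipath_sd7220_presets(dd);	/* apply tuning presets */
		return ret;
	}
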
diff --git a/drivers/infiniband/hw/ipath/ipath_common.h b/drivers/infiniband/hw/ipath/ipath_common.h
index 591901a..28cfe97 100644
--- a/drivers/infiniband/hw/ipath/ipath_common.h
+++ b/drivers/infiniband/hw/ipath/ipath_common.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -80,6 +80,8 @@
 #define IPATH_IB_LINKDOWN_DISABLE	5
 #define IPATH_IB_LINK_LOOPBACK	6 /* enable local loopback */
 #define IPATH_IB_LINK_EXTERNAL	7 /* normal, disable local loopback */
+#define IPATH_IB_LINK_NO_HRTBT	8 /* disable Heartbeat, e.g. for loopback */
+#define IPATH_IB_LINK_HRTBT	9 /* enable heartbeat, normal, non-loopback */
 
 /*
  * These 3 values (SDR and DDR may be ORed for auto-speed
@@ -198,7 +200,8 @@
 #define IPATH_RUNTIME_FORCE_WC_ORDER	0x4
 #define IPATH_RUNTIME_RCVHDR_COPY	0x8
 #define IPATH_RUNTIME_MASTER	0x10
-/* 0x20 and 0x40 are no longer used, but are reserved for ABI compatibility */
+#define IPATH_RUNTIME_NODMA_RTAIL 0x80
+#define IPATH_RUNTIME_SDMA	      0x200
 #define IPATH_RUNTIME_FORCE_PIOAVAIL 0x400
 #define IPATH_RUNTIME_PIO_REGSWAPPED 0x800
 
@@ -444,8 +447,9 @@
 #define IPATH_CMD_PIOAVAILUPD	27	/* force an update of PIOAvail reg */
 #define IPATH_CMD_POLL_TYPE	28	/* set the kind of polling we want */
 #define IPATH_CMD_ARMLAUNCH_CTRL	29 /* armlaunch detection control */
-
-#define IPATH_CMD_MAX		29
+/* 30 is unused */
+#define IPATH_CMD_SDMA_INFLIGHT 31	/* sdma inflight counter request */
+#define IPATH_CMD_SDMA_COMPLETE 32	/* sdma completion counter request */
 
 /*
  * Poll types
@@ -483,6 +487,17 @@
 	union {
 		struct ipath_tid_info tid_info;
 		struct ipath_user_info user_info;
+
+		/*
+		 * address in userspace where we should put the sdma
+		 * inflight counter
+		 */
+		__u64 sdma_inflight;
+		/*
+		 * address in userspace where we should put the sdma
+		 * completion counter
+		 */
+		__u64 sdma_complete;
 		/* address in userspace of struct ipath_port_info to
 		   write result to */
 		__u64 port_info;
@@ -537,7 +552,7 @@
 
 /* The second diag_pkt struct is the expanded version that allows
  * more control over the packet, specifically, by allowing a custom
- * pbc (+ extra) qword, so that special modes and deliberate
+ * pbc (+ static rate) qword, so that special modes and deliberate
  * changes to CRCs can be used. The elements were also re-ordered
  * for better alignment and to avoid padding issues.
  */
@@ -662,8 +677,12 @@
 #define INFINIPATH_RHF_LENGTH_SHIFT 0
 #define INFINIPATH_RHF_RCVTYPE_MASK 0x7
 #define INFINIPATH_RHF_RCVTYPE_SHIFT 11
-#define INFINIPATH_RHF_EGRINDEX_MASK 0x7FF
+#define INFINIPATH_RHF_EGRINDEX_MASK 0xFFF
 #define INFINIPATH_RHF_EGRINDEX_SHIFT 16
+#define INFINIPATH_RHF_SEQ_MASK 0xF
+#define INFINIPATH_RHF_SEQ_SHIFT 0
+#define INFINIPATH_RHF_HDRQ_OFFSET_MASK 0x7FF
+#define INFINIPATH_RHF_HDRQ_OFFSET_SHIFT 4
 #define INFINIPATH_RHF_H_ICRCERR   0x80000000
 #define INFINIPATH_RHF_H_VCRCERR   0x40000000
 #define INFINIPATH_RHF_H_PARITYERR 0x20000000
@@ -673,6 +692,8 @@
 #define INFINIPATH_RHF_H_TIDERR    0x02000000
 #define INFINIPATH_RHF_H_MKERR     0x01000000
 #define INFINIPATH_RHF_H_IBERR     0x00800000
+#define INFINIPATH_RHF_H_ERR_MASK  0xFF800000
+#define INFINIPATH_RHF_L_USE_EGR   0x80000000
 #define INFINIPATH_RHF_L_SWA       0x00008000
 #define INFINIPATH_RHF_L_SWB       0x00004000
 
@@ -696,6 +717,7 @@
 /* SendPIO per-buffer control */
 #define INFINIPATH_SP_TEST    0x40
 #define INFINIPATH_SP_TESTEBP 0x20
+#define INFINIPATH_SP_TRIGGER_SHIFT  15
 
 /* SendPIOAvail bits */
 #define INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT 1
@@ -762,6 +784,7 @@
 #define IPATH_MSN_MASK 0xFFFFFF
 #define IPATH_QPN_MASK 0xFFFFFF
 #define IPATH_MULTICAST_LID_BASE 0xC000
+#define IPATH_EAGER_TID_ID INFINIPATH_I_TID_MASK
 #define IPATH_MULTICAST_QPN 0xFFFFFF
 
 /* Receive Header Queue: receive type (from infinipath) */
@@ -781,7 +804,7 @@
  */
 static inline __u32 ipath_hdrget_err_flags(const __le32 * rbuf)
 {
-	return __le32_to_cpu(rbuf[1]);
+	return __le32_to_cpu(rbuf[1]) & INFINIPATH_RHF_H_ERR_MASK;
 }
 
 static inline __u32 ipath_hdrget_rcv_type(const __le32 * rbuf)
@@ -802,6 +825,23 @@
 	    & INFINIPATH_RHF_EGRINDEX_MASK;
 }
 
+static inline __u32 ipath_hdrget_seq(const __le32 *rbuf)
+{
+	return (__le32_to_cpu(rbuf[1]) >> INFINIPATH_RHF_SEQ_SHIFT)
+		& INFINIPATH_RHF_SEQ_MASK;
+}
+
+static inline __u32 ipath_hdrget_offset(const __le32 *rbuf)
+{
+	return (__le32_to_cpu(rbuf[1]) >> INFINIPATH_RHF_HDRQ_OFFSET_SHIFT)
+		& INFINIPATH_RHF_HDRQ_OFFSET_MASK;
+}
+
+static inline __u32 ipath_hdrget_use_egr_buf(const __le32 *rbuf)
+{
+	return __le32_to_cpu(rbuf[0]) & INFINIPATH_RHF_L_USE_EGR;
+}
+
 static inline __u32 ipath_hdrget_ipath_ver(__le32 hdrword)
 {
 	return (__le32_to_cpu(hdrword) >> INFINIPATH_I_VERS_SHIFT)
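
Together, the new accessors let the receive path pull everything it needs out of the two RHF words, including the fields that make tail-less (IPATH_RUNTIME_NODMA_RTAIL) operation possible. An illustrative dump helper, not part of the patch, showing the helpers side by side:

	static void dump_rhf_sketch(const __le32 *rhf)
	{
		printk(KERN_DEBUG "rhf: seq %u off %u errs %x egr %u\n",
		       ipath_hdrget_seq(rhf),		/* rolling sequence */
		       ipath_hdrget_offset(rhf),	/* hdrq offset */
		       ipath_hdrget_err_flags(rhf),	/* masked error bits */
		       ipath_hdrget_use_egr_buf(rhf) ? 1 : 0);
	}
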
diff --git a/drivers/infiniband/hw/ipath/ipath_debug.h b/drivers/infiniband/hw/ipath/ipath_debug.h
index d6f6953..65926cd 100644
--- a/drivers/infiniband/hw/ipath/ipath_debug.h
+++ b/drivers/infiniband/hw/ipath/ipath_debug.h
@@ -66,6 +66,7 @@
 #define __IPATH_IPATHERR    0x40000	/* Ethernet (IPATH) errors */
 #define __IPATH_IPATHPD     0x80000	/* Ethernet (IPATH) packet dump */
 #define __IPATH_IPATHTABLE  0x100000	/* Ethernet (IPATH) table dump */
+#define __IPATH_LINKVERBDBG 0x200000	/* very verbose linkchange debug */
 
 #else				/* _IPATH_DEBUGGING */
 
@@ -89,6 +90,7 @@
 #define __IPATH_IPATHERR  0x0	/* Ethernet (IPATH) errors on   */
 #define __IPATH_IPATHPD   0x0	/* Ethernet (IPATH) packet dump on   */
 #define __IPATH_IPATHTABLE 0x0	/* Ethernet (IPATH) packet dump on   */
+#define __IPATH_LINKVERBDBG 0x0	/* very verbose linkchange debug */
 
 #endif				/* _IPATH_DEBUGGING */
 
diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/infiniband/hw/ipath/ipath_diag.c
index 4137c77..6d49d2f 100644
--- a/drivers/infiniband/hw/ipath/ipath_diag.c
+++ b/drivers/infiniband/hw/ipath/ipath_diag.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -330,13 +330,19 @@
 	struct ipath_devdata *dd;
 	ssize_t ret = 0;
 	u64 val;
+	u32 l_state, lt_state; /* LinkState, LinkTrainingState */
 
-	if (count != sizeof(dp)) {
+	if (count < sizeof(odp)) {
 		ret = -EINVAL;
 		goto bail;
 	}
 
-	if (copy_from_user(&dp, data, sizeof(dp))) {
+	if (count == sizeof(dp)) {
+		if (copy_from_user(&dp, data, sizeof(dp))) {
+			ret = -EFAULT;
+			goto bail;
+		}
+	} else if (copy_from_user(&odp, data, sizeof(odp))) {
 		ret = -EFAULT;
 		goto bail;
 	}
@@ -396,10 +402,17 @@
 		ret = -ENODEV;
 		goto bail;
 	}
-	/* Check link state, but not if we have custom PBC */
-	val = dd->ipath_lastibcstat & IPATH_IBSTATE_MASK;
-	if (!dp.pbc_wd && val != IPATH_IBSTATE_INIT &&
-		val != IPATH_IBSTATE_ARM && val != IPATH_IBSTATE_ACTIVE) {
+	/*
+	 * Want to skip check for l_state if using custom PBC,
+	 * because we might be trying to force an SM packet out.
+	 * As a first cut, skip _all_ state checking in that case.
+	 */
+	val = ipath_ib_state(dd, dd->ipath_lastibcstat);
+	lt_state = ipath_ib_linktrstate(dd, dd->ipath_lastibcstat);
+	l_state = ipath_ib_linkstate(dd, dd->ipath_lastibcstat);
+	if (!dp.pbc_wd && (lt_state != INFINIPATH_IBCS_LT_STATE_LINKUP ||
+	    (val != dd->ib_init && val != dd->ib_arm &&
+	    val != dd->ib_active))) {
 		ipath_cdbg(VERBOSE, "unit %u not ready (state %llx)\n",
 			   dd->ipath_unit, (unsigned long long) val);
 		ret = -EINVAL;
@@ -431,15 +444,17 @@
 		goto bail;
 	}
 
-	piobuf = ipath_getpiobuf(dd, &pbufn);
+	plen >>= 2;		/* in dwords */
+
+	piobuf = ipath_getpiobuf(dd, plen, &pbufn);
 	if (!piobuf) {
 		ipath_cdbg(VERBOSE, "No PIO buffers avail unit for %u\n",
 			   dd->ipath_unit);
 		ret = -EBUSY;
 		goto bail;
 	}
-
-	plen >>= 2;		/* in dwords */
+	/* disarm it just to be extra sure */
+	ipath_disarm_piobufs(dd, pbufn, 1);
 
 	if (ipath_debug & __IPATH_PKTDBG)
 		ipath_cdbg(VERBOSE, "unit %u 0x%x+1w pio%d\n",
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index ca4d0ac..e0a64f0 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -41,7 +41,6 @@
 
 #include "ipath_kernel.h"
 #include "ipath_verbs.h"
-#include "ipath_common.h"
 
 static void ipath_update_pio_bufs(struct ipath_devdata *);
 
@@ -73,10 +72,27 @@
 MODULE_PARM_DESC(debug, "mask for debug prints");
 EXPORT_SYMBOL_GPL(ipath_debug);
 
+unsigned ipath_mtu4096 = 1; /* max 4KB IB mtu by default, if supported */
+module_param_named(mtu4096, ipath_mtu4096, uint, S_IRUGO);
+MODULE_PARM_DESC(mtu4096, "enable MTU of 4096 bytes, if supported");
+
+static unsigned ipath_hol_timeout_ms = 13000;
+module_param_named(hol_timeout_ms, ipath_hol_timeout_ms, uint, S_IRUGO);
+MODULE_PARM_DESC(hol_timeout_ms,
+	"duration of user app suspension after link failure");
+
+unsigned ipath_linkrecovery = 1;
+module_param_named(linkrecovery, ipath_linkrecovery, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(linkrecovery, "enable workaround for link recovery issue");
+
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("QLogic <support@pathscale.com>");
+MODULE_AUTHOR("QLogic <support@qlogic.com>");
 MODULE_DESCRIPTION("QLogic InfiniPath driver");
 
+/*
+ * Table to translate the LINKTRAININGSTATE portion of
+ * IBCStatus to a human-readable form.
+ */
 const char *ipath_ibcstatus_str[] = {
 	"Disabled",
 	"LinkUp",
@@ -91,9 +107,20 @@
 	"CfgWaitRmt",
 	"CfgIdle",
 	"RecovRetrain",
-	"LState0xD",		/* unused */
+	"CfgTxRevLane",		/* unused before IBA7220 */
 	"RecovWaitRmt",
 	"RecovIdle",
+	/* below were added for IBA7220 */
+	"CfgEnhanced",
+	"CfgTest",
+	"CfgWaitRmtTest",
+	"CfgWaitCfgEnhanced",
+	"SendTS_T",
+	"SendTstIdles",
+	"RcvTS_T",
+	"SendTst_TS1s",
+	"LTState18", "LTState19", "LTState1A", "LTState1B",
+	"LTState1C", "LTState1D", "LTState1E", "LTState1F"
 };
 
 static void __devexit ipath_remove_one(struct pci_dev *);
@@ -102,8 +129,10 @@
 
 /* Only needed for registration, nothing else needs this info */
 #define PCI_VENDOR_ID_PATHSCALE 0x1fc1
+#define PCI_VENDOR_ID_QLOGIC 0x1077
 #define PCI_DEVICE_ID_INFINIPATH_HT 0xd
 #define PCI_DEVICE_ID_INFINIPATH_PE800 0x10
+#define PCI_DEVICE_ID_INFINIPATH_7220 0x7220
 
 /* Number of seconds before our card status check...  */
 #define STATUS_TIMEOUT 60
@@ -111,6 +140,7 @@
 static const struct pci_device_id ipath_pci_tbl[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_HT) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_PE800) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_INFINIPATH_7220) },
 	{ 0, }
 };
 
@@ -126,19 +156,6 @@
 	},
 };
 
-static void ipath_check_status(struct work_struct *work)
-{
-	struct ipath_devdata *dd = container_of(work, struct ipath_devdata,
-						status_work.work);
-
-	/*
-	 * If we don't have any interrupts, let the user know and
-	 * don't bother checking again.
-	 */
-	if (dd->ipath_int_counter == 0)
-		dev_err(&dd->pcidev->dev, "No interrupts detected.\n");
-}
-
 static inline void read_bars(struct ipath_devdata *dd, struct pci_dev *dev,
 			     u32 *bar0, u32 *bar1)
 {
@@ -206,8 +223,6 @@
 	dd->pcidev = pdev;
 	pci_set_drvdata(pdev, dd);
 
-	INIT_DELAYED_WORK(&dd->status_work, ipath_check_status);
-
 	list_add(&dd->ipath_list, &ipath_dev_list);
 
 bail_unlock:
@@ -234,12 +249,12 @@
 	return dd;
 }
 
-int ipath_count_units(int *npresentp, int *nupp, u32 *maxportsp)
+int ipath_count_units(int *npresentp, int *nupp, int *maxportsp)
 {
 	int nunits, npresent, nup;
 	struct ipath_devdata *dd;
 	unsigned long flags;
-	u32 maxports;
+	int maxports;
 
 	nunits = npresent = nup = maxports = 0;
 
@@ -304,7 +319,7 @@
 	u32 *addr;
 	u64 msecs, emsecs;
 
-	piobuf = ipath_getpiobuf(dd, &pbnum);
+	piobuf = ipath_getpiobuf(dd, 0, &pbnum);
 	if (!piobuf) {
 		dev_info(&dd->pcidev->dev,
 			"No PIObufs for checking perf, skipping\n");
@@ -336,7 +351,14 @@
 
 	ipath_disable_armlaunch(dd);
 
-	writeq(0, piobuf); /* length 0, no dwords actually sent */
+	/*
+	 * length 0, no dwords actually sent, and mark as VL15
+	 * on chips where that may matter (due to IB flowcontrol)
+	 */
+	if ((dd->ipath_flags & IPATH_HAS_PBC_CNT))
+		writeq(1UL << 63, piobuf);
+	else
+		writeq(0, piobuf);
 	ipath_flush_wc();
 
 	/*
@@ -377,6 +399,7 @@
 	struct ipath_devdata *dd;
 	unsigned long long addr;
 	u32 bar0 = 0, bar1 = 0;
+	u8 rev;
 
 	dd = ipath_alloc_devdata(pdev);
 	if (IS_ERR(dd)) {
@@ -408,7 +431,7 @@
 	}
 	addr = pci_resource_start(pdev, 0);
 	len = pci_resource_len(pdev, 0);
-	ipath_cdbg(VERBOSE, "regbase (0) %llx len %d pdev->irq %d, vend %x/%x "
+	ipath_cdbg(VERBOSE, "regbase (0) %llx len %d irq %d, vend %x/%x "
 		   "driver_data %lx\n", addr, len, pdev->irq, ent->vendor,
 		   ent->device, ent->driver_data);
 
@@ -512,6 +535,13 @@
 			      "CONFIG_PCI_MSI is not enabled\n", ent->device);
 		return -ENODEV;
 #endif
+	case PCI_DEVICE_ID_INFINIPATH_7220:
+#ifndef CONFIG_PCI_MSI
+		ipath_dbg("CONFIG_PCI_MSI is not enabled, "
+			  "using IntX for unit %u\n", dd->ipath_unit);
+#endif
+		ipath_init_iba7220_funcs(dd);
+		break;
 	default:
 		ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, "
 			      "failing\n", ent->device);
@@ -533,7 +563,13 @@
 		goto bail_regions;
 	}
 
-	dd->ipath_pcirev = pdev->revision;
+	ret = pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
+	if (ret) {
+		ipath_dev_err(dd, "Failed to read PCI revision ID unit "
+			      "%u: err %d\n", dd->ipath_unit, -ret);
+		goto bail_regions;	/* shouldn't ever happen */
+	}
+	dd->ipath_pcirev = rev;
 
 #if defined(__powerpc__)
 	/* There isn't a generic way to specify writethrough mappings */
@@ -556,14 +592,6 @@
 	ipath_cdbg(VERBOSE, "mapped io addr %llx to kregbase %p\n",
 		   addr, dd->ipath_kregbase);
 
-	/*
-	 * clear ipath_flags here instead of in ipath_init_chip as it is set
-	 * by ipath_setup_htconfig.
-	 */
-	dd->ipath_flags = 0;
-	dd->ipath_lli_counter = 0;
-	dd->ipath_lli_errors = 0;
-
 	if (dd->ipath_f_bus(dd, pdev))
 		ipath_dev_err(dd, "Failed to setup config space; "
 			      "continuing anyway\n");
@@ -608,13 +636,11 @@
 	ipath_diag_add(dd);
 	ipath_register_ib_device(dd);
 
-	/* Check that card status in STATUS_TIMEOUT seconds. */
-	schedule_delayed_work(&dd->status_work, HZ * STATUS_TIMEOUT);
-
 	goto bail;
 
 bail_irqsetup:
-	if (pdev->irq) free_irq(pdev->irq, dd);
+	if (pdev->irq)
+		free_irq(pdev->irq, dd);
 
 bail_iounmap:
 	iounmap((volatile void __iomem *) dd->ipath_kregbase);
@@ -654,6 +680,10 @@
 		ipath_disable_wc(dd);
 	}
 
+	if (dd->ipath_spectriggerhit)
+		dev_info(&dd->pcidev->dev, "%lu special trigger hits\n",
+			 dd->ipath_spectriggerhit);
+
 	if (dd->ipath_pioavailregs_dma) {
 		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
 				  (void *) dd->ipath_pioavailregs_dma,
@@ -706,6 +736,8 @@
 		tmpp = dd->ipath_pageshadow;
 		dd->ipath_pageshadow = NULL;
 		vfree(tmpp);
+
+		dd->ipath_egrtidbase = NULL;
 	}
 
 	/*
@@ -738,7 +770,6 @@
 	 */
 	ipath_shutdown_device(dd);
 
-	cancel_delayed_work(&dd->status_work);
 	flush_scheduled_work();
 
 	if (dd->verbs_dev)
@@ -823,20 +854,8 @@
 		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
 		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
 	}
-
-	/*
-	 * Disable PIOAVAILUPD, then re-enable, reading scratch in
-	 * between.  This seems to avoid a chip timing race that causes
-	 * pioavail updates to memory to stop.  We xor as we don't
-	 * know the state of the bit when we're called.
-	 */
-	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-		dd->ipath_sendctrl ^ INFINIPATH_S_PIOBUFAVAILUPD);
-	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-			 dd->ipath_sendctrl);
-	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
+	/* on some older chips, update may not happen after cancel */
+	ipath_force_pio_avail_update(dd);
 }
 
 /**
@@ -873,18 +892,52 @@
 			   (unsigned long long) ipath_read_kreg64(
 				   dd, dd->ipath_kregs->kr_ibcctrl),
 			   (unsigned long long) val,
-			   ipath_ibcstatus_str[val & 0xf]);
+			   ipath_ibcstatus_str[val & dd->ibcs_lts_mask]);
 	}
 	return (dd->ipath_flags & state) ? 0 : -ETIMEDOUT;
 }
 
+static void decode_sdma_errs(struct ipath_devdata *dd, ipath_err_t err,
+	char *buf, size_t blen)
+{
+	static const struct {
+		ipath_err_t err;
+		const char *msg;
+	} errs[] = {
+		{ INFINIPATH_E_SDMAGENMISMATCH, "SDmaGenMismatch" },
+		{ INFINIPATH_E_SDMAOUTOFBOUND, "SDmaOutOfBound" },
+		{ INFINIPATH_E_SDMATAILOUTOFBOUND, "SDmaTailOutOfBound" },
+		{ INFINIPATH_E_SDMABASE, "SDmaBase" },
+		{ INFINIPATH_E_SDMA1STDESC, "SDma1stDesc" },
+		{ INFINIPATH_E_SDMARPYTAG, "SDmaRpyTag" },
+		{ INFINIPATH_E_SDMADWEN, "SDmaDwEn" },
+		{ INFINIPATH_E_SDMAMISSINGDW, "SDmaMissingDw" },
+		{ INFINIPATH_E_SDMAUNEXPDATA, "SDmaUnexpData" },
+		{ INFINIPATH_E_SDMADESCADDRMISALIGN, "SDmaDescAddrMisalign" },
+		{ INFINIPATH_E_SENDBUFMISUSE, "SendBufMisuse" },
+		{ INFINIPATH_E_SDMADISABLED, "SDmaDisabled" },
+	};
+	int i;
+	int expected;
+	size_t bidx = 0;
+
+	for (i = 0; i < ARRAY_SIZE(errs); i++) {
+		expected = (errs[i].err != INFINIPATH_E_SDMADISABLED) ? 0 :
+			test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
+		if ((err & errs[i].err) && !expected)
+			bidx += snprintf(buf + bidx, blen - bidx,
+					 "%s ", errs[i].msg);
+	}
+}
+
 /*
  * Decode the error status into strings, deciding whether to always
  * print * it or not depending on "normal packet errors" vs everything
  * else.   Return 1 if "real" errors, otherwise 0 if only packet
  * errors, so caller can decide what to print with the string.
  */
-int ipath_decode_err(char *buf, size_t blen, ipath_err_t err)
+int ipath_decode_err(struct ipath_devdata *dd, char *buf, size_t blen,
+	ipath_err_t err)
 {
 	int iserr = 1;
 	*buf = '\0';
@@ -922,6 +975,8 @@
 		strlcat(buf, "rbadversion ", blen);
 	if (err & INFINIPATH_E_RHDR)
 		strlcat(buf, "rhdr ", blen);
+	if (err & INFINIPATH_E_SENDSPECIALTRIGGER)
+		strlcat(buf, "sendspecialtrigger ", blen);
 	if (err & INFINIPATH_E_RLONGPKTLEN)
 		strlcat(buf, "rlongpktlen ", blen);
 	if (err & INFINIPATH_E_RMAXPKTLEN)
@@ -964,6 +1019,10 @@
 		strlcat(buf, "hardware ", blen);
 	if (err & INFINIPATH_E_RESET)
 		strlcat(buf, "reset ", blen);
+	if (err & INFINIPATH_E_SDMAERRS)
+		decode_sdma_errs(dd, err, buf, blen);
+	if (err & INFINIPATH_E_INVALIDEEPCMD)
+		strlcat(buf, "invalideepromcmd ", blen);
 done:
 	return iserr;
 }
@@ -1076,18 +1135,17 @@
 			     u32 eflags,
 			     u32 l,
 			     u32 etail,
-			     u64 *rc)
+			     __le32 *rhf_addr,
+			     struct ipath_message_header *hdr)
 {
 	char emsg[128];
-	struct ipath_message_header *hdr;
 
 	get_rhf_errstring(eflags, emsg, sizeof emsg);
-	hdr = (struct ipath_message_header *)&rc[1];
 	ipath_cdbg(PKT, "RHFerrs %x hdrqtail=%x typ=%u "
 		   "tlen=%x opcode=%x egridx=%x: %s\n",
 		   eflags, l,
-		   ipath_hdrget_rcv_type((__le32 *) rc),
-		   ipath_hdrget_length_in_bytes((__le32 *) rc),
+		   ipath_hdrget_rcv_type(rhf_addr),
+		   ipath_hdrget_length_in_bytes(rhf_addr),
 		   be32_to_cpu(hdr->bth[0]) >> 24,
 		   etail, emsg);
 
@@ -1112,55 +1170,52 @@
  */
 void ipath_kreceive(struct ipath_portdata *pd)
 {
-	u64 *rc;
 	struct ipath_devdata *dd = pd->port_dd;
+	__le32 *rhf_addr;
 	void *ebuf;
 	const u32 rsize = dd->ipath_rcvhdrentsize;	/* words */
 	const u32 maxcnt = dd->ipath_rcvhdrcnt * rsize;	/* words */
 	u32 etail = -1, l, hdrqtail;
 	struct ipath_message_header *hdr;
-	u32 eflags, i, etype, tlen, pkttot = 0, updegr=0, reloop=0;
+	u32 eflags, i, etype, tlen, pkttot = 0, updegr = 0, reloop = 0;
 	static u64 totcalls;	/* stats, may eventually remove */
-
-	if (!dd->ipath_hdrqtailptr) {
-		ipath_dev_err(dd,
-			      "hdrqtailptr not set, can't do receives\n");
-		goto bail;
-	}
+	int last;
 
 	l = pd->port_head;
-	hdrqtail = ipath_get_rcvhdrtail(pd);
-	if (l == hdrqtail)
-		goto bail;
+	rhf_addr = (__le32 *) pd->port_rcvhdrq + l + dd->ipath_rhf_offset;
+	if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
+		u32 seq = ipath_hdrget_seq(rhf_addr);
+
+		if (seq != pd->port_seq_cnt)
+			goto bail;
+		hdrqtail = 0;
+	} else {
+		hdrqtail = ipath_get_rcvhdrtail(pd);
+		if (l == hdrqtail)
+			goto bail;
+		smp_rmb();
+	}
 
 reloop:
-	for (i = 0; l != hdrqtail; i++) {
-		u32 qp;
-		u8 *bthbytes;
-
-		rc = (u64 *) (pd->port_rcvhdrq + (l << 2));
-		hdr = (struct ipath_message_header *)&rc[1];
-		/*
-		 * could make a network order version of IPATH_KD_QP, and
-		 * do the obvious shift before masking to speed this up.
-		 */
-		qp = ntohl(hdr->bth[1]) & 0xffffff;
-		bthbytes = (u8 *) hdr->bth;
-
-		eflags = ipath_hdrget_err_flags((__le32 *) rc);
-		etype = ipath_hdrget_rcv_type((__le32 *) rc);
+	for (last = 0, i = 1; !last; i++) {
+		hdr = dd->ipath_f_get_msgheader(dd, rhf_addr);
+		eflags = ipath_hdrget_err_flags(rhf_addr);
+		etype = ipath_hdrget_rcv_type(rhf_addr);
 		/* total length */
-		tlen = ipath_hdrget_length_in_bytes((__le32 *) rc);
+		tlen = ipath_hdrget_length_in_bytes(rhf_addr);
 		ebuf = NULL;
-		if (etype != RCVHQ_RCV_TYPE_EXPECTED) {
+		if ((dd->ipath_flags & IPATH_NODMA_RTAIL) ?
+		    ipath_hdrget_use_egr_buf(rhf_addr) :
+		    (etype != RCVHQ_RCV_TYPE_EXPECTED)) {
 			/*
-			 * it turns out that the chips uses an eager buffer
+			 * It turns out that the chip uses an eager buffer
 			 * for all non-expected packets, whether it "needs"
 			 * one or not.  So always get the index, but don't
 			 * set ebuf (so we try to copy data) unless the
 			 * length requires it.
 			 */
-			etail = ipath_hdrget_index((__le32 *) rc);
+			etail = ipath_hdrget_index(rhf_addr);
+			updegr = 1;
 			if (tlen > sizeof(*hdr) ||
 			    etype == RCVHQ_RCV_TYPE_NON_KD)
 				ebuf = ipath_get_egrbuf(dd, etail);
@@ -1171,75 +1226,91 @@
 		 * packets; only ipathhdrerr should be set.
 		 */
 
-		if (etype != RCVHQ_RCV_TYPE_NON_KD && etype !=
-		    RCVHQ_RCV_TYPE_ERROR && ipath_hdrget_ipath_ver(
-			    hdr->iph.ver_port_tid_offset) !=
-		    IPS_PROTO_VERSION) {
+		if (etype != RCVHQ_RCV_TYPE_NON_KD &&
+		    etype != RCVHQ_RCV_TYPE_ERROR &&
+		    ipath_hdrget_ipath_ver(hdr->iph.ver_port_tid_offset) !=
+		    IPS_PROTO_VERSION)
 			ipath_cdbg(PKT, "Bad InfiniPath protocol version "
 				   "%x\n", etype);
-		}
 
 		if (unlikely(eflags))
-			ipath_rcv_hdrerr(dd, eflags, l, etail, rc);
+			ipath_rcv_hdrerr(dd, eflags, l, etail, rhf_addr, hdr);
 		else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
-			ipath_ib_rcv(dd->verbs_dev, rc + 1, ebuf, tlen);
+			ipath_ib_rcv(dd->verbs_dev, (u32 *)hdr, ebuf, tlen);
 			if (dd->ipath_lli_counter)
 				dd->ipath_lli_counter--;
+		} else if (etype == RCVHQ_RCV_TYPE_EAGER) {
+			u8 opcode = be32_to_cpu(hdr->bth[0]) >> 24;
+			u32 qp = be32_to_cpu(hdr->bth[1]) & 0xffffff;
 			ipath_cdbg(PKT, "typ %x, opcode %x (eager, "
 				   "qp=%x), len %x; ignored\n",
-				   etype, bthbytes[0], qp, tlen);
+				   etype, opcode, qp, tlen);
 		}
-		else if (etype == RCVHQ_RCV_TYPE_EAGER)
-			ipath_cdbg(PKT, "typ %x, opcode %x (eager, "
-				   "qp=%x), len %x; ignored\n",
-				   etype, bthbytes[0], qp, tlen);
 		else if (etype == RCVHQ_RCV_TYPE_EXPECTED)
 			ipath_dbg("Bug: Expected TID, opcode %x; ignored\n",
-				  be32_to_cpu(hdr->bth[0]) & 0xff);
+				  be32_to_cpu(hdr->bth[0]) >> 24);
 		else {
 			/*
 			 * error packet, type of error unknown.
 			 * Probably type 3, but we don't know, so don't
 			 * even try to print the opcode, etc.
+			 * Usually caused by a "bad packet" that has no
+			 * BTH, even though the LRH says it should.
 			 */
-			ipath_dbg("Error Pkt, but no eflags! egrbuf %x, "
-				  "len %x\nhdrq@%lx;hdrq+%x rhf: %llx; "
-				  "hdr %llx %llx %llx %llx %llx\n",
-				  etail, tlen, (unsigned long) rc, l,
-				  (unsigned long long) rc[0],
-				  (unsigned long long) rc[1],
-				  (unsigned long long) rc[2],
-				  (unsigned long long) rc[3],
-				  (unsigned long long) rc[4],
-				  (unsigned long long) rc[5]);
+			ipath_cdbg(ERRPKT, "Error Pkt, but no eflags! egrbuf"
+				  " %x, len %x hdrq+%x rhf: %Lx\n",
+				  etail, tlen, l,
+				  le64_to_cpu(*(__le64 *) rhf_addr));
+			if (ipath_debug & __IPATH_ERRPKTDBG) {
+				u32 j, *d, dw = rsize-2;
+				if (rsize > (tlen>>2))
+					dw = tlen>>2;
+				d = (u32 *)hdr;
+				printk(KERN_DEBUG "EPkt rcvhdr(%x dw):\n",
+					dw);
+				for (j = 0; j < dw; j++)
+					printk(KERN_DEBUG "%8x%s", d[j],
+						(j%8) == 7 ? "\n" : " ");
+				printk(KERN_DEBUG ".\n");
+			}
 		}
 		l += rsize;
 		if (l >= maxcnt)
 			l = 0;
-		if (etype != RCVHQ_RCV_TYPE_EXPECTED)
-		    updegr = 1;
+		rhf_addr = (__le32 *) pd->port_rcvhdrq +
+			l + dd->ipath_rhf_offset;
+		if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
+			u32 seq = ipath_hdrget_seq(rhf_addr);
+
+			if (++pd->port_seq_cnt > 13)
+				pd->port_seq_cnt = 1;
+			if (seq != pd->port_seq_cnt)
+				last = 1;
+		} else if (l == hdrqtail)
+			last = 1;
 		/*
 		 * update head regs on last packet, and every 16 packets.
 		 * Reduce bus traffic, while still trying to prevent
 		 * rcvhdrq overflows, for when the queue is nearly full
 		 */
-		if (l == hdrqtail || (i && !(i&0xf))) {
-			u64 lval;
-			if (l == hdrqtail)
-				/* request IBA6120 interrupt only on last */
-				lval = dd->ipath_rhdrhead_intr_off | l;
-			else
-				lval = l;
-			(void)ipath_write_ureg(dd, ur_rcvhdrhead, lval, 0);
+		if (last || !(i & 0xf)) {
+			u64 lval = l;
+
+			/* request IBA6120 and 7220 interrupt only on last */
+			if (last)
+				lval |= dd->ipath_rhdrhead_intr_off;
+			ipath_write_ureg(dd, ur_rcvhdrhead, lval,
+				pd->port_port);
 			if (updegr) {
-				(void)ipath_write_ureg(dd, ur_rcvegrindexhead,
-						       etail, 0);
+				ipath_write_ureg(dd, ur_rcvegrindexhead,
+						 etail, pd->port_port);
 				updegr = 0;
 			}
 		}
 	}
 
-	if (!dd->ipath_rhdrhead_intr_off && !reloop) {
+	if (!dd->ipath_rhdrhead_intr_off && !reloop &&
+	    !(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
 		/* IBA6110 workaround; we can have a race clearing chip
 		 * interrupt with another interrupt about to be delivered,
 		 * and can clear it before it is delivered on the GPIO
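
On IPATH_NODMA_RTAIL chips the hardware never DMAs a tail pointer to memory, so the loop above terminates on the RHF sequence number instead: the chip writes a rolling 1..13 count into every entry, software mirrors it in port_seq_cnt, and the first mismatch marks the end of valid entries. Distilled from the hunk:

	if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
		u32 seq = ipath_hdrget_seq(rhf_addr);

		if (++pd->port_seq_cnt > 13)	/* expected count wraps 1..13 */
			pd->port_seq_cnt = 1;
		if (seq != pd->port_seq_cnt)	/* divergence == queue empty */
			last = 1;
	} else if (l == hdrqtail)		/* classic DMA'd-tail check */
		last = 1;
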
@@ -1301,7 +1372,6 @@
 	 * happens when all buffers are in use, so only cpu overhead, not
 	 * latency or bandwidth is affected.
 	 */
-#define _IPATH_ALL_CHECKBITS 0x5555555555555555ULL
 	if (!dd->ipath_pioavailregs_dma) {
 		ipath_dbg("Update shadow pioavail, but regs_dma NULL!\n");
 		return;
@@ -1346,7 +1416,7 @@
 			piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i ^ 1]);
 		else
 			piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i]);
-		pchg = _IPATH_ALL_CHECKBITS &
+		pchg = dd->ipath_pioavailkernel[i] &
 			~(dd->ipath_pioavailshadow[i] ^ piov);
 		pchbusy = pchg << INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT;
 		if (pchg && (pchbusy & dd->ipath_pioavailshadow[i])) {
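
The pioavail shadow gives each send buffer a bit pair, as the shifts in this file imply: bit 2*i is the generation bit and bit 2*i + 1 (INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT above it) is the busy bit, which is why ipath_chg_pioavailkernel later in this patch multiplies buffer indices by two. The new ipath_pioavailkernel bitmap masks change-detection down to the buffers the kernel may allocate, so buffers handed out to user processes no longer perturb the scan. Illustrative accessors for that layout:

	/* one generation bit and one busy bit per send buffer */
	static inline int buf_gen_bit(unsigned i)  { return 2 * i; }
	static inline int buf_busy_bit(unsigned i) { return 2 * i + 1; }
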
@@ -1397,27 +1467,63 @@
 	return ret;
 }
 
-/**
- * ipath_getpiobuf - find an available pio buffer
- * @dd: the infinipath device
- * @pbufnum: the buffer number is placed here
+/*
+ * debugging code and stats updates if no pio buffers are available.
+ */
+static noinline void no_pio_bufs(struct ipath_devdata *dd)
+{
+	unsigned long *shadow = dd->ipath_pioavailshadow;
+	__le64 *dma = (__le64 *)dd->ipath_pioavailregs_dma;
+
+	dd->ipath_upd_pio_shadow = 1;
+
+	/*
+	 * not atomic, but if we lose a stat count once in a while, that's OK
+	 */
+	ipath_stats.sps_nopiobufs++;
+	if (!(++dd->ipath_consec_nopiobuf % 100000)) {
+		ipath_dbg("%u pio sends with no bufavail; dmacopy: "
+			"%llx %llx %llx %llx; shadow:  %lx %lx %lx %lx\n",
+			dd->ipath_consec_nopiobuf,
+			(unsigned long long) le64_to_cpu(dma[0]),
+			(unsigned long long) le64_to_cpu(dma[1]),
+			(unsigned long long) le64_to_cpu(dma[2]),
+			(unsigned long long) le64_to_cpu(dma[3]),
+			shadow[0], shadow[1], shadow[2], shadow[3]);
+		/*
+		 * 4 buffers per byte, 4 registers above, cover rest
+		 * below
+		 */
+		if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) >
+		    (sizeof(shadow[0]) * 4 * 4))
+			ipath_dbg("2nd group: dmacopy: %llx %llx "
+				  "%llx %llx; shadow: %lx %lx %lx %lx\n",
+				  (unsigned long long)le64_to_cpu(dma[4]),
+				  (unsigned long long)le64_to_cpu(dma[5]),
+				  (unsigned long long)le64_to_cpu(dma[6]),
+				  (unsigned long long)le64_to_cpu(dma[7]),
+				  shadow[4], shadow[5], shadow[6],
+				  shadow[7]);
+	}
+}
+
+/*
+ * common code for normal driver pio buffer allocation, and reserved
+ * allocation.
  *
  * do appropriate marking as busy, etc.
  * returns buffer number if one found (>=0), negative number is error.
- * Used by ipath_layer_send
  */
-u32 __iomem *ipath_getpiobuf(struct ipath_devdata *dd, u32 * pbufnum)
+static u32 __iomem *ipath_getpiobuf_range(struct ipath_devdata *dd,
+	u32 *pbufnum, u32 first, u32 last, u32 firsti)
 {
-	int i, j, starti, updated = 0;
-	unsigned piobcnt, iter;
+	int i, j, updated = 0;
+	unsigned piobcnt;
 	unsigned long flags;
 	unsigned long *shadow = dd->ipath_pioavailshadow;
 	u32 __iomem *buf;
 
-	piobcnt = (unsigned)(dd->ipath_piobcnt2k
-			     + dd->ipath_piobcnt4k);
-	starti = dd->ipath_lastport_piobuf;
-	iter = piobcnt - starti;
+	piobcnt = last - first;
 	if (dd->ipath_upd_pio_shadow) {
 		/*
 		 * Minor optimization.  If we had no buffers on last call,
@@ -1425,12 +1531,10 @@
 		 * if no buffers were updated, to be paranoid
 		 */
 		ipath_update_pio_bufs(dd);
-		/* we scanned here, don't do it at end of scan */
-		updated = 1;
-		i = starti;
+		updated++;
+		i = first;
 	} else
-		i = dd->ipath_lastpioindex;
-
+		i = firsti;
 rescan:
 	/*
 	 * while test_and_set_bit() is atomic, we do that and then the
@@ -1438,104 +1542,141 @@
 	 * of the remaining armlaunch errors.
 	 */
 	spin_lock_irqsave(&ipath_pioavail_lock, flags);
-	for (j = 0; j < iter; j++, i++) {
-		if (i >= piobcnt)
-			i = starti;
-		/*
-		 * To avoid bus lock overhead, we first find a candidate
-		 * buffer, then do the test and set, and continue if that
-		 * fails.
-		 */
-		if (test_bit((2 * i) + 1, shadow) ||
-		    test_and_set_bit((2 * i) + 1, shadow))
+	for (j = 0; j < piobcnt; j++, i++) {
+		if (i >= last)
+			i = first;
+		if (__test_and_set_bit((2 * i) + 1, shadow))
 			continue;
 		/* flip generation bit */
-		change_bit(2 * i, shadow);
+		__change_bit(2 * i, shadow);
 		break;
 	}
 	spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
 
-	if (j == iter) {
-		volatile __le64 *dma = dd->ipath_pioavailregs_dma;
-
-		/*
-		 * first time through; shadow exhausted, but may be real
-		 * buffers available, so go see; if any updated, rescan
-		 * (once)
-		 */
+	if (j == piobcnt) {
 		if (!updated) {
+			/*
+			 * first time through; shadow exhausted, but may be
+			 * buffers available, try an update and then rescan.
+			 */
 			ipath_update_pio_bufs(dd);
-			updated = 1;
-			i = starti;
+			updated++;
+			i = first;
+			goto rescan;
+		} else if (updated == 1 && piobcnt <=
+			((dd->ipath_sendctrl
+			>> INFINIPATH_S_UPDTHRESH_SHIFT) &
+			INFINIPATH_S_UPDTHRESH_MASK)) {
+			/*
+			 * for chips supporting and using the update
+			 * threshold we need to force an update of the
+			 * in-memory copy if the count is less than the
+			 * threshold, then check one more time.
+			 */
+			ipath_force_pio_avail_update(dd);
+			ipath_update_pio_bufs(dd);
+			updated++;
+			i = first;
 			goto rescan;
 		}
-		dd->ipath_upd_pio_shadow = 1;
-		/*
-		 * not atomic, but if we lose one once in a while, that's OK
-		 */
-		ipath_stats.sps_nopiobufs++;
-		if (!(++dd->ipath_consec_nopiobuf % 100000)) {
-			ipath_dbg(
-				"%u pio sends with no bufavail; dmacopy: "
-				"%llx %llx %llx %llx; shadow:  "
-				"%lx %lx %lx %lx\n",
-				dd->ipath_consec_nopiobuf,
-				(unsigned long long) le64_to_cpu(dma[0]),
-				(unsigned long long) le64_to_cpu(dma[1]),
-				(unsigned long long) le64_to_cpu(dma[2]),
-				(unsigned long long) le64_to_cpu(dma[3]),
-				shadow[0], shadow[1], shadow[2],
-				shadow[3]);
-			/*
-			 * 4 buffers per byte, 4 registers above, cover rest
-			 * below
-			 */
-			if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) >
-			    (sizeof(shadow[0]) * 4 * 4))
-				ipath_dbg("2nd group: dmacopy: %llx %llx "
-					  "%llx %llx; shadow: %lx %lx "
-					  "%lx %lx\n",
-					  (unsigned long long)
-					  le64_to_cpu(dma[4]),
-					  (unsigned long long)
-					  le64_to_cpu(dma[5]),
-					  (unsigned long long)
-					  le64_to_cpu(dma[6]),
-					  (unsigned long long)
-					  le64_to_cpu(dma[7]),
-					  shadow[4], shadow[5],
-					  shadow[6], shadow[7]);
-		}
+
+		no_pio_bufs(dd);
 		buf = NULL;
-		goto bail;
+	} else {
+		if (i < dd->ipath_piobcnt2k)
+			buf = (u32 __iomem *) (dd->ipath_pio2kbase +
+					       i * dd->ipath_palign);
+		else
+			buf = (u32 __iomem *)
+				(dd->ipath_pio4kbase +
+				 (i - dd->ipath_piobcnt2k) * dd->ipath_4kalign);
+		if (pbufnum)
+			*pbufnum = i;
 	}
 
-	/*
-	 * set next starting place.  Since it's just an optimization,
-	 * it doesn't matter who wins on this, so no locking
-	 */
-	dd->ipath_lastpioindex = i + 1;
-	if (dd->ipath_upd_pio_shadow)
-		dd->ipath_upd_pio_shadow = 0;
-	if (dd->ipath_consec_nopiobuf)
-		dd->ipath_consec_nopiobuf = 0;
-	if (i < dd->ipath_piobcnt2k)
-		buf = (u32 __iomem *) (dd->ipath_pio2kbase +
-				       i * dd->ipath_palign);
-	else
-		buf = (u32 __iomem *)
-			(dd->ipath_pio4kbase +
-			 (i - dd->ipath_piobcnt2k) * dd->ipath_4kalign);
-	ipath_cdbg(VERBOSE, "Return piobuf%u %uk @ %p\n",
-		   i, (i < dd->ipath_piobcnt2k) ? 2 : 4, buf);
-	if (pbufnum)
-		*pbufnum = i;
-
-bail:
 	return buf;
 }
 
 /**
+ * ipath_getpiobuf - find an available pio buffer
+ * @dd: the infinipath device
+ * @plen: the size of the PIO buffer needed in 32-bit words
+ * @pbufnum: the buffer number is placed here
+ */
+u32 __iomem *ipath_getpiobuf(struct ipath_devdata *dd, u32 plen, u32 *pbufnum)
+{
+	u32 __iomem *buf;
+	u32 pnum, nbufs;
+	u32 first, lasti;
+
+	if (plen + 1 >= IPATH_SMALLBUF_DWORDS) {
+		first = dd->ipath_piobcnt2k;
+		lasti = dd->ipath_lastpioindexl;
+	} else {
+		first = 0;
+		lasti = dd->ipath_lastpioindex;
+	}
+	nbufs = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
+	buf = ipath_getpiobuf_range(dd, &pnum, first, nbufs, lasti);
+
+	if (buf) {
+		/*
+		 * Set next starting place.  It's just an optimization,
+		 * it doesn't matter who wins on this, so no locking
+		 */
+		if (plen + 1 >= IPATH_SMALLBUF_DWORDS)
+			dd->ipath_lastpioindexl = pnum + 1;
+		else
+			dd->ipath_lastpioindex = pnum + 1;
+		if (dd->ipath_upd_pio_shadow)
+			dd->ipath_upd_pio_shadow = 0;
+		if (dd->ipath_consec_nopiobuf)
+			dd->ipath_consec_nopiobuf = 0;
+		ipath_cdbg(VERBOSE, "Return piobuf%u %uk @ %p\n",
+			   pnum, (pnum < dd->ipath_piobcnt2k) ? 2 : 4, buf);
+		if (pbufnum)
+			*pbufnum = pnum;
+
+	}
+	return buf;
+}
+
+/**
+ * ipath_chg_pioavailkernel - change which send buffers are available for kernel
+ * @dd: the infinipath device
+ * @start: the starting send buffer number
+ * @len: the number of send buffers
+ * @avail: true if the buffers are available for kernel use, false otherwise
+ */
+void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
+			      unsigned len, int avail)
+{
+	unsigned long flags;
+	unsigned end;
+
+	/* There are two bits per send buffer (busy and generation) */
+	start *= 2;
+	len *= 2;
+	end = start + len;
+
+	/* Set or clear the generation bits. */
+	spin_lock_irqsave(&ipath_pioavail_lock, flags);
+	while (start < end) {
+		if (avail) {
+			__clear_bit(start + INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT,
+				dd->ipath_pioavailshadow);
+			__set_bit(start, dd->ipath_pioavailkernel);
+		} else {
+			__set_bit(start + INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT,
+				dd->ipath_pioavailshadow);
+			__clear_bit(start, dd->ipath_pioavailkernel);
+		}
+		start += 2;
+	}
+	spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
+}
+
+/**
  * ipath_create_rcvhdrq - create a receive header queue
  * @dd: the infinipath device
  * @pd: the port data
@@ -1566,19 +1707,27 @@
 			ret = -ENOMEM;
 			goto bail;
 		}
-		pd->port_rcvhdrtail_kvaddr = dma_alloc_coherent(
-			&dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail, GFP_KERNEL);
-		if (!pd->port_rcvhdrtail_kvaddr) {
-			ipath_dev_err(dd, "attempt to allocate 1 page "
-				      "for port %u rcvhdrqtailaddr failed\n",
-				      pd->port_port);
-			ret = -ENOMEM;
-			dma_free_coherent(&dd->pcidev->dev, amt,
-					  pd->port_rcvhdrq, pd->port_rcvhdrq_phys);
-			pd->port_rcvhdrq = NULL;
-			goto bail;
+
+		if (!(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
+			pd->port_rcvhdrtail_kvaddr = dma_alloc_coherent(
+				&dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
+				GFP_KERNEL);
+			if (!pd->port_rcvhdrtail_kvaddr) {
+				ipath_dev_err(dd, "attempt to allocate 1 page "
+					"for port %u rcvhdrqtailaddr "
+					"failed\n", pd->port_port);
+				ret = -ENOMEM;
+				dma_free_coherent(&dd->pcidev->dev, amt,
+					pd->port_rcvhdrq,
+					pd->port_rcvhdrq_phys);
+				pd->port_rcvhdrq = NULL;
+				goto bail;
+			}
+			pd->port_rcvhdrqtailaddr_phys = phys_hdrqtail;
+			ipath_cdbg(VERBOSE, "port %d hdrtailaddr, %llx "
+				   "physical\n", pd->port_port,
+				   (unsigned long long) phys_hdrqtail);
 		}
-		pd->port_rcvhdrqtailaddr_phys = phys_hdrqtail;
 
 		pd->port_rcvhdrq_size = amt;
 
@@ -1588,10 +1737,6 @@
 			   (unsigned long) pd->port_rcvhdrq_phys,
 			   (unsigned long) pd->port_rcvhdrq_size,
 			   pd->port_port);
-
-		ipath_cdbg(VERBOSE, "port %d hdrtailaddr, %llx physical\n",
-			   pd->port_port,
-			   (unsigned long long) phys_hdrqtail);
 	}
 	else
 		ipath_cdbg(VERBOSE, "reuse port %d rcvhdrq @%p %llx phys; "
@@ -1615,7 +1760,6 @@
 	ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
 			      pd->port_port, pd->port_rcvhdrq_phys);
 
-	ret = 0;
 bail:
 	return ret;
 }
@@ -1632,52 +1776,149 @@
  */
 void ipath_cancel_sends(struct ipath_devdata *dd, int restore_sendctrl)
 {
-	ipath_dbg("Cancelling all in-progress send buffers\n");
-	dd->ipath_lastcancel = jiffies+HZ/2; /* skip armlaunch errs a bit */
-	/*
-	 * the abort bit is auto-clearing.  We read scratch to be sure
-	 * that cancels and the abort have taken effect in the chip.
-	 */
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-		INFINIPATH_S_ABORT);
-	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-	ipath_disarm_piobufs(dd, 0,
-		(unsigned)(dd->ipath_piobcnt2k + dd->ipath_piobcnt4k));
-	if (restore_sendctrl) /* else done by caller later */
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-				 dd->ipath_sendctrl);
+	unsigned long flags;
 
-	/* and again, be sure all have hit the chip */
+	if (dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) {
+		ipath_cdbg(VERBOSE, "Ignore while in autonegotiation\n");
+		goto bail;
+	}
+	/*
+	 * If we have SDMA, and it's not disabled, we have to kick off the
+	 * abort state machine, provided we aren't already aborting.
+	 * If we are in the process of aborting SDMA (!DISABLED, but ABORTING),
+	 * we skip the rest of this routine. It is already "in progress"
+	 */
+	if (dd->ipath_flags & IPATH_HAS_SEND_DMA) {
+		int skip_cancel;
+		u64 *statp = &dd->ipath_sdma_status;
+
+		spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
+		skip_cancel =
+			!test_bit(IPATH_SDMA_DISABLED, statp) &&
+			test_and_set_bit(IPATH_SDMA_ABORTING, statp);
+		spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
+		if (skip_cancel)
+			goto bail;
+	}
+
+	ipath_dbg("Cancelling all in-progress send buffers\n");
+
+	/* skip armlaunch errs for a while */
+	dd->ipath_lastcancel = jiffies + HZ / 2;
+
+	/*
+	 * The abort bit is auto-clearing.  We also don't want pioavail
+	 * update happening during this, and we don't want any other
+	 * sends going out, so turn those off for the duration.  We read
+	 * the scratch register to be sure that cancels and the abort
+	 * have taken effect in the chip.  Otherwise two parts are same
+	 * as ipath_force_pio_avail_update()
+	 */
+	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+	dd->ipath_sendctrl &= ~(INFINIPATH_S_PIOBUFAVAILUPD
+		| INFINIPATH_S_PIOENABLE);
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+		dd->ipath_sendctrl | INFINIPATH_S_ABORT);
 	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
+
+	/* disarm all send buffers */
+	ipath_disarm_piobufs(dd, 0,
+		dd->ipath_piobcnt2k + dd->ipath_piobcnt4k);
+
+	if (restore_sendctrl) {
+		/* else done by caller later if needed */
+		spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+		dd->ipath_sendctrl |= INFINIPATH_S_PIOBUFAVAILUPD |
+			INFINIPATH_S_PIOENABLE;
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+			dd->ipath_sendctrl);
+		/* and again, be sure all have hit the chip */
+		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
+	}
+
+	if ((dd->ipath_flags & IPATH_HAS_SEND_DMA) &&
+	    !test_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status) &&
+	    test_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status)) {
+		spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
+		/* only wait so long for intr */
+		dd->ipath_sdma_abort_intr_timeout = jiffies + HZ;
+		dd->ipath_sdma_reset_wait = 200;
+		__set_bit(IPATH_SDMA_DISARMED, &dd->ipath_sdma_status);
+		if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
+			tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
+		spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
+	}
+bail:;
 }
 
-
-static void ipath_set_ib_lstate(struct ipath_devdata *dd, int which)
+/*
+ * Force an update of in-memory copy of the pioavail registers, when
+ * needed for any of a variety of reasons.  We read the scratch register
+ * to make it highly likely that the update will have happened by the
+ * time we return.  If already off (as in cancel_sends above), this
+ * routine is a nop, on the assumption that the caller will "do the
+ * right thing".
+ */
+void ipath_force_pio_avail_update(struct ipath_devdata *dd)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+	if (dd->ipath_sendctrl & INFINIPATH_S_PIOBUFAVAILUPD) {
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+			dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
+		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+			dd->ipath_sendctrl);
+		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+	}
+	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
+}
+
+static void ipath_set_ib_lstate(struct ipath_devdata *dd, int linkcmd,
+				int linitcmd)
+{
+	u64 mod_wd;
 	static const char *what[4] = {
 		[0] = "NOP",
 		[INFINIPATH_IBCC_LINKCMD_DOWN] = "DOWN",
 		[INFINIPATH_IBCC_LINKCMD_ARMED] = "ARMED",
 		[INFINIPATH_IBCC_LINKCMD_ACTIVE] = "ACTIVE"
 	};
-	int linkcmd = (which >> INFINIPATH_IBCC_LINKCMD_SHIFT) &
-			INFINIPATH_IBCC_LINKCMD_MASK;
 
-	ipath_cdbg(VERBOSE, "Trying to move unit %u to %s, current ltstate "
-		   "is %s\n", dd->ipath_unit,
-		   what[linkcmd],
-		   ipath_ibcstatus_str[
-			   (ipath_read_kreg64
-			    (dd, dd->ipath_kregs->kr_ibcstatus) >>
-			    INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
-			   INFINIPATH_IBCS_LINKTRAININGSTATE_MASK]);
-	/* flush all queued sends when going to DOWN to be sure that
-	 * they don't block MAD packets */
-	if (linkcmd == INFINIPATH_IBCC_LINKCMD_DOWN)
-		ipath_cancel_sends(dd, 1);
+	if (linitcmd == INFINIPATH_IBCC_LINKINITCMD_DISABLE) {
+		/*
+		 * If we are told to disable, note that so link-recovery
+		 * code does not attempt to bring us back up.
+		 */
+		preempt_disable();
+		dd->ipath_flags |= IPATH_IB_LINK_DISABLED;
+		preempt_enable();
+	} else if (linitcmd) {
+		/*
+		 * Any other linkinitcmd will lead to LINKDOWN and then
+		 * to INIT (if all is well), so clear flag to let
+		 * link-recovery code attempt to bring us back up.
+		 */
+		preempt_disable();
+		dd->ipath_flags &= ~IPATH_IB_LINK_DISABLED;
+		preempt_enable();
+	}
+
+	mod_wd = (linkcmd << dd->ibcc_lc_shift) |
+		(linitcmd << INFINIPATH_IBCC_LINKINITCMD_SHIFT);
+	ipath_cdbg(VERBOSE,
+		"Moving unit %u to %s (initcmd=0x%x), current ltstate is %s\n",
+		dd->ipath_unit, what[linkcmd], linitcmd,
+		ipath_ibcstatus_str[ipath_ib_linktrstate(dd,
+			ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus))]);
 
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
-			 dd->ipath_ibcctrl | which);
+			 dd->ipath_ibcctrl | mod_wd);
+	/* read from chip so write is flushed */
+	(void) ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
 }
 
 int ipath_set_linkstate(struct ipath_devdata *dd, u8 newstate)
@@ -1687,30 +1928,28 @@
 
 	switch (newstate) {
 	case IPATH_IB_LINKDOWN_ONLY:
-		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN <<
-				    INFINIPATH_IBCC_LINKCMD_SHIFT);
+		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN, 0);
 		/* don't wait */
 		ret = 0;
 		goto bail;
 
 	case IPATH_IB_LINKDOWN:
-		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_POLL <<
-				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
+		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
+					INFINIPATH_IBCC_LINKINITCMD_POLL);
 		/* don't wait */
 		ret = 0;
 		goto bail;
 
 	case IPATH_IB_LINKDOWN_SLEEP:
-		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_SLEEP <<
-				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
+		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
+					INFINIPATH_IBCC_LINKINITCMD_SLEEP);
 		/* don't wait */
 		ret = 0;
 		goto bail;
 
 	case IPATH_IB_LINKDOWN_DISABLE:
-		ipath_set_ib_lstate(dd,
-				    INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
-				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
+		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
+					INFINIPATH_IBCC_LINKINITCMD_DISABLE);
 		/* don't wait */
 		ret = 0;
 		goto bail;
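
ipath_set_ib_lstate() now takes the link command and the link-init command as separate, unshifted arguments and does the shifting itself (using the per-chip ibcc_lc_shift); compare the two calling styles from this hunk:

	/* before: caller pre-shifted the command into a single word */
	ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_POLL <<
				INFINIPATH_IBCC_LINKINITCMD_SHIFT);

	/* after: both commands passed raw, combined inside the helper */
	ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
				INFINIPATH_IBCC_LINKINITCMD_POLL);
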
@@ -1725,8 +1964,8 @@
 			ret = -EINVAL;
 			goto bail;
 		}
-		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED <<
-				    INFINIPATH_IBCC_LINKCMD_SHIFT);
+		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED, 0);
+
 		/*
 		 * Since the port can transition to ACTIVE by receiving
 		 * a non VL 15 packet, wait for either state.
@@ -1743,8 +1982,7 @@
 			ret = -EINVAL;
 			goto bail;
 		}
-		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE <<
-				    INFINIPATH_IBCC_LINKCMD_SHIFT);
+		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE, 0);
 		lstate = IPATH_LINKACTIVE;
 		break;
 
@@ -1753,16 +1991,41 @@
 		dd->ipath_ibcctrl |= INFINIPATH_IBCC_LOOPBACK;
 		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
 				 dd->ipath_ibcctrl);
+
+		/* turn heartbeat off, as it causes loopback to fail */
+		dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
+				       IPATH_IB_HRTBT_OFF);
+		/* don't wait */
 		ret = 0;
-		goto bail; // no state change to wait for
+		goto bail;
 
 	case IPATH_IB_LINK_EXTERNAL:
-		dev_info(&dd->pcidev->dev, "Disabling IB local loopback (normal)\n");
+		dev_info(&dd->pcidev->dev,
+			"Disabling IB local loopback (normal)\n");
+		dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
+				       IPATH_IB_HRTBT_ON);
 		dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LOOPBACK;
 		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
 				 dd->ipath_ibcctrl);
+		/* don't wait */
 		ret = 0;
-		goto bail; // no state change to wait for
+		goto bail;
+
+	/*
+	 * Heartbeat can be explicitly enabled by the user via the
+	 * "hrtbt_enable" file, and if disabled, trying to enable here
+	 * will have no effect.  Implicit changes (heartbeat off when
+	 * loopback on, and vice versa) are included to ease testing.
+	 */
+	case IPATH_IB_LINK_HRTBT:
+		ret = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
+			IPATH_IB_HRTBT_ON);
+		goto bail;
+
+	case IPATH_IB_LINK_NO_HRTBT:
+		ret = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
+			IPATH_IB_HRTBT_OFF);
+		goto bail;
 
 	default:
 		ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
@@ -1785,7 +2048,7 @@
  * sanity checking on this, and we don't deal with what happens to
  * programs that are already running when the size changes.
  * NOTE: changing the MTU will usually cause the IBC to go back to
- * link initialize (IPATH_IBSTATE_INIT) state...
+ * link INIT state...
  */
 int ipath_set_mtu(struct ipath_devdata *dd, u16 arg)
 {
@@ -1800,7 +2063,7 @@
 	 * piosize).  We check that it's one of the valid IB sizes.
 	 */
 	if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
-	    arg != 4096) {
+	    (arg != 4096 || !ipath_mtu4096)) {
 		ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
 		ret = -EINVAL;
 		goto bail;
@@ -1816,6 +2079,8 @@
 	if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
 		/* Only if it's not the initial value (or reset to it) */
 		if (piosize != dd->ipath_init_ibmaxlen) {
+			if (arg > piosize && arg <= dd->ipath_init_ibmaxlen)
+				piosize = dd->ipath_init_ibmaxlen;
 			dd->ipath_ibmaxlen = piosize;
 			changed = 1;
 		}
@@ -1829,24 +2094,17 @@
 	}
 
 	if (changed) {
+		u64 ibc = dd->ipath_ibcctrl, ibdw;
 		/*
-		 * set the IBC maxpktlength to the size of our pio
-		 * buffers in words
+		 * update our housekeeping variables, and set IBC max
+		 * size, same as init code; max IBC is max we allow in
+		 * buffer, less the qword pbc, plus 1 for ICRC, in dwords
 		 */
-		u64 ibc = dd->ipath_ibcctrl;
+		dd->ipath_ibmaxlen = piosize - 2 * sizeof(u32);
+		ibdw = (dd->ipath_ibmaxlen >> 2) + 1;
 		ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
-			 INFINIPATH_IBCC_MAXPKTLEN_SHIFT);
-
-		piosize = piosize - 2 * sizeof(u32);    /* ignore pbc */
-		dd->ipath_ibmaxlen = piosize;
-		piosize /= sizeof(u32); /* in words */
-		/*
-		 * for ICRC, which we only send in diag test pkt mode, and
-		 * we don't need to worry about that for mtu
-		 */
-		piosize += 1;
-
-		ibc |= piosize << INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
+			 dd->ibcc_mpl_shift);
+		ibc |= ibdw << dd->ibcc_mpl_shift;
 		dd->ipath_ibcctrl = ibc;
 		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
 				 dd->ipath_ibcctrl);
@@ -1859,11 +2117,16 @@
 	return ret;
 }
 
-int ipath_set_lid(struct ipath_devdata *dd, u32 arg, u8 lmc)
+int ipath_set_lid(struct ipath_devdata *dd, u32 lid, u8 lmc)
 {
-	dd->ipath_lid = arg;
+	dd->ipath_lid = lid;
 	dd->ipath_lmc = lmc;
 
+	dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LIDLMC, lid |
+		(~((1U << lmc) - 1)) << 16);
+
+	dev_info(&dd->pcidev->dev, "We got a lid: 0x%x\n", lid);
+
 	return 0;
 }
 
@@ -1925,10 +2188,8 @@
 	 * but leave that to per-chip functions.
 	 */
 	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
-	ltstate = (val >> INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
-		  INFINIPATH_IBCS_LINKTRAININGSTATE_MASK;
-	lstate = (val >> INFINIPATH_IBCS_LINKSTATE_SHIFT) &
-		 INFINIPATH_IBCS_LINKSTATE_MASK;
+	ltstate = ipath_ib_linktrstate(dd, val);
+	lstate = ipath_ib_linkstate(dd, val);
 
 	dd->ipath_f_setextled(dd, lstate, ltstate);
 	mod_timer(&dd->ipath_led_override_timer, jiffies + timeoff);
@@ -1969,9 +2230,8 @@
 		dd->ipath_led_override_timer.data = (unsigned long) dd;
 		dd->ipath_led_override_timer.expires = jiffies + 1;
 		add_timer(&dd->ipath_led_override_timer);
-	} else {
+	} else
 		atomic_dec(&dd->ipath_led_override_timer_active);
-	}
 }
 
 /**
@@ -1989,6 +2249,8 @@
 
 	ipath_dbg("Shutting down the device\n");
 
+	ipath_hol_up(dd); /* make sure user processes aren't suspended */
+
 	dd->ipath_flags |= IPATH_LINKUNK;
 	dd->ipath_flags &= ~(IPATH_INITTED | IPATH_LINKDOWN |
 			     IPATH_LINKINIT | IPATH_LINKARMED |
@@ -2003,6 +2265,9 @@
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
 			 dd->ipath_rcvctrl);
 
+	if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
+		teardown_sdma(dd);
+
 	/*
 	 * gracefully stop all sends allowing any in progress to trickle out
 	 * first.
@@ -2020,10 +2285,16 @@
 	 */
 	udelay(5);
 
-	ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
-			    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
+	dd->ipath_f_setextled(dd, 0, 0); /* make sure LEDs are off */
+
+	ipath_set_ib_lstate(dd, 0, INFINIPATH_IBCC_LINKINITCMD_DISABLE);
 	ipath_cancel_sends(dd, 0);
 
+	/*
+	 * we are shutting down, so tell components that care.  We don't do
+	 * this on just a link state change; much as with ethernet, a
+	 * cable unplug, etc. doesn't change driver state
+	 */
 	signal_ib_event(dd, IB_EVENT_PORT_ERR);
 
 	/* disable IBC */
@@ -2038,10 +2309,20 @@
 	 */
 	dd->ipath_f_quiet_serdes(dd);
 
+	/* stop all the timers that might still be running */
+	del_timer_sync(&dd->ipath_hol_timer);
 	if (dd->ipath_stats_timer_active) {
 		del_timer_sync(&dd->ipath_stats_timer);
 		dd->ipath_stats_timer_active = 0;
 	}
+	if (dd->ipath_intrchk_timer.data) {
+		del_timer_sync(&dd->ipath_intrchk_timer);
+		dd->ipath_intrchk_timer.data = 0;
+	}
+	if (atomic_read(&dd->ipath_led_override_timer_active)) {
+		del_timer_sync(&dd->ipath_led_override_timer);
+		atomic_set(&dd->ipath_led_override_timer_active, 0);
+	}
 
 	/*
 	 * clear all interrupts and errors, so that the next time the driver
@@ -2115,13 +2396,13 @@
 		ipath_cdbg(VERBOSE, "free closed port %d "
 			   "ipath_port0_skbinfo @ %p\n", pd->port_port,
 			   skbinfo);
-		for (e = 0; e < dd->ipath_rcvegrcnt; e++)
-		if (skbinfo[e].skb) {
-			pci_unmap_single(dd->pcidev, skbinfo[e].phys,
-					 dd->ipath_ibmaxlen,
-					 PCI_DMA_FROMDEVICE);
-			dev_kfree_skb(skbinfo[e].skb);
-		}
+		for (e = 0; e < dd->ipath_p0_rcvegrcnt; e++)
+			if (skbinfo[e].skb) {
+				pci_unmap_single(dd->pcidev, skbinfo[e].phys,
+						 dd->ipath_ibmaxlen,
+						 PCI_DMA_FROMDEVICE);
+				dev_kfree_skb(skbinfo[e].skb);
+			}
 		vfree(skbinfo);
 	}
 	kfree(pd->port_tid_pg_list);
@@ -2144,6 +2425,7 @@
 	 */
 	idr_init(&unit_table);
 	if (!idr_pre_get(&unit_table, GFP_KERNEL)) {
+		printk(KERN_ERR IPATH_DRV_NAME ": idr_pre_get() failed\n");
 		ret = -ENOMEM;
 		goto bail;
 	}
@@ -2235,13 +2517,18 @@
 			}
 		}
 
+	if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
+		teardown_sdma(dd);
+
 	dd->ipath_flags &= ~IPATH_INITTED;
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
 	ret = dd->ipath_f_reset(dd);
-	if (ret != 1)
-		ipath_dbg("reset was not successful\n");
-	ipath_dbg("Trying to reinitialize unit %u after reset attempt\n",
-		  unit);
-	ret = ipath_init_chip(dd, 1);
+	if (ret == 1) {
+		ipath_dbg("Reinitializing unit %u after reset attempt\n",
+			  unit);
+		ret = ipath_init_chip(dd, 1);
+	} else
+		ret = -EAGAIN;
 	if (ret)
 		ipath_dev_err(dd, "Reinitialize unit %u after "
 			      "reset failed with %d\n", unit, ret);
@@ -2253,13 +2540,121 @@
 	return ret;
 }
 
+/*
+ * send a signal to all the processes that have the driver open
+ * through the normal interfaces (i.e., everything other than the
+ * diags interface).  Returns the number of signalled processes.
+ */
+static int ipath_signal_procs(struct ipath_devdata *dd, int sig)
+{
+	int i, sub, any = 0;
+	pid_t pid;
+
+	if (!dd->ipath_pd)
+		return 0;
+	for (i = 1; i < dd->ipath_cfgports; i++) {
+		if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt ||
+		    !dd->ipath_pd[i]->port_pid)
+			continue;
+		pid = dd->ipath_pd[i]->port_pid;
+		dev_info(&dd->pcidev->dev, "context %d in use "
+			  "(PID %u), sending signal %d\n",
+			  i, pid, sig);
+		kill_proc(pid, sig, 1);
+		any++;
+		for (sub = 0; sub < INFINIPATH_MAX_SUBPORT; sub++) {
+			pid = dd->ipath_pd[i]->port_subpid[sub];
+			if (!pid)
+				continue;
+			dev_info(&dd->pcidev->dev, "sub-context "
+				"%d:%d in use (PID %u), sending "
+				"signal %d\n", i, sub, pid, sig);
+			kill_proc(pid, sig, 1);
+			any++;
+		}
+	}
+	return any;
+}
+
+static void ipath_hol_signal_down(struct ipath_devdata *dd)
+{
+	if (ipath_signal_procs(dd, SIGSTOP))
+		ipath_dbg("Stopped some processes\n");
+	ipath_cancel_sends(dd, 1);
+}
+
+
+static void ipath_hol_signal_up(struct ipath_devdata *dd)
+{
+	if (ipath_signal_procs(dd, SIGCONT))
+		ipath_dbg("Continued some processes\n");
+}
+
+/*
+ * The link is down: stop any user processes and flush pending sends
+ * to prevent HoL blocking, then start the HoL timer, which
+ * periodically continues and then re-stops those processes, so they
+ * can detect the link going down if they want, and do something
+ * about it.  The timer may already be running, so use __mod_timer,
+ * not add_timer.
+ */
+void ipath_hol_down(struct ipath_devdata *dd)
+{
+	dd->ipath_hol_state = IPATH_HOL_DOWN;
+	ipath_hol_signal_down(dd);
+	dd->ipath_hol_next = IPATH_HOL_DOWNCONT;
+	dd->ipath_hol_timer.expires = jiffies +
+		msecs_to_jiffies(ipath_hol_timeout_ms);
+	__mod_timer(&dd->ipath_hol_timer, dd->ipath_hol_timer.expires);
+}
+
+/*
+ * The link is up: continue any user processes, and ensure the timer
+ * is a nop if it is running.  Let the timer keep running, if set; it
+ * will nop when it sees the link is up.
+ */
+void ipath_hol_up(struct ipath_devdata *dd)
+{
+	ipath_hol_signal_up(dd);
+	dd->ipath_hol_state = IPATH_HOL_UP;
+}
+
+/*
+ * Toggle the running/not-running state of user processes
+ * to prevent HoL blocking on chip resources, while still allowing
+ * user processes to do their link-down special-case handling.
+ * Should only be called via the timer.
+ */
+void ipath_hol_event(unsigned long opaque)
+{
+	struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
+
+	if (dd->ipath_hol_next == IPATH_HOL_DOWNSTOP
+		&& dd->ipath_hol_state != IPATH_HOL_UP) {
+		dd->ipath_hol_next = IPATH_HOL_DOWNCONT;
+		ipath_dbg("Stopping processes\n");
+		ipath_hol_signal_down(dd);
+	} else { /* may do "extra" if also in ipath_hol_up() */
+		dd->ipath_hol_next = IPATH_HOL_DOWNSTOP;
+		ipath_dbg("Continuing processes\n");
+		ipath_hol_signal_up(dd);
+	}
+	if (dd->ipath_hol_state == IPATH_HOL_UP)
+		ipath_dbg("link's up, don't resched timer\n");
+	else {
+		dd->ipath_hol_timer.expires = jiffies +
+			msecs_to_jiffies(ipath_hol_timeout_ms);
+		__mod_timer(&dd->ipath_hol_timer,
+			dd->ipath_hol_timer.expires);
+	}
+}
+
 int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv)
 {
 	u64 val;
-	if ( new_pol_inv > INFINIPATH_XGXS_RX_POL_MASK ) {
+
+	if (new_pol_inv > INFINIPATH_XGXS_RX_POL_MASK)
 		return -1;
-	}
-	if ( dd->ipath_rx_pol_inv != new_pol_inv ) {
+	if (dd->ipath_rx_pol_inv != new_pol_inv) {
 		dd->ipath_rx_pol_inv = new_pol_inv;
 		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
 		val &= ~(INFINIPATH_XGXS_RX_POL_MASK <<
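[Editor's note: the HoL code above alternates SIGSTOP/SIGCONT against every PID that has a port open, rescheduling the timer until the link comes back. Below is a minimal user-space sketch of that stop/continue toggle, assuming a hypothetical child process and timeout; the driver itself does this from a kernel timer via kill_proc().]

#include <signal.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

/* Alternate stop/continue, as ipath_hol_event() does via ipath_hol_next. */
static void hol_toggle(pid_t pid, int cycles, unsigned timeout_ms)
{
	int stopped = 0;

	while (cycles--) {
		stopped = !stopped;
		if (kill(pid, stopped ? SIGSTOP : SIGCONT))
			perror("kill");
		usleep(timeout_ms * 1000);	/* stands in for the HoL timer */
	}
	kill(pid, SIGCONT);			/* leave the process runnable */
}

int main(void)
{
	pid_t pid = fork();

	if (pid == 0) {		/* child: idle, playing the user process */
		for (;;)
			pause();
	}
	hol_toggle(pid, 4, 500);
	kill(pid, SIGKILL);
	return 0;
}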
diff --git a/drivers/infiniband/hw/ipath/ipath_eeprom.c b/drivers/infiniband/hw/ipath/ipath_eeprom.c
index e28a42f..dc37277 100644
--- a/drivers/infiniband/hw/ipath/ipath_eeprom.c
+++ b/drivers/infiniband/hw/ipath/ipath_eeprom.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -62,6 +62,33 @@
  * accessing eeprom contents from within the kernel, only via sysfs.
  */
 
+/* Added functionality for IBA7220-based cards */
+#define IPATH_EEPROM_DEV_V1 0xA0
+#define IPATH_EEPROM_DEV_V2 0xA2
+#define IPATH_TEMP_DEV 0x98
+#define IPATH_BAD_DEV (IPATH_EEPROM_DEV_V2+2)
+#define IPATH_NO_DEV (0xFF)
+
+/*
+ * The number of I2C chains is proliferating. The table below brings
+ * some order to the madness. The basic principle is that the
+ * table is scanned from the top, and a "probe" is made to the
+ * device probe_dev. If that succeeds, the chain is considered
+ * to be of that type, and dd->i2c_chain_type is set to the index+1
+ * of the entry.
+ * The +1 is so static initialization can mean "unknown, do probe."
+ */
+static struct i2c_chain_desc {
+	u8 probe_dev;	/* If seen at probe, chain is this type */
+	u8 eeprom_dev;	/* Dev addr (if any) for EEPROM */
+	u8 temp_dev;	/* Dev addr (if any) for Temp-sense */
+} i2c_chains[] = {
+	{ IPATH_BAD_DEV, IPATH_NO_DEV, IPATH_NO_DEV }, /* pre-iba7220 bds */
+	{ IPATH_EEPROM_DEV_V1, IPATH_EEPROM_DEV_V1, IPATH_TEMP_DEV}, /* V1 */
+	{ IPATH_EEPROM_DEV_V2, IPATH_EEPROM_DEV_V2, IPATH_TEMP_DEV}, /* V2 */
+	{ IPATH_NO_DEV }
+};
+
 enum i2c_type {
 	i2c_line_scl = 0,
 	i2c_line_sda
@@ -75,13 +102,6 @@
 #define READ_CMD 1
 #define WRITE_CMD 0
 
-static int eeprom_init;
-
-/*
- * The gpioval manipulation really should be protected by spinlocks
- * or be converted to use atomic operations.
- */
-
 /**
  * i2c_gpio_set - set a GPIO line
  * @dd: the infinipath device
@@ -241,6 +261,27 @@
 }
 
 /**
+ * rd_byte - read a byte, leaving ACK, STOP, etc up to caller
+ * @dd: the infinipath device
+ *
+ * Returns byte shifted out of device
+ */
+static int rd_byte(struct ipath_devdata *dd)
+{
+	int bit_cntr, data;
+
+	data = 0;
+
+	for (bit_cntr = 7; bit_cntr >= 0; --bit_cntr) {
+		data <<= 1;
+		scl_out(dd, i2c_line_high);
+		data |= sda_in(dd, 0);
+		scl_out(dd, i2c_line_low);
+	}
+	return data;
+}
+
+/**
  * wr_byte - write a byte, one bit at a time
  * @dd: the infinipath device
  * @data: the byte to write
@@ -331,7 +372,6 @@
 	ipath_cdbg(VERBOSE, "Resetting i2c eeprom; initial gpioout reg "
 		   "is %llx\n", (unsigned long long) *gpioval);
 
-	eeprom_init = 1;
 	/*
 	 * This is to get the i2c into a known state, by first going low,
 	 * then tristate sda (and then tristate scl as first thing
@@ -340,12 +380,17 @@
 	scl_out(dd, i2c_line_low);
 	sda_out(dd, i2c_line_high);
 
+	/* Clock up to 9 cycles looking for SDA hi, then issue START and STOP */
 	while (clock_cycles_left--) {
 		scl_out(dd, i2c_line_high);
 
+		/* SDA seen high, issue START by dropping it while SCL high */
 		if (sda_in(dd, 0)) {
 			sda_out(dd, i2c_line_low);
 			scl_out(dd, i2c_line_low);
+			/* ATMEL spec says must be followed by STOP. */
+			scl_out(dd, i2c_line_high);
+			sda_out(dd, i2c_line_high);
 			ret = 0;
 			goto bail;
 		}
@@ -359,29 +404,121 @@
 	return ret;
 }
 
-/**
- * ipath_eeprom_read - receives bytes from the eeprom via I2C
- * @dd: the infinipath device
- * @eeprom_offset: address to read from
- * @buffer: where to store result
- * @len: number of bytes to receive
+/*
+ * Probe for I2C device at specified address. Returns 0 for "success"
+ * to match rest of this file.
+ * Leave bus in "reasonable" state for further commands.
  */
+static int i2c_probe(struct ipath_devdata *dd, int devaddr)
+{
+	int ret = 0;
+
+	ret = eeprom_reset(dd);
+	if (ret) {
+		ipath_dev_err(dd, "Failed reset probing device 0x%02X\n",
+			      devaddr);
+		return ret;
+	}
+	/*
+	 * Reset no longer leaves bus in start condition, so normal
+	 * i2c_startcmd() will do.
+	 */
+	ret = i2c_startcmd(dd, devaddr | READ_CMD);
+	if (ret)
+		ipath_cdbg(VERBOSE, "Failed startcmd for device 0x%02X\n",
+			   devaddr);
+	else {
+		/*
+		 * Device did respond. Complete a single-byte read, because some
+		 * devices apparently cannot handle STOP immediately after they
+		 * ACK the start-cmd.
+		 */
+		int data;
+		data = rd_byte(dd);
+		stop_cmd(dd);
+		ipath_cdbg(VERBOSE, "Response from device 0x%02X\n", devaddr);
+	}
+	return ret;
+}
+
+/*
+ * Returns the "i2c type": a pointer to a struct that describes the
+ * I2C chain on this board.  To minimize the impact on struct
+ * ipath_devdata, the (small integer) index into the table is memoized
+ * rather than the pointer itself.  Memoization is used because the
+ * type is determined on the first call per chip.  An alternative
+ * would be to move type determination to early init code.
+ */
+static struct i2c_chain_desc *ipath_i2c_type(struct ipath_devdata *dd)
+{
+	int idx;
+
+	/* Get memoized index, from previous successful probes */
+	idx = dd->ipath_i2c_chain_type - 1;
+	if (idx >= 0 && idx < (ARRAY_SIZE(i2c_chains) - 1))
+		goto done;
+
+	idx = 0;
+	while (i2c_chains[idx].probe_dev != IPATH_NO_DEV) {
+		/* if probe succeeds, this is type */
+		if (!i2c_probe(dd, i2c_chains[idx].probe_dev))
+			break;
+		++idx;
+	}
+
+	/*
+	 * Old EEPROM (first entry) may require a reset after probe,
+	 * rather than being able to "start" after "stop"
+	 */
+	if (idx == 0)
+		eeprom_reset(dd);
+
+	if (i2c_chains[idx].probe_dev == IPATH_NO_DEV)
+		idx = -1;
+	else
+		dd->ipath_i2c_chain_type = idx + 1;
+done:
+	return (idx >= 0) ? i2c_chains + idx : NULL;
+}
 
 static int ipath_eeprom_internal_read(struct ipath_devdata *dd,
 					u8 eeprom_offset, void *buffer, int len)
 {
-	/* compiler complains unless initialized */
-	u8 single_byte = 0;
-	int bit_cntr;
 	int ret;
+	struct i2c_chain_desc *icd;
+	u8 *bp = buffer;
 
-	if (!eeprom_init)
-		eeprom_reset(dd);
+	ret = 1;
+	icd = ipath_i2c_type(dd);
+	if (!icd)
+		goto bail;
 
-	eeprom_offset = (eeprom_offset << 1) | READ_CMD;
-
-	if (i2c_startcmd(dd, eeprom_offset)) {
-		ipath_dbg("Failed startcmd\n");
+	if (icd->eeprom_dev == IPATH_NO_DEV) {
+		/* legacy not-really-I2C */
+		ipath_cdbg(VERBOSE, "Start command only address\n");
+		eeprom_offset = (eeprom_offset << 1) | READ_CMD;
+		ret = i2c_startcmd(dd, eeprom_offset);
+	} else {
+		/* Actual I2C */
+		ipath_cdbg(VERBOSE, "Start command uses devaddr\n");
+		if (i2c_startcmd(dd, icd->eeprom_dev | WRITE_CMD)) {
+			ipath_dbg("Failed EEPROM startcmd\n");
+			stop_cmd(dd);
+			ret = 1;
+			goto bail;
+		}
+		ret = wr_byte(dd, eeprom_offset);
+		stop_cmd(dd);
+		if (ret) {
+			ipath_dev_err(dd, "Failed to write EEPROM address\n");
+			ret = 1;
+			goto bail;
+		}
+		ret = i2c_startcmd(dd, icd->eeprom_dev | READ_CMD);
+	}
+	if (ret) {
+		ipath_dbg("Failed startcmd for dev %02X\n", icd->eeprom_dev);
 		stop_cmd(dd);
 		ret = 1;
 		goto bail;
@@ -392,22 +529,11 @@
 	 * incrementing the address.
 	 */
 	while (len-- > 0) {
-		/* get data */
-		single_byte = 0;
-		for (bit_cntr = 8; bit_cntr; bit_cntr--) {
-			u8 bit;
-			scl_out(dd, i2c_line_high);
-			bit = sda_in(dd, 0);
-			single_byte |= bit << (bit_cntr - 1);
-			scl_out(dd, i2c_line_low);
-		}
-
+		/* get and store data */
+		*bp++ = rd_byte(dd);
 		/* send ack if not the last byte */
 		if (len)
 			send_ack(dd);
-
-		*((u8 *) buffer) = single_byte;
-		buffer++;
 	}
 
 	stop_cmd(dd);
@@ -418,31 +544,40 @@
 	return ret;
 }
 
-
-/**
- * ipath_eeprom_write - writes data to the eeprom via I2C
- * @dd: the infinipath device
- * @eeprom_offset: where to place data
- * @buffer: data to write
- * @len: number of bytes to write
- */
 static int ipath_eeprom_internal_write(struct ipath_devdata *dd, u8 eeprom_offset,
 				       const void *buffer, int len)
 {
-	u8 single_byte;
 	int sub_len;
 	const u8 *bp = buffer;
 	int max_wait_time, i;
 	int ret;
+	struct i2c_chain_desc *icd;
 
-	if (!eeprom_init)
-		eeprom_reset(dd);
+	ret = 1;
+	icd = ipath_i2c_type(dd);
+	if (!icd)
+		goto bail;
 
 	while (len > 0) {
-		if (i2c_startcmd(dd, (eeprom_offset << 1) | WRITE_CMD)) {
-			ipath_dbg("Failed to start cmd offset %u\n",
-				  eeprom_offset);
-			goto failed_write;
+		if (icd->eeprom_dev == IPATH_NO_DEV) {
+			if (i2c_startcmd(dd,
+					 (eeprom_offset << 1) | WRITE_CMD)) {
+				ipath_dbg("Failed to start cmd offset %u\n",
+					eeprom_offset);
+				goto failed_write;
+			}
+		} else {
+			/* Real I2C */
+			if (i2c_startcmd(dd, icd->eeprom_dev | WRITE_CMD)) {
+				ipath_dbg("Failed EEPROM startcmd\n");
+				goto failed_write;
+			}
+			ret = wr_byte(dd, eeprom_offset);
+			if (ret) {
+				ipath_dev_err(dd, "Failed to write EEPROM "
+					      "address\n");
+				goto failed_write;
+			}
 		}
 
 		sub_len = min(len, 4);
@@ -468,9 +603,11 @@
 		 * the writes have completed.   We do this inline to avoid
 		 * the debug prints that are in the real read routine
 		 * if the startcmd fails.
+		 * We also use the proper device address, so it doesn't matter
+		 * whether we have a real eeprom_dev; legacy accepts any address.
 		 */
 		max_wait_time = 100;
-		while (i2c_startcmd(dd, READ_CMD)) {
+		while (i2c_startcmd(dd, icd->eeprom_dev | READ_CMD)) {
 			stop_cmd(dd);
 			if (!--max_wait_time) {
 				ipath_dbg("Did not get successful read to "
@@ -478,15 +615,8 @@
 				goto failed_write;
 			}
 		}
-		/* now read the zero byte */
-		for (i = single_byte = 0; i < 8; i++) {
-			u8 bit;
-			scl_out(dd, i2c_line_high);
-			bit = sda_in(dd, 0);
-			scl_out(dd, i2c_line_low);
-			single_byte <<= 1;
-			single_byte |= bit;
-		}
+		/* now read (and ignore) the resulting byte */
+		rd_byte(dd);
 		stop_cmd(dd);
 	}
 
@@ -501,9 +631,12 @@
 	return ret;
 }
 
-/*
- * The public entry-points ipath_eeprom_read() and ipath_eeprom_write()
- * are now just wrappers around the internal functions.
+/**
+ * ipath_eeprom_read - receives bytes from the eeprom via I2C
+ * @dd: the infinipath device
+ * @eeprom_offset: address to read from
+ * @buffer: where to store result
+ * @len: number of bytes to receive
  */
 int ipath_eeprom_read(struct ipath_devdata *dd, u8 eeprom_offset,
 			void *buff, int len)
@@ -519,6 +652,13 @@
 	return ret;
 }
 
+/**
+ * ipath_eeprom_write - writes data to the eeprom via I2C
+ * @dd: the infinipath device
+ * @eeprom_offset: where to place data
+ * @buffer: data to write
+ * @len: number of bytes to write
+ */
 int ipath_eeprom_write(struct ipath_devdata *dd, u8 eeprom_offset,
 			const void *buff, int len)
 {
@@ -820,7 +960,7 @@
 	 * if we log an hour at 31 minutes, then we would need to set
 	 * active_time to -29 to accurately count the _next_ hour.
 	 */
-	if (new_time > 3600) {
+	if (new_time >= 3600) {
 		new_hrs = new_time / 3600;
 		atomic_sub((new_hrs * 3600), &dd->ipath_active_time);
 		new_hrs += dd->ipath_eep_hrs;
@@ -885,3 +1025,159 @@
 	spin_unlock_irqrestore(&dd->ipath_eep_st_lock, flags);
 	return;
 }
+
+static int ipath_tempsense_internal_read(struct ipath_devdata *dd, u8 regnum)
+{
+	int ret;
+	struct i2c_chain_desc *icd;
+
+	ret = -ENOENT;
+
+	icd = ipath_i2c_type(dd);
+	if (!icd)
+		goto bail;
+
+	if (icd->temp_dev == IPATH_NO_DEV) {
+		/* tempsense only exists on new, real-I2C boards */
+		ret = -ENXIO;
+		goto bail;
+	}
+
+	if (i2c_startcmd(dd, icd->temp_dev | WRITE_CMD)) {
+		ipath_dbg("Failed tempsense startcmd\n");
+		stop_cmd(dd);
+		ret = -ENXIO;
+		goto bail;
+	}
+	ret = wr_byte(dd, regnum);
+	stop_cmd(dd);
+	if (ret) {
+		ipath_dev_err(dd, "Failed tempsense WR command %02X\n",
+			      regnum);
+		ret = -ENXIO;
+		goto bail;
+	}
+	if (i2c_startcmd(dd, icd->temp_dev | READ_CMD)) {
+		ipath_dbg("Failed tempsense RD startcmd\n");
+		stop_cmd(dd);
+		ret = -ENXIO;
+		goto bail;
+	}
+	/*
+	 * We can only clock out one byte per command, sensibly.
+	 */
+	ret = rd_byte(dd);
+	stop_cmd(dd);
+
+bail:
+	return ret;
+}
+
+#define VALID_TS_RD_REG_MASK 0xBF
+
+/**
+ * ipath_tempsense_read - read register of temp sensor via I2C
+ * @dd: the infinipath device
+ * @regnum: register to read from
+ *
+ * returns reg contents (0..255) or < 0 for error
+ */
+int ipath_tempsense_read(struct ipath_devdata *dd, u8 regnum)
+{
+	int ret;
+
+	if (regnum > 7)
+		return -EINVAL;
+
+	/* return a bogus value for (the one) register we do not have */
+	if (!((1 << regnum) & VALID_TS_RD_REG_MASK))
+		return 0;
+
+	ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
+	if (!ret) {
+		ret = ipath_tempsense_internal_read(dd, regnum);
+		mutex_unlock(&dd->ipath_eep_lock);
+	}
+
+	/*
+	 * There are three possibilities here:
+	 * ret is actual value (0..255)
+	 * ret is -ENXIO or -EINVAL from code in this file
+	 * ret is -EINTR from mutex_lock_interruptible.
+	 */
+	return ret;
+}
+
+static int ipath_tempsense_internal_write(struct ipath_devdata *dd,
+					  u8 regnum, u8 data)
+{
+	int ret = -ENOENT;
+	struct i2c_chain_desc *icd;
+
+	icd = ipath_i2c_type(dd);
+	if (!icd)
+		goto bail;
+
+	if (icd->temp_dev == IPATH_NO_DEV) {
+		/* tempsense only exists on new, real-I2C boards */
+		ret = -ENXIO;
+		goto bail;
+	}
+	if (i2c_startcmd(dd, icd->temp_dev | WRITE_CMD)) {
+		ipath_dbg("Failed tempsense startcmd\n");
+		stop_cmd(dd);
+		ret = -ENXIO;
+		goto bail;
+	}
+	ret = wr_byte(dd, regnum);
+	if (ret) {
+		stop_cmd(dd);
+		ipath_dev_err(dd, "Failed to write tempsense command %02X\n",
+			      regnum);
+		ret = -ENXIO;
+		goto bail;
+	}
+	ret = wr_byte(dd, data);
+	stop_cmd(dd);
+	ret = i2c_startcmd(dd, icd->temp_dev | READ_CMD);
+	if (ret) {
+		ipath_dev_err(dd, "Failed tempsense data wrt to %02X\n",
+			      regnum);
+		ret = -ENXIO;
+	}
+
+bail:
+	return ret;
+}
+
+#define VALID_TS_WR_REG_MASK ((1 << 9) | (1 << 0xB) | (1 << 0xD))
+
+/**
+ * ipath_tempsense_write - write register of temp sensor via I2C
+ * @dd: the infinipath device
+ * @regnum: register to write
+ * @data: data to write
+ *
+ * returns 0 for success or < 0 for error
+ */
+int ipath_tempsense_write(struct ipath_devdata *dd, u8 regnum, u8 data)
+{
+	int ret;
+
+	if (regnum > 15 || !((1 << regnum) & VALID_TS_WR_REG_MASK))
+		return -EINVAL;
+
+	ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
+	if (!ret) {
+		ret = ipath_tempsense_internal_write(dd, regnum, data);
+		mutex_unlock(&dd->ipath_eep_lock);
+	}
+
+	/*
+	 * There are three possibilities here:
+	 * ret is 0 for success
+	 * ret is -ENXIO or -EINVAL from code in this file
+	 * ret is -EINTR from mutex_lock_interruptible.
+	 */
+	return ret;
+}
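[Editor's note: the EEPROM and tempsense paths above are all built on the same bit-banged I2C primitives (rd_byte()/wr_byte() over GPIO-driven SCL/SDA). Below is a stand-alone sketch of the read side; the line accessors and stubs are hypothetical stand-ins for the driver's scl_out()/sda_in().]

#include <stdint.h>

struct i2c_lines {
	void (*scl_set)(int level);	/* drive SCL low (0) or high (1) */
	int  (*sda_get)(void);		/* sample SDA; returns 0 or 1 */
};

/* Clock in one byte, MSB first; SDA is sampled while SCL is high,
 * exactly as rd_byte() does above. */
static uint8_t i2c_rd_byte(const struct i2c_lines *l)
{
	uint8_t data = 0;
	int bit;

	for (bit = 7; bit >= 0; --bit) {
		data <<= 1;
		l->scl_set(1);
		data |= (uint8_t)(l->sda_get() & 1);
		l->scl_set(0);
	}
	return data;	/* ACK/NACK and STOP are left to the caller */
}

/* Hypothetical stubs so the sketch compiles; a real board would
 * toggle and sample GPIO bits instead. */
static void fake_scl(int level) { (void)level; }
static int fake_sda(void) { return 1; }

int main(void)
{
	struct i2c_lines lines = { .scl_set = fake_scl, .sda_get = fake_sda };

	return i2c_rd_byte(&lines) == 0xff ? 0 : 1;
}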
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 7e025c8..1e627aa 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -36,21 +36,28 @@
 #include <linux/cdev.h>
 #include <linux/swap.h>
 #include <linux/vmalloc.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
 #include <asm/pgtable.h>
 
 #include "ipath_kernel.h"
 #include "ipath_common.h"
+#include "ipath_user_sdma.h"
 
 static int ipath_open(struct inode *, struct file *);
 static int ipath_close(struct inode *, struct file *);
 static ssize_t ipath_write(struct file *, const char __user *, size_t,
 			   loff_t *);
+static ssize_t ipath_writev(struct kiocb *, const struct iovec *,
+			    unsigned long, loff_t);
 static unsigned int ipath_poll(struct file *, struct poll_table_struct *);
 static int ipath_mmap(struct file *, struct vm_area_struct *);
 
 static const struct file_operations ipath_file_ops = {
 	.owner = THIS_MODULE,
 	.write = ipath_write,
+	.aio_write = ipath_writev,
 	.open = ipath_open,
 	.release = ipath_close,
 	.poll = ipath_poll,
@@ -184,6 +191,29 @@
 		kinfo->spi_piobufbase = (u64) pd->port_piobufs +
 			dd->ipath_palign * kinfo->spi_piocnt * slave;
 	}
+
+	/*
+	 * Set the PIO avail update threshold to no larger
+	 * than the number of buffers per process. Note that
+	 * we decrease it here, but won't ever increase it.
+	 */
+	if (dd->ipath_pioupd_thresh &&
+	    kinfo->spi_piocnt < dd->ipath_pioupd_thresh) {
+		unsigned long flags;
+
+		dd->ipath_pioupd_thresh = kinfo->spi_piocnt;
+		ipath_dbg("Decreased pio update threshold to %u\n",
+			dd->ipath_pioupd_thresh);
+		spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+		dd->ipath_sendctrl &= ~(INFINIPATH_S_UPDTHRESH_MASK
+			<< INFINIPATH_S_UPDTHRESH_SHIFT);
+		dd->ipath_sendctrl |= dd->ipath_pioupd_thresh
+			<< INFINIPATH_S_UPDTHRESH_SHIFT;
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+			dd->ipath_sendctrl);
+		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
+	}
+
 	if (shared) {
 		kinfo->spi_port_uregbase = (u64) dd->ipath_uregbase +
 			dd->ipath_ureg_align * pd->port_port;
@@ -219,7 +249,12 @@
 	kinfo->spi_pioalign = dd->ipath_palign;
 
 	kinfo->spi_qpair = IPATH_KD_QP;
-	kinfo->spi_piosize = dd->ipath_ibmaxlen;
+	/*
+	 * User mode PIO buffers are always 2KB, even when 4KB packets
+	 * can be received and sent via the kernel; this is ibmaxlen
+	 * for a 2K MTU.
+	 */
+	kinfo->spi_piosize = dd->ipath_piosize2k - 2 * sizeof(u32);
 	kinfo->spi_mtu = dd->ipath_ibmaxlen;	/* maxlen, not ibmtu */
 	kinfo->spi_port = pd->port_port;
 	kinfo->spi_subport = subport_fp(fp);
@@ -1598,6 +1633,9 @@
 		port_fp(fp) = pd;
 		pd->port_pid = current->pid;
 		strncpy(pd->port_comm, current->comm, sizeof(pd->port_comm));
+		ipath_chg_pioavailkernel(dd,
+			dd->ipath_pbufsport * (pd->port_port - 1),
+			dd->ipath_pbufsport, 0);
 		ipath_stats.sps_ports++;
 		ret = 0;
 	} else
@@ -1760,7 +1798,7 @@
 	for (ndev = 0; ndev < devmax; ndev++) {
 		struct ipath_devdata *dd = ipath_lookup(ndev);
 
-		if (!dd)
+		if (!usable(dd))
 			continue;
 		for (i = 1; i < dd->ipath_cfgports; i++) {
 			struct ipath_portdata *pd = dd->ipath_pd[i];
@@ -1839,10 +1877,9 @@
 	if (ipath_compatible_subports(swmajor, swminor) &&
 	    uinfo->spu_subport_cnt &&
 	    (ret = find_shared_port(fp, uinfo))) {
-		mutex_unlock(&ipath_mutex);
 		if (ret > 0)
 			ret = 0;
-		goto done;
+		goto done_chk_sdma;
 	}
 
 	i_minor = iminor(fp->f_path.dentry->d_inode) - IPATH_USER_MINOR_BASE;
@@ -1854,6 +1891,21 @@
 	else
 		ret = find_best_unit(fp, uinfo);
 
+done_chk_sdma:
+	if (!ret) {
+		struct ipath_filedata *fd = fp->private_data;
+		const struct ipath_portdata *pd = fd->pd;
+		const struct ipath_devdata *dd = pd->port_dd;
+
+		fd->pq = ipath_user_sdma_queue_create(&dd->pcidev->dev,
+						      dd->ipath_unit,
+						      pd->port_port,
+						      fd->subport);
+
+		if (!fd->pq)
+			ret = -ENOMEM;
+	}
+
 	mutex_unlock(&ipath_mutex);
 
 done:
@@ -1922,22 +1974,25 @@
 	pd->port_hdrqfull_poll = pd->port_hdrqfull;
 
 	/*
-	 * now enable the port; the tail registers will be written to memory
-	 * by the chip as soon as it sees the write to
-	 * dd->ipath_kregs->kr_rcvctrl.  The update only happens on
-	 * transition from 0 to 1, so clear it first, then set it as part of
-	 * enabling the port.  This will (very briefly) affect any other
-	 * open ports, but it shouldn't be long enough to be an issue.
-	 * We explictly set the in-memory copy to 0 beforehand, so we don't
-	 * have to wait to be sure the DMA update has happened.
+	 * Now enable the port for receive.
+	 * Some chips DMA the tail register to memory when it changes
+	 * (and when the update bit transitions from 0 to 1), so for
+	 * those chips we turn tail updates off and then back on.
+	 * This will (very briefly) affect any other open ports, but the
+	 * duration is very short, and therefore isn't an issue.  We
+	 * explicitly set the in-memory tail copy to 0 beforehand, so we
+	 * don't have to wait to be sure the DMA update has happened
+	 * (the chip resets head/tail to 0 on the transition to enable).
 	 */
-	if (pd->port_rcvhdrtail_kvaddr)
-		ipath_clear_rcvhdrtail(pd);
 	set_bit(dd->ipath_r_portenable_shift + pd->port_port,
 		&dd->ipath_rcvctrl);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
+	if (!(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
+		if (pd->port_rcvhdrtail_kvaddr)
+			ipath_clear_rcvhdrtail(pd);
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
 			dd->ipath_rcvctrl &
 			~(1ULL << dd->ipath_r_tailupd_shift));
+	}
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
 			 dd->ipath_rcvctrl);
 	/* Notify any waiting slaves */
@@ -1965,14 +2020,15 @@
 	ipath_cdbg(VERBOSE, "Port %u unlocking any locked expTID pages\n",
 		   pd->port_port);
 	for (i = port_tidbase; i < maxtid; i++) {
-		if (!dd->ipath_pageshadow[i])
+		struct page *ps = dd->ipath_pageshadow[i];
+
+		if (!ps)
 			continue;
 
+		dd->ipath_pageshadow[i] = NULL;
 		pci_unmap_page(dd->pcidev, dd->ipath_physshadow[i],
 			PAGE_SIZE, PCI_DMA_FROMDEVICE);
-		ipath_release_user_pages_on_close(&dd->ipath_pageshadow[i],
-						  1);
-		dd->ipath_pageshadow[i] = NULL;
+		ipath_release_user_pages_on_close(&ps, 1);
 		cnt++;
 		ipath_stats.sps_pageunlocks++;
 	}
@@ -2007,6 +2063,13 @@
 		mutex_unlock(&ipath_mutex);
 		goto bail;
 	}
+
+	dd = pd->port_dd;
+
+	/* drain user sdma queue */
+	ipath_user_sdma_queue_drain(dd, fd->pq);
+	ipath_user_sdma_queue_destroy(fd->pq);
+
 	if (--pd->port_cnt) {
 		/*
 		 * XXX If the master closes the port before the slave(s),
@@ -2019,7 +2082,6 @@
 		goto bail;
 	}
 	port = pd->port_port;
-	dd = pd->port_dd;
 
 	if (pd->port_hdrqfull) {
 		ipath_cdbg(PROC, "%s[%u] had %u rcvhdrqfull errors "
@@ -2039,7 +2101,7 @@
 			pd->port_rcvnowait = pd->port_pionowait = 0;
 	}
 	if (pd->port_flag) {
-		ipath_dbg("port %u port_flag still set to 0x%lx\n",
+		ipath_cdbg(PROC, "port %u port_flag set: 0x%lx\n",
 			  pd->port_port, pd->port_flag);
 		pd->port_flag = 0;
 	}
@@ -2076,6 +2138,7 @@
 
 		i = dd->ipath_pbufsport * (port - 1);
 		ipath_disarm_piobufs(dd, i, dd->ipath_pbufsport);
+		ipath_chg_pioavailkernel(dd, i, dd->ipath_pbufsport, 1);
 
 		dd->ipath_f_clear_tids(dd, pd->port_port);
 
@@ -2140,17 +2203,31 @@
 	return ret;
 }
 
-static int ipath_force_pio_avail_update(struct ipath_devdata *dd)
+static int ipath_sdma_get_inflight(struct ipath_user_sdma_queue *pq,
+				   u32 __user *inflightp)
 {
-	unsigned long flags;
+	const u32 val = ipath_user_sdma_inflight_counter(pq);
 
-	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-		dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
-	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
-	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
+	if (put_user(val, inflightp))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int ipath_sdma_get_complete(struct ipath_devdata *dd,
+				   struct ipath_user_sdma_queue *pq,
+				   u32 __user *completep)
+{
+	u32 val;
+	int err;
+
+	err = ipath_user_sdma_make_progress(dd, pq);
+	if (err < 0)
+		return err;
+
+	val = ipath_user_sdma_complete_counter(pq);
+	if (put_user(val, completep))
+		return -EFAULT;
 
 	return 0;
 }
@@ -2229,6 +2306,16 @@
 		dest = &cmd.cmd.armlaunch_ctrl;
 		src = &ucmd->cmd.armlaunch_ctrl;
 		break;
+	case IPATH_CMD_SDMA_INFLIGHT:
+		copy = sizeof(cmd.cmd.sdma_inflight);
+		dest = &cmd.cmd.sdma_inflight;
+		src = &ucmd->cmd.sdma_inflight;
+		break;
+	case IPATH_CMD_SDMA_COMPLETE:
+		copy = sizeof(cmd.cmd.sdma_complete);
+		dest = &cmd.cmd.sdma_complete;
+		src = &ucmd->cmd.sdma_complete;
+		break;
 	default:
 		ret = -EINVAL;
 		goto bail;
@@ -2299,7 +2386,7 @@
 					   cmd.cmd.slave_mask_addr);
 		break;
 	case IPATH_CMD_PIOAVAILUPD:
-		ret = ipath_force_pio_avail_update(pd->port_dd);
+		ipath_force_pio_avail_update(pd->port_dd);
 		break;
 	case IPATH_CMD_POLL_TYPE:
 		pd->poll_type = cmd.cmd.poll_type;
@@ -2310,6 +2397,17 @@
 		else
 			ipath_disable_armlaunch(pd->port_dd);
 		break;
+	case IPATH_CMD_SDMA_INFLIGHT:
+		ret = ipath_sdma_get_inflight(user_sdma_queue_fp(fp),
+					      (u32 __user *) (unsigned long)
+					      cmd.cmd.sdma_inflight);
+		break;
+	case IPATH_CMD_SDMA_COMPLETE:
+		ret = ipath_sdma_get_complete(pd->port_dd,
+					      user_sdma_queue_fp(fp),
+					      (u32 __user *) (unsigned long)
+					      cmd.cmd.sdma_complete);
+		break;
 	}
 
 	if (ret >= 0)
@@ -2319,6 +2417,20 @@
 	return ret;
 }
 
+static ssize_t ipath_writev(struct kiocb *iocb, const struct iovec *iov,
+			    unsigned long dim, loff_t off)
+{
+	struct file *filp = iocb->ki_filp;
+	struct ipath_filedata *fp = filp->private_data;
+	struct ipath_portdata *pd = port_fp(filp);
+	struct ipath_user_sdma_queue *pq = fp->pq;
+
+	if (!dim)
+		return -EINVAL;
+
+	return ipath_user_sdma_writev(pd->port_dd, pq, iov, dim);
+}
+
 static struct class *ipath_class;
 
 static int init_cdev(int minor, char *name, const struct file_operations *fops,
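[Editor's note: with .aio_write wired to ipath_writev() above, a user process submits send-DMA work as a vector write on the open device fd. Below is a hypothetical user-space sketch of such a submission; the device path and buffer layout are illustrative assumptions only, as the real ABI is defined by ipath_common.h and the userspace library.]

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	char hdr[64];			/* assumed packet-header buffer */
	char payload[2048];		/* assumed payload buffer */
	struct iovec iov[2] = {
		{ .iov_base = hdr,     .iov_len = sizeof(hdr) },
		{ .iov_base = payload, .iov_len = sizeof(payload) },
	};
	int fd = open("/dev/ipath", O_RDWR);	/* illustrative node name */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(hdr, 0, sizeof(hdr));
	memset(payload, 0, sizeof(payload));
	if (writev(fd, iov, 2) < 0)	/* routed to ipath_writev() */
		perror("writev");
	close(fd);
	return 0;
}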
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6110.c b/drivers/infiniband/hw/ipath/ipath_iba6110.c
index 9e2ced3..02831ad 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba6110.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6110.c
@@ -40,6 +40,7 @@
 #include <linux/pci.h>
 #include <linux/delay.h>
 #include <linux/htirq.h>
+#include <rdma/ib_verbs.h>
 
 #include "ipath_kernel.h"
 #include "ipath_registers.h"
@@ -305,7 +306,9 @@
 
 /* kr_intstatus, kr_intclear, kr_intmask bits */
 #define INFINIPATH_I_RCVURG_MASK ((1U<<9)-1)
+#define INFINIPATH_I_RCVURG_SHIFT 0
 #define INFINIPATH_I_RCVAVAIL_MASK ((1U<<9)-1)
+#define INFINIPATH_I_RCVAVAIL_SHIFT 12
 
 /* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
 #define INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT 0
@@ -476,7 +479,13 @@
 #define RXE_EAGER_PARITY (INFINIPATH_HWE_RXEMEMPARITYERR_EAGERTID \
 			  << INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT)
 
-static int ipath_ht_txe_recover(struct ipath_devdata *);
+static void ipath_ht_txe_recover(struct ipath_devdata *dd)
+{
+	++ipath_stats.sps_txeparity;
+	dev_info(&dd->pcidev->dev,
+		"Recovering from TXE PIO parity error\n");
+}
+
 
 /**
  * ipath_ht_handle_hwerrors - display hardware errors.
@@ -557,11 +566,11 @@
 		 * occur if a processor speculative read is done to the PIO
 		 * buffer while we are sending a packet, for example.
 		 */
-		if ((hwerrs & TXE_PIO_PARITY) && ipath_ht_txe_recover(dd))
+		if (hwerrs & TXE_PIO_PARITY) {
+			ipath_ht_txe_recover(dd);
 			hwerrs &= ~TXE_PIO_PARITY;
-		if (hwerrs & RXE_EAGER_PARITY)
-			ipath_dev_err(dd, "RXE parity, Eager TID error is not "
-				"recoverable\n");
+		}
+
 		if (!hwerrs) {
 			ipath_dbg("Clearing freezemode on ignored or "
 				  "recovered hardware error\n");
@@ -735,11 +744,10 @@
 	 */
 	dd->ipath_flags |= IPATH_32BITCOUNTERS;
 	dd->ipath_flags |= IPATH_GPIO_INTR;
-	if (dd->ipath_htspeed != 800)
+	if (dd->ipath_lbus_speed != 800)
 		ipath_dev_err(dd,
 			      "Incorrectly configured for HT @ %uMHz\n",
-			      dd->ipath_htspeed);
-	ret = 0;
+			      dd->ipath_lbus_speed);
 
 	/*
 	 * set here, not in ipath_init_*_funcs because we have to do
@@ -839,7 +847,7 @@
 			/*
 			 * now write them back to clear the error.
 			 */
-			pci_write_config_byte(pdev, link_off,
+			pci_write_config_word(pdev, link_off,
 					      linkctrl & (0xf << 8));
 		}
 	}
@@ -904,7 +912,7 @@
 			break;
 		}
 
-		dd->ipath_htwidth = width;
+		dd->ipath_lbus_width = width;
 
 		if (linkwidth != 0x11) {
 			ipath_dev_err(dd, "Not configured for 16 bit HT "
@@ -952,8 +960,13 @@
 			speed = 200;
 			break;
 		}
-		dd->ipath_htspeed = speed;
+		dd->ipath_lbus_speed = speed;
 	}
+
+	snprintf(dd->ipath_lbus_info, sizeof(dd->ipath_lbus_info),
+		"HyperTransport,%uMHz,x%u\n",
+		dd->ipath_lbus_speed,
+		dd->ipath_lbus_width);
 }
 
 static int ipath_ht_intconfig(struct ipath_devdata *dd)
@@ -1653,22 +1666,6 @@
 }
 
 
-static int ipath_ht_txe_recover(struct ipath_devdata *dd)
-{
-	int cnt = ++ipath_stats.sps_txeparity;
-	if (cnt >= IPATH_MAX_PARITY_ATTEMPTS)  {
-		if (cnt == IPATH_MAX_PARITY_ATTEMPTS)
-			ipath_dev_err(dd,
-				"Too many attempts to recover from "
-				"TXE parity, giving up\n");
-		return 0;
-	}
-	dev_info(&dd->pcidev->dev,
-		"Recovering from TXE PIO parity error\n");
-	return 1;
-}
-
-
 /**
  * ipath_init_ht_get_base_info - set chip-specific flags for user code
  * @dd: the infinipath device
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6120.c b/drivers/infiniband/hw/ipath/ipath_iba6120.c
index c7a2f50..421cc2a 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba6120.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6120.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -38,7 +38,7 @@
 #include <linux/interrupt.h>
 #include <linux/pci.h>
 #include <linux/delay.h>
-
+#include <rdma/ib_verbs.h>
 
 #include "ipath_kernel.h"
 #include "ipath_registers.h"
@@ -311,9 +311,14 @@
 	.cr_ibsymbolerrcnt = IPATH_CREG_OFFSET(IBSymbolErrCnt)
 };
 
+/* kr_control bits */
+#define INFINIPATH_C_RESET 1U
+
 /* kr_intstatus, kr_intclear, kr_intmask bits */
 #define INFINIPATH_I_RCVURG_MASK ((1U<<5)-1)
+#define INFINIPATH_I_RCVURG_SHIFT 0
 #define INFINIPATH_I_RCVAVAIL_MASK ((1U<<5)-1)
+#define INFINIPATH_I_RCVAVAIL_SHIFT 12
 
 /* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
 #define INFINIPATH_HWE_PCIEMEMPARITYERR_MASK  0x000000000000003fULL
@@ -338,6 +343,9 @@
 #define INFINIPATH_EXTS_MEMBIST_ENDTEST     0x0000000000004000
 #define INFINIPATH_EXTS_MEMBIST_FOUND       0x0000000000008000
 
+/* kr_xgxsconfig bits */
+#define INFINIPATH_XGXS_RESET          0x5ULL
+
 #define _IPATH_GPIO_SDA_NUM 1
 #define _IPATH_GPIO_SCL_NUM 0
 
@@ -346,6 +354,16 @@
 #define IPATH_GPIO_SCL (1ULL << \
 	(_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
 
+#define INFINIPATH_RT_BUFSIZE_MASK 0xe0000000ULL
+#define INFINIPATH_RT_BUFSIZE_SHIFTVAL(tid) \
+	((((tid) & INFINIPATH_RT_BUFSIZE_MASK) >> 29) + 11 - 1)
+#define INFINIPATH_RT_BUFSIZE(tid) (1 << INFINIPATH_RT_BUFSIZE_SHIFTVAL(tid))
+#define INFINIPATH_RT_IS_VALID(tid) \
+	(((tid) & INFINIPATH_RT_BUFSIZE_MASK) && \
+	 ((((tid) & INFINIPATH_RT_BUFSIZE_MASK) != INFINIPATH_RT_BUFSIZE_MASK)))
+#define INFINIPATH_RT_ADDR_MASK 0x1FFFFFFFULL /* 29 bits valid */
+#define INFINIPATH_RT_ADDR_SHIFT 10
+
 #define INFINIPATH_R_INTRAVAIL_SHIFT 16
 #define INFINIPATH_R_TAILUPD_SHIFT 31
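[Editor's note: the RT_BUFSIZE macros above pack log2(bufsize) - 10 into the top three bits of a TID word, so size code 1 means 2KB and code 2 means 4KB, with codes 0 and 7 treated as invalid. A small worked example of the decode, with the macro name shortened here:]

#include <stdint.h>
#include <stdio.h>

#define RT_BUFSIZE_MASK	0xe0000000ULL
/* 1 << (size_code + 10), matching INFINIPATH_RT_BUFSIZE above */
#define RT_BUFSIZE(tid) \
	(1u << ((unsigned)(((tid) & RT_BUFSIZE_MASK) >> 29) + 10))

int main(void)
{
	uint64_t tid2k = 1ULL << 29;	/* size code 1 */
	uint64_t tid4k = 2ULL << 29;	/* size code 2 */

	printf("code 1 -> %u bytes, code 2 -> %u bytes\n",
	       RT_BUFSIZE(tid2k), RT_BUFSIZE(tid4k));	/* 2048, 4096 */
	return 0;
}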
 
@@ -372,6 +390,8 @@
 #define TXE_PIO_PARITY ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | \
 		        INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) \
 		        << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)
+#define RXE_EAGER_PARITY (INFINIPATH_HWE_RXEMEMPARITYERR_EAGERTID \
+			  << INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT)
 
 static void ipath_pe_put_tid_2(struct ipath_devdata *, u64 __iomem *,
 			       u32, unsigned long);
@@ -450,10 +470,8 @@
 	 * make sure we get this much out, unless told to be quiet,
 	 * or it's occurred within the last 5 seconds
 	 */
-	if ((hwerrs & ~(dd->ipath_lasthwerror |
-			((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF |
-			  INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
-			 << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT))) ||
+	if ((hwerrs & ~(dd->ipath_lasthwerror | TXE_PIO_PARITY |
+			RXE_EAGER_PARITY)) ||
 	    (ipath_debug & __IPATH_VERBDBG))
 		dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx "
 			 "(cleared)\n", (unsigned long long) hwerrs);
@@ -465,7 +483,7 @@
 			      (hwerrs & ~dd->ipath_hwe_bitsextant));
 
 	ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
-	if (ctrl & INFINIPATH_C_FREEZEMODE) {
+	if ((ctrl & INFINIPATH_C_FREEZEMODE) && !ipath_diag_inuse) {
 		/*
 		 * parity errors in send memory are recoverable,
 		 * just cancel the send (if indicated in * sendbuffererror),
@@ -540,12 +558,40 @@
 				 dd->ipath_hwerrmask);
 	}
 
-	if (*msg)
-		ipath_dev_err(dd, "%s hardware error\n", msg);
-	if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg) {
+	if (hwerrs) {
 		/*
-		 * for /sys status file ; if no trailing } is copied, we'll
-		 * know it was truncated.
+		 * If any bits are set that we aren't ignoring, only
+		 * make the complaint once, in case the error is stuck
+		 * or recurring and we get here multiple
+		 * times.
+		 */
+		ipath_dev_err(dd, "%s hardware error\n", msg);
+		if (dd->ipath_flags & IPATH_INITTED) {
+			ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
+			ipath_setup_pe_setextled(dd,
+				INFINIPATH_IBCS_L_STATE_DOWN,
+				INFINIPATH_IBCS_LT_STATE_DISABLED);
+			ipath_dev_err(dd, "Fatal Hardware Error (freeze "
+					  "mode), no longer usable, SN %.16s\n",
+					  dd->ipath_serial);
+			isfatal = 1;
+		}
+		*dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
+		/* mark as having had error */
+		*dd->ipath_statusp |= IPATH_STATUS_HWERROR;
+		/*
+		 * mark as not usable, at a minimum until driver
+		 * is reloaded, probably until reboot, since no
+		 * other reset is possible.
+		 */
+		dd->ipath_flags &= ~IPATH_INITTED;
+	} else
+		*msg = 0; /* recovered from all of them */
+
+	if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg && msg) {
+		/*
+		 * for the /sys status file; if no trailing brace is copied,
+		 * we'll know it was truncated.
 		 */
 		snprintf(dd->ipath_freezemsg, dd->ipath_freezelen,
 			 "{%s}", msg);
@@ -610,7 +656,6 @@
 			dd->ipath_f_put_tid = ipath_pe_put_tid_2;
 	}
 
-
 	/*
 	 * set here, not in ipath_init_*_funcs because we have to do
 	 * it after we can read chip registers.
@@ -838,7 +883,7 @@
 	extctl = dd->ipath_extctrl & ~(INFINIPATH_EXTC_LED1PRIPORT_ON |
 				       INFINIPATH_EXTC_LED2PRIPORT_ON);
 
-	if (ltst & INFINIPATH_IBCS_LT_STATE_LINKUP)
+	if (ltst == INFINIPATH_IBCS_LT_STATE_LINKUP)
 		extctl |= INFINIPATH_EXTC_LED2PRIPORT_ON;
 	if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE)
 		extctl |= INFINIPATH_EXTC_LED1PRIPORT_ON;
@@ -863,6 +908,62 @@
 	pci_disable_msi(dd->pcidev);
 }
 
+static void ipath_6120_pcie_params(struct ipath_devdata *dd)
+{
+	u16 linkstat, speed;
+	int pos;
+
+	pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP);
+	if (!pos) {
+		ipath_dev_err(dd, "Can't find PCI Express capability!\n");
+		goto bail;
+	}
+
+	pci_read_config_word(dd->pcidev, pos + PCI_EXP_LNKSTA,
+			     &linkstat);
+	/*
+	 * Speed is in bits 0-3, link width in bits 4-8;
+	 * there are no defines for them in the headers.
+	 */
+	speed = linkstat & 0xf;
+	linkstat >>= 4;
+	linkstat &= 0x1f;
+	dd->ipath_lbus_width = linkstat;
+
+	switch (speed) {
+	case 1:
+		dd->ipath_lbus_speed = 2500; /* Gen1, 2.5GHz */
+		break;
+	case 2:
+		dd->ipath_lbus_speed = 5000; /* Gen2, 5GHz */
+		break;
+	default: /* not defined, assume gen1 */
+		dd->ipath_lbus_speed = 2500;
+		break;
+	}
+
+	if (linkstat < 8)
+		ipath_dev_err(dd,
+			"PCIe width %u (x8 HCA), performance reduced\n",
+			linkstat);
+	else
+		ipath_cdbg(VERBOSE, "PCIe speed %u width %u (x8 HCA)\n",
+			dd->ipath_lbus_speed, linkstat);
+
+	if (speed != 1)
+		ipath_dev_err(dd,
+			"PCIe linkspeed %u is incorrect; "
+			"should be 1 (2500)!\n", speed);
+bail:
+	/* fill in string, even on errors */
+	snprintf(dd->ipath_lbus_info, sizeof(dd->ipath_lbus_info),
+		"PCIe,%uMHz,x%u\n",
+		dd->ipath_lbus_speed,
+		dd->ipath_lbus_width);
+
+	return;
+}
+
 /**
  * ipath_setup_pe_config - setup PCIe config related stuff
  * @dd: the infinipath device
@@ -920,19 +1021,8 @@
 	} else
 		ipath_dev_err(dd, "Can't find MSI capability, "
 			      "can't save MSI settings for reset\n");
-	if ((pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP))) {
-		u16 linkstat;
-		pci_read_config_word(dd->pcidev, pos + PCI_EXP_LNKSTA,
-				     &linkstat);
-		linkstat >>= 4;
-		linkstat &= 0x1f;
-		if (linkstat != 8)
-			ipath_dev_err(dd, "PCIe width %u, "
-				      "performance reduced\n", linkstat);
-	}
-	else
-		ipath_dev_err(dd, "Can't find PCI Express "
-			      "capability!\n");
+
+	ipath_6120_pcie_params(dd);
 
 	dd->ipath_link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
 	dd->ipath_link_speed_supported = IPATH_IB_SDR;
@@ -1065,10 +1155,7 @@
 		INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
 		INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT;
 
-	dd->ipath_eep_st_masks[2].errs_to_log =
-		INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET;
-
-
+	dd->ipath_eep_st_masks[2].errs_to_log = INFINIPATH_E_RESET;
 	dd->delay_mult = 2; /* SDR, 4X, can't change */
 }
 
@@ -1142,6 +1229,9 @@
 	u64 val;
 	int i;
 	int ret;
+	u16 cmdval;
+
+	pci_read_config_word(dd->pcidev, PCI_COMMAND, &cmdval);
 
 	/* Use ERROR so it shows up in logs, etc. */
 	ipath_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->ipath_unit);
@@ -1169,10 +1259,14 @@
 			ipath_dev_err(dd, "rewrite of BAR1 failed: %d\n",
 				      r);
 		/* now re-enable memory access */
+		pci_write_config_word(dd->pcidev, PCI_COMMAND, cmdval);
 		if ((r = pci_enable_device(dd->pcidev)))
 			ipath_dev_err(dd, "pci_enable_device failed after "
 				      "reset: %d\n", r);
-		/* whether it worked or not, mark as present, again */
+		/*
+		 * Whether or not it fully enabled, mark the device as
+		 * present again (but not INITTED).
+		 */
 		dd->ipath_flags |= IPATH_PRESENT;
 		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision);
 		if (val == dd->ipath_revision) {
@@ -1190,6 +1284,8 @@
 	ret = 0; /* failed */
 
 bail:
+	if (ret)
+		ipath_6120_pcie_params(dd);
 	return ret;
 }
 
@@ -1209,16 +1305,21 @@
 {
 	u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
 	unsigned long flags = 0; /* keep gcc quiet */
+	int tidx;
+	spinlock_t *tidlockp;
+
+	if (!dd->ipath_kregbase)
+		return;
 
 	if (pa != dd->ipath_tidinvalid) {
 		if (pa & ((1U << 11) - 1)) {
 			dev_info(&dd->pcidev->dev, "BUG: physaddr %lx "
-				 "not 4KB aligned!\n", pa);
+				 "not 2KB aligned!\n", pa);
 			return;
 		}
 		pa >>= 11;
 		/* paranoia check */
-		if (pa & (7<<29))
+		if (pa & ~INFINIPATH_RT_ADDR_MASK)
 			ipath_dev_err(dd,
 				      "BUG: Physical page address 0x%lx "
 				      "has bits set in 31-29\n", pa);
@@ -1238,14 +1339,22 @@
 	 * call can be done from interrupt level for the port 0 eager TIDs,
 	 * so we have to use irqsave locks.
 	 */
-	spin_lock_irqsave(&dd->ipath_tid_lock, flags);
+	/*
+	 * Assumes tidptr always > ipath_egrtidbase
+	 * if type == RCVHQ_RCV_TYPE_EAGER.
+	 */
+	tidx = tidptr - dd->ipath_egrtidbase;
+
+	tidlockp = (type == RCVHQ_RCV_TYPE_EAGER && tidx < dd->ipath_rcvegrcnt)
+		? &dd->ipath_kernel_tid_lock : &dd->ipath_user_tid_lock;
+	spin_lock_irqsave(tidlockp, flags);
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xfeeddeaf);
-	if (dd->ipath_kregbase)
-		writel(pa, tidp32);
+	writel(pa, tidp32);
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xdeadbeef);
 	mmiowb();
-	spin_unlock_irqrestore(&dd->ipath_tid_lock, flags);
+	spin_unlock_irqrestore(tidlockp, flags);
 }
+
 /**
  * ipath_pe_put_tid_2 - write a TID in chip, Revision 2 or higher
  * @dd: the infinipath device
@@ -1261,6 +1370,10 @@
 			     u32 type, unsigned long pa)
 {
 	u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
+	u32 tidx;
+
+	if (!dd->ipath_kregbase)
+		return;
 
 	if (pa != dd->ipath_tidinvalid) {
 		if (pa & ((1U << 11) - 1)) {
@@ -1270,7 +1383,7 @@
 		}
 		pa >>= 11;
 		/* paranoia check */
-		if (pa & (7<<29))
+		if (pa & ~INFINIPATH_RT_ADDR_MASK)
 			ipath_dev_err(dd,
 				      "BUG: Physical page address 0x%lx "
 				      "has bits set in 31-29\n", pa);
@@ -1280,8 +1393,8 @@
 		else /* for now, always full 4KB page */
 			pa |= 2 << 29;
 	}
-	if (dd->ipath_kregbase)
-		writel(pa, tidp32);
+	tidx = tidptr - dd->ipath_egrtidbase;
+	writel(pa, tidp32);
 	mmiowb();
 }
 
@@ -1379,17 +1492,13 @@
 	dd->ipath_egrtidbase = (u64 __iomem *)
 		((char __iomem *) dd->ipath_kregbase + dd->ipath_rcvegrbase);
 
-	/*
-	 * To truly support a 4KB MTU (for usermode), we need to
-	 * bump this to a larger value.  For now, we use them for
-	 * the kernel only.
-	 */
-	dd->ipath_rcvegrbufsize = 2048;
+	dd->ipath_rcvegrbufsize = ipath_mtu4096 ? 4096 : 2048;
 	/*
 	 * the min() check here is currently a nop, but it may not always
 	 * be, depending on just how we do ipath_rcvegrbufsize
 	 */
-	dd->ipath_ibmaxlen = min(dd->ipath_piosize2k,
+	dd->ipath_ibmaxlen = min(ipath_mtu4096 ? dd->ipath_piosize4k :
+				 dd->ipath_piosize2k,
 				 dd->ipath_rcvegrbufsize +
 				 (dd->ipath_rcvhdrentsize << 2));
 	dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen;
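[Editor's note: ipath_6120_pcie_params() above derives link speed and width from the raw PCIe Link Status register. Below is a sketch of the same decode over a sample value; in-kernel, the raw word comes from pci_read_config_word(), and the sample value is an assumption for illustration.]

#include <stdint.h>
#include <stdio.h>

static void decode_lnksta(uint16_t linkstat)
{
	unsigned speed = linkstat & 0xf;	 /* code 1 = 2.5GT/s, 2 = 5GT/s */
	unsigned width = (linkstat >> 4) & 0x1f; /* negotiated lane count */

	printf("PCIe %s, x%u\n",
	       speed == 2 ? "5GT/s (Gen2)" : "2.5GT/s (Gen1)", width);
}

int main(void)
{
	decode_lnksta(0x0081);	/* example: Gen1 x8, what the 6120 expects */
	return 0;
}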
diff --git a/drivers/infiniband/hw/ipath/ipath_iba7220.c b/drivers/infiniband/hw/ipath/ipath_iba7220.c
new file mode 100644
index 0000000..1b2de2c
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_iba7220.c
@@ -0,0 +1,2571 @@
+/*
+ * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/*
+ * This file contains all of the code that is specific to the
+ * InfiniPath 7220 chip (except that specific to the SerDes)
+ */
+
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <rdma/ib_verbs.h>
+
+#include "ipath_kernel.h"
+#include "ipath_registers.h"
+#include "ipath_7220.h"
+
+static void ipath_setup_7220_setextled(struct ipath_devdata *, u64, u64);
+
+static unsigned ipath_compat_ddr_negotiate = 1;
+
+module_param_named(compat_ddr_negotiate, ipath_compat_ddr_negotiate, uint,
+			S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(compat_ddr_negotiate,
+		"Attempt pre-IBTA 1.2 DDR speed negotiation");
+
+static unsigned ipath_sdma_fetch_arb = 1;
+module_param_named(fetch_arb, ipath_sdma_fetch_arb, uint, S_IRUGO);
+MODULE_PARM_DESC(fetch_arb, "IBA7220: change SDMA descriptor arbitration");
+
+/*
+ * This file contains almost all the chip-specific register information and
+ * access functions for the QLogic InfiniPath 7220 PCI-Express chip, with the
+ * exception of SerDes support, which is in ipath_sd7220.c.
+ *
+ * This lists the InfiniPath registers, in the actual chip layout.
+ * This structure should never be directly accessed.
+ */
+struct _infinipath_do_not_use_kernel_regs {
+	unsigned long long Revision;
+	unsigned long long Control;
+	unsigned long long PageAlign;
+	unsigned long long PortCnt;
+	unsigned long long DebugPortSelect;
+	unsigned long long DebugSigsIntSel; /* was Reserved0; */
+	unsigned long long SendRegBase;
+	unsigned long long UserRegBase;
+	unsigned long long CounterRegBase;
+	unsigned long long Scratch;
+	unsigned long long EEPROMAddrCmd; /* was Reserved1; */
+	unsigned long long EEPROMData; /* was Reserved2; */
+	unsigned long long IntBlocked;
+	unsigned long long IntMask;
+	unsigned long long IntStatus;
+	unsigned long long IntClear;
+	unsigned long long ErrorMask;
+	unsigned long long ErrorStatus;
+	unsigned long long ErrorClear;
+	unsigned long long HwErrMask;
+	unsigned long long HwErrStatus;
+	unsigned long long HwErrClear;
+	unsigned long long HwDiagCtrl;
+	unsigned long long MDIO;
+	unsigned long long IBCStatus;
+	unsigned long long IBCCtrl;
+	unsigned long long ExtStatus;
+	unsigned long long ExtCtrl;
+	unsigned long long GPIOOut;
+	unsigned long long GPIOMask;
+	unsigned long long GPIOStatus;
+	unsigned long long GPIOClear;
+	unsigned long long RcvCtrl;
+	unsigned long long RcvBTHQP;
+	unsigned long long RcvHdrSize;
+	unsigned long long RcvHdrCnt;
+	unsigned long long RcvHdrEntSize;
+	unsigned long long RcvTIDBase;
+	unsigned long long RcvTIDCnt;
+	unsigned long long RcvEgrBase;
+	unsigned long long RcvEgrCnt;
+	unsigned long long RcvBufBase;
+	unsigned long long RcvBufSize;
+	unsigned long long RxIntMemBase;
+	unsigned long long RxIntMemSize;
+	unsigned long long RcvPartitionKey;
+	unsigned long long RcvQPMulticastPort;
+	unsigned long long RcvPktLEDCnt;
+	unsigned long long IBCDDRCtrl;
+	unsigned long long HRTBT_GUID;
+	unsigned long long IB_SDTEST_IF_TX;
+	unsigned long long IB_SDTEST_IF_RX;
+	unsigned long long IBCDDRCtrl2;
+	unsigned long long IBCDDRStatus;
+	unsigned long long JIntReload;
+	unsigned long long IBNCModeCtrl;
+	unsigned long long SendCtrl;
+	unsigned long long SendBufBase;
+	unsigned long long SendBufSize;
+	unsigned long long SendBufCnt;
+	unsigned long long SendAvailAddr;
+	unsigned long long TxIntMemBase;
+	unsigned long long TxIntMemSize;
+	unsigned long long SendDmaBase;
+	unsigned long long SendDmaLenGen;
+	unsigned long long SendDmaTail;
+	unsigned long long SendDmaHead;
+	unsigned long long SendDmaHeadAddr;
+	unsigned long long SendDmaBufMask0;
+	unsigned long long SendDmaBufMask1;
+	unsigned long long SendDmaBufMask2;
+	unsigned long long SendDmaStatus;
+	unsigned long long SendBufferError;
+	unsigned long long SendBufferErrorCONT1;
+	unsigned long long SendBufErr2; /* was Reserved6SBE[0/6] */
+	unsigned long long Reserved6L[2];
+	unsigned long long AvailUpdCount;
+	unsigned long long RcvHdrAddr0;
+	unsigned long long RcvHdrAddrs[16]; /* Why enumerate? */
+	unsigned long long Reserved7hdtl; /* Align next to 300 */
+	unsigned long long RcvHdrTailAddr0; /* 300, like others */
+	unsigned long long RcvHdrTailAddrs[16];
+	unsigned long long Reserved9SW[7]; /* was [8]; we have 17 ports */
+	unsigned long long IbsdEpbAccCtl; /* IB Serdes EPB access control */
+	unsigned long long IbsdEpbTransReg; /* IB Serdes EPB Transaction */
+	unsigned long long Reserved10sds; /* was SerdesStatus on */
+	unsigned long long XGXSConfig;
+	unsigned long long IBSerDesCtrl; /* Was IBPLLCfg on Monty */
+	unsigned long long EEPCtlStat; /* for "boot" EEPROM/FLASH */
+	unsigned long long EEPAddrCmd;
+	unsigned long long EEPData;
+	unsigned long long PcieEpbAccCtl;
+	unsigned long long PcieEpbTransCtl;
+	unsigned long long EfuseCtl; /* E-Fuse control */
+	unsigned long long EfuseData[4];
+	unsigned long long ProcMon;
+	/* this chip moves following two from previous 200, 208 */
+	unsigned long long PCIeRBufTestReg0;
+	unsigned long long PCIeRBufTestReg1;
+	/* added for this chip */
+	unsigned long long PCIeRBufTestReg2;
+	unsigned long long PCIeRBufTestReg3;
+	/* added for this chip, debug only */
+	unsigned long long SPC_JTAG_ACCESS_REG;
+	unsigned long long LAControlReg;
+	unsigned long long GPIODebugSelReg;
+	unsigned long long DebugPortValueReg;
+	/* added for this chip, DMA */
+	unsigned long long SendDmaBufUsed[3];
+	unsigned long long SendDmaReqTagUsed;
+	/*
+	 * added for this chip, EFUSE: note that these program 64-bit
+	 * words 2 and 3
+	 */
+	unsigned long long efuse_pgm_data[2];
+	unsigned long long Reserved11LAalign[10]; /* Skip 4B0..4F8 */
+	/* we have 30 regs for DDS and RXEQ in IB SERDES */
+	unsigned long long SerDesDDSRXEQ[30];
+	unsigned long long Reserved12LAalign[2]; /* Skip 5F0, 5F8 */
+	/* added for LA debug support */
+	unsigned long long LAMemory[32];
+};
+
+struct _infinipath_do_not_use_counters {
+	__u64 LBIntCnt;
+	__u64 LBFlowStallCnt;
+	__u64 TxSDmaDescCnt;	/* was Reserved1 */
+	__u64 TxUnsupVLErrCnt;
+	__u64 TxDataPktCnt;
+	__u64 TxFlowPktCnt;
+	__u64 TxDwordCnt;
+	__u64 TxLenErrCnt;
+	__u64 TxMaxMinLenErrCnt;
+	__u64 TxUnderrunCnt;
+	__u64 TxFlowStallCnt;
+	__u64 TxDroppedPktCnt;
+	__u64 RxDroppedPktCnt;
+	__u64 RxDataPktCnt;
+	__u64 RxFlowPktCnt;
+	__u64 RxDwordCnt;
+	__u64 RxLenErrCnt;
+	__u64 RxMaxMinLenErrCnt;
+	__u64 RxICRCErrCnt;
+	__u64 RxVCRCErrCnt;
+	__u64 RxFlowCtrlErrCnt;
+	__u64 RxBadFormatCnt;
+	__u64 RxLinkProblemCnt;
+	__u64 RxEBPCnt;
+	__u64 RxLPCRCErrCnt;
+	__u64 RxBufOvflCnt;
+	__u64 RxTIDFullErrCnt;
+	__u64 RxTIDValidErrCnt;
+	__u64 RxPKeyMismatchCnt;
+	__u64 RxP0HdrEgrOvflCnt;
+	__u64 RxP1HdrEgrOvflCnt;
+	__u64 RxP2HdrEgrOvflCnt;
+	__u64 RxP3HdrEgrOvflCnt;
+	__u64 RxP4HdrEgrOvflCnt;
+	__u64 RxP5HdrEgrOvflCnt;
+	__u64 RxP6HdrEgrOvflCnt;
+	__u64 RxP7HdrEgrOvflCnt;
+	__u64 RxP8HdrEgrOvflCnt;
+	__u64 RxP9HdrEgrOvflCnt;	/* was Reserved6 */
+	__u64 RxP10HdrEgrOvflCnt;	/* was Reserved7 */
+	__u64 RxP11HdrEgrOvflCnt;	/* new for IBA7220 */
+	__u64 RxP12HdrEgrOvflCnt;	/* new for IBA7220 */
+	__u64 RxP13HdrEgrOvflCnt;	/* new for IBA7220 */
+	__u64 RxP14HdrEgrOvflCnt;	/* new for IBA7220 */
+	__u64 RxP15HdrEgrOvflCnt;	/* new for IBA7220 */
+	__u64 RxP16HdrEgrOvflCnt;	/* new for IBA7220 */
+	__u64 IBStatusChangeCnt;
+	__u64 IBLinkErrRecoveryCnt;
+	__u64 IBLinkDownedCnt;
+	__u64 IBSymbolErrCnt;
+	/* The following are new for IBA7220 */
+	__u64 RxVL15DroppedPktCnt;
+	__u64 RxOtherLocalPhyErrCnt;
+	__u64 PcieRetryBufDiagQwordCnt;
+	__u64 ExcessBufferOvflCnt;
+	__u64 LocalLinkIntegrityErrCnt;
+	__u64 RxVlErrCnt;
+	__u64 RxDlidFltrCnt;
+	__u64 Reserved8[7];
+	__u64 PSStat;
+	__u64 PSStart;
+	__u64 PSInterval;
+	__u64 PSRcvDataCount;
+	__u64 PSRcvPktsCount;
+	__u64 PSXmitDataCount;
+	__u64 PSXmitPktsCount;
+	__u64 PSXmitWaitCount;
+};
+
+#define IPATH_KREG_OFFSET(field) (offsetof( \
+	struct _infinipath_do_not_use_kernel_regs, field) / sizeof(u64))
+#define IPATH_CREG_OFFSET(field) (offsetof( \
+	struct _infinipath_do_not_use_counters, field) / sizeof(u64))
+
+static const struct ipath_kregs ipath_7220_kregs = {
+	.kr_control = IPATH_KREG_OFFSET(Control),
+	.kr_counterregbase = IPATH_KREG_OFFSET(CounterRegBase),
+	.kr_debugportselect = IPATH_KREG_OFFSET(DebugPortSelect),
+	.kr_errorclear = IPATH_KREG_OFFSET(ErrorClear),
+	.kr_errormask = IPATH_KREG_OFFSET(ErrorMask),
+	.kr_errorstatus = IPATH_KREG_OFFSET(ErrorStatus),
+	.kr_extctrl = IPATH_KREG_OFFSET(ExtCtrl),
+	.kr_extstatus = IPATH_KREG_OFFSET(ExtStatus),
+	.kr_gpio_clear = IPATH_KREG_OFFSET(GPIOClear),
+	.kr_gpio_mask = IPATH_KREG_OFFSET(GPIOMask),
+	.kr_gpio_out = IPATH_KREG_OFFSET(GPIOOut),
+	.kr_gpio_status = IPATH_KREG_OFFSET(GPIOStatus),
+	.kr_hwdiagctrl = IPATH_KREG_OFFSET(HwDiagCtrl),
+	.kr_hwerrclear = IPATH_KREG_OFFSET(HwErrClear),
+	.kr_hwerrmask = IPATH_KREG_OFFSET(HwErrMask),
+	.kr_hwerrstatus = IPATH_KREG_OFFSET(HwErrStatus),
+	.kr_ibcctrl = IPATH_KREG_OFFSET(IBCCtrl),
+	.kr_ibcstatus = IPATH_KREG_OFFSET(IBCStatus),
+	.kr_intblocked = IPATH_KREG_OFFSET(IntBlocked),
+	.kr_intclear = IPATH_KREG_OFFSET(IntClear),
+	.kr_intmask = IPATH_KREG_OFFSET(IntMask),
+	.kr_intstatus = IPATH_KREG_OFFSET(IntStatus),
+	.kr_mdio = IPATH_KREG_OFFSET(MDIO),
+	.kr_pagealign = IPATH_KREG_OFFSET(PageAlign),
+	.kr_partitionkey = IPATH_KREG_OFFSET(RcvPartitionKey),
+	.kr_portcnt = IPATH_KREG_OFFSET(PortCnt),
+	.kr_rcvbthqp = IPATH_KREG_OFFSET(RcvBTHQP),
+	.kr_rcvbufbase = IPATH_KREG_OFFSET(RcvBufBase),
+	.kr_rcvbufsize = IPATH_KREG_OFFSET(RcvBufSize),
+	.kr_rcvctrl = IPATH_KREG_OFFSET(RcvCtrl),
+	.kr_rcvegrbase = IPATH_KREG_OFFSET(RcvEgrBase),
+	.kr_rcvegrcnt = IPATH_KREG_OFFSET(RcvEgrCnt),
+	.kr_rcvhdrcnt = IPATH_KREG_OFFSET(RcvHdrCnt),
+	.kr_rcvhdrentsize = IPATH_KREG_OFFSET(RcvHdrEntSize),
+	.kr_rcvhdrsize = IPATH_KREG_OFFSET(RcvHdrSize),
+	.kr_rcvintmembase = IPATH_KREG_OFFSET(RxIntMemBase),
+	.kr_rcvintmemsize = IPATH_KREG_OFFSET(RxIntMemSize),
+	.kr_rcvtidbase = IPATH_KREG_OFFSET(RcvTIDBase),
+	.kr_rcvtidcnt = IPATH_KREG_OFFSET(RcvTIDCnt),
+	.kr_revision = IPATH_KREG_OFFSET(Revision),
+	.kr_scratch = IPATH_KREG_OFFSET(Scratch),
+	.kr_sendbuffererror = IPATH_KREG_OFFSET(SendBufferError),
+	.kr_sendctrl = IPATH_KREG_OFFSET(SendCtrl),
+	.kr_sendpioavailaddr = IPATH_KREG_OFFSET(SendAvailAddr),
+	.kr_sendpiobufbase = IPATH_KREG_OFFSET(SendBufBase),
+	.kr_sendpiobufcnt = IPATH_KREG_OFFSET(SendBufCnt),
+	.kr_sendpiosize = IPATH_KREG_OFFSET(SendBufSize),
+	.kr_sendregbase = IPATH_KREG_OFFSET(SendRegBase),
+	.kr_txintmembase = IPATH_KREG_OFFSET(TxIntMemBase),
+	.kr_txintmemsize = IPATH_KREG_OFFSET(TxIntMemSize),
+	.kr_userregbase = IPATH_KREG_OFFSET(UserRegBase),
+
+	.kr_xgxsconfig = IPATH_KREG_OFFSET(XGXSConfig),
+
+	/* send dma related regs */
+	.kr_senddmabase = IPATH_KREG_OFFSET(SendDmaBase),
+	.kr_senddmalengen = IPATH_KREG_OFFSET(SendDmaLenGen),
+	.kr_senddmatail = IPATH_KREG_OFFSET(SendDmaTail),
+	.kr_senddmahead = IPATH_KREG_OFFSET(SendDmaHead),
+	.kr_senddmaheadaddr = IPATH_KREG_OFFSET(SendDmaHeadAddr),
+	.kr_senddmabufmask0 = IPATH_KREG_OFFSET(SendDmaBufMask0),
+	.kr_senddmabufmask1 = IPATH_KREG_OFFSET(SendDmaBufMask1),
+	.kr_senddmabufmask2 = IPATH_KREG_OFFSET(SendDmaBufMask2),
+	.kr_senddmastatus = IPATH_KREG_OFFSET(SendDmaStatus),
+
+	/* SerDes related regs */
+	.kr_ibserdesctrl = IPATH_KREG_OFFSET(IBSerDesCtrl),
+	.kr_ib_epbacc = IPATH_KREG_OFFSET(IbsdEpbAccCtl),
+	.kr_ib_epbtrans = IPATH_KREG_OFFSET(IbsdEpbTransReg),
+	.kr_pcie_epbacc = IPATH_KREG_OFFSET(PcieEpbAccCtl),
+	.kr_pcie_epbtrans = IPATH_KREG_OFFSET(PcieEpbTransCtl),
+	.kr_ib_ddsrxeq = IPATH_KREG_OFFSET(SerDesDDSRXEQ),
+
+	/*
+	 * These should not be used directly via ipath_read_kreg64(),
+	 * use them with ipath_read_kreg64_port()
+	 */
+	.kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0),
+	.kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0),
+
+	/*
+	 * The rcvpktled register controls one of the debug port signals, so
+	 * a packet activity LED can be connected to it.
+	 */
+	.kr_rcvpktledcnt = IPATH_KREG_OFFSET(RcvPktLEDCnt),
+	.kr_pcierbuftestreg0 = IPATH_KREG_OFFSET(PCIeRBufTestReg0),
+	.kr_pcierbuftestreg1 = IPATH_KREG_OFFSET(PCIeRBufTestReg1),
+
+	.kr_hrtbt_guid = IPATH_KREG_OFFSET(HRTBT_GUID),
+	.kr_ibcddrctrl = IPATH_KREG_OFFSET(IBCDDRCtrl),
+	.kr_ibcddrstatus = IPATH_KREG_OFFSET(IBCDDRStatus),
+	.kr_jintreload = IPATH_KREG_OFFSET(JIntReload)
+};
+
+static const struct ipath_cregs ipath_7220_cregs = {
+	.cr_badformatcnt = IPATH_CREG_OFFSET(RxBadFormatCnt),
+	.cr_erricrccnt = IPATH_CREG_OFFSET(RxICRCErrCnt),
+	.cr_errlinkcnt = IPATH_CREG_OFFSET(RxLinkProblemCnt),
+	.cr_errlpcrccnt = IPATH_CREG_OFFSET(RxLPCRCErrCnt),
+	.cr_errpkey = IPATH_CREG_OFFSET(RxPKeyMismatchCnt),
+	.cr_errrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowCtrlErrCnt),
+	.cr_err_rlencnt = IPATH_CREG_OFFSET(RxLenErrCnt),
+	.cr_errslencnt = IPATH_CREG_OFFSET(TxLenErrCnt),
+	.cr_errtidfull = IPATH_CREG_OFFSET(RxTIDFullErrCnt),
+	.cr_errtidvalid = IPATH_CREG_OFFSET(RxTIDValidErrCnt),
+	.cr_errvcrccnt = IPATH_CREG_OFFSET(RxVCRCErrCnt),
+	.cr_ibstatuschange = IPATH_CREG_OFFSET(IBStatusChangeCnt),
+	.cr_intcnt = IPATH_CREG_OFFSET(LBIntCnt),
+	.cr_invalidrlencnt = IPATH_CREG_OFFSET(RxMaxMinLenErrCnt),
+	.cr_invalidslencnt = IPATH_CREG_OFFSET(TxMaxMinLenErrCnt),
+	.cr_lbflowstallcnt = IPATH_CREG_OFFSET(LBFlowStallCnt),
+	.cr_pktrcvcnt = IPATH_CREG_OFFSET(RxDataPktCnt),
+	.cr_pktrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowPktCnt),
+	.cr_pktsendcnt = IPATH_CREG_OFFSET(TxDataPktCnt),
+	.cr_pktsendflowcnt = IPATH_CREG_OFFSET(TxFlowPktCnt),
+	.cr_portovflcnt = IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt),
+	.cr_rcvebpcnt = IPATH_CREG_OFFSET(RxEBPCnt),
+	.cr_rcvovflcnt = IPATH_CREG_OFFSET(RxBufOvflCnt),
+	.cr_senddropped = IPATH_CREG_OFFSET(TxDroppedPktCnt),
+	.cr_sendstallcnt = IPATH_CREG_OFFSET(TxFlowStallCnt),
+	.cr_sendunderruncnt = IPATH_CREG_OFFSET(TxUnderrunCnt),
+	.cr_wordrcvcnt = IPATH_CREG_OFFSET(RxDwordCnt),
+	.cr_wordsendcnt = IPATH_CREG_OFFSET(TxDwordCnt),
+	.cr_unsupvlcnt = IPATH_CREG_OFFSET(TxUnsupVLErrCnt),
+	.cr_rxdroppktcnt = IPATH_CREG_OFFSET(RxDroppedPktCnt),
+	.cr_iblinkerrrecovcnt = IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt),
+	.cr_iblinkdowncnt = IPATH_CREG_OFFSET(IBLinkDownedCnt),
+	.cr_ibsymbolerrcnt = IPATH_CREG_OFFSET(IBSymbolErrCnt),
+	.cr_vl15droppedpktcnt = IPATH_CREG_OFFSET(RxVL15DroppedPktCnt),
+	.cr_rxotherlocalphyerrcnt =
+		IPATH_CREG_OFFSET(RxOtherLocalPhyErrCnt),
+	.cr_excessbufferovflcnt = IPATH_CREG_OFFSET(ExcessBufferOvflCnt),
+	.cr_locallinkintegrityerrcnt =
+		IPATH_CREG_OFFSET(LocalLinkIntegrityErrCnt),
+	.cr_rxvlerrcnt = IPATH_CREG_OFFSET(RxVlErrCnt),
+	.cr_rxdlidfltrcnt = IPATH_CREG_OFFSET(RxDlidFltrCnt),
+	.cr_psstat = IPATH_CREG_OFFSET(PSStat),
+	.cr_psstart = IPATH_CREG_OFFSET(PSStart),
+	.cr_psinterval = IPATH_CREG_OFFSET(PSInterval),
+	.cr_psrcvdatacount = IPATH_CREG_OFFSET(PSRcvDataCount),
+	.cr_psrcvpktscount = IPATH_CREG_OFFSET(PSRcvPktsCount),
+	.cr_psxmitdatacount = IPATH_CREG_OFFSET(PSXmitDataCount),
+	.cr_psxmitpktscount = IPATH_CREG_OFFSET(PSXmitPktsCount),
+	.cr_psxmitwaitcount = IPATH_CREG_OFFSET(PSXmitWaitCount),
+};
+
+/* kr_control bits */
+#define INFINIPATH_C_RESET (1U<<7)
+
+/* kr_intstatus, kr_intclear, kr_intmask bits */
+#define INFINIPATH_I_RCVURG_MASK ((1ULL<<17)-1)
+#define INFINIPATH_I_RCVURG_SHIFT 32
+#define INFINIPATH_I_RCVAVAIL_MASK ((1ULL<<17)-1)
+#define INFINIPATH_I_RCVAVAIL_SHIFT 0
+#define INFINIPATH_I_SERDESTRIMDONE (1ULL<<27)
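+
+/*
+ * The 17-bit RCVURG/RCVAVAIL masks provide one bit per receive port,
+ * matching the chip's 17-port maximum configuration (see
+ * ipath_7220_config_ports() below): RCVAVAIL occupies the low bits and
+ * RCVURG the upper 32-bit half of the 64-bit interrupt registers.
+ */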
+
+/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
+#define INFINIPATH_HWE_PCIEMEMPARITYERR_MASK  0x00000000000000ffULL
+#define INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT 0
+#define INFINIPATH_HWE_PCIEPOISONEDTLP      0x0000000010000000ULL
+#define INFINIPATH_HWE_PCIECPLTIMEOUT       0x0000000020000000ULL
+#define INFINIPATH_HWE_PCIEBUSPARITYXTLH    0x0000000040000000ULL
+#define INFINIPATH_HWE_PCIEBUSPARITYXADM    0x0000000080000000ULL
+#define INFINIPATH_HWE_PCIEBUSPARITYRADM    0x0000000100000000ULL
+#define INFINIPATH_HWE_COREPLL_FBSLIP       0x0080000000000000ULL
+#define INFINIPATH_HWE_COREPLL_RFSLIP       0x0100000000000000ULL
+#define INFINIPATH_HWE_PCIE1PLLFAILED       0x0400000000000000ULL
+#define INFINIPATH_HWE_PCIE0PLLFAILED       0x0800000000000000ULL
+#define INFINIPATH_HWE_SERDESPLLFAILED      0x1000000000000000ULL
+/* specific to this chip */
+#define INFINIPATH_HWE_PCIECPLDATAQUEUEERR         0x0000000000000040ULL
+#define INFINIPATH_HWE_PCIECPLHDRQUEUEERR          0x0000000000000080ULL
+#define INFINIPATH_HWE_SDMAMEMREADERR              0x0000000010000000ULL
+#define INFINIPATH_HWE_CLK_UC_PLLNOTLOCKED	   0x2000000000000000ULL
+#define INFINIPATH_HWE_PCIESERDESQ0PCLKNOTDETECT   0x0100000000000000ULL
+#define INFINIPATH_HWE_PCIESERDESQ1PCLKNOTDETECT   0x0200000000000000ULL
+#define INFINIPATH_HWE_PCIESERDESQ2PCLKNOTDETECT   0x0400000000000000ULL
+#define INFINIPATH_HWE_PCIESERDESQ3PCLKNOTDETECT   0x0800000000000000ULL
+#define INFINIPATH_HWE_DDSRXEQMEMORYPARITYERR	   0x0000008000000000ULL
+#define INFINIPATH_HWE_IB_UC_MEMORYPARITYERR	   0x0000004000000000ULL
+#define INFINIPATH_HWE_PCIE_UC_OCT0MEMORYPARITYERR 0x0000001000000000ULL
+#define INFINIPATH_HWE_PCIE_UC_OCT1MEMORYPARITYERR 0x0000002000000000ULL
+
+#define IBA7220_IBCS_LINKTRAININGSTATE_MASK 0x1F
+#define IBA7220_IBCS_LINKSTATE_SHIFT 5
+#define IBA7220_IBCS_LINKSPEED_SHIFT 8
+#define IBA7220_IBCS_LINKWIDTH_SHIFT 9
+
+#define IBA7220_IBCC_LINKINITCMD_MASK 0x7ULL
+#define IBA7220_IBCC_LINKCMD_SHIFT 19
+#define IBA7220_IBCC_MAXPKTLEN_SHIFT 21
+
+/* kr_ibcddrctrl bits */
+#define IBA7220_IBC_DLIDLMC_MASK	0xFFFFFFFFUL
+#define IBA7220_IBC_DLIDLMC_SHIFT	32
+#define IBA7220_IBC_HRTBT_MASK	3
+#define IBA7220_IBC_HRTBT_SHIFT	16
+#define IBA7220_IBC_HRTBT_ENB	0x10000UL
+#define IBA7220_IBC_LANE_REV_SUPPORTED (1<<8)
+#define IBA7220_IBC_LREV_MASK	1
+#define IBA7220_IBC_LREV_SHIFT	8
+#define IBA7220_IBC_RXPOL_MASK	1
+#define IBA7220_IBC_RXPOL_SHIFT	7
+#define IBA7220_IBC_WIDTH_SHIFT	5
+#define IBA7220_IBC_WIDTH_MASK	0x3
+#define IBA7220_IBC_WIDTH_1X_ONLY	(0<<IBA7220_IBC_WIDTH_SHIFT)
+#define IBA7220_IBC_WIDTH_4X_ONLY	(1<<IBA7220_IBC_WIDTH_SHIFT)
+#define IBA7220_IBC_WIDTH_AUTONEG	(2<<IBA7220_IBC_WIDTH_SHIFT)
+#define IBA7220_IBC_SPEED_AUTONEG	(1<<1)
+#define IBA7220_IBC_SPEED_SDR		(1<<2)
+#define IBA7220_IBC_SPEED_DDR		(1<<3)
+#define IBA7220_IBC_SPEED_AUTONEG_MASK  (0x7<<1)
+#define IBA7220_IBC_IBTA_1_2_MASK	(1)
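+
+/*
+ * For example, enabling both SDR and DDR sets the speed field to
+ * IBA7220_IBC_SPEED_AUTONEG_MASK | IBA7220_IBC_IBTA_1_2_MASK; see
+ * ipath_7220_bringup_serdes() and ipath_7220_set_ib_cfg() below.
+ */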
+
+/* kr_ibcddrstatus */
+/* link latency shift is 0, don't bother defining */
+#define IBA7220_DDRSTAT_LINKLAT_MASK    0x3ffffff
+
+/* kr_extstatus bits */
+#define INFINIPATH_EXTS_FREQSEL 0x2
+#define INFINIPATH_EXTS_SERDESSEL 0x4
+#define INFINIPATH_EXTS_MEMBIST_ENDTEST     0x0000000000004000
+#define INFINIPATH_EXTS_MEMBIST_DISABLED    0x0000000000008000
+
+/* kr_xgxsconfig bits */
+#define INFINIPATH_XGXS_RESET          0x5ULL
+#define INFINIPATH_XGXS_FC_SAFE        (1ULL<<63)
+
+/* kr_rcvpktledcnt */
+#define IBA7220_LEDBLINK_ON_SHIFT 32 /* 4ns period on after packet */
+#define IBA7220_LEDBLINK_OFF_SHIFT 0 /* 4ns period off before next on */
+
+#define _IPATH_GPIO_SDA_NUM 1
+#define _IPATH_GPIO_SCL_NUM 0
+
+#define IPATH_GPIO_SDA (1ULL << \
+	(_IPATH_GPIO_SDA_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
+#define IPATH_GPIO_SCL (1ULL << \
+	(_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
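+
+/*
+ * SDA is on GPIO pin 1 and SCL on pin 0, shifted into the output-enable
+ * field of ExtCtrl; these drive the I2C serial flash (see
+ * ipath_init_7220_variables() below).
+ */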
+
+#define IBA7220_R_INTRAVAIL_SHIFT 17
+#define IBA7220_R_TAILUPD_SHIFT 35
+#define IBA7220_R_PORTCFG_SHIFT 36
+
+#define INFINIPATH_JINT_PACKETSHIFT 16
+#define INFINIPATH_JINT_DEFAULT_IDLE_TICKS  0
+#define INFINIPATH_JINT_DEFAULT_MAX_PACKETS 0
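+
+/*
+ * With both defaults zero, ipath_7220_config_jint() below leaves packet
+ * mitigation off and requests an interrupt for each received packet.
+ */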
+
+#define IBA7220_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
+
+/*
+ * The size bits give us 2^N, in KB units.  0 marks the entry invalid,
+ * and 7 is reserved.  We currently use only 2KB and 4KB.
+ */
+#define IBA7220_TID_SZ_SHIFT 37 /* shift to 3bit size selector */
+#define IBA7220_TID_SZ_2K (1UL<<IBA7220_TID_SZ_SHIFT) /* 2KB */
+#define IBA7220_TID_SZ_4K (2UL<<IBA7220_TID_SZ_SHIFT) /* 4KB */
+#define IBA7220_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
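+
+/*
+ * A TID entry is thus (pa >> IBA7220_TID_PA_SHIFT) OR'd with one of the
+ * size selectors above; see ipath_7220_put_tid() for the construction.
+ */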
+
+#define IPATH_AUTONEG_TRIES 5 /* sequential retries to negotiate DDR */
+
+static char int_type[16] = "auto";
+module_param_string(interrupt_type, int_type, sizeof(int_type), 0444);
+MODULE_PARM_DESC(interrupt_type, "interrupt_type=auto|force_msi|force_intx");
+
+/* packet rate matching delay; chip has support */
+static u8 rate_to_delay[2][2] = {
+	/* 1x, 4x */
+	{   8, 2 }, /* SDR */
+	{   4, 1 }  /* DDR */
+};
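+
+/*
+ * Indexed as rate_to_delay[speed][width], with speed 0=SDR/1=DDR and
+ * width 0=1x/1=4x; e.g. rate_to_delay[0][1] (SDR, 4x) is the initial
+ * delay_mult set in ipath_init_7220_variables().
+ */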
+
+/* 7220 specific hardware errors... */
+static const struct ipath_hwerror_msgs ipath_7220_hwerror_msgs[] = {
+	INFINIPATH_HWE_MSG(PCIEPOISONEDTLP, "PCIe Poisoned TLP"),
+	INFINIPATH_HWE_MSG(PCIECPLTIMEOUT, "PCIe completion timeout"),
+	/*
+	 * In practice, it's unlikely that we'll see PCIe PLL, or bus
+	 * parity or memory parity error failures, because most likely we
+	 * won't be able to talk to the core of the chip.  Nonetheless, we
+	 * might see them, if they are in parts of the PCIe core that aren't
+	 * essential.
+	 */
+	INFINIPATH_HWE_MSG(PCIE1PLLFAILED, "PCIePLL1"),
+	INFINIPATH_HWE_MSG(PCIE0PLLFAILED, "PCIePLL0"),
+	INFINIPATH_HWE_MSG(PCIEBUSPARITYXTLH, "PCIe XTLH core parity"),
+	INFINIPATH_HWE_MSG(PCIEBUSPARITYXADM, "PCIe ADM TX core parity"),
+	INFINIPATH_HWE_MSG(PCIEBUSPARITYRADM, "PCIe ADM RX core parity"),
+	INFINIPATH_HWE_MSG(RXDSYNCMEMPARITYERR, "Rx Dsync"),
+	INFINIPATH_HWE_MSG(SERDESPLLFAILED, "SerDes PLL"),
+	INFINIPATH_HWE_MSG(PCIECPLDATAQUEUEERR, "PCIe cpl data queue"),
+	INFINIPATH_HWE_MSG(PCIECPLHDRQUEUEERR, "PCIe cpl header queue"),
+	INFINIPATH_HWE_MSG(SDMAMEMREADERR, "Send DMA memory read"),
+	INFINIPATH_HWE_MSG(CLK_UC_PLLNOTLOCKED, "uC PLL clock not locked"),
+	INFINIPATH_HWE_MSG(PCIESERDESQ0PCLKNOTDETECT,
+		"PCIe serdes Q0 no clock"),
+	INFINIPATH_HWE_MSG(PCIESERDESQ1PCLKNOTDETECT,
+		"PCIe serdes Q1 no clock"),
+	INFINIPATH_HWE_MSG(PCIESERDESQ2PCLKNOTDETECT,
+		"PCIe serdes Q2 no clock"),
+	INFINIPATH_HWE_MSG(PCIESERDESQ3PCLKNOTDETECT,
+		"PCIe serdes Q3 no clock"),
+	INFINIPATH_HWE_MSG(DDSRXEQMEMORYPARITYERR,
+		"DDS RXEQ memory parity"),
+	INFINIPATH_HWE_MSG(IB_UC_MEMORYPARITYERR, "IB uC memory parity"),
+	INFINIPATH_HWE_MSG(PCIE_UC_OCT0MEMORYPARITYERR,
+		"PCIe uC oct0 memory parity"),
+	INFINIPATH_HWE_MSG(PCIE_UC_OCT1MEMORYPARITYERR,
+		"PCIe uC oct1 memory parity"),
+};
+
+static void autoneg_work(struct work_struct *);
+
+/*
+ * The offset is different for different configured port numbers, since
+ * port0 is fixed in size, but others can vary.  Make it a function to
+ * make the issue more obvious.
+ */
+static inline u32 port_egrtid_idx(struct ipath_devdata *dd, unsigned port)
+{
+	 return port ? dd->ipath_p0_rcvegrcnt +
+		 (port-1) * dd->ipath_rcvegrcnt : 0;
+}
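+
+/*
+ * For example, with ipath_p0_rcvegrcnt = 2048 (set in
+ * ipath_7220_config_ports()), port 2's eager TIDs start at index
+ * 2048 + dd->ipath_rcvegrcnt.
+ */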
+
+static void ipath_7220_txe_recover(struct ipath_devdata *dd)
+{
+	++ipath_stats.sps_txeparity;
+
+	dev_info(&dd->pcidev->dev,
+		"Recovering from TXE PIO parity error\n");
+	ipath_disarm_senderrbufs(dd, 1);
+}
+
+
+/**
+ * ipath_7220_handle_hwerrors - display hardware errors.
+ * @dd: the infinipath device
+ * @msg: the output buffer
+ * @msgl: the size of the output buffer
+ *
+ * Most hardware errors are catastrophic, but for right now we'll
+ * print them and continue.  We reuse the same message buffer as
+ * ipath_handle_errors() to avoid excessive stack usage.
+ */
+static void ipath_7220_handle_hwerrors(struct ipath_devdata *dd, char *msg,
+				       size_t msgl)
+{
+	ipath_err_t hwerrs;
+	u32 bits, ctrl;
+	int isfatal = 0;
+	char bitsmsg[64];
+	int log_idx;
+
+	hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
+	if (!hwerrs) {
+		/*
+		 * Bail early rather than printing confusing messages;
+		 * this seems to be related to clearing the CRC error,
+		 * or the PLL error, during init.
+		 */
+		ipath_cdbg(VERBOSE, "Called but no hardware errors set\n");
+		goto bail;
+	} else if (hwerrs == ~0ULL) {
+		ipath_dev_err(dd, "Read of hardware error status failed "
+			      "(all bits set); ignoring\n");
+		goto bail;
+	}
+	ipath_stats.sps_hwerrs++;
+
+	/*
+	 * Always clear the error status register, except MEMBISTFAIL,
+	 * regardless of whether we continue or stop using the chip.
+	 * We want that set so we know it failed, even across driver reload.
+	 * We'll still ignore it in the hwerrmask.  We do this partly for
+	 * diagnostics, but also for support.
+	 */
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
+			 hwerrs&~INFINIPATH_HWE_MEMBISTFAILED);
+
+	hwerrs &= dd->ipath_hwerrmask;
+
+	/* We log some errors to EEPROM, check if we have any of those. */
+	for (log_idx = 0; log_idx < IPATH_EEP_LOG_CNT; ++log_idx)
+		if (hwerrs & dd->ipath_eep_st_masks[log_idx].hwerrs_to_log)
+			ipath_inc_eeprom_err(dd, log_idx, 1);
+	/*
+	 * Make sure we get this much out, unless told to be quiet,
+	 * or it's occurred within the last 5 seconds.
+	 */
+	if ((hwerrs & ~(dd->ipath_lasthwerror |
+			((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF |
+			  INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
+			 << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT))) ||
+	    (ipath_debug & __IPATH_VERBDBG))
+		dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx "
+			 "(cleared)\n", (unsigned long long) hwerrs);
+	dd->ipath_lasthwerror |= hwerrs;
+
+	if (hwerrs & ~dd->ipath_hwe_bitsextant)
+		ipath_dev_err(dd, "hwerror interrupt with unknown errors "
+			      "%llx set\n", (unsigned long long)
+			      (hwerrs & ~dd->ipath_hwe_bitsextant));
+
+	if (hwerrs & INFINIPATH_HWE_IB_UC_MEMORYPARITYERR)
+		ipath_sd7220_clr_ibpar(dd);
+
+	ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
+	if ((ctrl & INFINIPATH_C_FREEZEMODE) && !ipath_diag_inuse) {
+		/*
+		 * Parity errors in send memory are recoverable:
+		 * just cancel the send (if indicated in sendbuffererror),
+		 * count the occurrence, unfreeze (if no other handled
+		 * hardware error bits are set), and continue.
+		 */
+		if (hwerrs & ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF |
+			       INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
+			      << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)) {
+			ipath_7220_txe_recover(dd);
+			hwerrs &= ~((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF |
+				     INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
+				    << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT);
+			if (!hwerrs) {
+				/* else leave in freeze mode */
+				ipath_write_kreg(dd,
+						 dd->ipath_kregs->kr_control,
+						 dd->ipath_control);
+				goto bail;
+			}
+		}
+		if (hwerrs) {
+			/*
+			 * If any bits are set that we aren't ignoring,
+			 * make the complaint only once, in case it's stuck
+			 * or recurring and we get here multiple times.
+			 * Force the link down, so the switch knows, and
+			 * the LEDs are turned off.
+			 */
+			if (dd->ipath_flags & IPATH_INITTED) {
+				ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
+				ipath_setup_7220_setextled(dd,
+					INFINIPATH_IBCS_L_STATE_DOWN,
+					INFINIPATH_IBCS_LT_STATE_DISABLED);
+				ipath_dev_err(dd, "Fatal Hardware Error "
+					      "(freeze mode), no longer"
+					      " usable, SN %.16s\n",
+						  dd->ipath_serial);
+				isfatal = 1;
+			}
+			/*
+			 * Mark as having had an error for driver, and also
+			 * for /sys and status word mapped to user programs.
+			 * This marks unit as not usable, until reset.
+			 */
+			*dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
+			*dd->ipath_statusp |= IPATH_STATUS_HWERROR;
+			dd->ipath_flags &= ~IPATH_INITTED;
+		} else {
+			ipath_dbg("Clearing freezemode on ignored hardware "
+				  "error\n");
+			ipath_clear_freeze(dd);
+		}
+	}
+
+	*msg = '\0';
+
+	if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) {
+		strlcat(msg, "[Memory BIST test failed, "
+			"InfiniPath hardware unusable]", msgl);
+		/* ignore from now on, so disable until driver reloaded */
+		*dd->ipath_statusp |= IPATH_STATUS_HWERROR;
+		dd->ipath_hwerrmask &= ~INFINIPATH_HWE_MEMBISTFAILED;
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
+				 dd->ipath_hwerrmask);
+	}
+
+	ipath_format_hwerrors(hwerrs,
+			      ipath_7220_hwerror_msgs,
+			      ARRAY_SIZE(ipath_7220_hwerror_msgs),
+			      msg, msgl);
+
+	if (hwerrs & (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK
+		      << INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT)) {
+		bits = (u32) ((hwerrs >>
+			       INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) &
+			      INFINIPATH_HWE_PCIEMEMPARITYERR_MASK);
+		snprintf(bitsmsg, sizeof bitsmsg,
+			 "[PCIe Mem Parity Errs %x] ", bits);
+		strlcat(msg, bitsmsg, msgl);
+	}
+
+#define _IPATH_PLL_FAIL (INFINIPATH_HWE_COREPLL_FBSLIP |	\
+			 INFINIPATH_HWE_COREPLL_RFSLIP)
+
+	if (hwerrs & _IPATH_PLL_FAIL) {
+		snprintf(bitsmsg, sizeof bitsmsg,
+			 "[PLL failed (%llx), InfiniPath hardware unusable]",
+			 (unsigned long long) hwerrs & _IPATH_PLL_FAIL);
+		strlcat(msg, bitsmsg, msgl);
+		/* ignore from now on, so disable until driver reloaded */
+		dd->ipath_hwerrmask &= ~(hwerrs & _IPATH_PLL_FAIL);
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
+				 dd->ipath_hwerrmask);
+	}
+
+	if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED) {
+		/*
+		 * If it occurs, it is left masked since the external
+		 * interface is unused.
+		 */
+		dd->ipath_hwerrmask &= ~INFINIPATH_HWE_SERDESPLLFAILED;
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
+				 dd->ipath_hwerrmask);
+	}
+
+	ipath_dev_err(dd, "%s hardware error\n", msg);
+	/*
+	 * For the /sys status file: if no trailing } is copied, we'll
+	 * know it was truncated.
+	 */
+	if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg)
+		snprintf(dd->ipath_freezemsg, dd->ipath_freezelen,
+			 "{%s}", msg);
+bail:;
+}
+
+/**
+ * ipath_7220_boardname - fill in the board name
+ * @dd: the infinipath device
+ * @name: the output buffer
+ * @namelen: the size of the output buffer
+ *
+ * info is based on the board revision register
+ */
+static int ipath_7220_boardname(struct ipath_devdata *dd, char *name,
+	size_t namelen)
+{
+	char *n = NULL;
+	u8 boardrev = dd->ipath_boardrev;
+	int ret;
+
+	if (boardrev == 15) {
+		/*
+		 * Emulator sometimes comes up all-ones, rather than zero.
+		 */
+		boardrev = 0;
+		dd->ipath_boardrev = boardrev;
+	}
+	switch (boardrev) {
+	case 0:
+		n = "InfiniPath_7220_Emulation";
+		break;
+	case 1:
+		n = "InfiniPath_QLE7240";
+		break;
+	case 2:
+		n = "InfiniPath_QLE7280";
+		break;
+	case 3:
+		n = "InfiniPath_QLE7242";
+		break;
+	case 4:
+		n = "InfiniPath_QEM7240";
+		break;
+	case 5:
+		n = "InfiniPath_QMI7240";
+		break;
+	case 6:
+		n = "InfiniPath_QMI7264";
+		break;
+	case 7:
+		n = "InfiniPath_QMH7240";
+		break;
+	case 8:
+		n = "InfiniPath_QME7240";
+		break;
+	case 9:
+		n = "InfiniPath_QLE7250";
+		break;
+	case 10:
+		n = "InfiniPath_QLE7290";
+		break;
+	case 11:
+		n = "InfiniPath_QEM7250";
+		break;
+	case 12:
+		n = "InfiniPath_QLE-Bringup";
+		break;
+	default:
+		ipath_dev_err(dd,
+			      "Don't yet know about board with ID %u\n",
+			      boardrev);
+		snprintf(name, namelen, "Unknown_InfiniPath_PCIe_%u",
+			 boardrev);
+		break;
+	}
+	if (n)
+		snprintf(name, namelen, "%s", n);
+
+	if (dd->ipath_majrev != 5 || !dd->ipath_minrev ||
+		dd->ipath_minrev > 2) {
+		ipath_dev_err(dd, "Unsupported InfiniPath hardware "
+			      "revision %u.%u!\n",
+			      dd->ipath_majrev, dd->ipath_minrev);
+		ret = 1;
+	} else if (dd->ipath_minrev == 1) {
+		/* Rev1 chips are prototypes; complain, but allow use */
+		ipath_dev_err(dd, "Unsupported hardware "
+			      "revision %u.%u, Contact support@qlogic.com\n",
+			      dd->ipath_majrev, dd->ipath_minrev);
+		ret = 0;
+	} else
+		ret = 0;
+
+	/*
+	 * Set here not in ipath_init_*_funcs because we have to do
+	 * it after we can read chip registers.
+	 */
+	dd->ipath_ureg_align = 0x10000;  /* 64KB alignment */
+
+	return ret;
+}
+
+/**
+ * ipath_7220_init_hwerrors - enable hardware errors
+ * @dd: the infinipath device
+ *
+ * Now that we have finished initializing everything that might reasonably
+ * cause a hardware error, and cleared those error bits as they occurred,
+ * we can enable hardware errors in the mask (potentially enabling
+ * freeze mode), and enable hardware errors as errors (along with
+ * everything else) in errormask
+ */
+static void ipath_7220_init_hwerrors(struct ipath_devdata *dd)
+{
+	ipath_err_t val;
+	u64 extsval;
+
+	extsval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus);
+
+	if (!(extsval & (INFINIPATH_EXTS_MEMBIST_ENDTEST |
+			INFINIPATH_EXTS_MEMBIST_DISABLED)))
+		ipath_dev_err(dd, "MemBIST did not complete!\n");
+	if (extsval & INFINIPATH_EXTS_MEMBIST_DISABLED)
+		dev_info(&dd->pcidev->dev, "MemBIST is disabled.\n");
+
+	val = ~0ULL;	/* barring bugs, all hwerrors become interrupts */
+
+	if (!dd->ipath_boardrev)	/* no PLL for Emulator */
+		val &= ~INFINIPATH_HWE_SERDESPLLFAILED;
+
+	if (dd->ipath_minrev == 1)
+		val &= ~(1ULL << 42); /* TXE LaunchFIFO Parity rev1 issue */
+
+	val &= ~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR;
+	dd->ipath_hwerrmask = val;
+
+	/*
+	 * The special trigger "error" is for debugging purposes.  It
+	 * works around a processor/chipset problem.  The error
+	 * interrupt allows us to count occurrences, but we don't
+	 * want to pay the overhead for normal use.  Emulation only.
+	 */
+	if (!dd->ipath_boardrev)
+		dd->ipath_maskederrs = INFINIPATH_E_SENDSPECIALTRIGGER;
+}
+
+/*
+ * All detailed interaction with the SerDes has been moved to ipath_sd7220.c
+ *
+ * The portion of IBA7220-specific bringup_serdes() that actually deals with
+ * registers and memory within the SerDes itself is ipath_sd7220_init().
+ */
+
+/**
+ * ipath_7220_bringup_serdes - bring up the serdes
+ * @dd: the infinipath device
+ */
+static int ipath_7220_bringup_serdes(struct ipath_devdata *dd)
+{
+	int ret = 0;
+	u64 val, prev_val, guid;
+	int was_reset;		/* Note whether uC was reset */
+
+	ipath_dbg("Trying to bringup serdes\n");
+
+	if (ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus) &
+	    INFINIPATH_HWE_SERDESPLLFAILED) {
+		ipath_dbg("At start, serdes PLL failed bit set "
+			  "in hwerrstatus, clearing and continuing\n");
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
+				 INFINIPATH_HWE_SERDESPLLFAILED);
+	}
+
+	if (!dd->ipath_ibcddrctrl) {
+		/* not on re-init after reset */
+		dd->ipath_ibcddrctrl =
+			ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcddrctrl);
+
+		if (dd->ipath_link_speed_enabled ==
+			(IPATH_IB_SDR | IPATH_IB_DDR))
+			dd->ipath_ibcddrctrl |=
+				IBA7220_IBC_SPEED_AUTONEG_MASK |
+				IBA7220_IBC_IBTA_1_2_MASK;
+		else
+			dd->ipath_ibcddrctrl |=
+				dd->ipath_link_speed_enabled == IPATH_IB_DDR
+				?  IBA7220_IBC_SPEED_DDR :
+				IBA7220_IBC_SPEED_SDR;
+		if ((dd->ipath_link_width_enabled & (IB_WIDTH_1X |
+			IB_WIDTH_4X)) == (IB_WIDTH_1X | IB_WIDTH_4X))
+			dd->ipath_ibcddrctrl |= IBA7220_IBC_WIDTH_AUTONEG;
+		else
+			dd->ipath_ibcddrctrl |=
+				dd->ipath_link_width_enabled == IB_WIDTH_4X
+				? IBA7220_IBC_WIDTH_4X_ONLY :
+				IBA7220_IBC_WIDTH_1X_ONLY;
+
+		/* always enable these on driver reload, not sticky */
+		dd->ipath_ibcddrctrl |=
+			IBA7220_IBC_RXPOL_MASK << IBA7220_IBC_RXPOL_SHIFT;
+		dd->ipath_ibcddrctrl |=
+			IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT;
+		/*
+		 * automatic lane reversal detection for receive
+		 * doesn't work correctly in rev 1, so disable it
+		 * on that rev, otherwise enable (disabling not
+		 * sticky across reload for >rev1)
+		 */
+		if (dd->ipath_minrev == 1)
+			dd->ipath_ibcddrctrl &=
+			~IBA7220_IBC_LANE_REV_SUPPORTED;
+		else
+			dd->ipath_ibcddrctrl |=
+				IBA7220_IBC_LANE_REV_SUPPORTED;
+	}
+
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcddrctrl,
+			dd->ipath_ibcddrctrl);
+
+	ipath_write_kreg(dd, IPATH_KREG_OFFSET(IBNCModeCtrl), 0ULL);
+
+	/* IBA7220 has SERDES MPU reset in D0 of what _was_ IBPLLCfg */
+	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibserdesctrl);
+	/* remember if uC was in Reset or not, for dactrim */
+	was_reset = (val & 1);
+	ipath_cdbg(VERBOSE, "IBReset %s xgxsconfig %llx\n",
+		   was_reset ? "Asserted" : "Negated", (unsigned long long)
+		   ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
+
+	if (dd->ipath_boardrev) {
+		/*
+		 * Hardware is not emulator, and may have been reset. Init it.
+		 * Below will release reset, but needs to know if chip was
+		 * originally in reset, to only trim DACs on first time
+		 * after chip reset or powercycle (not driver reload)
+		 */
+		ret = ipath_sd7220_init(dd, was_reset);
+	}
+
+	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
+	prev_val = val;
+	val |= INFINIPATH_XGXS_FC_SAFE;
+	if (val != prev_val) {
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
+		ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+	}
+	if (val & INFINIPATH_XGXS_RESET)
+		val &= ~INFINIPATH_XGXS_RESET;
+	if (val != prev_val)
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
+
+	ipath_cdbg(VERBOSE, "done: xgxs=%llx from %llx\n",
+		   (unsigned long long)
+		   ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig),
+		   prev_val);
+
+	guid = be64_to_cpu(dd->ipath_guid);
+
+	if (!guid) {
+		/* have to have something, so use likely unique tsc */
+		guid = get_cycles();
+		ipath_dbg("No GUID for heartbeat, faking %llx\n",
+			(unsigned long long)guid);
+	} else
+		ipath_cdbg(VERBOSE, "Wrote %llX to HRTBT_GUID\n", guid);
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_hrtbt_guid, guid);
+	return ret;
+}
+
+static void ipath_7220_config_jint(struct ipath_devdata *dd,
+				   u16 idle_ticks, u16 max_packets)
+{
+	/*
+	 * We can request a receive interrupt for 1 or more packets
+	 * from current offset.
+	 */
+	if (idle_ticks == 0 || max_packets == 0)
+		/* interrupt after one packet if no mitigation */
+		dd->ipath_rhdrhead_intr_off =
+			1ULL << IBA7220_HDRHEAD_PKTINT_SHIFT;
+	else
+		/* Turn off RcvHdrHead interrupts if using mitigation */
+		dd->ipath_rhdrhead_intr_off = 0ULL;
+
+	/* refresh kernel RcvHdrHead registers... */
+	ipath_write_ureg(dd, ur_rcvhdrhead,
+			 dd->ipath_rhdrhead_intr_off |
+			 dd->ipath_pd[0]->port_head, 0);
+
+	dd->ipath_jint_max_packets = max_packets;
+	dd->ipath_jint_idle_ticks = idle_ticks;
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_jintreload,
+			 ((u64) max_packets << INFINIPATH_JINT_PACKETSHIFT) |
+			 idle_ticks);
+}
+
+/**
+ * ipath_7220_quiet_serdes - set serdes to txidle
+ * @dd: the infinipath device
+ * Called when driver is being unloaded
+ */
+static void ipath_7220_quiet_serdes(struct ipath_devdata *dd)
+{
+	u64 val;
+	dd->ipath_flags &= ~IPATH_IB_AUTONEG_INPROG;
+	wake_up(&dd->ipath_autoneg_wait);
+	cancel_delayed_work(&dd->ipath_autoneg_work);
+	flush_scheduled_work();
+	ipath_shutdown_relock_poll(dd);
+	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
+	val |= INFINIPATH_XGXS_RESET;
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
+}
+
+static int ipath_7220_intconfig(struct ipath_devdata *dd)
+{
+	ipath_7220_config_jint(dd, dd->ipath_jint_idle_ticks,
+			       dd->ipath_jint_max_packets);
+	return 0;
+}
+
+/**
+ * ipath_setup_7220_setextled - set the state of the two external LEDs
+ * @dd: the infinipath device
+ * @lst: the L state
+ * @ltst: the LT state
+ *
+ * These LEDs indicate the physical and logical state of IB link.
+ * For this chip (at least with recommended board pinouts), LED1
+ * is Yellow (logical state) and LED2 is Green (physical state).
+ *
+ * Note:  We try to match the Mellanox HCA LED behavior as best
+ * we can.  Green indicates physical link state is OK (something is
+ * plugged in, and we can train).
+ * Amber indicates the link is logically up (ACTIVE).
+ * Mellanox further blinks the amber LED to indicate data packet
+ * activity, but we have no hardware support for that, so it would
+ * require waking up every 10-20 msecs and checking the counters
+ * on the chip, and then turning the LED off if appropriate.  That's
+ * visible overhead, so not something we will do.
+ *
+ */
+static void ipath_setup_7220_setextled(struct ipath_devdata *dd, u64 lst,
+				       u64 ltst)
+{
+	u64 extctl, ledblink = 0;
+	unsigned long flags = 0;
+
+	/* the diags use the LED to indicate diag info, so we leave
+	 * the external LED alone when the diags are running */
+	if (ipath_diag_inuse)
+		return;
+
+	/* Allow override of LED display for, e.g. Locating system in rack */
+	if (dd->ipath_led_override) {
+		ltst = (dd->ipath_led_override & IPATH_LED_PHYS)
+			? INFINIPATH_IBCS_LT_STATE_LINKUP
+			: INFINIPATH_IBCS_LT_STATE_DISABLED;
+		lst = (dd->ipath_led_override & IPATH_LED_LOG)
+			? INFINIPATH_IBCS_L_STATE_ACTIVE
+			: INFINIPATH_IBCS_L_STATE_DOWN;
+	}
+
+	spin_lock_irqsave(&dd->ipath_gpio_lock, flags);
+	extctl = dd->ipath_extctrl & ~(INFINIPATH_EXTC_LED1PRIPORT_ON |
+				       INFINIPATH_EXTC_LED2PRIPORT_ON);
+	if (ltst == INFINIPATH_IBCS_LT_STATE_LINKUP) {
+		extctl |= INFINIPATH_EXTC_LED1PRIPORT_ON;
+		/*
+		 * Counts are in chip clock (4ns) periods:
+		 * about 66.6 ms on and 187.5 ms off,
+		 * while packets are received.
+		 */
+		ledblink = ((66600*1000UL/4) << IBA7220_LEDBLINK_ON_SHIFT)
+			| ((187500*1000UL/4) << IBA7220_LEDBLINK_OFF_SHIFT);
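+		/*
+		 * The arithmetic above: 66600*1000/4 = 16,650,000 4ns
+		 * clocks = 66.6 ms on; 187500*1000/4 = 46,875,000
+		 * clocks = 187.5 ms off.
+		 */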
+	}
+	if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE)
+		extctl |= INFINIPATH_EXTC_LED2PRIPORT_ON;
+	dd->ipath_extctrl = extctl;
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, extctl);
+	spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags);
+
+	if (ledblink) /* blink the LED on packet receive */
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvpktledcnt,
+			ledblink);
+}
+
+/*
+ * Similar to pci_intx(pdev, 1), except that we also make sure
+ * MSI is off...
+ */
+static void ipath_enable_intx(struct pci_dev *pdev)
+{
+	u16 cw, new;
+	int pos;
+
+	/* first, turn on INTx */
+	pci_read_config_word(pdev, PCI_COMMAND, &cw);
+	new = cw & ~PCI_COMMAND_INTX_DISABLE;
+	if (new != cw)
+		pci_write_config_word(pdev, PCI_COMMAND, new);
+
+	/* then turn off MSI */
+	pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
+	if (pos) {
+		pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &cw);
+		new = cw & ~PCI_MSI_FLAGS_ENABLE;
+		if (new != cw)
+			pci_write_config_word(pdev, pos + PCI_MSI_FLAGS, new);
+	}
+}
+
+static int ipath_msi_enabled(struct pci_dev *pdev)
+{
+	int pos, ret = 0;
+
+	pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
+	if (pos) {
+		u16 cw;
+
+		pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &cw);
+		ret = !!(cw & PCI_MSI_FLAGS_ENABLE);
+	}
+	return ret;
+}
+
+/*
+ * Disable the MSI interrupt if enabled, and clear ipath_msi_lo.
+ * That flag is used primarily for the fallback to INTx, but
+ * is also consulted during reinit after reset.
+ */
+static void ipath_7220_nomsi(struct ipath_devdata *dd)
+{
+	dd->ipath_msi_lo = 0;
+#ifdef CONFIG_PCI_MSI
+	if (ipath_msi_enabled(dd->pcidev)) {
+		/*
+		 * Free, but don't zero; later kernels require that it
+		 * be freed before disable_msi, so the INTx setup has
+		 * to request it again.
+		 */
+		if (dd->ipath_irq)
+			free_irq(dd->ipath_irq, dd);
+		pci_disable_msi(dd->pcidev);
+	}
+#endif
+}
+
+/*
+ * ipath_setup_7220_cleanup - clean up any per-chip stuff
+ * @dd: the infinipath device
+ *
+ * Nothing but MSI interrupt cleanup for now.
+ *
+ * This is called during driver unload.
+ */
+static void ipath_setup_7220_cleanup(struct ipath_devdata *dd)
+{
+	ipath_7220_nomsi(dd);
+}
+
+
+static void ipath_7220_pcie_params(struct ipath_devdata *dd, u32 boardrev)
+{
+	u16 linkstat, minwidth, speed;
+	int pos;
+
+	pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP);
+	if (!pos) {
+		ipath_dev_err(dd, "Can't find PCI Express capability!\n");
+		goto bail;
+	}
+
+	pci_read_config_word(dd->pcidev, pos + PCI_EXP_LNKSTA,
+			     &linkstat);
+	/*
+	 * Speed is in bits 0-3, link width in bits 4-8;
+	 * there are no defines for them in the headers.
+	 */
+	speed = linkstat & 0xf;
+	linkstat >>= 4;
+	linkstat &= 0x1f;
+	dd->ipath_lbus_width = linkstat;
+	switch (boardrev) {
+	case 0:
+	case 2:
+	case 10:
+	case 12:
+		minwidth = 16; /* x16 capable boards */
+		break;
+	default:
+		minwidth = 8; /* x8 capable boards */
+		break;
+	}
+
+	switch (speed) {
+	case 1:
+		dd->ipath_lbus_speed = 2500; /* Gen1, 2.5GHz */
+		break;
+	case 2:
+		dd->ipath_lbus_speed = 5000; /* Gen2, 5GHz */
+		break;
+	default: /* not defined, assume gen1 */
+		dd->ipath_lbus_speed = 2500;
+		break;
+	}
+
+	if (linkstat < minwidth)
+		ipath_dev_err(dd,
+			"PCIe width %u (x%u HCA), performance "
+			"reduced\n", linkstat, minwidth);
+	else
+		ipath_cdbg(VERBOSE, "PCIe speed %u width %u (x%u HCA)\n",
+			dd->ipath_lbus_speed, linkstat, minwidth);
+
+	if (speed != 1)
+		ipath_dev_err(dd,
+			"PCIe linkspeed %u is incorrect; "
+			"should be 1 (2500)!\n", speed);
+
+bail:
+	/* fill in string, even on errors */
+	snprintf(dd->ipath_lbus_info, sizeof(dd->ipath_lbus_info),
+		"PCIe,%uMHz,x%u\n",
+		dd->ipath_lbus_speed,
+		dd->ipath_lbus_width);
+	return;
+}
+
+
+/**
+ * ipath_setup_7220_config - setup PCIe config related stuff
+ * @dd: the infinipath device
+ * @pdev: the PCI device
+ *
+ * The pci_enable_msi() call will fail on systems with MSI quirks,
+ * such as those with an AMD8131, even if the device of interest is not
+ * attached to that device (in the 2.6.13 - 2.6.15 kernels, at least;
+ * fixed late in 2.6.16).
+ * All that can be done is to edit the kernel source to remove the quirk
+ * check until that is fixed.
+ * We do not need to call enable_msi() for our HyperTransport chip,
+ * even though it uses MSI, and we want to avoid the quirk warning,
+ * so we call pci_enable_msi() only for PCIe.  If we do end up needing
+ * pci_enable_msi at some point in the future for HT, we'll move the
+ * call back into the main init_one code.
+ * We save the msi lo and hi values, so we can restore them after
+ * chip reset (the kernel PCI infrastructure doesn't yet handle that
+ * correctly).
+ */
+static int ipath_setup_7220_config(struct ipath_devdata *dd,
+				   struct pci_dev *pdev)
+{
+	int pos, ret = -1;
+	u32 boardrev;
+
+	dd->ipath_msi_lo = 0;	/* used as a flag during reset processing */
+#ifdef CONFIG_PCI_MSI
+	pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
+	if (!strcmp(int_type, "force_msi") || !strcmp(int_type, "auto"))
+		ret = pci_enable_msi(pdev);
+	if (ret) {
+		if (!strcmp(int_type, "force_msi")) {
+			ipath_dev_err(dd, "pci_enable_msi failed: %d, "
+				      "force_msi is on, so not continuing.\n",
+				      ret);
+			return ret;
+		}
+
+		ipath_enable_intx(pdev);
+		if (!strcmp(int_type, "auto"))
+			ipath_dev_err(dd, "pci_enable_msi failed: %d, "
+				      "falling back to INTx\n", ret);
+	} else if (pos) {
+		u16 control;
+		pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_LO,
+				      &dd->ipath_msi_lo);
+		pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_HI,
+				      &dd->ipath_msi_hi);
+		pci_read_config_word(pdev, pos + PCI_MSI_FLAGS,
+				     &control);
+		/* now save the data (vector) info */
+		pci_read_config_word(pdev,
+				     pos + ((control & PCI_MSI_FLAGS_64BIT)
+					    ? PCI_MSI_DATA_64 :
+					    PCI_MSI_DATA_32),
+				     &dd->ipath_msi_data);
+	} else
+		ipath_dev_err(dd, "Can't find MSI capability, "
+			      "can't save MSI settings for reset\n");
+#else
+	ipath_dbg("PCI_MSI not configured, using INTx interrupts\n");
+	ipath_enable_intx(pdev);
+#endif
+
+	dd->ipath_irq = pdev->irq;
+
+	/*
+	 * We save the cachelinesize also, although it doesn't
+	 * really matter.
+	 */
+	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE,
+			     &dd->ipath_pci_cacheline);
+
+	/*
+	 * This function is called early, before ipath_boardrev is set
+	 * and before ipath_read_kreg64() is usable, so use readq()
+	 */
+	boardrev = (readq(&dd->ipath_kregbase[dd->ipath_kregs->kr_revision])
+		 >> INFINIPATH_R_BOARDID_SHIFT) & INFINIPATH_R_BOARDID_MASK;
+
+	ipath_7220_pcie_params(dd, boardrev);
+
+	dd->ipath_flags |= IPATH_NODMA_RTAIL | IPATH_HAS_SEND_DMA |
+		IPATH_HAS_PBC_CNT | IPATH_HAS_THRESH_UPDATE;
+	dd->ipath_pioupd_thresh = 4U; /* set default update threshold */
+	return 0;
+}
+
+static void ipath_init_7220_variables(struct ipath_devdata *dd)
+{
+	/*
+	 * setup the register offsets, since they are different for each
+	 * chip
+	 */
+	dd->ipath_kregs = &ipath_7220_kregs;
+	dd->ipath_cregs = &ipath_7220_cregs;
+
+	/*
+	 * bits for selecting i2c direction and values,
+	 * used for I2C serial flash
+	 */
+	dd->ipath_gpio_sda_num = _IPATH_GPIO_SDA_NUM;
+	dd->ipath_gpio_scl_num = _IPATH_GPIO_SCL_NUM;
+	dd->ipath_gpio_sda = IPATH_GPIO_SDA;
+	dd->ipath_gpio_scl = IPATH_GPIO_SCL;
+
+	/*
+	 * Fill in data for field-values that change in IBA7220.
+	 * We dynamically specify only the mask for LINKTRAININGSTATE
+	 * and only the shift for LINKSTATE, as they are the only ones
+	 * that change.  Also precalculate the 3 link states of interest
+	 * and the combined mask.
+	 */
+	dd->ibcs_ls_shift = IBA7220_IBCS_LINKSTATE_SHIFT;
+	dd->ibcs_lts_mask = IBA7220_IBCS_LINKTRAININGSTATE_MASK;
+	dd->ibcs_mask = (INFINIPATH_IBCS_LINKSTATE_MASK <<
+		dd->ibcs_ls_shift) | dd->ibcs_lts_mask;
+	dd->ib_init = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
+		INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
+		(INFINIPATH_IBCS_L_STATE_INIT << dd->ibcs_ls_shift);
+	dd->ib_arm = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
+		INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
+		(INFINIPATH_IBCS_L_STATE_ARM << dd->ibcs_ls_shift);
+	dd->ib_active = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
+		INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
+		(INFINIPATH_IBCS_L_STATE_ACTIVE << dd->ibcs_ls_shift);
+
+	/*
+	 * Fill in data for ibcc field-values that change in IBA7220.
+	 * We dynamically specify only the mask for LINKINITCMD
+	 * and only the shift for LINKCMD and MAXPKTLEN, as they are
+	 * the only ones that change.
+	 */
+	dd->ibcc_lic_mask = IBA7220_IBCC_LINKINITCMD_MASK;
+	dd->ibcc_lc_shift = IBA7220_IBCC_LINKCMD_SHIFT;
+	dd->ibcc_mpl_shift = IBA7220_IBCC_MAXPKTLEN_SHIFT;
+
+	/* Fill in shifts for RcvCtrl. */
+	dd->ipath_r_portenable_shift = INFINIPATH_R_PORTENABLE_SHIFT;
+	dd->ipath_r_intravail_shift = IBA7220_R_INTRAVAIL_SHIFT;
+	dd->ipath_r_tailupd_shift = IBA7220_R_TAILUPD_SHIFT;
+	dd->ipath_r_portcfg_shift = IBA7220_R_PORTCFG_SHIFT;
+
+	/* variables for sanity checking interrupt and errors */
+	dd->ipath_hwe_bitsextant =
+		(INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
+		 INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) |
+		(INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
+		 INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) |
+		(INFINIPATH_HWE_PCIEMEMPARITYERR_MASK <<
+		 INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) |
+		INFINIPATH_HWE_PCIE1PLLFAILED |
+		INFINIPATH_HWE_PCIE0PLLFAILED |
+		INFINIPATH_HWE_PCIEPOISONEDTLP |
+		INFINIPATH_HWE_PCIECPLTIMEOUT |
+		INFINIPATH_HWE_PCIEBUSPARITYXTLH |
+		INFINIPATH_HWE_PCIEBUSPARITYXADM |
+		INFINIPATH_HWE_PCIEBUSPARITYRADM |
+		INFINIPATH_HWE_MEMBISTFAILED |
+		INFINIPATH_HWE_COREPLL_FBSLIP |
+		INFINIPATH_HWE_COREPLL_RFSLIP |
+		INFINIPATH_HWE_SERDESPLLFAILED |
+		INFINIPATH_HWE_IBCBUSTOSPCPARITYERR |
+		INFINIPATH_HWE_IBCBUSFRSPCPARITYERR |
+		INFINIPATH_HWE_PCIECPLDATAQUEUEERR |
+		INFINIPATH_HWE_PCIECPLHDRQUEUEERR |
+		INFINIPATH_HWE_SDMAMEMREADERR |
+		INFINIPATH_HWE_CLK_UC_PLLNOTLOCKED |
+		INFINIPATH_HWE_PCIESERDESQ0PCLKNOTDETECT |
+		INFINIPATH_HWE_PCIESERDESQ1PCLKNOTDETECT |
+		INFINIPATH_HWE_PCIESERDESQ2PCLKNOTDETECT |
+		INFINIPATH_HWE_PCIESERDESQ3PCLKNOTDETECT |
+		INFINIPATH_HWE_DDSRXEQMEMORYPARITYERR |
+		INFINIPATH_HWE_IB_UC_MEMORYPARITYERR |
+		INFINIPATH_HWE_PCIE_UC_OCT0MEMORYPARITYERR |
+		INFINIPATH_HWE_PCIE_UC_OCT1MEMORYPARITYERR;
+	dd->ipath_i_bitsextant =
+		INFINIPATH_I_SDMAINT | INFINIPATH_I_SDMADISABLED |
+		(INFINIPATH_I_RCVURG_MASK << INFINIPATH_I_RCVURG_SHIFT) |
+		(INFINIPATH_I_RCVAVAIL_MASK <<
+		 INFINIPATH_I_RCVAVAIL_SHIFT) |
+		INFINIPATH_I_ERROR | INFINIPATH_I_SPIOSENT |
+		INFINIPATH_I_SPIOBUFAVAIL | INFINIPATH_I_GPIO |
+		INFINIPATH_I_JINT | INFINIPATH_I_SERDESTRIMDONE;
+	dd->ipath_e_bitsextant =
+		INFINIPATH_E_RFORMATERR | INFINIPATH_E_RVCRC |
+		INFINIPATH_E_RICRC | INFINIPATH_E_RMINPKTLEN |
+		INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RLONGPKTLEN |
+		INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RUNEXPCHAR |
+		INFINIPATH_E_RUNSUPVL | INFINIPATH_E_REBP |
+		INFINIPATH_E_RIBFLOW | INFINIPATH_E_RBADVERSION |
+		INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
+		INFINIPATH_E_RBADTID | INFINIPATH_E_RHDRLEN |
+		INFINIPATH_E_RHDR | INFINIPATH_E_RIBLOSTLINK |
+		INFINIPATH_E_SENDSPECIALTRIGGER |
+		INFINIPATH_E_SDMADISABLED | INFINIPATH_E_SMINPKTLEN |
+		INFINIPATH_E_SMAXPKTLEN | INFINIPATH_E_SUNDERRUN |
+		INFINIPATH_E_SPKTLEN | INFINIPATH_E_SDROPPEDSMPPKT |
+		INFINIPATH_E_SDROPPEDDATAPKT |
+		INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM |
+		INFINIPATH_E_SUNSUPVL | INFINIPATH_E_SENDBUFMISUSE |
+		INFINIPATH_E_SDMAGENMISMATCH | INFINIPATH_E_SDMAOUTOFBOUND |
+		INFINIPATH_E_SDMATAILOUTOFBOUND | INFINIPATH_E_SDMABASE |
+		INFINIPATH_E_SDMA1STDESC | INFINIPATH_E_SDMARPYTAG |
+		INFINIPATH_E_SDMADWEN | INFINIPATH_E_SDMAMISSINGDW |
+		INFINIPATH_E_SDMAUNEXPDATA |
+		INFINIPATH_E_IBSTATUSCHANGED | INFINIPATH_E_INVALIDADDR |
+		INFINIPATH_E_RESET | INFINIPATH_E_HARDWARE |
+		INFINIPATH_E_SDMADESCADDRMISALIGN |
+		INFINIPATH_E_INVALIDEEPCMD;
+
+	dd->ipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK;
+	dd->ipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK;
+	dd->ipath_i_rcvavail_shift = INFINIPATH_I_RCVAVAIL_SHIFT;
+	dd->ipath_i_rcvurg_shift = INFINIPATH_I_RCVURG_SHIFT;
+	dd->ipath_flags |= IPATH_INTREG_64 | IPATH_HAS_MULT_IB_SPEED
+		| IPATH_HAS_LINK_LATENCY;
+
+	/*
+	 * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.
+	 * 2 is Some Misc, 3 is reserved for future.
+	 */
+	dd->ipath_eep_st_masks[0].hwerrs_to_log =
+		INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
+		INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT;
+
+	dd->ipath_eep_st_masks[1].hwerrs_to_log =
+		INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
+		INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT;
+
+	dd->ipath_eep_st_masks[2].errs_to_log = INFINIPATH_E_RESET;
+
+	ipath_linkrecovery = 0;
+
+	init_waitqueue_head(&dd->ipath_autoneg_wait);
+	INIT_DELAYED_WORK(&dd->ipath_autoneg_work,  autoneg_work);
+
+	dd->ipath_link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
+	dd->ipath_link_speed_supported = IPATH_IB_SDR | IPATH_IB_DDR;
+
+	dd->ipath_link_width_enabled = dd->ipath_link_width_supported;
+	dd->ipath_link_speed_enabled = dd->ipath_link_speed_supported;
+	/*
+	 * Set the initial values to a reasonable default; they will be
+	 * set for real when the link is up.
+	 */
+	dd->ipath_link_width_active = IB_WIDTH_4X;
+	dd->ipath_link_speed_active = IPATH_IB_SDR;
+	dd->delay_mult = rate_to_delay[0][1];
+}
+
+
+/*
+ * Set up the MSI stuff again after a reset.  I'd like to just call
+ * pci_enable_msi() and request_irq() again, but when I do that,
+ * the MSI enable bit doesn't get set in the command word, and
+ * we switch to a different interrupt vector, which is confusing,
+ * so I instead just do it all inline.  Perhaps we can somehow tie
+ * this into the PCIe hotplug support at some point.
+ * Note: because I'm doing it all here, I don't call pci_disable_msi()
+ * or free_irq() at the start of ipath_setup_7220_reset().
+ */
+static int ipath_reinit_msi(struct ipath_devdata *dd)
+{
+	int ret = 0;
+#ifdef CONFIG_PCI_MSI
+	int pos;
+	u16 control;
+	if (!dd->ipath_msi_lo) /* Using INTx, or init problem */
+		goto bail;
+
+	pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI);
+	if (!pos) {
+		ipath_dev_err(dd, "Can't find MSI capability, "
+			      "can't restore MSI settings\n");
+		goto bail;
+	}
+	ipath_cdbg(VERBOSE, "Writing msi_lo 0x%x to config offset 0x%x\n",
+		   dd->ipath_msi_lo, pos + PCI_MSI_ADDRESS_LO);
+	pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
+			       dd->ipath_msi_lo);
+	ipath_cdbg(VERBOSE, "Writing msi_hi 0x%x to config offset 0x%x\n",
+		   dd->ipath_msi_hi, pos + PCI_MSI_ADDRESS_HI);
+	pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
+			       dd->ipath_msi_hi);
+	pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, &control);
+	if (!(control & PCI_MSI_FLAGS_ENABLE)) {
+		ipath_cdbg(VERBOSE, "MSI control at off %x was %x, "
+			   "setting MSI enable (%x)\n", pos + PCI_MSI_FLAGS,
+			   control, control | PCI_MSI_FLAGS_ENABLE);
+		control |= PCI_MSI_FLAGS_ENABLE;
+		pci_write_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
+				      control);
+	}
+	/* now rewrite the data (vector) info */
+	pci_write_config_word(dd->pcidev, pos +
+			      ((control & PCI_MSI_FLAGS_64BIT)
+			       ? PCI_MSI_DATA_64 : PCI_MSI_DATA_32),
+			      dd->ipath_msi_data);
+	ret = 1;
+bail:
+#endif
+	if (!ret) {
+		ipath_dbg("Using INTx, MSI disabled or not configured\n");
+		ipath_enable_intx(dd->pcidev);
+		ret = 1;
+	}
+	/*
+	 * We restore the cachelinesize also, although it doesn't really
+	 * matter.
+	 */
+	pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE,
+			      dd->ipath_pci_cacheline);
+	/* and now set the pci master bit again */
+	pci_set_master(dd->pcidev);
+
+	return ret;
+}
+
+/*
+ * This routine sleeps, so it can only be called from user context, not
+ * from interrupt context.  If we need interrupt context, we can split
+ * it into two routines.
+ */
+static int ipath_setup_7220_reset(struct ipath_devdata *dd)
+{
+	u64 val;
+	int i;
+	int ret;
+	u16 cmdval;
+
+	pci_read_config_word(dd->pcidev, PCI_COMMAND, &cmdval);
+
+	/* Use dev_err so it shows up in logs, etc. */
+	ipath_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->ipath_unit);
+
+	/* keep chip from being accessed in a few places */
+	dd->ipath_flags &= ~(IPATH_INITTED | IPATH_PRESENT);
+	val = dd->ipath_control | INFINIPATH_C_RESET;
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_control, val);
+	mb();
+
+	for (i = 1; i <= 5; i++) {
+		int r;
+
+		/*
+		 * Allow MBIST, etc. to complete; longer on each retry.
+		 * We sometimes get machine checks from bus timeout if no
+		 * response, so for now, make it *really* long.
+		 */
+		msleep(1000 + (1 + i) * 2000);
+		r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
+					   dd->ipath_pcibar0);
+		if (r)
+			ipath_dev_err(dd, "rewrite of BAR0 failed: %d\n", r);
+		r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1,
+					   dd->ipath_pcibar1);
+		if (r)
+			ipath_dev_err(dd, "rewrite of BAR1 failed: %d\n", r);
+		/* now re-enable memory access */
+		pci_write_config_word(dd->pcidev, PCI_COMMAND, cmdval);
+		r = pci_enable_device(dd->pcidev);
+		if (r)
+			ipath_dev_err(dd, "pci_enable_device failed after "
+				      "reset: %d\n", r);
+		/*
+		 * Whether it fully enabled or not, mark it as present
+		 * again (but not INITTED).
+		 */
+		dd->ipath_flags |= IPATH_PRESENT;
+		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision);
+		if (val == dd->ipath_revision) {
+			ipath_cdbg(VERBOSE, "Got matching revision "
+				   "register %llx on try %d\n",
+				   (unsigned long long) val, i);
+			ret = ipath_reinit_msi(dd);
+			goto bail;
+		}
+		/* Probably getting -1 back */
+		ipath_dbg("Didn't get expected revision register, "
+			  "got %llx, try %d\n", (unsigned long long) val,
+			  i + 1);
+	}
+	ret = 0; /* failed */
+
+bail:
+	if (ret)
+		ipath_7220_pcie_params(dd, dd->ipath_boardrev);
+
+	return ret;
+}
+
+/**
+ * ipath_7220_put_tid - write a TID to the chip
+ * @dd: the infinipath device
+ * @tidptr: pointer to the expected TID (in chip) to update
+ * @type: RCVHQ_RCV_TYPE_EAGER or RCVHQ_RCV_TYPE_EXPECTED
+ * @pa: physical address of the in-memory buffer; ipath_tidinvalid if freeing
+ *
+ * This exists as a separate routine to allow for selection of the
+ * appropriate "flavor". The static calls in cleanup just use the
+ * revision-agnostic form, as they are not performance critical.
+ */
+static void ipath_7220_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr,
+			     u32 type, unsigned long pa)
+{
+	if (pa != dd->ipath_tidinvalid) {
+		u64 chippa = pa >> IBA7220_TID_PA_SHIFT;
+
+		/* paranoia checks */
+		if (pa != (chippa << IBA7220_TID_PA_SHIFT)) {
+			dev_info(&dd->pcidev->dev, "BUG: physaddr %lx "
+				 "not 2KB aligned!\n", pa);
+			return;
+		}
+		if (pa >= (1UL << IBA7220_TID_SZ_SHIFT)) {
+			ipath_dev_err(dd,
+				      "BUG: Physical page address 0x%lx "
+				      "larger than supported\n", pa);
+			return;
+		}
+
+		if (type == RCVHQ_RCV_TYPE_EAGER)
+			chippa |= dd->ipath_tidtemplate;
+		else /* for now, always full 4KB page */
+			chippa |= IBA7220_TID_SZ_4K;
+		writeq(chippa, tidptr);
+	} else
+		writeq(pa, tidptr);
+	mmiowb();
+}
+
+/**
+ * ipath_7220_clear_tids - clear all TID entries for a port, expected and eager
+ * @dd: the infinipath device
+ * @port: the port
+ *
+ * clear all TID entries for a port, expected and eager.
+ * Used from ipath_close().  On this chip, TIDs are only 32 bits,
+ * not 64, but they are still on 64 bit boundaries, so tidbase
+ * is declared as u64 * for the pointer math, even though we write 32 bits
+ */
+static void ipath_7220_clear_tids(struct ipath_devdata *dd, unsigned port)
+{
+	u64 __iomem *tidbase;
+	unsigned long tidinv;
+	int i;
+
+	if (!dd->ipath_kregbase)
+		return;
+
+	ipath_cdbg(VERBOSE, "Invalidate TIDs for port %u\n", port);
+
+	tidinv = dd->ipath_tidinvalid;
+	tidbase = (u64 __iomem *)
+		((char __iomem *)(dd->ipath_kregbase) +
+		 dd->ipath_rcvtidbase +
+		 port * dd->ipath_rcvtidcnt * sizeof(*tidbase));
+
+	for (i = 0; i < dd->ipath_rcvtidcnt; i++)
+		ipath_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
+				   tidinv);
+
+	tidbase = (u64 __iomem *)
+		((char __iomem *)(dd->ipath_kregbase) +
+		 dd->ipath_rcvegrbase + port_egrtid_idx(dd, port)
+		 * sizeof(*tidbase));
+
+	for (i = port ? dd->ipath_rcvegrcnt : dd->ipath_p0_rcvegrcnt; i; i--)
+		ipath_7220_put_tid(dd, &tidbase[i-1], RCVHQ_RCV_TYPE_EAGER,
+			tidinv);
+}
+
+/**
+ * ipath_7220_tidtemplate - setup constants for TID updates
+ * @dd: the infinipath device
+ *
+ * We set up values that we use a lot, to avoid calculating them each time.
+ */
+static void ipath_7220_tidtemplate(struct ipath_devdata *dd)
+{
+	/* For now, we always allocate 4KB buffers (at init) so we can
+	 * receive max size packets.  We may want a module parameter to
+	 * specify 2KB or 4KB and/or make it per-port instead of per-device
+	 * for those who want to reduce memory footprint.  Note that the
+	 * ipath_rcvhdrentsize size must be large enough to hold the largest
+	 * IB header (currently 96 bytes) that we expect to handle (plus of
+	 * course the 2 dwords of RHF).
+	 */
+	if (dd->ipath_rcvegrbufsize == 2048)
+		dd->ipath_tidtemplate = IBA7220_TID_SZ_2K;
+	else if (dd->ipath_rcvegrbufsize == 4096)
+		dd->ipath_tidtemplate = IBA7220_TID_SZ_4K;
+	else {
+		dev_info(&dd->pcidev->dev, "BUG: unsupported egrbufsize "
+			 "%u, using %u\n", dd->ipath_rcvegrbufsize,
+			 4096);
+		dd->ipath_tidtemplate = IBA7220_TID_SZ_4K;
+	}
+	dd->ipath_tidinvalid = 0;
+}
+
+static int ipath_7220_early_init(struct ipath_devdata *dd)
+{
+	u32 i, s;
+
+	if (strcmp(int_type, "auto") &&
+	    strcmp(int_type, "force_msi") &&
+	    strcmp(int_type, "force_intx")) {
+		ipath_dev_err(dd, "Invalid interrupt_type: '%s', expecting "
+			      "auto, force_msi or force_intx\n", int_type);
+		return -EINVAL;
+	}
+
+	/*
+	 * Control[4] has been added to change the arbitration within
+	 * the SDMA engine between data fetches and descriptor fetches.
+	 * ipath_sdma_fetch_arb==0 gives data fetches priority.
+	 */
+	if (ipath_sdma_fetch_arb && (dd->ipath_minrev > 1))
+		dd->ipath_control |= 1<<4;
+
+	dd->ipath_flags |= IPATH_4BYTE_TID;
+
+	/*
+	 * For openfabrics, we need to be able to handle an IB header of
+	 * 24 dwords.  The HT chip has arbitrarily sized receive buffers, so we
+	 * made them the same size as the PIO buffers.  This chip does not
+	 * handle arbitrary size buffers, so we need the header large enough
+	 * to handle largest IB header, but still have room for a 2KB MTU
+	 * standard IB packet.
+	 */
+	dd->ipath_rcvhdrentsize = 24;
+	dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE;
+	dd->ipath_rhf_offset =
+		dd->ipath_rcvhdrentsize - sizeof(u64) / sizeof(u32);
+
+	dd->ipath_rcvegrbufsize = ipath_mtu4096 ? 4096 : 2048;
+	/*
+	 * the min() check here is currently a nop, but it may not always
+	 * be, depending on just how we do ipath_rcvegrbufsize
+	 */
+	dd->ipath_ibmaxlen = min(ipath_mtu4096 ? dd->ipath_piosize4k :
+				 dd->ipath_piosize2k,
+				 dd->ipath_rcvegrbufsize +
+				 (dd->ipath_rcvhdrentsize << 2));
+	dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen;
+
+	ipath_7220_config_jint(dd, INFINIPATH_JINT_DEFAULT_IDLE_TICKS,
+			       INFINIPATH_JINT_DEFAULT_MAX_PACKETS);
+
+	if (dd->ipath_boardrev) /* no eeprom on emulator */
+		ipath_get_eeprom_info(dd);
+
+	/* start of code to check and print procmon */
+	s = ipath_read_kreg32(dd, IPATH_KREG_OFFSET(ProcMon));
+	s &= ~(1U<<31); /* clear done bit */
+	s |= 1U<<14; /* clear counter (write 1 to clear) */
+	ipath_write_kreg(dd, IPATH_KREG_OFFSET(ProcMon), s);
+	/* make sure clear_counter stays low long enough before starting */
+	ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+	ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+
+	s &= ~(1U<<14); /* allow counter to count (before starting) */
+	ipath_write_kreg(dd, IPATH_KREG_OFFSET(ProcMon), s);
+	ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+	ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+	s = ipath_read_kreg32(dd, IPATH_KREG_OFFSET(ProcMon));
+
+	s |= 1U<<15; /* start the counter */
+	s &= ~(1U<<31); /* clear done bit */
+	s &= ~0x7ffU; /* clear frequency bits */
+	s |= 0xe29; /* set frequency bits, in case cleared */
+	ipath_write_kreg(dd, IPATH_KREG_OFFSET(ProcMon), s);
+
+	s = 0;
+	for (i = 500; i > 0 && !(s&(1ULL<<31)); i--) {
+		ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+		s = ipath_read_kreg32(dd, IPATH_KREG_OFFSET(ProcMon));
+	}
+	if (!(s&(1U<<31)))
+		ipath_dev_err(dd, "ProcMon register not valid: 0x%x\n", s);
+	else
+		ipath_dbg("ProcMon=0x%x, count=0x%x\n", s, (s>>16)&0x1ff);
+
+	return 0;
+}
+
+/**
+ * ipath_init_7220_get_base_info - set chip-specific flags for user code
+ * @pd: the infinipath port
+ * @kbase: ipath_base_info pointer
+ *
+ * We set the PCIE flag because the lower bandwidth on PCIe vs
+ * HyperTransport can affect some user packet algorithms.
+ */
+static int ipath_7220_get_base_info(struct ipath_portdata *pd, void *kbase)
+{
+	struct ipath_base_info *kinfo = kbase;
+
+	kinfo->spi_runtime_flags |=
+		IPATH_RUNTIME_PCIE | IPATH_RUNTIME_NODMA_RTAIL |
+		IPATH_RUNTIME_SDMA;
+
+	return 0;
+}
+
+static void ipath_7220_free_irq(struct ipath_devdata *dd)
+{
+	free_irq(dd->ipath_irq, dd);
+	dd->ipath_irq = 0;
+}
+
+static struct ipath_message_header *
+ipath_7220_get_msgheader(struct ipath_devdata *dd, __le32 *rhf_addr)
+{
+	u32 offset = ipath_hdrget_offset(rhf_addr);
+
+	return (struct ipath_message_header *)
+		(rhf_addr - dd->ipath_rhf_offset + offset);
+}
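+
+/*
+ * rhf_addr points at the RHF within a receive header queue entry;
+ * backing up by ipath_rhf_offset reaches the start of the entry, and
+ * the RHF-supplied offset (in 32-bit words) then locates the header.
+ */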
+
+static void ipath_7220_config_ports(struct ipath_devdata *dd, ushort cfgports)
+{
+	u32 nchipports;
+
+	nchipports = ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
+	if (!cfgports) {
+		int ncpus = num_online_cpus();
+
+		if (ncpus <= 4)
+			dd->ipath_portcnt = 5;
+		else if (ncpus <= 8)
+			dd->ipath_portcnt = 9;
+		if (dd->ipath_portcnt)
+			ipath_dbg("Auto-configured for %u ports, %d cpus "
+				"online\n", dd->ipath_portcnt, ncpus);
+	} else if (cfgports <= nchipports)
+		dd->ipath_portcnt = cfgports;
+	if (!dd->ipath_portcnt) /* none of the above, set to max */
+		dd->ipath_portcnt = nchipports;
+	/*
+	 * chip can be configured for 5, 9, or 17 ports, and choice
+	 * affects number of eager TIDs per port (1K, 2K, 4K).
+	 */
+	if (dd->ipath_portcnt > 9)
+		dd->ipath_rcvctrl |= 2ULL << IBA7220_R_PORTCFG_SHIFT;
+	else if (dd->ipath_portcnt > 5)
+		dd->ipath_rcvctrl |= 1ULL << IBA7220_R_PORTCFG_SHIFT;
+	/* else configure for default 5 receive ports */
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
+			 dd->ipath_rcvctrl);
+	dd->ipath_p0_rcvegrcnt = 2048; /* always */
+	if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
+		dd->ipath_pioreserved = 1; /* reserve a buffer */
+}
+
+
+static int ipath_7220_get_ib_cfg(struct ipath_devdata *dd, int which)
+{
+	int lsb, ret = 0;
+	u64 maskr; /* right-justified mask */
+
+	switch (which) {
+	case IPATH_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
+		lsb = IBA7220_IBC_HRTBT_SHIFT;
+		maskr = IBA7220_IBC_HRTBT_MASK;
+		break;
+
+	case IPATH_IB_CFG_LWID_ENB: /* Get allowed Link-width */
+		ret = dd->ipath_link_width_enabled;
+		goto done;
+
+	case IPATH_IB_CFG_LWID: /* Get currently active Link-width */
+		ret = dd->ipath_link_width_active;
+		goto done;
+
+	case IPATH_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
+		ret = dd->ipath_link_speed_enabled;
+		goto done;
+
+	case IPATH_IB_CFG_SPD: /* Get current Link spd */
+		ret = dd->ipath_link_speed_active;
+		goto done;
+
+	case IPATH_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
+		lsb = IBA7220_IBC_RXPOL_SHIFT;
+		maskr = IBA7220_IBC_RXPOL_MASK;
+		break;
+
+	case IPATH_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
+		lsb = IBA7220_IBC_LREV_SHIFT;
+		maskr = IBA7220_IBC_LREV_MASK;
+		break;
+
+	case IPATH_IB_CFG_LINKLATENCY:
+		ret = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcddrstatus)
+			& IBA7220_DDRSTAT_LINKLAT_MASK;
+		goto done;
+
+	default:
+		ret = -ENOTSUPP;
+		goto done;
+	}
+	ret = (int)((dd->ipath_ibcddrctrl >> lsb) & maskr);
+done:
+	return ret;
+}
+
+static int ipath_7220_set_ib_cfg(struct ipath_devdata *dd, int which, u32 val)
+{
+	int lsb, ret = 0, setforce = 0;
+	u64 maskr; /* right-justified mask */
+
+	switch (which) {
+	case IPATH_IB_CFG_LIDLMC:
+		/*
+		 * Set LID and LMC. Combined to avoid possible hazard;
+		 * caller puts LMC in 16 MSbits, DLID in 16 LSbits of val.
+		 */
+		lsb = IBA7220_IBC_DLIDLMC_SHIFT;
+		maskr = IBA7220_IBC_DLIDLMC_MASK;
+		break;
+
+	case IPATH_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
+		if (val & IPATH_IB_HRTBT_ON &&
+			(dd->ipath_flags & IPATH_NO_HRTBT))
+			goto bail;
+		lsb = IBA7220_IBC_HRTBT_SHIFT;
+		maskr = IBA7220_IBC_HRTBT_MASK;
+		break;
+
+	case IPATH_IB_CFG_LWID_ENB: /* set allowed Link-width */
+		/*
+		 * As with speed, only write the actual register if
+		 * the link is currently down; otherwise it takes effect
+		 * on next link change.
+		 */
+		dd->ipath_link_width_enabled = val;
+		if ((dd->ipath_flags & (IPATH_LINKDOWN|IPATH_LINKINIT)) !=
+			IPATH_LINKDOWN)
+			goto bail;
+		/*
+		 * We set the IPATH_IB_FORCE_NOTIFY bit so updown
+		 * will get called, because we want to update
+		 * link_width_active, and the change may not take
+		 * effect for some time (if we are in POLL), so this
+		 * flag will force the updown routine to be called
+		 * on the next ibstatuschange down interrupt, even
+		 * if it's not a down->up transition.
+		 */
+		val--; /* convert from IB to chip */
+		maskr = IBA7220_IBC_WIDTH_MASK;
+		lsb = IBA7220_IBC_WIDTH_SHIFT;
+		setforce = 1;
+		dd->ipath_flags |= IPATH_IB_FORCE_NOTIFY;
+		break;
+
+	case IPATH_IB_CFG_SPD_ENB: /* set allowed Link speeds */
+		/*
+		 * If we turn off IB1.2, need to preset SerDes defaults,
+		 * but not right now. Set a flag for the next time
+		 * we command the link down.  As with width, only write the
+		 * actual register if the link is currently down; otherwise it
+		 * takes effect on the next link change.  Since the setting is
+		 * explicitly requested (via MAD or sysfs), clear autoneg
+		 * failure status if speed autoneg is enabled.
+		 */
+		dd->ipath_link_speed_enabled = val;
+		if (dd->ipath_ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK &&
+		    !(val & (val - 1)))
+			dd->ipath_presets_needed = 1;
+		if ((dd->ipath_flags & (IPATH_LINKDOWN|IPATH_LINKINIT)) !=
+			IPATH_LINKDOWN)
+			goto bail;
+		/*
+		 * We set the IPATH_IB_FORCE_NOTIFY bit so updown
+		 * will get called, because we want to update
+		 * link_speed_active, and the change may not take
+		 * effect for some time (if we are in POLL), so this
+		 * flag will force the updown routine to be called
+		 * on the next ibstatuschange down interrupt, even
+		 * if it's not a down->up transition.  When setting
+		 * speed autoneg, clear AUTONEG_FAILED.
+		 */
+		if (val == (IPATH_IB_SDR | IPATH_IB_DDR)) {
+			val = IBA7220_IBC_SPEED_AUTONEG_MASK |
+				IBA7220_IBC_IBTA_1_2_MASK;
+			dd->ipath_flags &= ~IPATH_IB_AUTONEG_FAILED;
+		} else
+			val = val == IPATH_IB_DDR ?  IBA7220_IBC_SPEED_DDR
+				: IBA7220_IBC_SPEED_SDR;
+		maskr = IBA7220_IBC_SPEED_AUTONEG_MASK |
+			IBA7220_IBC_IBTA_1_2_MASK;
+		lsb = 0; /* speed bits are low bits */
+		setforce = 1;
+		break;
+
+	case IPATH_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
+		lsb = IBA7220_IBC_RXPOL_SHIFT;
+		maskr = IBA7220_IBC_RXPOL_MASK;
+		break;
+
+	case IPATH_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
+		lsb = IBA7220_IBC_LREV_SHIFT;
+		maskr = IBA7220_IBC_LREV_MASK;
+		break;
+
+	default:
+		ret = -ENOTSUPP;
+		goto bail;
+	}
+	dd->ipath_ibcddrctrl &= ~(maskr << lsb);
+	dd->ipath_ibcddrctrl |= (((u64) val & maskr) << lsb);
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcddrctrl,
+			 dd->ipath_ibcddrctrl);
+	if (setforce)
+		dd->ipath_flags |= IPATH_IB_FORCE_NOTIFY;
+bail:
+	return ret;
+}
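+
+/*
+ * Editorial note (not part of the original patch): !(val & (val - 1))
+ * is the usual at-most-one-bit-set test.  Assuming the conventional
+ * IPATH_IB_SDR = 1 and IPATH_IB_DDR = 2 bit values:
+ *
+ *	val = 1 (SDR):   1 & 0 == 0  -> single speed
+ *	val = 2 (DDR):   2 & 1 == 0  -> single speed
+ *	val = 3 (both):  3 & 2 != 0  -> speed autoneg
+ *
+ * So ipath_presets_needed is flagged only when IB1.2 is currently on
+ * and a single fixed speed is requested, i.e. when we are about to
+ * turn IB1.2 off, matching the comment in the SPD_ENB case above.
+ */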
+
+static void ipath_7220_read_counters(struct ipath_devdata *dd,
+				     struct infinipath_counters *cntrs)
+{
+	u64 *counters = (u64 *) cntrs;
+	int i;
+
+	for (i = 0; i < sizeof(*cntrs) / sizeof(u64); i++)
+		counters[i] = ipath_snap_cntr(dd, i);
+}
+
+/* if we are using MSI, try to fall back to IntX */
+static int ipath_7220_intr_fallback(struct ipath_devdata *dd)
+{
+	if (dd->ipath_msi_lo) {
+		dev_info(&dd->pcidev->dev, "MSI interrupt not detected,"
+			" trying IntX interrupts\n");
+		ipath_7220_nomsi(dd);
+		ipath_enable_intx(dd->pcidev);
+		/*
+		 * Some newer kernels require free_irq before disable_msi,
+		 * and the irq can be changed during disable and IntX enable,
+		 * so we therefore need to use the pcidev->irq value,
+		 * not our saved MSI value.
+		 */
+		dd->ipath_irq = dd->pcidev->irq;
+		if (request_irq(dd->ipath_irq, ipath_intr, IRQF_SHARED,
+			IPATH_DRV_NAME, dd))
+			ipath_dev_err(dd,
+				"Could not re-request_irq for IntX\n");
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * Reset the XGXS (between serdes and IBC).  Slightly less intrusive
+ * than resetting the IBC or external link state, and useful in some
+ * cases to cause some retraining.  To do this right, we reset IBC
+ * as well.
+ */
+static void ipath_7220_xgxs_reset(struct ipath_devdata *dd)
+{
+	u64 val, prev_val;
+
+	prev_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
+	val = prev_val | INFINIPATH_XGXS_RESET;
+	prev_val &= ~INFINIPATH_XGXS_RESET; /* be sure */
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
+			 dd->ipath_control & ~INFINIPATH_C_LINKENABLE);
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
+	ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, prev_val);
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
+			 dd->ipath_control);
+}
+
+
+/* Still needs cleanup, too much hardwired stuff */
+static void autoneg_send(struct ipath_devdata *dd,
+	u32 *hdr, u32 dcnt, u32 *data)
+{
+	int i;
+	u64 cnt;
+	u32 __iomem *piobuf;
+	u32 pnum;
+
+	i = 0;
+	cnt = 7 + dcnt + 1; /* 7-dword header, dcnt data dwords, 1 dword ICRC */
+	while (!(piobuf = ipath_getpiobuf(dd, cnt, &pnum))) {
+		if (i++ > 15) {
+			ipath_dbg("Couldn't get pio buffer for send\n");
+			return;
+		}
+		udelay(2);
+	}
+	if (dd->ipath_flags & IPATH_HAS_PBC_CNT)
+		cnt |= 0x80000000ULL << 32; /* mark as VL15; ULL for 32-bit builds */
+	writeq(cnt, piobuf);
+	ipath_flush_wc();
+	__iowrite32_copy(piobuf + 2, hdr, 7);
+	__iowrite32_copy(piobuf + 9, data, dcnt);
+	ipath_flush_wc();
+}
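+
+/*
+ * Editorial sketch (not part of the original patch) of the PIO buffer
+ * layout written above, in 32-bit words:
+ *
+ *	words 0-1:  PBC (the length in cnt, plus the VL15 mark when
+ *	            IPATH_HAS_PBC_CNT is set)
+ *	words 2-8:  the 7-word packet header (hdr[])
+ *	words 9-:   dcnt words of MAD payload (data[])
+ *
+ * cnt = 7 + dcnt + 1 counts header, data, and ICRC; the ICRC word is
+ * counted but never written here (the hardware is assumed to supply it).
+ */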
+
+/*
+ * _start packet gets sent twice at start, _done gets sent twice at end
+ */
+static void ipath_autoneg_send(struct ipath_devdata *dd, int which)
+{
+	static u32 swapped;
+	u32 dw, i, hcnt, dcnt, *data;
+	static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
+	static u32 madpayload_start[0x40] = {
+		0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
+		0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
+		};
+	static u32 madpayload_done[0x40] = {
+		0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
+		0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x40000001, 0x1388, 0x15e, /* rest 0's */
+		};
+	dcnt = ARRAY_SIZE(madpayload_start);
+	hcnt = ARRAY_SIZE(hdr);
+	if (!swapped) {
+		/* for maintainability, do it at runtime */
+		for (i = 0; i < hcnt; i++) {
+			dw = (__force u32) cpu_to_be32(hdr[i]);
+			hdr[i] = dw;
+		}
+		for (i = 0; i < dcnt; i++) {
+			dw = (__force u32) cpu_to_be32(madpayload_start[i]);
+			madpayload_start[i] = dw;
+			dw = (__force u32) cpu_to_be32(madpayload_done[i]);
+			madpayload_done[i] = dw;
+		}
+		swapped = 1;
+	}
+
+	data = which ? madpayload_done : madpayload_start;
+	ipath_cdbg(PKT, "Sending %s special MADs\n", which ? "done" : "start");
+
+	autoneg_send(dd, hdr, dcnt, data);
+	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+	udelay(2);
+	autoneg_send(dd, hdr, dcnt, data);
+	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+	udelay(2);
+}
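+
+/*
+ * Editorial example (not part of the original patch): the one-time
+ * swap above puts the header and payload words into big-endian wire
+ * order.  On a little-endian host cpu_to_be32() reverses the bytes,
+ * e.g. hdr[0] = 0xf002ffff becomes 0xffff02f0; on a big-endian host
+ * it is a no-op.  Doing it once at runtime keeps the static tables
+ * readable while staying correct on either endianness.
+ */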
+
+
+
+/*
+ * Do the absolute minimum to cause an IB speed change, and make it
+ * ready, but don't actually trigger the change.  The caller will
+ * do that when ready (if the link is in Polling training state, it
+ * will happen immediately, otherwise when the link next goes down).
+ *
+ * This routine should only be used as part of the DDR autonegotiation
+ * code for devices that are not compliant with IB 1.2 (or code that
+ * fixes things up for same).
+ *
+ * When the link has gone down and autoneg is enabled, or when autoneg
+ * has failed and we give up until next time: whenever we set both
+ * speeds, we want IBTA 1.2 enabled as well as "use max enabled speed".
+ */
+static void set_speed_fast(struct ipath_devdata *dd, u32 speed)
+{
+	dd->ipath_ibcddrctrl &= ~(IBA7220_IBC_SPEED_AUTONEG_MASK |
+		IBA7220_IBC_IBTA_1_2_MASK |
+		(IBA7220_IBC_WIDTH_MASK << IBA7220_IBC_WIDTH_SHIFT));
+
+	if (speed == (IPATH_IB_SDR | IPATH_IB_DDR))
+		dd->ipath_ibcddrctrl |= IBA7220_IBC_SPEED_AUTONEG_MASK |
+			IBA7220_IBC_IBTA_1_2_MASK;
+	else
+		dd->ipath_ibcddrctrl |= speed == IPATH_IB_DDR ?
+			IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
+
+	/*
+	 * Convert from IB-style 1 = 1x, 2 = 4x, 3 = auto
+	 * to chip-centric       0 = 1x, 1 = 4x, 2 = auto
+	 */
+	dd->ipath_ibcddrctrl |= (u64)(dd->ipath_link_width_enabled - 1) <<
+		IBA7220_IBC_WIDTH_SHIFT;
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcddrctrl,
+			dd->ipath_ibcddrctrl);
+	ipath_cdbg(VERBOSE, "setup for IB speed (%x) done\n", speed);
+}
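+
+/*
+ * Editorial example (not part of the original patch) of the width
+ * conversion above, using the encodings from the comment:
+ *
+ *	link_width_enabled = 1 (1x)   ->  chip field 0
+ *	link_width_enabled = 2 (4x)   ->  chip field 1
+ *	link_width_enabled = 3 (auto) ->  chip field 2
+ *
+ * i.e. the "val--" in ipath_7220_set_ib_cfg() and the "- 1" here are
+ * the same IB-to-chip conversion.
+ */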
+
+
+/*
+ * This routine is only used when we are not talking to another
+ * IB 1.2-compliant device that we think can do DDR.
+ * (This includes all existing switch chips as of Oct 2007.)
+ * 1.2-compliant devices go directly to DDR prior to reaching INIT.
+ */
+static void try_auto_neg(struct ipath_devdata *dd)
+{
+	/*
+	 * Required for older non-IB1.2 DDR switches.  Newer
+	 * non-IB-compliant switches don't need it, but so far
+	 * aren't bothered by it either.  "Magic constant".
+	 */
+	ipath_write_kreg(dd, IPATH_KREG_OFFSET(IBNCModeCtrl),
+		0x3b9dc07);
+	dd->ipath_flags |= IPATH_IB_AUTONEG_INPROG;
+	ipath_autoneg_send(dd, 0);
+	set_speed_fast(dd, IPATH_IB_DDR);
+	ipath_toggle_rclkrls(dd);
+	/* 2 msec is minimum length of a poll cycle */
+	schedule_delayed_work(&dd->ipath_autoneg_work,
+		msecs_to_jiffies(2));
+}
+
+
+static int ipath_7220_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs)
+{
+	int ret = 0;
+	u32 ltstate = ipath_ib_linkstate(dd, ibcs);
+
+	dd->ipath_link_width_active =
+		((ibcs >> IBA7220_IBCS_LINKWIDTH_SHIFT) & 1) ?
+		    IB_WIDTH_4X : IB_WIDTH_1X;
+	dd->ipath_link_speed_active =
+		((ibcs >> IBA7220_IBCS_LINKSPEED_SHIFT) & 1) ?
+		    IPATH_IB_DDR : IPATH_IB_SDR;
+
+	if (!ibup) {
+		/*
+		 * When the link goes down we don't want aeq running, so it
+		 * won't interfere with IBC training, etc., and we need
+		 * to go back to the static SerDes preset values.
+		 */
+		if (dd->ipath_x1_fix_tries &&
+			 ltstate <= INFINIPATH_IBCS_LT_STATE_SLEEPQUIET &&
+			ltstate != INFINIPATH_IBCS_LT_STATE_LINKUP)
+			dd->ipath_x1_fix_tries = 0;
+		if (!(dd->ipath_flags & (IPATH_IB_AUTONEG_FAILED |
+			IPATH_IB_AUTONEG_INPROG)))
+			set_speed_fast(dd, dd->ipath_link_speed_enabled);
+		if (!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG)) {
+			ipath_cdbg(VERBOSE, "Setting RXEQ defaults\n");
+			ipath_sd7220_presets(dd);
+		}
+		/* this might be better in ipath_sd7220_presets() */
+		ipath_set_relock_poll(dd, ibup);
+	} else {
+		if (ipath_compat_ddr_negotiate &&
+		    !(dd->ipath_flags & (IPATH_IB_AUTONEG_FAILED |
+			IPATH_IB_AUTONEG_INPROG)) &&
+			dd->ipath_link_speed_active == IPATH_IB_SDR &&
+			(dd->ipath_link_speed_enabled &
+			    (IPATH_IB_DDR | IPATH_IB_SDR)) ==
+			    (IPATH_IB_DDR | IPATH_IB_SDR) &&
+			dd->ipath_autoneg_tries < IPATH_AUTONEG_TRIES) {
+			/* we are SDR, and DDR auto-negotiation enabled */
+			++dd->ipath_autoneg_tries;
+			ipath_dbg("DDR negotiation try, %u/%u\n",
+				dd->ipath_autoneg_tries,
+				IPATH_AUTONEG_TRIES);
+			try_auto_neg(dd);
+			ret = 1; /* no other IB status change processing */
+		} else if ((dd->ipath_flags & IPATH_IB_AUTONEG_INPROG)
+			&& dd->ipath_link_speed_active == IPATH_IB_SDR) {
+			ipath_autoneg_send(dd, 1);
+			set_speed_fast(dd, IPATH_IB_DDR);
+			udelay(2);
+			ipath_toggle_rclkrls(dd);
+			ret = 1; /* no other IB status change processing */
+		} else {
+			if ((dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) &&
+				(dd->ipath_link_speed_active & IPATH_IB_DDR)) {
+				ipath_dbg("Got to INIT with DDR autoneg\n");
+				dd->ipath_flags &= ~(IPATH_IB_AUTONEG_INPROG
+					| IPATH_IB_AUTONEG_FAILED);
+				dd->ipath_autoneg_tries = 0;
+				/* re-enable SDR, for next link down */
+				set_speed_fast(dd,
+					dd->ipath_link_speed_enabled);
+				wake_up(&dd->ipath_autoneg_wait);
+			} else if (dd->ipath_flags & IPATH_IB_AUTONEG_FAILED) {
+				/*
+				 * clear autoneg failure flag, and do setup
+				 * so we'll try next time link goes down and
+				 * back to INIT (possibly connected to different
+				 * device).
+				 */
+				ipath_dbg("INIT %sDR after autoneg failure\n",
+					(dd->ipath_link_speed_active &
+					  IPATH_IB_DDR) ? "D" : "S");
+				dd->ipath_flags &= ~IPATH_IB_AUTONEG_FAILED;
+				dd->ipath_ibcddrctrl |=
+					IBA7220_IBC_IBTA_1_2_MASK;
+				ipath_write_kreg(dd,
+					IPATH_KREG_OFFSET(IBNCModeCtrl), 0);
+			}
+		}
+		/*
+		 * If we are in 1X while auto-width is enabled, it
+		 * could be due to an xgxs problem, so if we haven't
+		 * already tried, try twice to get to 4X; if we
+		 * tried and couldn't, report it, since 1X is
+		 * probably not what is desired.
+		 */
+		if ((dd->ipath_link_width_enabled & (IB_WIDTH_1X |
+			IB_WIDTH_4X)) == (IB_WIDTH_1X | IB_WIDTH_4X)
+			&& dd->ipath_link_width_active == IB_WIDTH_1X
+			&& dd->ipath_x1_fix_tries < 3) {
+			if (++dd->ipath_x1_fix_tries == 3)
+				dev_info(&dd->pcidev->dev,
+					"IB link is in 1X mode\n");
+			else {
+				ipath_cdbg(VERBOSE, "IB 1X in "
+					"auto-width, try %u to be "
+					"sure it's really 1X; "
+					"ltstate %u\n",
+					 dd->ipath_x1_fix_tries,
+					 ltstate);
+				dd->ipath_f_xgxs_reset(dd);
+				ret = 1; /* skip other processing */
+			}
+		}
+
+		if (!ret) {
+			dd->delay_mult = rate_to_delay
+			    [(ibcs >> IBA7220_IBCS_LINKSPEED_SHIFT) & 1]
+			    [(ibcs >> IBA7220_IBCS_LINKWIDTH_SHIFT) & 1];
+
+			ipath_set_relock_poll(dd, ibup);
+		}
+	}
+
+	if (!ret)
+		ipath_setup_7220_setextled(dd, ipath_ib_linkstate(dd, ibcs),
+			ltstate);
+	return ret;
+}
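+
+/*
+ * Editorial note (not part of the original patch): ib_updown() above
+ * decodes IBCStatus with single-bit fields:
+ *
+ *	width: (ibcs >> IBA7220_IBCS_LINKWIDTH_SHIFT) & 1   0 = 1X, 1 = 4X
+ *	speed: (ibcs >> IBA7220_IBCS_LINKSPEED_SHIFT) & 1   0 = SDR, 1 = DDR
+ *
+ * and the same two bits index rate_to_delay[speed][width] when
+ * delay_mult is computed near the end of the function.
+ */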
+
+
+/*
+ * Handle the empirically determined mechanism for auto-negotiation
+ * of DDR speed with switches.
+ */
+static void autoneg_work(struct work_struct *work)
+{
+	struct ipath_devdata *dd;
+	u64 startms;
+	u32 lastlts, i;
+
+	dd = container_of(work, struct ipath_devdata,
+		ipath_autoneg_work.work);
+
+	startms = jiffies_to_msecs(jiffies);
+
+	/*
+	 * Busy-wait for this first part; it should take at most a
+	 * few hundred usec, since we scheduled ourselves for 2 msec.
+	 */
+	for (i = 0; i < 25; i++) {
+		lastlts = ipath_ib_linktrstate(dd, dd->ipath_lastibcstat);
+		if (lastlts == INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
+			ipath_set_linkstate(dd, IPATH_IB_LINKDOWN_DISABLE);
+			break;
+		}
+		udelay(100);
+	}
+
+	if (!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG))
+		goto done; /* we got there early or told to stop */
+
+	/* we expect this to time out */
+	if (wait_event_timeout(dd->ipath_autoneg_wait,
+		!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG),
+		msecs_to_jiffies(90)))
+		goto done;
+
+	ipath_toggle_rclkrls(dd);
+
+	/* we expect this to time out */
+	if (wait_event_timeout(dd->ipath_autoneg_wait,
+		!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG),
+		msecs_to_jiffies(1700)))
+		goto done;
+
+	set_speed_fast(dd, IPATH_IB_SDR);
+	ipath_toggle_rclkrls(dd);
+
+	/*
+	 * wait up to 250 msec for link to train and get to INIT at DDR;
+	 * this should terminate early
+	 */
+	wait_event_timeout(dd->ipath_autoneg_wait,
+		!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG),
+		msecs_to_jiffies(250));
+done:
+	if (dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) {
+		ipath_dbg("Did not get to DDR INIT (%x) after %Lu msecs\n",
+			ipath_ib_state(dd, dd->ipath_lastibcstat),
+			jiffies_to_msecs(jiffies)-startms);
+		dd->ipath_flags &= ~IPATH_IB_AUTONEG_INPROG;
+		if (dd->ipath_autoneg_tries == IPATH_AUTONEG_TRIES) {
+			dd->ipath_flags |= IPATH_IB_AUTONEG_FAILED;
+			ipath_dbg("Giving up on DDR until next IB "
+				"link Down\n");
+			dd->ipath_autoneg_tries = 0;
+		}
+		set_speed_fast(dd, dd->ipath_link_speed_enabled);
+	}
+}
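+
+/*
+ * Editorial timeline (not part of the original patch) of the autoneg
+ * handshake above, read off the waits in autoneg_work():
+ *
+ *	t=0          runs ~2 msec after try_auto_neg() scheduled it
+ *	<= 2.5 msec  busy-poll (25 x 100 usec) for Polling.Quiet, then
+ *	             force the link down/disabled
+ *	+90 msec     first wait, expected to time out; toggle rclkrls
+ *	+1700 msec   second wait, expected to time out; drop to SDR and
+ *	             toggle rclkrls again
+ *	+250 msec    final wait for DDR INIT, expected to finish early
+ *
+ * If AUTONEG_INPROG is still set at "done", the attempt failed and is
+ * retried until IPATH_AUTONEG_TRIES is exhausted.
+ */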
+
+
+/**
+ * ipath_init_iba7220_funcs - set up the chip-specific function pointers
+ * @dd: the infinipath device
+ *
+ * This is global, and is called directly at init to set up the
+ * chip-specific function pointers for later use.
+ */
+void ipath_init_iba7220_funcs(struct ipath_devdata *dd)
+{
+	dd->ipath_f_intrsetup = ipath_7220_intconfig;
+	dd->ipath_f_bus = ipath_setup_7220_config;
+	dd->ipath_f_reset = ipath_setup_7220_reset;
+	dd->ipath_f_get_boardname = ipath_7220_boardname;
+	dd->ipath_f_init_hwerrors = ipath_7220_init_hwerrors;
+	dd->ipath_f_early_init = ipath_7220_early_init;
+	dd->ipath_f_handle_hwerrors = ipath_7220_handle_hwerrors;
+	dd->ipath_f_quiet_serdes = ipath_7220_quiet_serdes;
+	dd->ipath_f_bringup_serdes = ipath_7220_bringup_serdes;
+	dd->ipath_f_clear_tids = ipath_7220_clear_tids;
+	dd->ipath_f_put_tid = ipath_7220_put_tid;
+	dd->ipath_f_cleanup = ipath_setup_7220_cleanup;
+	dd->ipath_f_setextled = ipath_setup_7220_setextled;
+	dd->ipath_f_get_base_info = ipath_7220_get_base_info;
+	dd->ipath_f_free_irq = ipath_7220_free_irq;
+	dd->ipath_f_tidtemplate = ipath_7220_tidtemplate;
+	dd->ipath_f_intr_fallback = ipath_7220_intr_fallback;
+	dd->ipath_f_xgxs_reset = ipath_7220_xgxs_reset;
+	dd->ipath_f_get_ib_cfg = ipath_7220_get_ib_cfg;
+	dd->ipath_f_set_ib_cfg = ipath_7220_set_ib_cfg;
+	dd->ipath_f_config_jint = ipath_7220_config_jint;
+	dd->ipath_f_config_ports = ipath_7220_config_ports;
+	dd->ipath_f_read_counters = ipath_7220_read_counters;
+	dd->ipath_f_get_msgheader = ipath_7220_get_msgheader;
+	dd->ipath_f_ib_updown = ipath_7220_ib_updown;
+
+	/* initialize chip-specific variables */
+	ipath_init_7220_variables(dd);
+}
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c
index 4471674..27dd894 100644
--- a/drivers/infiniband/hw/ipath/ipath_init_chip.c
+++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -155,24 +155,13 @@
 			 dd->ipath_control);
 
 	/*
-	 * Note that prior to try 14 or 15 of IB, the credit scaling
-	 * wasn't working, because it was swapped for writes with the
-	 * 1 bit default linkstate field
+	 * set initial max size pkt IBC will send, including ICRC; it's the
+	 * PIO buffer size in dwords, less 1; also see ipath_set_mtu()
 	 */
+	val = (dd->ipath_ibmaxlen >> 2) + 1;
+	ibc = val << dd->ibcc_mpl_shift;
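+	/*
+	 * Editorial note (not part of the original patch): ipath_ibmaxlen
+	 * is in bytes, so ">> 2" converts to dwords and "+ 1" accounts
+	 * for the ICRC dword; ibcc_mpl_shift then positions the value in
+	 * the IBC max-packet-length field.
+	 */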
 
-	/* ignore pbc and align word */
-	val = dd->ipath_piosize2k - 2 * sizeof(u32);
-	/*
-	 * for ICRC, which we only send in diag test pkt mode, and we
-	 * don't need to worry about that for mtu
-	 */
-	val += 1;
-	/*
-	 * Set the IBC maxpktlength to the size of our pio buffers the
-	 * maxpktlength is in words.  This is *not* the IB data MTU.
-	 */
-	ibc = (val / sizeof(u32)) << INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
-	/* in KB */
+	/* flowcontrolwatermark is in units of KBytes */
 	ibc |= 0x5ULL << INFINIPATH_IBCC_FLOWCTRLWATERMARK_SHIFT;
 	/*
 	 * How often flowctrl sent.  More or less in usecs; balance against
@@ -191,10 +180,13 @@
 	/*
 	 * Want to start out with both LINKCMD and LINKINITCMD in NOP
 	 * (0 and 0).  Don't put linkinitcmd in ipath_ibcctrl, want that
-	 * to stay a NOP
+	 * to stay a NOP. Flag that we are disabled, for the (unlikely)
+	 * case that some recovery path is trying to bring the link up
+	 * before we are ready.
 	 */
 	ibc |= INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
 		INFINIPATH_IBCC_LINKINITCMD_SHIFT;
+	dd->ipath_flags |= IPATH_IB_LINK_DISABLED;
 	ipath_cdbg(VERBOSE, "Writing 0x%llx to ibcctrl\n",
 		   (unsigned long long) ibc);
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, ibc);
@@ -227,17 +219,26 @@
 		pd->port_cnt = 1;
 		/* The port 0 pkey table is used by the layer interface. */
 		pd->port_pkeys[0] = IPATH_DEFAULT_P_KEY;
+		pd->port_seq_cnt = 1;
 	}
 	return pd;
 }
 
-static int init_chip_first(struct ipath_devdata *dd,
-			   struct ipath_portdata **pdp)
+static int init_chip_first(struct ipath_devdata *dd)
 {
-	struct ipath_portdata *pd = NULL;
+	struct ipath_portdata *pd;
 	int ret = 0;
 	u64 val;
 
+	spin_lock_init(&dd->ipath_kernel_tid_lock);
+	spin_lock_init(&dd->ipath_user_tid_lock);
+	spin_lock_init(&dd->ipath_sendctrl_lock);
+	spin_lock_init(&dd->ipath_sdma_lock);
+	spin_lock_init(&dd->ipath_gpio_lock);
+	spin_lock_init(&dd->ipath_eep_st_lock);
+	spin_lock_init(&dd->ipath_sdepb_lock);
+	mutex_init(&dd->ipath_eep_lock);
+
 	/*
 	 * skip cfgports stuff because we are not allocating memory,
 	 * and we don't want problems if the portcnt changed due to
@@ -250,12 +251,14 @@
 	else if (ipath_cfgports <= dd->ipath_portcnt) {
 		dd->ipath_cfgports = ipath_cfgports;
 		ipath_dbg("Configured to use %u ports out of %u in chip\n",
-			  dd->ipath_cfgports, dd->ipath_portcnt);
+			  dd->ipath_cfgports, ipath_read_kreg32(dd,
+			  dd->ipath_kregs->kr_portcnt));
 	} else {
 		dd->ipath_cfgports = dd->ipath_portcnt;
 		ipath_dbg("Tried to configured to use %u ports; chip "
 			  "only supports %u\n", ipath_cfgports,
-			  dd->ipath_portcnt);
+			  ipath_read_kreg32(dd,
+				  dd->ipath_kregs->kr_portcnt));
 	}
 	/*
 	 * Allocate full portcnt array, rather than just cfgports, because
@@ -295,12 +298,9 @@
 	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiosize);
 	dd->ipath_piosize2k = val & ~0U;
 	dd->ipath_piosize4k = val >> 32;
-	/*
-	 * Note: the chips support a maximum MTU of 4096, but the driver
-	 * hasn't implemented this feature yet, so set the initial value
-	 * to 2048.
-	 */
-	dd->ipath_ibmtu = 2048;
+	if (dd->ipath_piosize4k == 0 && ipath_mtu4096)
+		ipath_mtu4096 = 0; /* 4KB not supported by this chip */
+	dd->ipath_ibmtu = ipath_mtu4096 ? 4096 : 2048;
 	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiobufcnt);
 	dd->ipath_piobcnt2k = val & ~0U;
 	dd->ipath_piobcnt4k = val >> 32;
@@ -328,43 +328,46 @@
 	else ipath_dbg("%u 2k piobufs @ %p\n",
 		       dd->ipath_piobcnt2k, dd->ipath_pio2kbase);
 
-	spin_lock_init(&dd->ipath_tid_lock);
-	spin_lock_init(&dd->ipath_sendctrl_lock);
-	spin_lock_init(&dd->ipath_gpio_lock);
-	spin_lock_init(&dd->ipath_eep_st_lock);
-	mutex_init(&dd->ipath_eep_lock);
-
 done:
-	*pdp = pd;
 	return ret;
 }
 
 /**
  * init_chip_reset - re-initialize after a reset, or enable
  * @dd: the infinipath device
- * @pdp: output for port data
  *
  * sanity check at least some of the values after reset, and
  * ensure no receive or transmit (explictly, in case reset
  * failed
  */
-static int init_chip_reset(struct ipath_devdata *dd,
-			   struct ipath_portdata **pdp)
+static int init_chip_reset(struct ipath_devdata *dd)
 {
 	u32 rtmp;
+	int i;
+	unsigned long flags;
 
-	*pdp = dd->ipath_pd[0];
-	/* ensure chip does no sends or receives while we re-initialize */
-	dd->ipath_control = dd->ipath_sendctrl = dd->ipath_rcvctrl = 0U;
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, dd->ipath_rcvctrl);
+	/*
+	 * ensure chip does no sends or receives, tail updates, or
+	 * pioavail updates while we re-initialize
+	 */
+	dd->ipath_rcvctrl &= ~(1ULL << dd->ipath_r_tailupd_shift);
+	for (i = 0; i < dd->ipath_portcnt; i++) {
+		clear_bit(dd->ipath_r_portenable_shift + i,
+			  &dd->ipath_rcvctrl);
+		clear_bit(dd->ipath_r_intravail_shift + i,
+			  &dd->ipath_rcvctrl);
+	}
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
+		dd->ipath_rcvctrl);
+
+	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+	dd->ipath_sendctrl = 0U; /* no sdma, etc */
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_control, dd->ipath_control);
+	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
 
-	rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
-	if (dd->ipath_portcnt != rtmp)
-		dev_info(&dd->pcidev->dev, "portcnt was %u before "
-			 "reset, now %u, using original\n",
-			 dd->ipath_portcnt, rtmp);
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0ULL);
+
 	rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidcnt);
 	if (rtmp != dd->ipath_rcvtidcnt)
 		dev_info(&dd->pcidev->dev, "tidcnt was %u before "
@@ -467,10 +470,10 @@
 	dd->ipath_physshadow = addrs;
 }
 
-static void enable_chip(struct ipath_devdata *dd,
-			struct ipath_portdata *pd, int reinit)
+static void enable_chip(struct ipath_devdata *dd, int reinit)
 {
 	u32 val;
+	u64 rcvmask;
 	unsigned long flags;
 	int i;
 
@@ -484,17 +487,28 @@
 	/* Enable PIO send, and update of PIOavail regs to memory. */
 	dd->ipath_sendctrl = INFINIPATH_S_PIOENABLE |
 		INFINIPATH_S_PIOBUFAVAILUPD;
+
+	/*
+	 * Set the PIO avail update threshold to host memory
+	 * on chips that support it.
+	 */
+	if (dd->ipath_pioupd_thresh)
+		dd->ipath_sendctrl |= dd->ipath_pioupd_thresh
+			<< INFINIPATH_S_UPDTHRESH_SHIFT;
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
 	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
 	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
 
 	/*
-	 * enable port 0 receive, and receive interrupt.  other ports
-	 * done as user opens and inits them.
+	 * Enable kernel ports' receive and receive interrupt.
+	 * Other ports done as user opens and inits them.
 	 */
-	dd->ipath_rcvctrl = (1ULL << dd->ipath_r_tailupd_shift) |
-		(1ULL << dd->ipath_r_portenable_shift) |
-		(1ULL << dd->ipath_r_intravail_shift);
+	rcvmask = 1ULL;
+	dd->ipath_rcvctrl |= (rcvmask << dd->ipath_r_portenable_shift) |
+		(rcvmask << dd->ipath_r_intravail_shift);
+	if (!(dd->ipath_flags & IPATH_NODMA_RTAIL))
+		dd->ipath_rcvctrl |= (1ULL << dd->ipath_r_tailupd_shift);
+
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
 			 dd->ipath_rcvctrl);
 
@@ -505,16 +519,16 @@
 	dd->ipath_flags |= IPATH_INITTED;
 
 	/*
-	 * init our shadow copies of head from tail values, and write
-	 * head values to match.
+	 * Init our shadow copies of head from tail values,
+	 * and write head values to match.
 	 */
 	val = ipath_read_ureg32(dd, ur_rcvegrindextail, 0);
-	(void)ipath_write_ureg(dd, ur_rcvegrindexhead, val, 0);
+	ipath_write_ureg(dd, ur_rcvegrindexhead, val, 0);
 
 	/* Initialize so we interrupt on next packet received */
-	(void)ipath_write_ureg(dd, ur_rcvhdrhead,
-			       dd->ipath_rhdrhead_intr_off |
-			       dd->ipath_pd[0]->port_head, 0);
+	ipath_write_ureg(dd, ur_rcvhdrhead,
+			 dd->ipath_rhdrhead_intr_off |
+			 dd->ipath_pd[0]->port_head, 0);
 
 	/*
 	 * by now pioavail updates to memory should have occurred, so
@@ -523,25 +537,26 @@
 	 * initial values of the generation bit correct.
 	 */
 	for (i = 0; i < dd->ipath_pioavregs; i++) {
-		__le64 val;
+		__le64 pioavail;
 
 		/*
 		 * Chip Errata bug 6641; even and odd qwords>3 are swapped.
 		 */
 		if (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS))
-			val = dd->ipath_pioavailregs_dma[i ^ 1];
+			pioavail = dd->ipath_pioavailregs_dma[i ^ 1];
 		else
-			val = dd->ipath_pioavailregs_dma[i];
-		dd->ipath_pioavailshadow[i] = le64_to_cpu(val);
+			pioavail = dd->ipath_pioavailregs_dma[i];
+		dd->ipath_pioavailshadow[i] = le64_to_cpu(pioavail) |
+			(~dd->ipath_pioavailkernel[i] <<
+			INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT);
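+		/*
+		 * Editorial note (not part of the original patch): ORing
+		 * in ~ipath_pioavailkernel[i] at the BUSY bit positions
+		 * marks buffers not reserved for the kernel as busy in
+		 * the shadow, presumably so the kernel PIO allocator
+		 * never hands out user-port buffers.
+		 */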
 	}
 	/* can get counters, stats, etc. */
 	dd->ipath_flags |= IPATH_PRESENT;
 }
 
-static int init_housekeeping(struct ipath_devdata *dd,
-			     struct ipath_portdata **pdp, int reinit)
+static int init_housekeeping(struct ipath_devdata *dd, int reinit)
 {
-	char boardn[32];
+	char boardn[40];
 	int ret = 0;
 
 	/*
@@ -600,18 +615,9 @@
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,
 			 INFINIPATH_E_RESET);
 
-	if (reinit)
-		ret = init_chip_reset(dd, pdp);
-	else
-		ret = init_chip_first(dd, pdp);
-
-	if (ret)
-		goto done;
-
-	ipath_cdbg(VERBOSE, "Revision %llx (PCI %x), %u ports, %u tids, "
-		   "%u egrtids\n", (unsigned long long) dd->ipath_revision,
-		   dd->ipath_pcirev, dd->ipath_portcnt, dd->ipath_rcvtidcnt,
-		   dd->ipath_rcvegrcnt);
+	ipath_cdbg(VERBOSE, "Revision %llx (PCI %x)\n",
+		   (unsigned long long) dd->ipath_revision,
+		   dd->ipath_pcirev);
 
 	if (((dd->ipath_revision >> INFINIPATH_R_SOFTWARE_SHIFT) &
 	     INFINIPATH_R_SOFTWARE_MASK) != IPATH_CHIP_SWVERSION) {
@@ -650,10 +656,39 @@
 
 	ipath_dbg("%s", dd->ipath_boardversion);
 
+	if (ret)
+		goto done;
+
+	if (reinit)
+		ret = init_chip_reset(dd);
+	else
+		ret = init_chip_first(dd);
+
 done:
 	return ret;
 }
 
+static void verify_interrupt(unsigned long opaque)
+{
+	struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
+
+	if (!dd)
+		return; /* being torn down */
+
+	/*
+	 * If we don't have any interrupts, let the user know and
+	 * don't bother checking again.
+	 */
+	if (dd->ipath_int_counter == 0) {
+		if (!dd->ipath_f_intr_fallback(dd))
+			dev_err(&dd->pcidev->dev, "No interrupts detected, "
+				"not usable.\n");
+		else /* re-arm the timer to see if fallback works */
+			mod_timer(&dd->ipath_intrchk_timer, jiffies + HZ/2);
+	} else
+		ipath_cdbg(VERBOSE, "%u interrupts at timer check\n",
+			dd->ipath_int_counter);
+}
 
 /**
  * ipath_init_chip - do the actual initialization sequence on the chip
@@ -676,11 +711,11 @@
 	u32 val32, kpiobufs;
 	u32 piobufs, uports;
 	u64 val;
-	struct ipath_portdata *pd = NULL; /* keep gcc4 happy */
+	struct ipath_portdata *pd;
 	gfp_t gfp_flags = GFP_USER | __GFP_COMP;
 	unsigned long flags;
 
-	ret = init_housekeeping(dd, &pd, reinit);
+	ret = init_housekeeping(dd, reinit);
 	if (ret)
 		goto done;
 
@@ -700,7 +735,7 @@
 	 * we now use routines that backend onto __get_free_pages, the
 	 * rest would be wasted.
 	 */
-	dd->ipath_rcvhdrcnt = dd->ipath_rcvegrcnt;
+	dd->ipath_rcvhdrcnt = max(dd->ipath_p0_rcvegrcnt, dd->ipath_rcvegrcnt);
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrcnt,
 			 dd->ipath_rcvhdrcnt);
 
@@ -731,8 +766,8 @@
 	if (kpiobufs + (uports * IPATH_MIN_USER_PORT_BUFCNT) > piobufs) {
 		int i = (int) piobufs -
 			(int) (uports * IPATH_MIN_USER_PORT_BUFCNT);
-		if (i < 0)
-			i = 0;
+		if (i < 1)
+			i = 1;
 		dev_info(&dd->pcidev->dev, "Allocating %d PIO bufs of "
 			 "%d for kernel leaves too few for %d user ports "
 			 "(%d each); using %u\n", kpiobufs,
@@ -751,24 +786,40 @@
 		ipath_dbg("allocating %u pbufs/port leaves %u unused, "
 			  "add to kernel\n", dd->ipath_pbufsport, val32);
 		dd->ipath_lastport_piobuf -= val32;
+		kpiobufs += val32;
 		ipath_dbg("%u pbufs/port leaves %u unused, add to kernel\n",
 			  dd->ipath_pbufsport, val32);
 	}
-	dd->ipath_lastpioindex = dd->ipath_lastport_piobuf;
+	dd->ipath_lastpioindex = 0;
+	dd->ipath_lastpioindexl = dd->ipath_piobcnt2k;
+	ipath_chg_pioavailkernel(dd, 0, piobufs, 1);
 	ipath_cdbg(VERBOSE, "%d PIO bufs for kernel out of %d total %u "
 		   "each for %u user ports\n", kpiobufs,
 		   piobufs, dd->ipath_pbufsport, uports);
+	if (dd->ipath_pioupd_thresh) {
+		if (dd->ipath_pbufsport < dd->ipath_pioupd_thresh)
+			dd->ipath_pioupd_thresh = dd->ipath_pbufsport;
+		if (kpiobufs < dd->ipath_pioupd_thresh)
+			dd->ipath_pioupd_thresh = kpiobufs;
+	}
 
-	dd->ipath_f_early_init(dd);
+	ret = dd->ipath_f_early_init(dd);
+	if (ret) {
+		ipath_dev_err(dd, "Early initialization failure\n");
+		goto done;
+	}
+
 	/*
-	 * cancel any possible active sends from early driver load.
+	 * Cancel any possible active sends from early driver load.
 	 * Follows early_init because some chips have to initialize
 	 * PIO buffers in early_init to avoid false parity errors.
 	 */
 	ipath_cancel_sends(dd, 0);
 
-	/* early_init sets rcvhdrentsize and rcvhdrsize, so this must be
-	 * done after early_init */
+	/*
+	 * Early_init sets rcvhdrentsize and rcvhdrsize, so this must be
+	 * done after early_init.
+	 */
 	dd->ipath_hdrqlast =
 		dd->ipath_rcvhdrentsize * (dd->ipath_rcvhdrcnt - 1);
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrentsize,
@@ -783,8 +834,8 @@
 			goto done;
 	}
 
-	(void)ipath_write_kreg(dd, dd->ipath_kregs->kr_sendpioavailaddr,
-			       dd->ipath_pioavailregs_phys);
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendpioavailaddr,
+			 dd->ipath_pioavailregs_phys);
 	/*
 	 * this is to detect s/w errors, which the h/w works around by
 	 * ignoring the low 6 bits of address, if it wasn't aligned.
@@ -843,58 +894,65 @@
 	/* enable errors that are masked, at least this first time. */
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
 			 ~dd->ipath_maskederrs);
-	dd->ipath_errormask = ipath_read_kreg64(dd,
-		dd->ipath_kregs->kr_errormask);
+	dd->ipath_maskederrs = 0; /* don't re-enable ignored in timer */
+	dd->ipath_errormask =
+		ipath_read_kreg64(dd, dd->ipath_kregs->kr_errormask);
 	/* clear any interrupts up to this point (ints still not enabled) */
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL);
 
+	dd->ipath_f_tidtemplate(dd);
+
 	/*
 	 * Set up the port 0 (kernel) rcvhdr q and egr TIDs.  If doing
 	 * re-init, the simplest way to handle this is to free
 	 * existing, and re-allocate.
 	 * Need to re-create rest of port 0 portdata as well.
 	 */
+	pd = dd->ipath_pd[0];
 	if (reinit) {
-		/* Alloc and init new ipath_portdata for port0,
+		struct ipath_portdata *npd;
+
+		/*
+		 * Alloc and init new ipath_portdata for port0,
 		 * Then free old pd. Could lead to fragmentation, but also
 		 * makes later support for hot-swap easier.
 		 */
-		struct ipath_portdata *npd;
 		npd = create_portdata0(dd);
 		if (npd) {
 			ipath_free_pddata(dd, pd);
-			dd->ipath_pd[0] = pd = npd;
+			dd->ipath_pd[0] = npd;
+			pd = npd;
 		} else {
-			ipath_dev_err(dd, "Unable to allocate portdata for"
-				      "  port 0, failing\n");
+			ipath_dev_err(dd, "Unable to allocate portdata"
+				      " for port 0, failing\n");
 			ret = -ENOMEM;
 			goto done;
 		}
 	}
-	dd->ipath_f_tidtemplate(dd);
 	ret = ipath_create_rcvhdrq(dd, pd);
-	if (!ret) {
-		dd->ipath_hdrqtailptr =
-			(volatile __le64 *)pd->port_rcvhdrtail_kvaddr;
+	if (!ret)
 		ret = create_port0_egr(dd);
-	}
-	if (ret)
-		ipath_dev_err(dd, "failed to allocate port 0 (kernel) "
+	if (ret) {
+		ipath_dev_err(dd, "failed to allocate kernel port's "
 			      "rcvhdrq and/or egr bufs\n");
+		goto done;
+	}
 	else
-		enable_chip(dd, pd, reinit);
+		enable_chip(dd, reinit);
 
-
-	if (!ret && !reinit) {
-	    /* used when we close a port, for DMA already in flight at close */
+	if (!reinit) {
+		/*
+		 * Used when we close a port, for DMA already in flight
+		 * at close.
+		 */
 		dd->ipath_dummy_hdrq = dma_alloc_coherent(
-			&dd->pcidev->dev, pd->port_rcvhdrq_size,
+			&dd->pcidev->dev, dd->ipath_pd[0]->port_rcvhdrq_size,
 			&dd->ipath_dummy_hdrq_phys,
 			gfp_flags);
-		if (!dd->ipath_dummy_hdrq ) {
+		if (!dd->ipath_dummy_hdrq) {
 			dev_info(&dd->pcidev->dev,
 				"Couldn't allocate 0x%lx bytes for dummy hdrq\n",
-				pd->port_rcvhdrq_size);
+				dd->ipath_pd[0]->port_rcvhdrq_size);
 			/* fallback to just 0'ing */
 			dd->ipath_dummy_hdrq_phys = 0UL;
 		}
@@ -906,7 +964,7 @@
 	 */
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, 0ULL);
 
-	if(!dd->ipath_stats_timer_active) {
+	if (!dd->ipath_stats_timer_active) {
 		/*
 		 * first init, or after an admin disable/enable
 		 * set up stats retrieval timer, even if we had errors
@@ -922,6 +980,16 @@
 		dd->ipath_stats_timer_active = 1;
 	}
 
+	/* Set up SendDMA if chip supports it */
+	if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
+		ret = setup_sdma(dd);
+
+	/* Set up HoL state */
+	init_timer(&dd->ipath_hol_timer);
+	dd->ipath_hol_timer.function = ipath_hol_event;
+	dd->ipath_hol_timer.data = (unsigned long)dd;
+	dd->ipath_hol_state = IPATH_HOL_UP;
+
 done:
 	if (!ret) {
 		*dd->ipath_statusp |= IPATH_STATUS_CHIP_PRESENT;
@@ -934,6 +1002,20 @@
 					 0ULL);
 			/* chip is usable; mark it as initialized */
 			*dd->ipath_statusp |= IPATH_STATUS_INITTED;
+
+			/*
+			 * setup to verify we get an interrupt, and fallback
+			 * to an alternate if necessary and possible
+			 */
+			if (!reinit) {
+				init_timer(&dd->ipath_intrchk_timer);
+				dd->ipath_intrchk_timer.function =
+					verify_interrupt;
+				dd->ipath_intrchk_timer.data =
+					(unsigned long) dd;
+			}
+			dd->ipath_intrchk_timer.expires = jiffies + HZ/2;
+			add_timer(&dd->ipath_intrchk_timer);
 		} else
 			ipath_dev_err(dd, "No interrupts enabled, couldn't "
 				      "setup interrupt address\n");
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
index 92e58c9..1b58f47 100644
--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -32,6 +32,7 @@
  */
 
 #include <linux/pci.h>
+#include <linux/delay.h>
 
 #include "ipath_kernel.h"
 #include "ipath_verbs.h"
@@ -59,9 +60,11 @@
 	dev_info(&dd->pcidev->dev,
 		"Rewrite PIO buffer %u, to recover from parity error\n",
 		pnum);
-	*pbuf = dwcnt+1; /* no flush required, since already in freeze */
-	while(--dwcnt)
-		*pbuf++ = 0;
+
+	/* no flush required, since already in freeze */
+	writel(dwcnt + 1, pbuf);
+	while (--dwcnt)
+		writel(0, pbuf++);
 }
 
 /*
@@ -70,7 +73,7 @@
  * If rewrite is true, and bits are set in the sendbufferror registers,
  * we'll write to the buffer, for error recovery on parity errors.
  */
-static void ipath_disarm_senderrbufs(struct ipath_devdata *dd, int rewrite)
+void ipath_disarm_senderrbufs(struct ipath_devdata *dd, int rewrite)
 {
 	u32 piobcnt;
 	unsigned long sbuf[4];
@@ -84,12 +87,14 @@
 		dd, dd->ipath_kregs->kr_sendbuffererror);
 	sbuf[1] = ipath_read_kreg64(
 		dd, dd->ipath_kregs->kr_sendbuffererror + 1);
-	if (piobcnt > 128) {
+	if (piobcnt > 128)
 		sbuf[2] = ipath_read_kreg64(
 			dd, dd->ipath_kregs->kr_sendbuffererror + 2);
+	if (piobcnt > 192)
 		sbuf[3] = ipath_read_kreg64(
 			dd, dd->ipath_kregs->kr_sendbuffererror + 3);
-	}
+	else
+		sbuf[3] = 0;
 
 	if (sbuf[0] || sbuf[1] || (piobcnt > 128 && (sbuf[2] || sbuf[3]))) {
 		int i;
@@ -254,24 +259,20 @@
 }
 
 /* return the strings for the most common link states */
-static char *ib_linkstate(u32 linkstate)
+static char *ib_linkstate(struct ipath_devdata *dd, u64 ibcs)
 {
 	char *ret;
+	u32 state;
 
-	switch (linkstate) {
-	case IPATH_IBSTATE_INIT:
+	state = ipath_ib_state(dd, ibcs);
+	if (state == dd->ib_init)
 		ret = "Init";
-		break;
-	case IPATH_IBSTATE_ARM:
+	else if (state == dd->ib_arm)
 		ret = "Arm";
-		break;
-	case IPATH_IBSTATE_ACTIVE:
+	else if (state == dd->ib_active)
 		ret = "Active";
-		break;
-	default:
+	else
 		ret = "Down";
-	}
-
 	return ret;
 }
 
@@ -286,103 +287,172 @@
 }
 
 static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
-				     ipath_err_t errs, int noprint)
+				     ipath_err_t errs)
 {
-	u64 val;
-	u32 ltstate, lstate;
+	u32 ltstate, lstate, ibstate, lastlstate;
+	u32 init = dd->ib_init;
+	u32 arm = dd->ib_arm;
+	u32 active = dd->ib_active;
+	const u64 ibcs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
+
+	lstate = ipath_ib_linkstate(dd, ibcs); /* linkstate */
+	ibstate = ipath_ib_state(dd, ibcs);
+	/* linkstate at last interrupt */
+	lastlstate = ipath_ib_linkstate(dd, dd->ipath_lastibcstat);
+	ltstate = ipath_ib_linktrstate(dd, ibcs); /* link training state */
 
 	/*
-	 * even if diags are enabled, we want to notice LINKINIT, etc.
-	 * We just don't want to change the LED state, or
-	 * dd->ipath_kregs->kr_ibcctrl
+	 * Since going into a recovery state causes the link state to go
+	 * down and since recovery is transitory, it is better if we "miss"
+	 * ever seeing the link training state go into recovery (i.e.,
+	 * ignore this transition for link state special handling purposes)
+	 * without even updating ipath_lastibcstat.
 	 */
-	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
-	lstate = val & IPATH_IBSTATE_MASK;
+	if ((ltstate == INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN) ||
+	    (ltstate == INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT) ||
+	    (ltstate == INFINIPATH_IBCS_LT_STATE_RECOVERIDLE))
+		goto done;
 
 	/*
-	 * this is confusing enough when it happens that I want to always put it
-	 * on the console and in the logs.  If it was a requested state change,
-	 * we'll have already cleared the flags, so we won't print this warning
+	 * If the linkstate transitions into INIT from any of the various down
+	 * states, or if it transitions from any of the up (INIT or better)
+	 * states into any of the down states (except link recovery), then
+	 * call the chip-specific code to take appropriate actions.
 	 */
-	if ((lstate != IPATH_IBSTATE_ARM && lstate != IPATH_IBSTATE_ACTIVE)
-		&& (dd->ipath_flags & (IPATH_LINKARMED | IPATH_LINKACTIVE))) {
-		dev_info(&dd->pcidev->dev, "Link state changed from %s to %s\n",
-				 (dd->ipath_flags & IPATH_LINKARMED) ? "ARM" : "ACTIVE",
-				 ib_linkstate(lstate));
-		/*
-		 * Flush all queued sends when link went to DOWN or INIT,
-		 * to be sure that they don't block SMA and other MAD packets
-		 */
-		ipath_cancel_sends(dd, 1);
-	}
-	else if (lstate == IPATH_IBSTATE_INIT || lstate == IPATH_IBSTATE_ARM ||
-	    lstate == IPATH_IBSTATE_ACTIVE) {
-		/*
-		 * only print at SMA if there is a change, debug if not
-		 * (sometimes we want to know that, usually not).
-		 */
-		if (lstate == ((unsigned) dd->ipath_lastibcstat
-			       & IPATH_IBSTATE_MASK)) {
-			ipath_dbg("Status change intr but no change (%s)\n",
-				  ib_linkstate(lstate));
+	if (lstate >= INFINIPATH_IBCS_L_STATE_INIT &&
+		lastlstate == INFINIPATH_IBCS_L_STATE_DOWN) {
+		/* transitioned to UP */
+		if (dd->ipath_f_ib_updown(dd, 1, ibcs)) {
+			/* link came up, so we must no longer be disabled */
+			dd->ipath_flags &= ~IPATH_IB_LINK_DISABLED;
+			ipath_cdbg(LINKVERB, "LinkUp handled, skipped\n");
+			goto skip_ibchange; /* chip-code handled */
 		}
-		else
-			ipath_cdbg(VERBOSE, "Unit %u link state %s, last "
-				   "was %s\n", dd->ipath_unit,
-				   ib_linkstate(lstate),
-				   ib_linkstate((unsigned)
-						dd->ipath_lastibcstat
-						& IPATH_IBSTATE_MASK));
+	} else if ((lastlstate >= INFINIPATH_IBCS_L_STATE_INIT ||
+		(dd->ipath_flags & IPATH_IB_FORCE_NOTIFY)) &&
+		ltstate <= INFINIPATH_IBCS_LT_STATE_CFGWAITRMT &&
+		ltstate != INFINIPATH_IBCS_LT_STATE_LINKUP) {
+		int handled;
+		handled = dd->ipath_f_ib_updown(dd, 0, ibcs);
+		dd->ipath_flags &= ~IPATH_IB_FORCE_NOTIFY;
+		if (handled) {
+			ipath_cdbg(LINKVERB, "LinkDown handled, skipped\n");
+			goto skip_ibchange; /* chip-code handled */
+		}
 	}
-	else {
-		lstate = dd->ipath_lastibcstat & IPATH_IBSTATE_MASK;
-		if (lstate == IPATH_IBSTATE_INIT ||
-		    lstate == IPATH_IBSTATE_ARM ||
-		    lstate == IPATH_IBSTATE_ACTIVE)
-			ipath_cdbg(VERBOSE, "Unit %u link state down"
-				   " (state 0x%x), from %s\n",
-				   dd->ipath_unit,
-				   (u32)val & IPATH_IBSTATE_MASK,
-				   ib_linkstate(lstate));
-		else
-			ipath_cdbg(VERBOSE, "Unit %u link state changed "
-				   "to 0x%x from down (%x)\n",
-				   dd->ipath_unit, (u32) val, lstate);
+
+	/*
+	 * Significant enough to always print and get into logs, if it was
+	 * unexpected.  If it was a requested state change, we'll have
+	 * already cleared the flags, so we won't print this warning.
+	 */
+	if ((ibstate != arm && ibstate != active) &&
+	    (dd->ipath_flags & (IPATH_LINKARMED | IPATH_LINKACTIVE))) {
+		dev_info(&dd->pcidev->dev, "Link state changed from %s "
+			 "to %s\n", (dd->ipath_flags & IPATH_LINKARMED) ?
+			 "ARM" : "ACTIVE", ib_linkstate(dd, ibcs));
 	}
-	ltstate = (val >> INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
-		INFINIPATH_IBCS_LINKTRAININGSTATE_MASK;
-	lstate = (val >> INFINIPATH_IBCS_LINKSTATE_SHIFT) &
-		INFINIPATH_IBCS_LINKSTATE_MASK;
 
 	if (ltstate == INFINIPATH_IBCS_LT_STATE_POLLACTIVE ||
 	    ltstate == INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
-		u32 last_ltstate;
-
+		u32 lastlts;
+		lastlts = ipath_ib_linktrstate(dd, dd->ipath_lastibcstat);
 		/*
-		 * Ignore cycling back and forth from Polling.Active
-		 * to Polling.Quiet while waiting for the other end of
-		 * the link to come up. We will cycle back and forth
-		 * between them if no cable is plugged in,
-		 * the other device is powered off or disabled, etc.
+		 * Ignore cycling back and forth from Polling.Active to
+		 * Polling.Quiet while waiting for the other end of the link
+		 * to come up, except to try and decide if we are connected
+		 * to a live IB device or not.  We will cycle back and
+		 * forth between them if no cable is plugged in, the other
+		 * device is powered off or disabled, etc.
 		 */
-		last_ltstate = (dd->ipath_lastibcstat >>
-				INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT)
-			& INFINIPATH_IBCS_LINKTRAININGSTATE_MASK;
-		if (last_ltstate == INFINIPATH_IBCS_LT_STATE_POLLACTIVE
-		    || last_ltstate ==
-		    INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
-			if (dd->ipath_ibpollcnt > 40) {
+		if (lastlts == INFINIPATH_IBCS_LT_STATE_POLLACTIVE ||
+		    lastlts == INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
+			if (!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) &&
+			     (++dd->ipath_ibpollcnt == 40)) {
 				dd->ipath_flags |= IPATH_NOCABLE;
 				*dd->ipath_statusp |=
 					IPATH_STATUS_IB_NOCABLE;
-			} else
-				dd->ipath_ibpollcnt++;
+				ipath_cdbg(LINKVERB, "Set NOCABLE\n");
+			}
+			ipath_cdbg(LINKVERB, "POLL change to %s (%x)\n",
+				ipath_ibcstatus_str[ltstate], ibstate);
 			goto skip_ibchange;
 		}
 	}
-	dd->ipath_ibpollcnt = 0;	/* some state other than 2 or 3 */
+
+	dd->ipath_ibpollcnt = 0; /* not poll*, now */
 	ipath_stats.sps_iblink++;
-	if (ltstate != INFINIPATH_IBCS_LT_STATE_LINKUP) {
+
+	if (ibstate != init && dd->ipath_lastlinkrecov && ipath_linkrecovery) {
+		u64 linkrecov;
+		linkrecov = ipath_snap_cntr(dd,
+			dd->ipath_cregs->cr_iblinkerrrecovcnt);
+		if (linkrecov != dd->ipath_lastlinkrecov) {
+			ipath_dbg("IB linkrecov up %Lx (%s %s) recov %Lu\n",
+				ibcs, ib_linkstate(dd, ibcs),
+				ipath_ibcstatus_str[ltstate],
+				linkrecov);
+			/* and no more until active again */
+			dd->ipath_lastlinkrecov = 0;
+			ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
+			goto skip_ibchange;
+		}
+	}
+
+	if (ibstate == init || ibstate == arm || ibstate == active) {
+		*dd->ipath_statusp &= ~IPATH_STATUS_IB_NOCABLE;
+		if (ibstate == init || ibstate == arm) {
+			*dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
+			if (dd->ipath_flags & IPATH_LINKACTIVE)
+				signal_ib_event(dd, IB_EVENT_PORT_ERR);
+		}
+		if (ibstate == arm) {
+			dd->ipath_flags |= IPATH_LINKARMED;
+			dd->ipath_flags &= ~(IPATH_LINKUNK |
+				IPATH_LINKINIT | IPATH_LINKDOWN |
+				IPATH_LINKACTIVE | IPATH_NOCABLE);
+			ipath_hol_down(dd);
+		} else  if (ibstate == init) {
+			/*
+			 * set INIT and DOWN.  Down is checked by
+			 * most of the other code, but INIT is
+			 * useful to know in a few places.
+			 */
+			dd->ipath_flags |= IPATH_LINKINIT |
+				IPATH_LINKDOWN;
+			dd->ipath_flags &= ~(IPATH_LINKUNK |
+				IPATH_LINKARMED | IPATH_LINKACTIVE |
+				IPATH_NOCABLE);
+			ipath_hol_down(dd);
+		} else {  /* active */
+			dd->ipath_lastlinkrecov = ipath_snap_cntr(dd,
+				dd->ipath_cregs->cr_iblinkerrrecovcnt);
+			*dd->ipath_statusp |=
+				IPATH_STATUS_IB_READY | IPATH_STATUS_IB_CONF;
+			dd->ipath_flags |= IPATH_LINKACTIVE;
+			dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
+				| IPATH_LINKDOWN | IPATH_LINKARMED |
+				IPATH_NOCABLE);
+			if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
+				ipath_restart_sdma(dd);
+			signal_ib_event(dd, IB_EVENT_PORT_ACTIVE);
+			/* LED active not handled in chip _f_updown */
+			dd->ipath_f_setextled(dd, lstate, ltstate);
+			ipath_hol_up(dd);
+		}
+
+		/*
+		 * print after we've already done the work, so as not to
+		 * delay the state changes and notifications, for debugging
+		 */
+		if (lstate == lastlstate)
+			ipath_cdbg(LINKVERB, "Unchanged from last: %s "
+				"(%x)\n", ib_linkstate(dd, ibcs), ibstate);
+		else
+			ipath_cdbg(VERBOSE, "Unit %u: link up to %s %s (%x)\n",
+				  dd->ipath_unit, ib_linkstate(dd, ibcs),
+				  ipath_ibcstatus_str[ltstate],  ibstate);
+	} else { /* down */
 		if (dd->ipath_flags & IPATH_LINKACTIVE)
 			signal_ib_event(dd, IB_EVENT_PORT_ERR);
 		dd->ipath_flags |= IPATH_LINKDOWN;
@@ -391,69 +461,28 @@
 				     IPATH_LINKARMED);
 		*dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
 		dd->ipath_lli_counter = 0;
-		if (!noprint) {
-			if (((dd->ipath_lastibcstat >>
-			      INFINIPATH_IBCS_LINKSTATE_SHIFT) &
-			     INFINIPATH_IBCS_LINKSTATE_MASK)
-			    == INFINIPATH_IBCS_L_STATE_ACTIVE)
-				/* if from up to down be more vocal */
-				ipath_cdbg(VERBOSE,
-					   "Unit %u link now down (%s)\n",
-					   dd->ipath_unit,
-					   ipath_ibcstatus_str[ltstate]);
-			else
-				ipath_cdbg(VERBOSE, "Unit %u link is "
-					   "down (%s)\n", dd->ipath_unit,
-					   ipath_ibcstatus_str[ltstate]);
-		}
 
-		dd->ipath_f_setextled(dd, lstate, ltstate);
-	} else if ((val & IPATH_IBSTATE_MASK) == IPATH_IBSTATE_ACTIVE) {
-		dd->ipath_flags |= IPATH_LINKACTIVE;
-		dd->ipath_flags &=
-			~(IPATH_LINKUNK | IPATH_LINKINIT | IPATH_LINKDOWN |
-			  IPATH_LINKARMED | IPATH_NOCABLE);
-		*dd->ipath_statusp &= ~IPATH_STATUS_IB_NOCABLE;
-		*dd->ipath_statusp |=
-			IPATH_STATUS_IB_READY | IPATH_STATUS_IB_CONF;
-		dd->ipath_f_setextled(dd, lstate, ltstate);
-		signal_ib_event(dd, IB_EVENT_PORT_ACTIVE);
-	} else if ((val & IPATH_IBSTATE_MASK) == IPATH_IBSTATE_INIT) {
-		if (dd->ipath_flags & IPATH_LINKACTIVE)
-			signal_ib_event(dd, IB_EVENT_PORT_ERR);
-		/*
-		 * set INIT and DOWN.  Down is checked by most of the other
-		 * code, but INIT is useful to know in a few places.
-		 */
-		dd->ipath_flags |= IPATH_LINKINIT | IPATH_LINKDOWN;
-		dd->ipath_flags &=
-			~(IPATH_LINKUNK | IPATH_LINKACTIVE | IPATH_LINKARMED
-			  | IPATH_NOCABLE);
-		*dd->ipath_statusp &= ~(IPATH_STATUS_IB_NOCABLE
-					| IPATH_STATUS_IB_READY);
-		dd->ipath_f_setextled(dd, lstate, ltstate);
-	} else if ((val & IPATH_IBSTATE_MASK) == IPATH_IBSTATE_ARM) {
-		if (dd->ipath_flags & IPATH_LINKACTIVE)
-			signal_ib_event(dd, IB_EVENT_PORT_ERR);
-		dd->ipath_flags |= IPATH_LINKARMED;
-		dd->ipath_flags &=
-			~(IPATH_LINKUNK | IPATH_LINKDOWN | IPATH_LINKINIT |
-			  IPATH_LINKACTIVE | IPATH_NOCABLE);
-		*dd->ipath_statusp &= ~(IPATH_STATUS_IB_NOCABLE
-					| IPATH_STATUS_IB_READY);
-		dd->ipath_f_setextled(dd, lstate, ltstate);
-	} else {
-		if (!noprint)
-			ipath_dbg("IBstatuschange unit %u: %s (%x)\n",
-				  dd->ipath_unit,
-				  ipath_ibcstatus_str[ltstate], ltstate);
+		if (lastlstate != INFINIPATH_IBCS_L_STATE_DOWN)
+			ipath_cdbg(VERBOSE, "Unit %u link state down "
+				   "(state 0x%x), from %s\n",
+				   dd->ipath_unit, lstate,
+				   ib_linkstate(dd, dd->ipath_lastibcstat));
+		else
+			ipath_cdbg(LINKVERB, "Unit %u link state changed "
+				   "to %s (0x%x) from down (%x)\n",
+				   dd->ipath_unit,
+				   ipath_ibcstatus_str[ltstate],
+				   ibstate, lastlstate);
 	}
+
 skip_ibchange:
-	dd->ipath_lastibcstat = val;
+	dd->ipath_lastibcstat = ibcs;
+done:
+	return;
 }
 
 static void handle_supp_msgs(struct ipath_devdata *dd,
-			     unsigned supp_msgs, char *msg, int msgsz)
+			     unsigned supp_msgs, char *msg, u32 msgsz)
 {
 	/*
 	 * Print the message unless it's ibc status change only, which
@@ -461,12 +490,19 @@
 	 */
 	if (dd->ipath_lasterror & ~INFINIPATH_E_IBSTATUSCHANGED) {
 		int iserr;
-		iserr = ipath_decode_err(msg, msgsz,
+		ipath_err_t mask;
+		iserr = ipath_decode_err(dd, msg, msgsz,
 					 dd->ipath_lasterror &
 					 ~INFINIPATH_E_IBSTATUSCHANGED);
-		if (dd->ipath_lasterror &
-			~(INFINIPATH_E_RRCVEGRFULL |
-			INFINIPATH_E_RRCVHDRFULL | INFINIPATH_E_PKTERRS))
+
+		mask = INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
+			INFINIPATH_E_PKTERRS | INFINIPATH_E_SDMADISABLED;
+
+		/* if we're in debug, then don't mask SDMADISABLED msgs */
+		if (ipath_debug & __IPATH_DBG)
+			mask &= ~INFINIPATH_E_SDMADISABLED;
+
+		if (dd->ipath_lasterror & ~mask)
 			ipath_dev_err(dd, "Suppressed %u messages for "
 				      "fast-repeating errors (%s) (%llx)\n",
 				      supp_msgs, msg,
@@ -493,7 +529,7 @@
 
 static unsigned handle_frequent_errors(struct ipath_devdata *dd,
 				       ipath_err_t errs, char *msg,
-				       int msgsz, int *noprint)
+				       u32 msgsz, int *noprint)
 {
 	unsigned long nc;
 	static unsigned long nextmsg_time;
@@ -523,19 +559,125 @@
 	return supp_msgs;
 }
 
+static void handle_sdma_errors(struct ipath_devdata *dd, ipath_err_t errs)
+{
+	unsigned long flags;
+	int expected;
+
+	if (ipath_debug & __IPATH_DBG) {
+		char msg[128];
+		ipath_decode_err(dd, msg, sizeof msg, errs &
+			INFINIPATH_E_SDMAERRS);
+		ipath_dbg("errors %lx (%s)\n", (unsigned long)errs, msg);
+	}
+	if (ipath_debug & __IPATH_VERBDBG) {
+		unsigned long tl, hd, status, lengen;
+		tl = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmatail);
+		hd = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmahead);
+		status = ipath_read_kreg64(dd,
+			dd->ipath_kregs->kr_senddmastatus);
+		lengen = ipath_read_kreg64(dd,
+			dd->ipath_kregs->kr_senddmalengen);
+		ipath_cdbg(VERBOSE, "sdma tl 0x%lx hd 0x%lx status 0x%lx "
+			"lengen 0x%lx\n", tl, hd, status, lengen);
+	}
+
+	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
+	__set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
+	expected = test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
+	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
+	if (!expected)
+		ipath_cancel_sends(dd, 1);
+}
+
+static void handle_sdma_intr(struct ipath_devdata *dd, u64 istat)
+{
+	unsigned long flags;
+	int expected;
+
+	if ((istat & INFINIPATH_I_SDMAINT) &&
+	    !test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
+		ipath_sdma_intr(dd);
+
+	if (istat & INFINIPATH_I_SDMADISABLED) {
+		expected = test_bit(IPATH_SDMA_ABORTING,
+			&dd->ipath_sdma_status);
+		ipath_dbg("%s SDmaDisabled intr\n",
+			expected ? "expected" : "unexpected");
+		spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
+		__set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
+		spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
+		if (!expected)
+			ipath_cancel_sends(dd, 1);
+		if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
+			tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
+	}
+}
+
+static int handle_hdrq_full(struct ipath_devdata *dd)
+{
+	int chkerrpkts = 0;
+	u32 hd, tl;
+	u32 i;
+
+	ipath_stats.sps_hdrqfull++;
+	for (i = 0; i < dd->ipath_cfgports; i++) {
+		struct ipath_portdata *pd = dd->ipath_pd[i];
+
+		if (i == 0) {
+			/*
+			 * For kernel receive queues, we just want to know
+			 * if there are packets in the queue that we can
+			 * process.
+			 */
+			if (pd->port_head != ipath_get_hdrqtail(pd))
+				chkerrpkts |= 1 << i;
+			continue;
+		}
+
+		/* Skip if user context is not open */
+		if (!pd || !pd->port_cnt)
+			continue;
+
+		/* Don't report the same point multiple times. */
+		if (dd->ipath_flags & IPATH_NODMA_RTAIL)
+			tl = ipath_read_ureg32(dd, ur_rcvhdrtail, i);
+		else
+			tl = ipath_get_rcvhdrtail(pd);
+		if (tl == pd->port_lastrcvhdrqtail)
+			continue;
+
+		hd = ipath_read_ureg32(dd, ur_rcvhdrhead, i);
+		if (hd == (tl + 1) || (!hd && tl == dd->ipath_hdrqlast)) {
+			pd->port_lastrcvhdrqtail = tl;
+			pd->port_hdrqfull++;
+			/* flush hdrqfull so that poll() sees it */
+			wmb();
+			wake_up_interruptible(&pd->port_wait);
+		}
+	}
+
+	return chkerrpkts;
+}
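+
+/*
+ * Editorial note (not part of the original patch): the hd == (tl + 1)
+ * test above flags a receive header queue as full when the tail is one
+ * entry behind the head, with (!hd && tl == dd->ipath_hdrqlast)
+ * covering the wrap-around case.
+ */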
+
 static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 {
 	char msg[128];
 	u64 ignore_this_time = 0;
-	int i, iserr = 0;
+	u64 iserr = 0;
 	int chkerrpkts = 0, noprint = 0;
 	unsigned supp_msgs;
 	int log_idx;
 
-	supp_msgs = handle_frequent_errors(dd, errs, msg, sizeof msg, &noprint);
+	/*
+	 * don't report errors that are masked, either at init
+	 * (not set in ipath_errormask), or temporarily (set in
+	 * ipath_maskederrs)
+	 */
+	errs &= dd->ipath_errormask & ~dd->ipath_maskederrs;
 
-	/* don't report errors that are masked */
-	errs &= ~dd->ipath_maskederrs;
+	supp_msgs = handle_frequent_errors(dd, errs, msg, (u32)sizeof msg,
+		&noprint);
 
 	/* do these first, they are most important */
 	if (errs & INFINIPATH_E_HARDWARE) {
@@ -550,6 +692,9 @@
 		}
 	}
 
+	if (errs & INFINIPATH_E_SDMAERRS)
+		handle_sdma_errors(dd, errs);
+
 	if (!noprint && (errs & ~dd->ipath_e_bitsextant))
 		ipath_dev_err(dd, "error interrupt with unknown errors "
 			      "%llx set\n", (unsigned long long)
@@ -580,18 +725,19 @@
 		 * ones on this particular interrupt, which also isn't great
 		 */
 		dd->ipath_maskederrs |= dd->ipath_lasterror | errs;
+
 		dd->ipath_errormask &= ~dd->ipath_maskederrs;
 		ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
-			dd->ipath_errormask);
-		s_iserr = ipath_decode_err(msg, sizeof msg,
-			dd->ipath_maskederrs);
+				 dd->ipath_errormask);
+		s_iserr = ipath_decode_err(dd, msg, sizeof msg,
+					   dd->ipath_maskederrs);
 
 		if (dd->ipath_maskederrs &
-			~(INFINIPATH_E_RRCVEGRFULL |
-			INFINIPATH_E_RRCVHDRFULL | INFINIPATH_E_PKTERRS))
+		    ~(INFINIPATH_E_RRCVEGRFULL |
+		      INFINIPATH_E_RRCVHDRFULL | INFINIPATH_E_PKTERRS))
 			ipath_dev_err(dd, "Temporarily disabling "
 			    "error(s) %llx reporting; too frequent (%s)\n",
-				(unsigned long long)dd->ipath_maskederrs,
+				(unsigned long long) dd->ipath_maskederrs,
 				msg);
 		else {
 			/*
@@ -633,26 +779,43 @@
 			  INFINIPATH_E_IBSTATUSCHANGED);
 	}
 
-	/* likely due to cancel, so suppress */
+	if (errs & INFINIPATH_E_SENDSPECIALTRIGGER) {
+		dd->ipath_spectriggerhit++;
+		ipath_dbg("%lu special trigger hits\n",
+			dd->ipath_spectriggerhit);
+	}
+
+	/* likely due to cancel, so suppress message unless verbose */
 	if ((errs & (INFINIPATH_E_SPKTLEN | INFINIPATH_E_SPIOARMLAUNCH)) &&
 		dd->ipath_lastcancel > jiffies) {
-		ipath_dbg("Suppressed armlaunch/spktlen after error send cancel\n");
+		/* armlaunch takes precedence; it often causes both. */
+		ipath_cdbg(VERBOSE,
+			"Suppressed %s error (%llx) after sendbuf cancel\n",
+			(errs & INFINIPATH_E_SPIOARMLAUNCH) ?
+			"armlaunch" : "sendpktlen", (unsigned long long)errs);
 		errs &= ~(INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SPKTLEN);
 	}
 
 	if (!errs)
 		return 0;
 
-	if (!noprint)
+	if (!noprint) {
+		ipath_err_t mask;
 		/*
-		 * the ones we mask off are handled specially below or above
+		 * The ones we mask off are handled specially below
+		 * or above.  Also mask SDMADISABLED by default as it
+		 * is too chatty.
 		 */
-		ipath_decode_err(msg, sizeof msg,
-				 errs & ~(INFINIPATH_E_IBSTATUSCHANGED |
-					  INFINIPATH_E_RRCVEGRFULL |
-					  INFINIPATH_E_RRCVHDRFULL |
-					  INFINIPATH_E_HARDWARE));
-	else
+		mask = INFINIPATH_E_IBSTATUSCHANGED |
+			INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
+			INFINIPATH_E_HARDWARE | INFINIPATH_E_SDMADISABLED;
+
+		/* if we're in debug, then don't mask SDMADISABLED msgs */
+		if (ipath_debug & __IPATH_DBG)
+			mask &= ~INFINIPATH_E_SDMADISABLED;
+
+		ipath_decode_err(dd, msg, sizeof msg, errs & ~mask);
+	} else
 		/* so we don't need if (!noprint) at strlcat's below */
 		*msg = 0;
 
@@ -677,40 +840,8 @@
 	 * fast_stats, no more than every 5 seconds, user ports get printed
 	 * on close
 	 */
-	if (errs & INFINIPATH_E_RRCVHDRFULL) {
-		u32 hd, tl;
-		ipath_stats.sps_hdrqfull++;
-		for (i = 0; i < dd->ipath_cfgports; i++) {
-			struct ipath_portdata *pd = dd->ipath_pd[i];
-			if (i == 0) {
-				hd = pd->port_head;
-				tl = (u32) le64_to_cpu(
-					*dd->ipath_hdrqtailptr);
-			} else if (pd && pd->port_cnt &&
-				   pd->port_rcvhdrtail_kvaddr) {
-				/*
-				 * don't report same point multiple times,
-				 * except kernel
-				 */
-				tl = *(u64 *) pd->port_rcvhdrtail_kvaddr;
-				if (tl == pd->port_lastrcvhdrqtail)
-					continue;
-				hd = ipath_read_ureg32(dd, ur_rcvhdrhead,
-						       i);
-			} else
-				continue;
-			if (hd == (tl + 1) ||
-			    (!hd && tl == dd->ipath_hdrqlast)) {
-				if (i == 0)
-					chkerrpkts = 1;
-				pd->port_lastrcvhdrqtail = tl;
-				pd->port_hdrqfull++;
-				/* flush hdrqfull so that poll() sees it */
-				wmb();
-				wake_up_interruptible(&pd->port_wait);
-			}
-		}
-	}
+	if (errs & INFINIPATH_E_RRCVHDRFULL)
+		chkerrpkts |= handle_hdrq_full(dd);
 	if (errs & INFINIPATH_E_RRCVEGRFULL) {
 		struct ipath_portdata *pd = dd->ipath_pd[0];
 
@@ -721,9 +852,8 @@
 		 * vs user)
 		 */
 		ipath_stats.sps_etidfull++;
-		if (pd->port_head !=
-		    (u32) le64_to_cpu(*dd->ipath_hdrqtailptr))
-			chkerrpkts = 1;
+		if (pd->port_head != ipath_get_hdrqtail(pd))
+			chkerrpkts |= 1;
 	}
 
 	/*
@@ -741,16 +871,13 @@
 		dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
 				     | IPATH_LINKARMED | IPATH_LINKACTIVE);
 		*dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
-		if (!noprint) {
-			u64 st = ipath_read_kreg64(
-				dd, dd->ipath_kregs->kr_ibcstatus);
 
-			ipath_dbg("Lost link, link now down (%s)\n",
-				  ipath_ibcstatus_str[st & 0xf]);
-		}
+		ipath_dbg("Lost link, link now down (%s)\n",
+			ipath_ibcstatus_str[ipath_read_kreg64(dd,
+			dd->ipath_kregs->kr_ibcstatus) & 0xf]);
 	}
 	if (errs & INFINIPATH_E_IBSTATUSCHANGED)
-		handle_e_ibstatuschanged(dd, errs, noprint);
+		handle_e_ibstatuschanged(dd, errs);
 
 	if (errs & INFINIPATH_E_RESET) {
 		if (!noprint)
@@ -765,9 +892,6 @@
 	if (!noprint && *msg) {
 		if (iserr)
 			ipath_dev_err(dd, "%s error\n", msg);
-		else
-			dev_info(&dd->pcidev->dev, "%s packet problems\n",
-				msg);
 	}
 	if (dd->ipath_state_wanted & dd->ipath_flags) {
 		ipath_cdbg(VERBOSE, "driver wanted state %x, iflags now %x, "
@@ -779,7 +903,6 @@
 	return chkerrpkts;
 }
 
-
 /*
  * try to cleanup as much as possible for anything that might have gone
  * wrong while in freeze mode, such as pio buffers being written by user
@@ -796,8 +919,7 @@
 void ipath_clear_freeze(struct ipath_devdata *dd)
 {
 	int i, im;
-	__le64 val;
-	unsigned long flags;
+	u64 val;
 
 	/* disable error interrupts, to avoid confusion */
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, 0ULL);
@@ -816,14 +938,7 @@
 			 dd->ipath_control);
 
 	/* ensure pio avail updates continue */
-	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-		 dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
-	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-			 dd->ipath_sendctrl);
-	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
+	ipath_force_pio_avail_update(dd);
 
 	/*
 	 * We just enabled pioavailupdate, so dma copy is almost certainly
@@ -831,10 +946,13 @@
 	 */
 	for (i = 0; i < dd->ipath_pioavregs; i++) {
 		/* deal with 6110 chip bug */
-		im = i > 3 ? i ^ 1 : i;
+		im = (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS)) ?
+			i ^ 1 : i;
 		val = ipath_read_kreg64(dd, (0x1000 / sizeof(u64)) + im);
-		dd->ipath_pioavailregs_dma[i] = dd->ipath_pioavailshadow[i]
-			= le64_to_cpu(val);
+		dd->ipath_pioavailregs_dma[i] = cpu_to_le64(val);
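+		/* mark buffers not available to the kernel busy in the shadow */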
+		dd->ipath_pioavailshadow[i] = val |
+			(~dd->ipath_pioavailkernel[i] <<
+			INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT);
 	}
 
 	/*
@@ -950,7 +1068,7 @@
  * process was waiting for a packet to arrive, and didn't want
  * to poll
  */
-static void handle_urcv(struct ipath_devdata *dd, u32 istat)
+static void handle_urcv(struct ipath_devdata *dd, u64 istat)
 {
 	u64 portr;
 	int i;
@@ -966,12 +1084,13 @@
 	 * and ipath_poll_next()...
 	 */
 	rmb();
-	portr = ((istat >> INFINIPATH_I_RCVAVAIL_SHIFT) &
-		 dd->ipath_i_rcvavail_mask)
-		| ((istat >> INFINIPATH_I_RCVURG_SHIFT) &
-		   dd->ipath_i_rcvurg_mask);
+	portr = ((istat >> dd->ipath_i_rcvavail_shift) &
+		 dd->ipath_i_rcvavail_mask) |
+		((istat >> dd->ipath_i_rcvurg_shift) &
+		 dd->ipath_i_rcvurg_mask);
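+	/* portr: one bit per port with receive-available or urgent work */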
 	for (i = 1; i < dd->ipath_cfgports; i++) {
 		struct ipath_portdata *pd = dd->ipath_pd[i];
+
 		if (portr & (1 << i) && pd && pd->port_cnt) {
 			if (test_and_clear_bit(IPATH_PORT_WAITING_RCV,
 					       &pd->port_flag)) {
@@ -988,7 +1107,7 @@
 	}
 	if (rcvdint) {
 		/* only want to take one interrupt, so turn off the rcv
-		 * interrupt for all the ports that we did the wakeup on
+		 * interrupt for all the ports for which we set rcv_waiting
 		 * (but never for kernel port)
 		 */
 		ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
@@ -999,12 +1118,11 @@
 irqreturn_t ipath_intr(int irq, void *data)
 {
 	struct ipath_devdata *dd = data;
-	u32 istat, chk0rcv = 0;
+	u64 istat, chk0rcv = 0;
 	ipath_err_t estat = 0;
 	irqreturn_t ret;
 	static unsigned unexpected = 0;
-	static const u32 port0rbits = (1U<<INFINIPATH_I_RCVAVAIL_SHIFT) |
-		 (1U<<INFINIPATH_I_RCVURG_SHIFT);
+	u64 kportrbits;
 
 	ipath_stats.sps_ints++;
 
@@ -1053,17 +1171,17 @@
 
 	if (unlikely(istat & ~dd->ipath_i_bitsextant))
 		ipath_dev_err(dd,
-			      "interrupt with unknown interrupts %x set\n",
-			      istat & (u32) ~ dd->ipath_i_bitsextant);
-	else
-		ipath_cdbg(VERBOSE, "intr stat=0x%x\n", istat);
+			      "interrupt with unknown interrupts %Lx set\n",
+			      istat & ~dd->ipath_i_bitsextant);
+	else if (istat & ~INFINIPATH_I_ERROR) /* errors do their own printing */
+		ipath_cdbg(VERBOSE, "intr stat=0x%Lx\n", istat);
 
-	if (unlikely(istat & INFINIPATH_I_ERROR)) {
+	if (istat & INFINIPATH_I_ERROR) {
 		ipath_stats.sps_errints++;
 		estat = ipath_read_kreg64(dd,
 					  dd->ipath_kregs->kr_errorstatus);
 		if (!estat)
-			dev_info(&dd->pcidev->dev, "error interrupt (%x), "
+			dev_info(&dd->pcidev->dev, "error interrupt (%Lx), "
 				 "but no error bits set!\n", istat);
 		else if (estat == -1LL)
 			/*
@@ -1073,9 +1191,7 @@
 			ipath_dev_err(dd, "Read of error status failed "
 				      "(all bits set); ignoring\n");
 		else
-			if (handle_errors(dd, estat))
-				/* force calling ipath_kreceive() */
-				chk0rcv = 1;
+			chk0rcv |= handle_errors(dd, estat);
 	}
 
 	if (istat & INFINIPATH_I_GPIO) {
@@ -1093,8 +1209,7 @@
 
 		gpiostatus = ipath_read_kreg32(
 			dd, dd->ipath_kregs->kr_gpio_status);
-		/* First the error-counter case.
-		 */
+		/* First the error-counter case. */
 		if ((gpiostatus & IPATH_GPIO_ERRINTR_MASK) &&
 		    (dd->ipath_flags & IPATH_GPIO_ERRINTRS)) {
 			/* want to clear the bits we see asserted. */
@@ -1156,7 +1271,6 @@
 					(u64) to_clear);
 		}
 	}
-	chk0rcv |= istat & port0rbits;
 
 	/*
 	 * Clear the interrupt bits we found set, unless they are receive
@@ -1169,22 +1283,25 @@
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, istat);
 
 	/*
-	 * handle port0 receive  before checking for pio buffers available,
-	 * since receives can overflow; piobuf waiters can afford a few
-	 * extra cycles, since they were waiting anyway, and user's waiting
-	 * for receive are at the bottom.
+	 * Handle kernel receive queues before checking for pio buffers
+	 * available since receives can overflow; piobuf waiters can afford
+	 * a few extra cycles, since they were waiting anyway, and users
+	 * waiting for receive are at the bottom.
 	 */
-	if (chk0rcv) {
+	kportrbits = (1ULL << dd->ipath_i_rcvavail_shift) |
+		(1ULL << dd->ipath_i_rcvurg_shift);
+	if (chk0rcv || (istat & kportrbits)) {
+		istat &= ~kportrbits;
 		ipath_kreceive(dd->ipath_pd[0]);
-		istat &= ~port0rbits;
 	}
 
-	if (istat & ((dd->ipath_i_rcvavail_mask <<
-		      INFINIPATH_I_RCVAVAIL_SHIFT)
-		     | (dd->ipath_i_rcvurg_mask <<
-			INFINIPATH_I_RCVURG_SHIFT)))
+	if (istat & ((dd->ipath_i_rcvavail_mask << dd->ipath_i_rcvavail_shift) |
+		     (dd->ipath_i_rcvurg_mask << dd->ipath_i_rcvurg_shift)))
 		handle_urcv(dd, istat);
 
+	if (istat & (INFINIPATH_I_SDMAINT | INFINIPATH_I_SDMADISABLED))
+		handle_sdma_intr(dd, istat);
+
 	if (istat & INFINIPATH_I_SPIOBUFAVAIL) {
 		unsigned long flags;
 
@@ -1195,7 +1312,10 @@
 		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
 		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
 
-		handle_layer_pioavail(dd);
+		if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA))
+			handle_layer_pioavail(dd);
+		else
+			ipath_dbg("unexpected BUFAVAIL intr\n");
 	}
 
 	ret = IRQ_HANDLED;
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h
index ecf3f7f..5863cbe 100644
--- a/drivers/infiniband/hw/ipath/ipath_kernel.h
+++ b/drivers/infiniband/hw/ipath/ipath_kernel.h
@@ -1,7 +1,7 @@
 #ifndef _IPATH_KERNEL_H
 #define _IPATH_KERNEL_H
 /*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -42,6 +42,8 @@
 #include <linux/pci.h>
 #include <linux/dma-mapping.h>
 #include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/scatterlist.h>
 #include <asm/io.h>
 #include <rdma/ib_verbs.h>
 
@@ -175,9 +177,13 @@
 	u16 poll_type;
 	/* port rcvhdrq head offset */
 	u32 port_head;
+	/* receive packet sequence counter */
+	u32 port_seq_cnt;
 };
 
 struct sk_buff;
+struct ipath_sge_state;
+struct ipath_verbs_txreq;
 
 /*
  * control information for layered drivers
@@ -191,6 +197,40 @@
 	dma_addr_t phys;
 };
 
+struct ipath_sdma_txreq {
+	int                 flags;
+	int                 sg_count;
+	union {
+		struct scatterlist *sg;
+		void *map_addr;
+	};
+	void              (*callback)(void *, int);
+	void               *callback_cookie;
+	int                 callback_status;
+	u16                 start_idx;  /* sdma private */
+	u16                 next_descq_idx;  /* sdma private */
+	struct list_head    list;       /* sdma private */
+};
+
+struct ipath_sdma_desc {
+	__le64 qw[2];
+};
+
+#define IPATH_SDMA_TXREQ_F_USELARGEBUF  0x1
+#define IPATH_SDMA_TXREQ_F_HEADTOHOST   0x2
+#define IPATH_SDMA_TXREQ_F_INTREQ       0x4
+#define IPATH_SDMA_TXREQ_F_FREEBUF      0x8
+#define IPATH_SDMA_TXREQ_F_FREEDESC     0x10
+#define IPATH_SDMA_TXREQ_F_VL15         0x20
+
+#define IPATH_SDMA_TXREQ_S_OK        0
+#define IPATH_SDMA_TXREQ_S_SENDERROR 1
+#define IPATH_SDMA_TXREQ_S_ABORTED   2
+#define IPATH_SDMA_TXREQ_S_SHUTDOWN  3
+
+/* max dwords in small buffer packet */
+#define IPATH_SMALLBUF_DWORDS (dd->ipath_piosize2k >> 2)
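+/* note: expects a local "struct ipath_devdata *dd" to be in scope */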
+
 /*
  * Possible IB config parameters for ipath_f_get/set_ib_cfg()
  */
@@ -221,11 +261,6 @@
 	unsigned long ipath_physaddr;
 	/* base of memory alloced for ipath_kregbase, for free */
 	u64 *ipath_kregalloc;
-	/*
-	 * virtual address where port0 rcvhdrqtail updated for this unit.
-	 * only written to by the chip, not the driver.
-	 */
-	volatile __le64 *ipath_hdrqtailptr;
 	/* ipath_cfgports pointers */
 	struct ipath_portdata **ipath_pd;
 	/* sk_buffs used by port 0 eager receive queue */
@@ -283,6 +318,7 @@
 	/* per chip actions needed for IB Link up/down changes */
 	int (*ipath_f_ib_updown)(struct ipath_devdata *, int, u64);
 
+	unsigned ipath_lastegr_idx;
 	struct ipath_ibdev *verbs_dev;
 	struct timer_list verbs_timer;
 	/* total dwords sent (summed from counter) */
@@ -309,6 +345,7 @@
 	ipath_err_t ipath_lasthwerror;
 	/* errors masked because they occur too fast */
 	ipath_err_t ipath_maskederrs;
+	u64 ipath_lastlinkrecov; /* link recoveries at last ACTIVE */
 	/* time in jiffies at which to re-enable maskederrs */
 	unsigned long ipath_unmasktime;
 	/* count of egrfull errors, combined for all ports */
@@ -347,6 +384,7 @@
 	u32 ipath_lastrpkts;
 	/* pio bufs allocated per port */
 	u32 ipath_pbufsport;
+	u32 ipath_pioupd_thresh; /* update threshold, some chips */
 	/*
 	 * number of ports configured as max; zero is set to number chip
 	 * supports, less gives more pio bufs/port, etc.
@@ -365,6 +403,7 @@
 	 * get to multiple devices
 	 */
 	u32 ipath_lastpioindex;
+	u32 ipath_lastpioindexl;
 	/* max length of freezemsg */
 	u32 ipath_freezelen;
 	/*
@@ -381,6 +420,15 @@
 	u32 ipath_pcibar0;
 	/* so we can rewrite it after a chip reset */
 	u32 ipath_pcibar1;
+	u32 ipath_x1_fix_tries;
+	u32 ipath_autoneg_tries;
+	u32 serdes_first_init_done;
+
+	struct ipath_relock {
+		atomic_t ipath_relock_timer_active;
+		struct timer_list ipath_relock_timer;
+		unsigned int ipath_relock_interval; /* in jiffies */
+	} ipath_relock_singleton;
 
 	/* interrupt number */
 	int ipath_irq;
@@ -403,7 +451,7 @@
 	u64 __iomem *ipath_egrtidbase;
 	/* lock to workaround chip bug 9437 and others */
 	spinlock_t ipath_kernel_tid_lock;
-	spinlock_t ipath_tid_lock;
+	spinlock_t ipath_user_tid_lock;
 	spinlock_t ipath_sendctrl_lock;
 
 	/*
@@ -422,11 +470,48 @@
 	struct class_device *diag_class_dev;
 	/* timer used to prevent stats overflow, error throttling, etc. */
 	struct timer_list ipath_stats_timer;
+	/* timer to verify interrupts work, and fallback if possible */
+	struct timer_list ipath_intrchk_timer;
 	void *ipath_dummy_hdrq;	/* used after port close */
 	dma_addr_t ipath_dummy_hdrq_phys;
 
+	/* SendDMA related entries */
+	spinlock_t            ipath_sdma_lock;
+	u64                   ipath_sdma_status;
+	unsigned long         ipath_sdma_abort_jiffies;
+	unsigned long         ipath_sdma_abort_intr_timeout;
+	unsigned long         ipath_sdma_buf_jiffies;
+	struct ipath_sdma_desc *ipath_sdma_descq;
+	u64		      ipath_sdma_descq_added;
+	u64		      ipath_sdma_descq_removed;
+	int		      ipath_sdma_desc_nreserved;
+	u16                   ipath_sdma_descq_cnt;
+	u16                   ipath_sdma_descq_tail;
+	u16                   ipath_sdma_descq_head;
+	u16                   ipath_sdma_next_intr;
+	u16                   ipath_sdma_reset_wait;
+	u8                    ipath_sdma_generation;
+	struct tasklet_struct ipath_sdma_abort_task;
+	struct tasklet_struct ipath_sdma_notify_task;
+	struct list_head      ipath_sdma_activelist;
+	struct list_head      ipath_sdma_notifylist;
+	atomic_t              ipath_sdma_vl15_count;
+	struct timer_list     ipath_sdma_vl15_timer;
+
+	dma_addr_t       ipath_sdma_descq_phys;
+	volatile __le64 *ipath_sdma_head_dma;
+	dma_addr_t       ipath_sdma_head_phys;
+
 	unsigned long ipath_ureg_align; /* user register alignment */
 
+	struct delayed_work ipath_autoneg_work;
+	wait_queue_head_t ipath_autoneg_wait;
+
+	/* HoL blocking / user app forward-progress state */
+	unsigned          ipath_hol_state;
+	unsigned          ipath_hol_next;
+	struct timer_list ipath_hol_timer;
+
 	/*
 	 * Shadow copies of registers; size indicates read access size.
 	 * Most of them are readonly, but some are write-only register,
@@ -447,6 +532,8 @@
 	 * init time.
 	 */
 	unsigned long ipath_pioavailshadow[8];
+	/* bitmap of send buffers available for the kernel to use with PIO. */
+	unsigned long ipath_pioavailkernel[8];
 	/* shadow of kr_gpio_out, for rmw ops */
 	u64 ipath_gpio_out;
 	/* shadow the gpio mask register */
@@ -472,6 +559,8 @@
 	u64 ipath_intconfig;
 	/* kr_sendpiobufbase value */
 	u64 ipath_piobufbase;
+	/* kr_ibcddrctrl shadow */
+	u64 ipath_ibcddrctrl;
 
 	/* these are the "32 bit" regs */
 
@@ -488,7 +577,10 @@
 	unsigned long ipath_rcvctrl;
 	/* shadow kr_sendctrl */
 	unsigned long ipath_sendctrl;
-	unsigned long ipath_lastcancel; /* to not count armlaunch after cancel */
+	/* to not count armlaunch after cancel */
+	unsigned long ipath_lastcancel;
+	/* count cases where special trigger was needed (double write) */
+	unsigned long ipath_spectriggerhit;
 
 	/* value we put in kr_rcvhdrcnt */
 	u32 ipath_rcvhdrcnt;
@@ -510,6 +602,7 @@
 	u32 ipath_piobcnt4k;
 	/* size in bytes of "4KB" PIO buffers */
 	u32 ipath_piosize4k;
+	u32 ipath_pioreserved; /* reserved for special in-kernel use */
 	/* kr_rcvegrbase value */
 	u32 ipath_rcvegrbase;
 	/* kr_rcvegrcnt value */
@@ -546,10 +639,10 @@
 	u32 ipath_init_ibmaxlen;
 	/* size of each rcvegrbuffer */
 	u32 ipath_rcvegrbufsize;
-	/* width (2,4,8,16,32) from HT config reg */
-	u32 ipath_htwidth;
-	/* HT speed (200,400,800,1000) from HT config */
-	u32 ipath_htspeed;
+	/* localbus width (1, 2, 4, 8, 16, 32) from config space */
+	u32 ipath_lbus_width;
+	/* localbus speed (HT: 200, 400, 800, 1000; PCIe: 2500) */
+	u32 ipath_lbus_speed;
 	/*
 	 * number of sequential ibcstatus change for polling active/quiet
 	 * (i.e., link not coming up).
@@ -573,21 +666,14 @@
 	 */
 	u8 ipath_serial[16];
 	/* human readable board version */
-	u8 ipath_boardversion[80];
+	u8 ipath_boardversion[96];
+	u8 ipath_lbus_info[32]; /* human readable localbus info */
 	/* chip major rev, from ipath_revision */
 	u8 ipath_majrev;
 	/* chip minor rev, from ipath_revision */
 	u8 ipath_minrev;
 	/* board rev, from ipath_revision */
 	u8 ipath_boardrev;
-
-	u8 ipath_r_portenable_shift;
-	u8 ipath_r_intravail_shift;
-	u8 ipath_r_tailupd_shift;
-	u8 ipath_r_portcfg_shift;
-
-	/* unit # of this chip, if present */
-	int ipath_unit;
 	/* saved for restore after reset */
 	u8 ipath_pci_cacheline;
 	/* LID mask control */
@@ -603,6 +689,14 @@
 	/* Rx Polarity inversion (compensate for ~tx on partner) */
 	u8 ipath_rx_pol_inv;
 
+	u8 ipath_r_portenable_shift;
+	u8 ipath_r_intravail_shift;
+	u8 ipath_r_tailupd_shift;
+	u8 ipath_r_portcfg_shift;
+
+	/* unit # of this chip, if present */
+	int ipath_unit;
+
 	/* local link integrity counter */
 	u32 ipath_lli_counter;
 	/* local link integrity errors */
@@ -617,9 +711,6 @@
 	u32 ipath_overrun_thresh_errs;
 	u32 ipath_lli_errs;
 
-	/* status check work */
-	struct delayed_work status_work;
-
 	/*
 	 * Not all devices managed by a driver instance are the same
 	 * type, so these fields must be per-device.
@@ -632,8 +723,8 @@
 	 * Below should be computable from number of ports,
 	 * since they are never modified.
 	 */
-	u32 ipath_i_rcvavail_mask;
-	u32 ipath_i_rcvurg_mask;
+	u64 ipath_i_rcvavail_mask;
+	u64 ipath_i_rcvurg_mask;
 	u16 ipath_i_rcvurg_shift;
 	u16 ipath_i_rcvavail_shift;
 
@@ -641,8 +732,9 @@
 	 * Register bits for selecting i2c direction and values, used for
 	 * I2C serial flash.
 	 */
-	u16 ipath_gpio_sda_num;
-	u16 ipath_gpio_scl_num;
+	u8 ipath_gpio_sda_num;
+	u8 ipath_gpio_scl_num;
+	u8 ipath_i2c_chain_type;
 	u64 ipath_gpio_sda;
 	u64 ipath_gpio_scl;
 
@@ -703,13 +795,51 @@
 	/* interrupt mitigation reload register info */
 	u16 ipath_jint_idle_ticks;	/* idle clock ticks */
 	u16 ipath_jint_max_packets;	/* max packets across all ports */
+
+	/*
+	 * lock for access to SerDes, and flags to sequence preset
+	 * versus steady-state. 7220-only at the moment.
+	 */
+	spinlock_t ipath_sdepb_lock;
+	u8 ipath_presets_needed; /* Set if presets to be restored next DOWN */
 };
 
+/* ipath_hol_state values (stopping/starting user proc, send flushing) */
+#define IPATH_HOL_UP       0
+#define IPATH_HOL_DOWN     1
+/* ipath_hol_next toggle values, used when hol_state IPATH_HOL_DOWN */
+#define IPATH_HOL_DOWNSTOP 0
+#define IPATH_HOL_DOWNCONT 1
+
+/* bit positions for sdma_status */
+#define IPATH_SDMA_ABORTING  0
+#define IPATH_SDMA_DISARMED  1
+#define IPATH_SDMA_DISABLED  2
+#define IPATH_SDMA_LAYERBUF  3
+#define IPATH_SDMA_RUNNING  62
+#define IPATH_SDMA_SHUTDOWN 63
+
+/* bit combinations that correspond to abort states */
+#define IPATH_SDMA_ABORT_NONE 0
+#define IPATH_SDMA_ABORT_ABORTING (1UL << IPATH_SDMA_ABORTING)
+#define IPATH_SDMA_ABORT_DISARMED ((1UL << IPATH_SDMA_ABORTING) | \
+	(1UL << IPATH_SDMA_DISARMED))
+#define IPATH_SDMA_ABORT_DISABLED ((1UL << IPATH_SDMA_ABORTING) | \
+	(1UL << IPATH_SDMA_DISABLED))
+#define IPATH_SDMA_ABORT_ABORTED ((1UL << IPATH_SDMA_ABORTING) | \
+	(1UL << IPATH_SDMA_DISARMED) | (1UL << IPATH_SDMA_DISABLED))
+#define IPATH_SDMA_ABORT_MASK ((1UL<<IPATH_SDMA_ABORTING) | \
+	(1UL << IPATH_SDMA_DISARMED) | (1UL << IPATH_SDMA_DISABLED))
+
+#define IPATH_SDMA_BUF_NONE 0
+#define IPATH_SDMA_BUF_MASK (1UL<<IPATH_SDMA_LAYERBUF)
+
 /* Private data for file operations */
 struct ipath_filedata {
 	struct ipath_portdata *pd;
 	unsigned subport;
 	unsigned tidcursor;
+	struct ipath_user_sdma_queue *pq;
 };
 extern struct list_head ipath_dev_list;
 extern spinlock_t ipath_devs_lock;
@@ -718,7 +848,7 @@
 int ipath_init_chip(struct ipath_devdata *, int);
 int ipath_enable_wc(struct ipath_devdata *dd);
 void ipath_disable_wc(struct ipath_devdata *dd);
-int ipath_count_units(int *npresentp, int *nupp, u32 *maxportsp);
+int ipath_count_units(int *npresentp, int *nupp, int *maxportsp);
 void ipath_shutdown_device(struct ipath_devdata *);
 void ipath_clear_freeze(struct ipath_devdata *);
 
@@ -741,7 +871,8 @@
 extern int ipath_diag_inuse;
 
 irqreturn_t ipath_intr(int irq, void *devid);
-int ipath_decode_err(char *buf, size_t blen, ipath_err_t err);
+int ipath_decode_err(struct ipath_devdata *dd, char *buf, size_t blen,
+		     ipath_err_t err);
 #if __IPATH_INFO || __IPATH_DBG
 extern const char *ipath_ibcstatus_str[];
 #endif
@@ -774,6 +905,13 @@
 int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv);
 void ipath_enable_armlaunch(struct ipath_devdata *);
 void ipath_disable_armlaunch(struct ipath_devdata *);
+void ipath_hol_down(struct ipath_devdata *);
+void ipath_hol_up(struct ipath_devdata *);
+void ipath_hol_event(unsigned long);
+void ipath_toggle_rclkrls(struct ipath_devdata *);
+void ipath_sd7220_clr_ibpar(struct ipath_devdata *);
+void ipath_set_relock_poll(struct ipath_devdata *, int);
+void ipath_shutdown_relock_poll(struct ipath_devdata *);
 
 /* for use in system calls, where we want to know device type, etc. */
 #define port_fp(fp) ((struct ipath_filedata *)(fp)->private_data)->pd
@@ -781,11 +919,15 @@
 	((struct ipath_filedata *)(fp)->private_data)->subport
 #define tidcursor_fp(fp) \
 	((struct ipath_filedata *)(fp)->private_data)->tidcursor
+#define user_sdma_queue_fp(fp) \
+	((struct ipath_filedata *)(fp)->private_data)->pq
 
 /*
  * values for ipath_flags
  */
-/* The chip is up and initted */
+		/* chip can report link latency (IB 1.2) */
+#define IPATH_HAS_LINK_LATENCY 0x1
+		/* The chip is up and initted */
 #define IPATH_INITTED       0x2
 		/* set if any user code has set kr_rcvhdrsize */
 #define IPATH_RCVHDRSZ_SET  0x4
@@ -809,6 +951,8 @@
 #define IPATH_LINKUNK       0x400
 		/* Write combining flush needed for PIO */
 #define IPATH_PIO_FLUSH_WC  0x1000
+		/* rcvhdrtail register is not DMA'd to memory */
+#define IPATH_NODMA_RTAIL   0x2000
 		/* no IB cable, or no device on IB cable */
 #define IPATH_NOCABLE       0x4000
 		/* Supports port zero per packet receive interrupts via
@@ -819,16 +963,26 @@
 		/* packet/word counters are 32 bit, else those 4 counters
 		 * are 64bit */
 #define IPATH_32BITCOUNTERS 0x20000
-		/* can miss port0 rx interrupts */
 		/* Interrupt register is 64 bits */
 #define IPATH_INTREG_64     0x40000
+		/* can miss port0 rx interrupts */
 #define IPATH_DISABLED      0x80000 /* administratively disabled */
 		/* Use GPIO interrupts for new counters */
 #define IPATH_GPIO_ERRINTRS 0x100000
 #define IPATH_SWAP_PIOBUFS  0x200000
+		/* Supports Send DMA */
+#define IPATH_HAS_SEND_DMA  0x400000
+		/* Supports Send Count (not just word count) in PBC */
+#define IPATH_HAS_PBC_CNT   0x800000
 		/* Suppress heartbeat, even if turning off loopback */
 #define IPATH_NO_HRTBT      0x1000000
+#define IPATH_HAS_THRESH_UPDATE 0x4000000
 #define IPATH_HAS_MULT_IB_SPEED 0x8000000
+#define IPATH_IB_AUTONEG_INPROG 0x10000000
+#define IPATH_IB_AUTONEG_FAILED 0x20000000
+		/* Linkdown-disable intentionally; do not attempt to bring up */
+#define IPATH_IB_LINK_DISABLED 0x40000000
+#define IPATH_IB_FORCE_NOTIFY 0x80000000 /* force notify on next ib change */
 
 /* Bits in GPIO for the added interrupts */
 #define IPATH_GPIO_PORT0_BIT 2
@@ -847,13 +1001,18 @@
 
 /* free up any allocated data at closes */
 void ipath_free_data(struct ipath_portdata *dd);
-u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32 *);
+u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32, u32 *);
+void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
+				unsigned len, int avail);
+void ipath_init_iba7220_funcs(struct ipath_devdata *);
 void ipath_init_iba6120_funcs(struct ipath_devdata *);
 void ipath_init_iba6110_funcs(struct ipath_devdata *);
 void ipath_get_eeprom_info(struct ipath_devdata *);
 int ipath_update_eeprom_log(struct ipath_devdata *dd);
 void ipath_inc_eeprom_err(struct ipath_devdata *dd, u32 eidx, u32 incr);
 u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg);
+void ipath_disarm_senderrbufs(struct ipath_devdata *, int);
+void ipath_force_pio_avail_update(struct ipath_devdata *);
 void signal_ib_event(struct ipath_devdata *dd, enum ib_event_type ev);
 
 /*
@@ -865,6 +1024,34 @@
 #define IPATH_LED_LOG 2  /* Logical (link) YELLOW LED */
 void ipath_set_led_override(struct ipath_devdata *dd, unsigned int val);
 
+/* send dma routines */
+int setup_sdma(struct ipath_devdata *);
+void teardown_sdma(struct ipath_devdata *);
+void ipath_restart_sdma(struct ipath_devdata *);
+void ipath_sdma_intr(struct ipath_devdata *);
+int ipath_sdma_verbs_send(struct ipath_devdata *, struct ipath_sge_state *,
+			  u32, struct ipath_verbs_txreq *);
+/* ipath_sdma_lock should be locked before calling this. */
+int ipath_sdma_make_progress(struct ipath_devdata *dd);
+
+/* must be called under ipath_sdma_lock */
+static inline u16 ipath_sdma_descq_freecnt(const struct ipath_devdata *dd)
+{
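+	/*
+	 * added - removed is the number of descriptors in use; one more is
+	 * kept unused so head == tail always means an empty ring.
+	 */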
+	return dd->ipath_sdma_descq_cnt -
+		(dd->ipath_sdma_descq_added - dd->ipath_sdma_descq_removed) -
+		1 - dd->ipath_sdma_desc_nreserved;
+}
+
+static inline void ipath_sdma_desc_reserve(struct ipath_devdata *dd, u16 cnt)
+{
+	dd->ipath_sdma_desc_nreserved += cnt;
+}
+
+static inline void ipath_sdma_desc_unreserve(struct ipath_devdata *dd, u16 cnt)
+{
+	dd->ipath_sdma_desc_nreserved -= cnt;
+}
+
 /*
  * number of words used for protocol header if not set by ipath_userinit();
  */
@@ -875,6 +1062,8 @@
 void ipath_release_user_pages_on_close(struct page **, size_t);
 int ipath_eeprom_read(struct ipath_devdata *, u8, void *, int);
 int ipath_eeprom_write(struct ipath_devdata *, u8, const void *, int);
+int ipath_tempsense_read(struct ipath_devdata *, u8 regnum);
+int ipath_tempsense_write(struct ipath_devdata *, u8 regnum, u8 data);
 
 /* these are used for the registers that vary with port */
 void ipath_write_kreg_port(const struct ipath_devdata *, ipath_kreg,
@@ -891,8 +1080,7 @@
 
 /*
  * At the moment, none of the s-registers are writable, so no
- * ipath_write_sreg(), and none of the c-registers are writable, so no
- * ipath_write_creg().
+ * ipath_write_sreg().
  */
 
 /**
@@ -1001,6 +1189,27 @@
 				pd->port_rcvhdrtail_kvaddr));
 }
 
+static inline u32 ipath_get_hdrqtail(const struct ipath_portdata *pd)
+{
+	const struct ipath_devdata *dd = pd->port_dd;
+	u32 hdrqtail;
+
+	if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
+		__le32 *rhf_addr;
+		u32 seq;
+
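+		/*
+		 * No DMA'd tail register: a new packet at the head is
+		 * detected by its sequence number matching the expected
+		 * per-port sequence count.
+		 */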
+		rhf_addr = (__le32 *) pd->port_rcvhdrq +
+			pd->port_head + dd->ipath_rhf_offset;
+		seq = ipath_hdrget_seq(rhf_addr);
+		hdrqtail = pd->port_head;
+		if (seq == pd->port_seq_cnt)
+			hdrqtail++;
+	} else
+		hdrqtail = ipath_get_rcvhdrtail(pd);
+
+	return hdrqtail;
+}
+
 static inline u64 ipath_read_ireg(const struct ipath_devdata *dd, ipath_kreg r)
 {
 	return (dd->ipath_flags & IPATH_INTREG_64) ?
@@ -1029,6 +1238,21 @@
 }
 
 /*
+ * From the contents of IBCStatus (or a saved copy), return the logical
+ * link state: a combination of link state and link training state
+ * (down, init, arm, active, etc.).
+ */
+static inline u32 ipath_ib_state(struct ipath_devdata *dd, u64 ibcs)
+{
+	u32 ibs;
+	ibs = (u32)(ibcs >> INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
+		dd->ibcs_lts_mask;
+	ibs |= (u32)(ibcs &
+		(INFINIPATH_IBCS_LINKSTATE_MASK << dd->ibcs_ls_shift));
+	return ibs;
+}
+
+/*
  * sysfs interface.
  */
 
@@ -1053,6 +1277,7 @@
 dma_addr_t ipath_map_page(struct pci_dev *, struct page *, unsigned long,
 			  size_t, int);
 dma_addr_t ipath_map_single(struct pci_dev *, void *, size_t, int);
+const char *ipath_get_unit_name(int unit);
 
 /*
  * Flush write combining store buffers (if present) and perform a write
@@ -1065,11 +1290,8 @@
 #endif
 
 extern unsigned ipath_debug; /* debugging bit mask */
-
-#define IPATH_MAX_PARITY_ATTEMPTS 10000 /* max times to try recovery */
-
-const char *ipath_get_unit_name(int unit);
-
+extern unsigned ipath_linkrecovery;
+extern unsigned ipath_mtu4096;
 extern struct mutex ipath_mutex;
 
 #define IPATH_DRV_NAME		"ib_ipath"
@@ -1096,7 +1318,7 @@
 
 # define __IPATH_DBG_WHICH(which,fmt,...) \
 	do { \
-		if(unlikely(ipath_debug&(which))) \
+		if (unlikely(ipath_debug & (which))) \
 			printk(KERN_DEBUG IPATH_DRV_NAME ": %s: " fmt, \
 			       __func__,##__VA_ARGS__); \
 	} while(0)
diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c
index b34b91d..1ff46ae 100644
--- a/drivers/infiniband/hw/ipath/ipath_mad.c
+++ b/drivers/infiniband/hw/ipath/ipath_mad.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -146,6 +146,15 @@
 	return reply(smp);
 }
 
+static void set_link_width_enabled(struct ipath_devdata *dd, u32 w)
+{
+	(void) dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LWID_ENB, w);
+}
+
+static void set_link_speed_enabled(struct ipath_devdata *dd, u32 s)
+{
+	(void) dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_SPD_ENB, s);
+}
 
 static int get_overrunthreshold(struct ipath_devdata *dd)
 {
@@ -226,6 +235,7 @@
 				  struct ib_device *ibdev, u8 port)
 {
 	struct ipath_ibdev *dev;
+	struct ipath_devdata *dd;
 	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
 	u16 lid;
 	u8 ibcstat;
@@ -239,6 +249,7 @@
 	}
 
 	dev = to_idev(ibdev);
+	dd = dev->dd;
 
 	/* Clear all fields.  Only set the non-zero fields. */
 	memset(smp->data, 0, sizeof(smp->data));
@@ -248,25 +259,28 @@
 	    dev->mkeyprot == 0)
 		pip->mkey = dev->mkey;
 	pip->gid_prefix = dev->gid_prefix;
-	lid = dev->dd->ipath_lid;
+	lid = dd->ipath_lid;
 	pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE;
 	pip->sm_lid = cpu_to_be16(dev->sm_lid);
 	pip->cap_mask = cpu_to_be32(dev->port_cap_flags);
 	/* pip->diag_code; */
 	pip->mkey_lease_period = cpu_to_be16(dev->mkey_lease_period);
 	pip->local_port_num = port;
-	pip->link_width_enabled = dev->link_width_enabled;
-	pip->link_width_supported = 3;	/* 1x or 4x */
-	pip->link_width_active = 2;	/* 4x */
-	pip->linkspeed_portstate = 0x10;	/* 2.5Gbps */
-	ibcstat = dev->dd->ipath_lastibcstat;
-	pip->linkspeed_portstate |= ((ibcstat >> 4) & 0x3) + 1;
+	pip->link_width_enabled = dd->ipath_link_width_enabled;
+	pip->link_width_supported = dd->ipath_link_width_supported;
+	pip->link_width_active = dd->ipath_link_width_active;
+	pip->linkspeed_portstate = dd->ipath_link_speed_supported << 4;
+	ibcstat = dd->ipath_lastibcstat;
+	/* map LinkState to IB portinfo values. */
+	pip->linkspeed_portstate |= ipath_ib_linkstate(dd, ibcstat) + 1;
+
 	pip->portphysstate_linkdown =
-		(ipath_cvt_physportstate[ibcstat & 0xf] << 4) |
-		(get_linkdowndefaultstate(dev->dd) ? 1 : 2);
-	pip->mkeyprot_resv_lmc = (dev->mkeyprot << 6) | dev->dd->ipath_lmc;
-	pip->linkspeedactive_enabled = 0x11;	/* 2.5Gbps, 2.5Gbps */
-	switch (dev->dd->ipath_ibmtu) {
+		(ipath_cvt_physportstate[ibcstat & dd->ibcs_lts_mask] << 4) |
+		(get_linkdowndefaultstate(dd) ? 1 : 2);
+	pip->mkeyprot_resv_lmc = (dev->mkeyprot << 6) | dd->ipath_lmc;
+	pip->linkspeedactive_enabled = (dd->ipath_link_speed_active << 4) |
+		dd->ipath_link_speed_enabled;
+	switch (dd->ipath_ibmtu) {
 	case 4096:
 		mtu = IB_MTU_4096;
 		break;
@@ -292,19 +306,15 @@
 	/* pip->vl_arb_high_cap; // only one VL */
 	/* pip->vl_arb_low_cap; // only one VL */
 	/* InitTypeReply = 0 */
-	/*
-	 * Note: the chips support a maximum MTU of 4096, but the driver
-	 * hasn't implemented this feature yet, so set the maximum value
-	 * to 2048.
-	 */
-	pip->inittypereply_mtucap = IB_MTU_2048;
-	// HCAs ignore VLStallCount and HOQLife
+	/* our mtu cap depends on whether 4K MTU enabled or not */
+	pip->inittypereply_mtucap = ipath_mtu4096 ? IB_MTU_4096 : IB_MTU_2048;
+	/* HCAs ignore VLStallCount and HOQLife */
 	/* pip->vlstallcnt_hoqlife; */
 	pip->operationalvl_pei_peo_fpi_fpo = 0x10;	/* OVLs = 1 */
 	pip->mkey_violations = cpu_to_be16(dev->mkey_violations);
 	/* P_KeyViolations are counted by hardware. */
 	pip->pkey_violations =
-		cpu_to_be16((ipath_get_cr_errpkey(dev->dd) -
+		cpu_to_be16((ipath_get_cr_errpkey(dd) -
 			     dev->z_pkey_violations) & 0xFFFF);
 	pip->qkey_violations = cpu_to_be16(dev->qkey_violations);
 	/* Only the hardware GUID is supported for now */
@@ -313,10 +323,17 @@
 	/* 32.768 usec. response time (guessing) */
 	pip->resv_resptimevalue = 3;
 	pip->localphyerrors_overrunerrors =
-		(get_phyerrthreshold(dev->dd) << 4) |
-		get_overrunthreshold(dev->dd);
+		(get_phyerrthreshold(dd) << 4) |
+		get_overrunthreshold(dd);
 	/* pip->max_credit_hint; */
-	/* pip->link_roundtrip_latency[3]; */
+	if (dev->port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
+		u32 v;
+
+		v = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LINKLATENCY);
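+		/* 24-bit latency value, stored most-significant byte first */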
+		pip->link_roundtrip_latency[0] = v >> 16;
+		pip->link_roundtrip_latency[1] = v >> 8;
+		pip->link_roundtrip_latency[2] = v;
+	}
 
 	ret = reply(smp);
 
@@ -444,19 +461,25 @@
 		ib_dispatch_event(&event);
 	}
 
-	/* Only 4x supported but allow 1x or 4x to be set (see 14.2.6.6). */
+	/* Allow 1x or 4x to be set (see 14.2.6.6). */
 	lwe = pip->link_width_enabled;
-	if ((lwe >= 4 && lwe <= 8) || (lwe >= 0xC && lwe <= 0xFE))
-		goto err;
-	if (lwe == 0xFF)
-		dev->link_width_enabled = 3;	/* 1x or 4x */
-	else if (lwe)
-		dev->link_width_enabled = lwe;
+	if (lwe) {
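+		/* 0xFF: enable all supported widths (see IBA 14.2.6.6) */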
+		if (lwe == 0xFF)
+			lwe = dd->ipath_link_width_supported;
+		else if (lwe >= 16 || (lwe & ~dd->ipath_link_width_supported))
+			goto err;
+		set_link_width_enabled(dd, lwe);
+	}
 
-	/* Only 2.5 Gbs supported. */
+	/* Allow 2.5 or 5.0 Gb/s. */
 	lse = pip->linkspeedactive_enabled & 0xF;
-	if (lse >= 2 && lse <= 0xE)
-		goto err;
+	if (lse) {
+		if (lse == 15)
+			lse = dd->ipath_link_speed_supported;
+		else if (lse >= 8 || (lse & ~dd->ipath_link_speed_supported))
+			goto err;
+		set_link_speed_enabled(dd, lse);
+	}
 
 	/* Set link down default state. */
 	switch (pip->portphysstate_linkdown & 0xF) {
@@ -491,6 +514,8 @@
 		mtu = 2048;
 		break;
 	case IB_MTU_4096:
+		if (!ipath_mtu4096)
+			goto err;
 		mtu = 4096;
 		break;
 	default:
@@ -565,6 +590,10 @@
 		else
 			goto err;
 		ipath_set_linkstate(dd, lstate);
+		if (lstate == IPATH_IB_LINKDOWN_DISABLE) {
+			ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
+			goto done;
+		}
 		ipath_wait_linkstate(dd, IPATH_LINKINIT | IPATH_LINKARMED |
 				IPATH_LINKACTIVE, 1000);
 		break;
@@ -948,10 +977,14 @@
 	 * nsec.  0 == 4 nsec., 1 == 8 nsec., ..., 255 == 1020 nsec.  Sample
 	 * intervals are counted in ticks.  Since we use Linux timers, that
 	 * count in jiffies, we can't sample for less than 1000 ticks if HZ
-	 * == 1000 (4000 ticks if HZ is 250).
+	 * == 1000 (4000 ticks if HZ is 250).  link_speed_active returns 2 for
+	 * DDR and 1 for SDR; set the tick to 1 for DDR and 0 for SDR on chips
+	 * that have hardware support for delaying packets.
 	 */
-	/* XXX This is WRONG. */
-	p->tick = 250;		/* 1 usec. */
+	if (crp->cr_psstat)
+		p->tick = dev->dd->ipath_link_speed_active - 1;
+	else
+		p->tick = 250;		/* 1 usec. */
 	p->counter_width = 4;	/* 32 bit counters */
 	p->counter_mask0_9 = COUNTER_MASK0_9;
 	spin_lock_irqsave(&dev->pending_lock, flags);
@@ -1364,7 +1397,8 @@
 	}
 
 	/* Is the mkey in the process of expiring? */
-	if (dev->mkey_lease_timeout && jiffies >= dev->mkey_lease_timeout) {
+	if (dev->mkey_lease_timeout &&
+	    time_after_eq(jiffies, dev->mkey_lease_timeout)) {
 		/* Clear timeout and mkey protection field. */
 		dev->mkey_lease_timeout = 0;
 		dev->mkeyprot = 0;
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index 087ed31..dd5b6e9 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -340,6 +340,7 @@
 	qp->s_flags &= IPATH_S_SIGNAL_REQ_WR;
 	qp->s_hdrwords = 0;
 	qp->s_wqe = NULL;
+	qp->s_pkt_delay = 0;
 	qp->s_psn = 0;
 	qp->r_psn = 0;
 	qp->r_msn = 0;
@@ -392,7 +393,6 @@
 		  qp->ibqp.qp_num, qp->remote_qpn, err);
 
 	spin_lock(&dev->pending_lock);
-	/* XXX What if its already removed by the timeout code? */
 	if (!list_empty(&qp->timerwait))
 		list_del_init(&qp->timerwait);
 	if (!list_empty(&qp->piowait))
@@ -516,13 +516,13 @@
 			goto inval;
 
 	/*
-	 * Note: the chips support a maximum MTU of 4096, but the driver
-	 * hasn't implemented this feature yet, so don't allow Path MTU
-	 * values greater than 2048.
+	 * don't allow invalid Path MTU values, or values greater than 2048,
+	 * unless we are configured for a 4KB MTU
 	 */
-	if (attr_mask & IB_QP_PATH_MTU)
-		if (attr->path_mtu > IB_MTU_2048)
-			goto inval;
+	if ((attr_mask & IB_QP_PATH_MTU) &&
+		(ib_mtu_enum_to_int(attr->path_mtu) == -1 ||
+		(attr->path_mtu > IB_MTU_2048 && !ipath_mtu4096)))
+		goto inval;
 
 	if (attr_mask & IB_QP_PATH_MIG_STATE)
 		if (attr->path_mig_state != IB_MIG_MIGRATED &&
@@ -564,8 +564,10 @@
 	if (attr_mask & IB_QP_ACCESS_FLAGS)
 		qp->qp_access_flags = attr->qp_access_flags;
 
-	if (attr_mask & IB_QP_AV)
+	if (attr_mask & IB_QP_AV) {
 		qp->remote_ah_attr = attr->ah_attr;
+		qp->s_dmult = ipath_ib_rate_to_mult(attr->ah_attr.static_rate);
+	}
 
 	if (attr_mask & IB_QP_PATH_MTU)
 		qp->path_mtu = attr->path_mtu;
@@ -748,22 +750,33 @@
 	size_t sz;
 	struct ib_qp *ret;
 
-	if (init_attr->cap.max_send_sge > ib_ipath_max_sges ||
-	    init_attr->cap.max_recv_sge > ib_ipath_max_sges ||
-	    init_attr->cap.max_send_wr > ib_ipath_max_qp_wrs ||
-	    init_attr->cap.max_recv_wr > ib_ipath_max_qp_wrs) {
-		ret = ERR_PTR(-ENOMEM);
+	if (init_attr->create_flags) {
+		ret = ERR_PTR(-EINVAL);
 		goto bail;
 	}
 
-	if (init_attr->cap.max_send_sge +
-	    init_attr->cap.max_recv_sge +
-	    init_attr->cap.max_send_wr +
-	    init_attr->cap.max_recv_wr == 0) {
+	if (init_attr->cap.max_send_sge > ib_ipath_max_sges ||
+	    init_attr->cap.max_send_wr > ib_ipath_max_qp_wrs) {
 		ret = ERR_PTR(-EINVAL);
 		goto bail;
 	}
 
+	/* Check receive queue parameters if no SRQ is specified. */
+	if (!init_attr->srq) {
+		if (init_attr->cap.max_recv_sge > ib_ipath_max_sges ||
+		    init_attr->cap.max_recv_wr > ib_ipath_max_qp_wrs) {
+			ret = ERR_PTR(-EINVAL);
+			goto bail;
+		}
+		if (init_attr->cap.max_send_sge +
+		    init_attr->cap.max_send_wr +
+		    init_attr->cap.max_recv_sge +
+		    init_attr->cap.max_recv_wr == 0) {
+			ret = ERR_PTR(-EINVAL);
+			goto bail;
+		}
+	}
+
 	switch (init_attr->qp_type) {
 	case IB_QPT_UC:
 	case IB_QPT_RC:
@@ -840,6 +853,7 @@
 			goto bail_qp;
 		}
 		qp->ip = NULL;
+		qp->s_tx = NULL;
 		ipath_reset_qp(qp, init_attr->qp_type);
 		break;
 
@@ -945,12 +959,20 @@
 	/* Stop the sending tasklet. */
 	tasklet_kill(&qp->s_task);
 
+	if (qp->s_tx) {
+		atomic_dec(&qp->refcount);
+		if (qp->s_tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
+			kfree(qp->s_tx->txreq.map_addr);
+	}
+
 	/* Make sure the QP isn't on the timeout list. */
 	spin_lock_irqsave(&dev->pending_lock, flags);
 	if (!list_empty(&qp->timerwait))
 		list_del_init(&qp->timerwait);
 	if (!list_empty(&qp->piowait))
 		list_del_init(&qp->piowait);
+	if (qp->s_tx)
+		list_add(&qp->s_tx->txreq.list, &dev->txreq_free);
 	spin_unlock_irqrestore(&dev->pending_lock, flags);
 
 	/*
@@ -1021,7 +1043,6 @@
 		  qp->ibqp.qp_num, qp->remote_qpn, wc->status);
 
 	spin_lock(&dev->pending_lock);
-	/* XXX What if its already removed by the timeout code? */
 	if (!list_empty(&qp->timerwait))
 		list_del_init(&qp->timerwait);
 	if (!list_empty(&qp->piowait))
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index 40f3e37..c405dfb 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -31,6 +31,8 @@
  * SOFTWARE.
  */
 
+#include <linux/io.h>
+
 #include "ipath_verbs.h"
 #include "ipath_kernel.h"
 
@@ -306,7 +308,7 @@
 			else {
 				qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
 				/* Immediate data comes after the BTH */
-				ohdr->u.imm_data = wqe->wr.imm_data;
+				ohdr->u.imm_data = wqe->wr.ex.imm_data;
 				hwords += 1;
 			}
 			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
@@ -344,7 +346,7 @@
 				qp->s_state =
 					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
 				/* Immediate data comes after RETH */
-				ohdr->u.rc.imm_data = wqe->wr.imm_data;
+				ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
 				hwords += 1;
 				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
 					bth0 |= 1 << 23;
@@ -488,7 +490,7 @@
 		else {
 			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
 			/* Immediate data comes after the BTH */
-			ohdr->u.imm_data = wqe->wr.imm_data;
+			ohdr->u.imm_data = wqe->wr.ex.imm_data;
 			hwords += 1;
 		}
 		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
@@ -524,7 +526,7 @@
 		else {
 			qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
 			/* Immediate data comes after the BTH */
-			ohdr->u.imm_data = wqe->wr.imm_data;
+			ohdr->u.imm_data = wqe->wr.ex.imm_data;
 			hwords += 1;
 			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
 				bth0 |= 1 << 23;
@@ -585,19 +587,39 @@
 static void send_rc_ack(struct ipath_qp *qp)
 {
 	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
+	struct ipath_devdata *dd;
 	u16 lrh0;
 	u32 bth0;
 	u32 hwords;
+	u32 __iomem *piobuf;
 	struct ipath_ib_header hdr;
 	struct ipath_other_headers *ohdr;
 	unsigned long flags;
 
+	spin_lock_irqsave(&qp->s_lock, flags);
+
 	/* Don't send ACK or NAK if a RDMA read or atomic is pending. */
 	if (qp->r_head_ack_queue != qp->s_tail_ack_queue ||
 	    (qp->s_flags & IPATH_S_ACK_PENDING) ||
 	    qp->s_ack_state != OP(ACKNOWLEDGE))
 		goto queue_ack;
 
+	spin_unlock_irqrestore(&qp->s_lock, flags);
+
+	dd = dev->dd;
+	piobuf = ipath_getpiobuf(dd, 0, NULL);
+	if (!piobuf) {
+		/*
+		 * We are out of PIO buffers at the moment.
+		 * Pass responsibility for sending the ACK to the
+		 * send tasklet so that when a PIO buffer becomes
+		 * available, the ACK is sent ahead of other outgoing
+		 * packets.
+		 */
+		spin_lock_irqsave(&qp->s_lock, flags);
+		goto queue_ack;
+	}
+
 	/* Construct the header. */
 	ohdr = &hdr.u.oth;
 	lrh0 = IPATH_LRH_BTH;
@@ -611,7 +633,7 @@
 		lrh0 = IPATH_LRH_GRH;
 	}
 	/* read pkey_index w/o lock (it's atomic) */
-	bth0 = ipath_get_pkey(dev->dd, qp->s_pkey_index) |
+	bth0 = ipath_get_pkey(dd, qp->s_pkey_index) |
 		(OP(ACKNOWLEDGE) << 24) | (1 << 22);
 	if (qp->r_nak_state)
 		ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
@@ -623,30 +645,29 @@
 	hdr.lrh[0] = cpu_to_be16(lrh0);
 	hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
 	hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
-	hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid);
+	hdr.lrh[3] = cpu_to_be16(dd->ipath_lid);
 	ohdr->bth[0] = cpu_to_be32(bth0);
 	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
 	ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & IPATH_PSN_MASK);
 
-	/*
-	 * If we can send the ACK, clear the ACK state.
-	 */
-	if (ipath_verbs_send(qp, &hdr, hwords, NULL, 0) == 0) {
-		dev->n_unicast_xmit++;
-		goto done;
-	}
+	writeq(hwords + 1, piobuf);
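+	/* first qword is the PBC; hwords + 1 is the send length in dwords */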
 
-	/*
-	 * We are out of PIO buffers at the moment.
-	 * Pass responsibility for sending the ACK to the
-	 * send tasklet so that when a PIO buffer becomes
-	 * available, the ACK is sent ahead of other outgoing
-	 * packets.
-	 */
-	dev->n_rc_qacks++;
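+	/*
+	 * For write-combining, copy all but the last header word, flush,
+	 * then write the final word, so the chip only sees a complete packet.
+	 */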
+	if (dd->ipath_flags & IPATH_PIO_FLUSH_WC) {
+		u32 *hdrp = (u32 *) &hdr;
+
+		ipath_flush_wc();
+		__iowrite32_copy(piobuf + 2, hdrp, hwords - 1);
+		ipath_flush_wc();
+		__raw_writel(hdrp[hwords - 1], piobuf + hwords + 1);
+	} else
+		__iowrite32_copy(piobuf + 2, (u32 *) &hdr, hwords);
+
+	ipath_flush_wc();
+
+	dev->n_unicast_xmit++;
+	goto done;
 
 queue_ack:
-	spin_lock_irqsave(&qp->s_lock, flags);
 	dev->n_rc_qacks++;
 	qp->s_flags |= IPATH_S_ACK_PENDING;
 	qp->s_nak_state = qp->r_nak_state;
diff --git a/drivers/infiniband/hw/ipath/ipath_registers.h b/drivers/infiniband/hw/ipath/ipath_registers.h
index 92ad73a..8f44d0c 100644
--- a/drivers/infiniband/hw/ipath/ipath_registers.h
+++ b/drivers/infiniband/hw/ipath/ipath_registers.h
@@ -63,67 +63,92 @@
 /* kr_control bits */
 #define INFINIPATH_C_FREEZEMODE 0x00000002
 #define INFINIPATH_C_LINKENABLE 0x00000004
-#define INFINIPATH_C_RESET 0x00000001
 
 /* kr_sendctrl bits */
 #define INFINIPATH_S_DISARMPIOBUF_SHIFT 16
+#define INFINIPATH_S_UPDTHRESH_SHIFT 24
+#define INFINIPATH_S_UPDTHRESH_MASK 0x1f
 
 #define IPATH_S_ABORT		0
 #define IPATH_S_PIOINTBUFAVAIL	1
 #define IPATH_S_PIOBUFAVAILUPD	2
 #define IPATH_S_PIOENABLE	3
+#define IPATH_S_SDMAINTENABLE	9
+#define IPATH_S_SDMASINGLEDESCRIPTOR	10
+#define IPATH_S_SDMAENABLE	11
+#define IPATH_S_SDMAHALT	12
 #define IPATH_S_DISARM		31
 
 #define INFINIPATH_S_ABORT		(1U << IPATH_S_ABORT)
 #define INFINIPATH_S_PIOINTBUFAVAIL	(1U << IPATH_S_PIOINTBUFAVAIL)
 #define INFINIPATH_S_PIOBUFAVAILUPD	(1U << IPATH_S_PIOBUFAVAILUPD)
 #define INFINIPATH_S_PIOENABLE		(1U << IPATH_S_PIOENABLE)
+#define INFINIPATH_S_SDMAINTENABLE	(1U << IPATH_S_SDMAINTENABLE)
+#define INFINIPATH_S_SDMASINGLEDESCRIPTOR \
+					(1U << IPATH_S_SDMASINGLEDESCRIPTOR)
+#define INFINIPATH_S_SDMAENABLE		(1U << IPATH_S_SDMAENABLE)
+#define INFINIPATH_S_SDMAHALT		(1U << IPATH_S_SDMAHALT)
 #define INFINIPATH_S_DISARM		(1U << IPATH_S_DISARM)
 
-/* kr_rcvctrl bits */
+/* kr_rcvctrl bits that are the same on multiple chips */
 #define INFINIPATH_R_PORTENABLE_SHIFT 0
 #define INFINIPATH_R_QPMAP_ENABLE (1ULL << 38)
 
 /* kr_intstatus, kr_intclear, kr_intmask bits */
-#define INFINIPATH_I_RCVURG_SHIFT 0
-#define INFINIPATH_I_RCVAVAIL_SHIFT 12
-#define INFINIPATH_I_ERROR        0x80000000
-#define INFINIPATH_I_SPIOSENT     0x40000000
-#define INFINIPATH_I_SPIOBUFAVAIL 0x20000000
-#define INFINIPATH_I_GPIO         0x10000000
+#define INFINIPATH_I_SDMAINT		0x8000000000000000ULL
+#define INFINIPATH_I_SDMADISABLED	0x4000000000000000ULL
+#define INFINIPATH_I_ERROR		0x0000000080000000ULL
+#define INFINIPATH_I_SPIOSENT		0x0000000040000000ULL
+#define INFINIPATH_I_SPIOBUFAVAIL	0x0000000020000000ULL
+#define INFINIPATH_I_GPIO		0x0000000010000000ULL
+#define INFINIPATH_I_JINT		0x0000000004000000ULL
 
 /* kr_errorstatus, kr_errorclear, kr_errormask bits */
-#define INFINIPATH_E_RFORMATERR      0x0000000000000001ULL
-#define INFINIPATH_E_RVCRC           0x0000000000000002ULL
-#define INFINIPATH_E_RICRC           0x0000000000000004ULL
-#define INFINIPATH_E_RMINPKTLEN      0x0000000000000008ULL
-#define INFINIPATH_E_RMAXPKTLEN      0x0000000000000010ULL
-#define INFINIPATH_E_RLONGPKTLEN     0x0000000000000020ULL
-#define INFINIPATH_E_RSHORTPKTLEN    0x0000000000000040ULL
-#define INFINIPATH_E_RUNEXPCHAR      0x0000000000000080ULL
-#define INFINIPATH_E_RUNSUPVL        0x0000000000000100ULL
-#define INFINIPATH_E_REBP            0x0000000000000200ULL
-#define INFINIPATH_E_RIBFLOW         0x0000000000000400ULL
-#define INFINIPATH_E_RBADVERSION     0x0000000000000800ULL
-#define INFINIPATH_E_RRCVEGRFULL     0x0000000000001000ULL
-#define INFINIPATH_E_RRCVHDRFULL     0x0000000000002000ULL
-#define INFINIPATH_E_RBADTID         0x0000000000004000ULL
-#define INFINIPATH_E_RHDRLEN         0x0000000000008000ULL
-#define INFINIPATH_E_RHDR            0x0000000000010000ULL
-#define INFINIPATH_E_RIBLOSTLINK     0x0000000000020000ULL
-#define INFINIPATH_E_SMINPKTLEN      0x0000000020000000ULL
-#define INFINIPATH_E_SMAXPKTLEN      0x0000000040000000ULL
-#define INFINIPATH_E_SUNDERRUN       0x0000000080000000ULL
-#define INFINIPATH_E_SPKTLEN         0x0000000100000000ULL
-#define INFINIPATH_E_SDROPPEDSMPPKT  0x0000000200000000ULL
-#define INFINIPATH_E_SDROPPEDDATAPKT 0x0000000400000000ULL
-#define INFINIPATH_E_SPIOARMLAUNCH   0x0000000800000000ULL
-#define INFINIPATH_E_SUNEXPERRPKTNUM 0x0000001000000000ULL
-#define INFINIPATH_E_SUNSUPVL        0x0000002000000000ULL
-#define INFINIPATH_E_IBSTATUSCHANGED 0x0001000000000000ULL
-#define INFINIPATH_E_INVALIDADDR     0x0002000000000000ULL
-#define INFINIPATH_E_RESET           0x0004000000000000ULL
-#define INFINIPATH_E_HARDWARE        0x0008000000000000ULL
+#define INFINIPATH_E_RFORMATERR			0x0000000000000001ULL
+#define INFINIPATH_E_RVCRC			0x0000000000000002ULL
+#define INFINIPATH_E_RICRC			0x0000000000000004ULL
+#define INFINIPATH_E_RMINPKTLEN			0x0000000000000008ULL
+#define INFINIPATH_E_RMAXPKTLEN			0x0000000000000010ULL
+#define INFINIPATH_E_RLONGPKTLEN		0x0000000000000020ULL
+#define INFINIPATH_E_RSHORTPKTLEN		0x0000000000000040ULL
+#define INFINIPATH_E_RUNEXPCHAR			0x0000000000000080ULL
+#define INFINIPATH_E_RUNSUPVL			0x0000000000000100ULL
+#define INFINIPATH_E_REBP			0x0000000000000200ULL
+#define INFINIPATH_E_RIBFLOW			0x0000000000000400ULL
+#define INFINIPATH_E_RBADVERSION		0x0000000000000800ULL
+#define INFINIPATH_E_RRCVEGRFULL		0x0000000000001000ULL
+#define INFINIPATH_E_RRCVHDRFULL		0x0000000000002000ULL
+#define INFINIPATH_E_RBADTID			0x0000000000004000ULL
+#define INFINIPATH_E_RHDRLEN			0x0000000000008000ULL
+#define INFINIPATH_E_RHDR			0x0000000000010000ULL
+#define INFINIPATH_E_RIBLOSTLINK		0x0000000000020000ULL
+#define INFINIPATH_E_SENDSPECIALTRIGGER		0x0000000008000000ULL
+#define INFINIPATH_E_SDMADISABLED		0x0000000010000000ULL
+#define INFINIPATH_E_SMINPKTLEN			0x0000000020000000ULL
+#define INFINIPATH_E_SMAXPKTLEN			0x0000000040000000ULL
+#define INFINIPATH_E_SUNDERRUN			0x0000000080000000ULL
+#define INFINIPATH_E_SPKTLEN			0x0000000100000000ULL
+#define INFINIPATH_E_SDROPPEDSMPPKT		0x0000000200000000ULL
+#define INFINIPATH_E_SDROPPEDDATAPKT		0x0000000400000000ULL
+#define INFINIPATH_E_SPIOARMLAUNCH		0x0000000800000000ULL
+#define INFINIPATH_E_SUNEXPERRPKTNUM		0x0000001000000000ULL
+#define INFINIPATH_E_SUNSUPVL			0x0000002000000000ULL
+#define INFINIPATH_E_SENDBUFMISUSE		0x0000004000000000ULL
+#define INFINIPATH_E_SDMAGENMISMATCH		0x0000008000000000ULL
+#define INFINIPATH_E_SDMAOUTOFBOUND		0x0000010000000000ULL
+#define INFINIPATH_E_SDMATAILOUTOFBOUND		0x0000020000000000ULL
+#define INFINIPATH_E_SDMABASE			0x0000040000000000ULL
+#define INFINIPATH_E_SDMA1STDESC		0x0000080000000000ULL
+#define INFINIPATH_E_SDMARPYTAG			0x0000100000000000ULL
+#define INFINIPATH_E_SDMADWEN			0x0000200000000000ULL
+#define INFINIPATH_E_SDMAMISSINGDW		0x0000400000000000ULL
+#define INFINIPATH_E_SDMAUNEXPDATA		0x0000800000000000ULL
+#define INFINIPATH_E_IBSTATUSCHANGED		0x0001000000000000ULL
+#define INFINIPATH_E_INVALIDADDR		0x0002000000000000ULL
+#define INFINIPATH_E_RESET			0x0004000000000000ULL
+#define INFINIPATH_E_HARDWARE			0x0008000000000000ULL
+#define INFINIPATH_E_SDMADESCADDRMISALIGN	0x0010000000000000ULL
+#define INFINIPATH_E_INVALIDEEPCMD		0x0020000000000000ULL
 
 /*
  * this is used to print "common" packet errors only when the
@@ -134,6 +159,17 @@
 		| INFINIPATH_E_RICRC | INFINIPATH_E_RSHORTPKTLEN \
 		| INFINIPATH_E_REBP )
 
+/* Convenience for decoding Send DMA errors */
+#define INFINIPATH_E_SDMAERRS ( \
+	INFINIPATH_E_SDMAGENMISMATCH | INFINIPATH_E_SDMAOUTOFBOUND | \
+	INFINIPATH_E_SDMATAILOUTOFBOUND | INFINIPATH_E_SDMABASE | \
+	INFINIPATH_E_SDMA1STDESC | INFINIPATH_E_SDMARPYTAG | \
+	INFINIPATH_E_SDMADWEN | INFINIPATH_E_SDMAMISSINGDW | \
+	INFINIPATH_E_SDMAUNEXPDATA | \
+	INFINIPATH_E_SDMADESCADDRMISALIGN | \
+	INFINIPATH_E_SDMADISABLED | \
+	INFINIPATH_E_SENDBUFMISUSE)
+
 /* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
 /* TXEMEMPARITYERR bit 0: PIObuf, 1: PIOpbc, 2: launchfifo
  * RXEMEMPARITYERR bit 0: rcvbuf, 1: lookupq, 2:  expTID, 3: eagerTID
@@ -158,7 +194,7 @@
 #define INFINIPATH_HWE_RXEMEMPARITYERR_HDRINFO  0x40ULL
 /* waldo specific -- find the rest in ipath_6110.c */
 #define INFINIPATH_HWE_RXDSYNCMEMPARITYERR  0x0000000400000000ULL
-/* monty specific -- find the rest in ipath_6120.c */
+/* 6120/7220 specific -- find the rest in ipath_6120.c and ipath_7220.c */
 #define INFINIPATH_HWE_MEMBISTFAILED	0x0040000000000000ULL
 
 /* kr_hwdiagctrl bits */
@@ -185,8 +221,8 @@
 #define INFINIPATH_IBCC_LINKINITCMD_SLEEP 3
 #define INFINIPATH_IBCC_LINKINITCMD_SHIFT 16
 #define INFINIPATH_IBCC_LINKCMD_MASK 0x3ULL
-#define INFINIPATH_IBCC_LINKCMD_DOWN 1	/* move to 0x11 */
-#define INFINIPATH_IBCC_LINKCMD_ARMED 2	/* move to 0x21 */
+#define INFINIPATH_IBCC_LINKCMD_DOWN 1		/* move to 0x11 */
+#define INFINIPATH_IBCC_LINKCMD_ARMED 2		/* move to 0x21 */
 #define INFINIPATH_IBCC_LINKCMD_ACTIVE 3	/* move to 0x31 */
 #define INFINIPATH_IBCC_LINKCMD_SHIFT 18
 #define INFINIPATH_IBCC_MAXPKTLEN_MASK 0x7FFULL
@@ -201,10 +237,9 @@
 #define INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE 0x4000000000000000ULL
 
 /* kr_ibcstatus bits */
-#define INFINIPATH_IBCS_LINKTRAININGSTATE_MASK 0xF
 #define INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT 0
 #define INFINIPATH_IBCS_LINKSTATE_MASK 0x7
-#define INFINIPATH_IBCS_LINKSTATE_SHIFT 4
+
 #define INFINIPATH_IBCS_TXREADY       0x40000000
 #define INFINIPATH_IBCS_TXCREDITOK    0x80000000
 /* link training states (shift by
@@ -222,30 +257,13 @@
 #define INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN	0x0c
 #define INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT	0x0e
 #define INFINIPATH_IBCS_LT_STATE_RECOVERIDLE	0x0f
-/* link state machine states (shift by INFINIPATH_IBCS_LINKSTATE_SHIFT) */
+/* link state machine states (shift by ibcs_ls_shift) */
 #define INFINIPATH_IBCS_L_STATE_DOWN		0x0
 #define INFINIPATH_IBCS_L_STATE_INIT		0x1
 #define INFINIPATH_IBCS_L_STATE_ARM		0x2
 #define INFINIPATH_IBCS_L_STATE_ACTIVE		0x3
 #define INFINIPATH_IBCS_L_STATE_ACT_DEFER	0x4
 
-/* combination link status states that we use with some frequency */
-#define IPATH_IBSTATE_MASK ((INFINIPATH_IBCS_LINKTRAININGSTATE_MASK \
-		<< INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) | \
-		(INFINIPATH_IBCS_LINKSTATE_MASK \
-		<<INFINIPATH_IBCS_LINKSTATE_SHIFT))
-#define IPATH_IBSTATE_INIT ((INFINIPATH_IBCS_L_STATE_INIT \
-		<< INFINIPATH_IBCS_LINKSTATE_SHIFT) | \
-		(INFINIPATH_IBCS_LT_STATE_LINKUP \
-		<<INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT))
-#define IPATH_IBSTATE_ARM ((INFINIPATH_IBCS_L_STATE_ARM \
-		<< INFINIPATH_IBCS_LINKSTATE_SHIFT) | \
-		(INFINIPATH_IBCS_LT_STATE_LINKUP \
-		<<INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT))
-#define IPATH_IBSTATE_ACTIVE ((INFINIPATH_IBCS_L_STATE_ACTIVE \
-		<< INFINIPATH_IBCS_LINKSTATE_SHIFT) | \
-		(INFINIPATH_IBCS_LT_STATE_LINKUP \
-		<<INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT))
 
 /* kr_extstatus bits */
 #define INFINIPATH_EXTS_SERDESPLLLOCK 0x1
@@ -286,8 +304,7 @@
 /* L1 Power down; use with RXDETECT, Otherwise not used on IB side */
 #define INFINIPATH_SERDC0_L1PWR_DN	 0xF0ULL
 
-/* kr_xgxsconfig bits */
-#define INFINIPATH_XGXS_RESET          0x7ULL
+/* common kr_xgxsconfig bits (or safe in all, even if not implemented) */
 #define INFINIPATH_XGXS_RX_POL_SHIFT 19
 #define INFINIPATH_XGXS_RX_POL_MASK 0xfULL
 
@@ -417,6 +434,29 @@
 	ipath_kreg kr_pcieq1serdesconfig0;
 	ipath_kreg kr_pcieq1serdesconfig1;
 	ipath_kreg kr_pcieq1serdesstatus;
+	ipath_kreg kr_hrtbt_guid;
+	ipath_kreg kr_ibcddrctrl;
+	ipath_kreg kr_ibcddrstatus;
+	ipath_kreg kr_jintreload;
+
+	/* send dma related regs */
+	ipath_kreg kr_senddmabase;
+	ipath_kreg kr_senddmalengen;
+	ipath_kreg kr_senddmatail;
+	ipath_kreg kr_senddmahead;
+	ipath_kreg kr_senddmaheadaddr;
+	ipath_kreg kr_senddmabufmask0;
+	ipath_kreg kr_senddmabufmask1;
+	ipath_kreg kr_senddmabufmask2;
+	ipath_kreg kr_senddmastatus;
+
+	/* SerDes related regs (IBA7220-only) */
+	ipath_kreg kr_ibserdesctrl;
+	ipath_kreg kr_ib_epbacc;
+	ipath_kreg kr_ib_epbtrans;
+	ipath_kreg kr_pcie_epbacc;
+	ipath_kreg kr_pcie_epbtrans;
+	ipath_kreg kr_ib_ddsrxeq;
 };
 
 struct ipath_cregs {
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index a59bdbd..8ac5c1d 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -310,7 +310,7 @@
 	switch (wqe->wr.opcode) {
 	case IB_WR_SEND_WITH_IMM:
 		wc.wc_flags = IB_WC_WITH_IMM;
-		wc.imm_data = wqe->wr.imm_data;
+		wc.imm_data = wqe->wr.ex.imm_data;
 		/* FALLTHROUGH */
 	case IB_WR_SEND:
 		if (!ipath_get_rwqe(qp, 0)) {
@@ -339,7 +339,7 @@
 			goto err;
 		}
 		wc.wc_flags = IB_WC_WITH_IMM;
-		wc.imm_data = wqe->wr.imm_data;
+		wc.imm_data = wqe->wr.ex.imm_data;
 		if (!ipath_get_rwqe(qp, 1))
 			goto rnr_nak;
 		/* FALLTHROUGH */
@@ -483,14 +483,16 @@
 
 static void want_buffer(struct ipath_devdata *dd)
 {
-	unsigned long flags;
+	if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA)) {
+		unsigned long flags;
 
-	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-	dd->ipath_sendctrl |= INFINIPATH_S_PIOINTBUFAVAIL;
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-			 dd->ipath_sendctrl);
-	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
+		spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+		dd->ipath_sendctrl |= INFINIPATH_S_PIOINTBUFAVAIL;
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+				 dd->ipath_sendctrl);
+		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
+	}
 }
 
 /**
diff --git a/drivers/infiniband/hw/ipath/ipath_sd7220.c b/drivers/infiniband/hw/ipath/ipath_sd7220.c
new file mode 100644
index 0000000..aa47eb5
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_sd7220.c
@@ -0,0 +1,1462 @@
+/*
+ * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/*
+ * This file contains all of the code that is specific to the SerDes
+ * on the InfiniPath 7220 chip.
+ */
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+#include "ipath_kernel.h"
+#include "ipath_registers.h"
+#include "ipath_7220.h"
+
+/*
+ * The IBSerDesMappTable is a memory that holds values to be stored in
+ * various SerDes registers by IBC. It is not part of the normal kregs
+ * map and is used in exactly one place, hence the #define below.
+ */
+#define KR_IBSerDesMappTable (0x94000 / (sizeof(uint64_t)))
+
+/*
+ * Below used for sdnum parameter, selecting one of the two sections
+ * used for PCIe, or the single SerDes used for IB.
+ */
+#define PCIE_SERDES0 0
+#define PCIE_SERDES1 1
+
+/*
+ * The EPB requires addressing in a particular form. EPB_LOC() is intended
+ * to make #definitions a little more readable.
+ */
+#define EPB_ADDR_SHF 8
+#define EPB_LOC(chn, elt, reg) \
+	(((elt & 0xf) | ((chn & 7) << 4) | ((reg & 0x3f) << 9)) << \
+	 EPB_ADDR_SHF)
+#define EPB_IB_QUAD0_CS_SHF (25)
+#define EPB_IB_QUAD0_CS (1U <<  EPB_IB_QUAD0_CS_SHF)
+#define EPB_IB_UC_CS_SHF (26)
+#define EPB_PCIE_UC_CS_SHF (27)
+#define EPB_GLOBAL_WR (1U << (EPB_ADDR_SHF + 8))
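+
+/*
+ * Worked example (illustrative): EPB_LOC(2, 7, 3) packs elt 7 into bits
+ * 0-3, chn 2 into bits 4-6 and reg 3 into bits 9-14, giving 0x627; the
+ * final shift by EPB_ADDR_SHF (8) yields 0x62700, the address field as
+ * it appears in an EPB transaction word.
+ */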
+
+/* Forward declarations. */
+static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc,
+				u32 data, u32 mask);
+static int ibsd_mod_allchnls(struct ipath_devdata *dd, int loc, int val,
+			     int mask);
+static int ipath_sd_trimdone_poll(struct ipath_devdata *dd);
+static void ipath_sd_trimdone_monitor(struct ipath_devdata *dd,
+				      const char *where);
+static int ipath_sd_setvals(struct ipath_devdata *dd);
+static int ipath_sd_early(struct ipath_devdata *dd);
+static int ipath_sd_dactrim(struct ipath_devdata *dd);
+/* Set the registers that IBC may muck with to their default "preset" values */
+int ipath_sd7220_presets(struct ipath_devdata *dd);
+static int ipath_internal_presets(struct ipath_devdata *dd);
+/* Tweak the register (CMUCTRL5) that contains the TRIMSELF controls */
+static int ipath_sd_trimself(struct ipath_devdata *dd, int val);
+static int epb_access(struct ipath_devdata *dd, int sdnum, int claim);
+
+void ipath_set_relock_poll(struct ipath_devdata *dd, int ibup);
+
+/*
+ * Below keeps track of whether the "once per power-on" initialization has
+ * been done, because uC code Version 1.32.17 or higher allows the uC to
+ * be reset at will, and Automatic Equalization may require it. So the
+ * state of the reset "pin", as reflected in the was_reset parameter to
+ * ipath_sd7220_init(), is no longer valid. Instead, we check for the
+ * actual uC code having been loaded.
+ */
+static int ipath_ibsd_ucode_loaded(struct ipath_devdata *dd)
+{
+	if (!dd->serdes_first_init_done && (ipath_sd7220_ib_vfy(dd) > 0))
+		dd->serdes_first_init_done = 1;
+	return dd->serdes_first_init_done;
+}
+
+/* repeat #define for local use. "Real" #define is in ipath_iba7220.c */
+#define INFINIPATH_HWE_IB_UC_MEMORYPARITYERR      0x0000004000000000ULL
+#define IB_MPREG5 (EPB_LOC(6, 0, 0xE) | (1L << EPB_IB_UC_CS_SHF))
+#define IB_MPREG6 (EPB_LOC(6, 0, 0xF) | (1U << EPB_IB_UC_CS_SHF))
+#define UC_PAR_CLR_D 8
+#define UC_PAR_CLR_M 0xC
+#define IB_CTRL2(chn) (EPB_LOC(chn, 7, 3) | EPB_IB_QUAD0_CS)
+#define START_EQ1(chan) EPB_LOC(chan, 7, 0x27)
+
+void ipath_sd7220_clr_ibpar(struct ipath_devdata *dd)
+{
+	int ret;
+
+	/* clear, then re-enable parity errs */
+	ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6,
+		UC_PAR_CLR_D, UC_PAR_CLR_M);
+	if (ret < 0) {
+		ipath_dev_err(dd, "Failed clearing IBSerDes Parity err\n");
+		goto bail;
+	}
+	ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0,
+		UC_PAR_CLR_M);
+
+	ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+	udelay(4);
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
+		INFINIPATH_HWE_IB_UC_MEMORYPARITYERR);
+	ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+bail:
+	return;
+}
+
+/*
+ * After a reset or other unusual event, the epb interface may need
+ * to be re-synchronized between the host and the uC.
+ * Returns <0 on failure to resync within IBSD_RESYNC_TRIES (not expected).
+ */
+#define IBSD_RESYNC_TRIES 3
+#define IB_PGUDP(chn) (EPB_LOC((chn), 2, 1) | EPB_IB_QUAD0_CS)
+#define IB_CMUDONE(chn) (EPB_LOC((chn), 7, 0xF) | EPB_IB_QUAD0_CS)
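+
+/*
+ * Note (derived from the loop below): the resync test writes an
+ * alternating pattern; XOR with 0xA5 maps 0xF0 to 0x55 and 0x55 back
+ * to 0xF0, so each pass flips the value it writes and verifies.
+ */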
+
+static int ipath_resync_ibepb(struct ipath_devdata *dd)
+{
+	int ret, pat, tries, chn;
+	u32 loc;
+
+	ret = -1;
+	chn = 0;
+	for (tries = 0; tries < (4 * IBSD_RESYNC_TRIES); ++tries) {
+		loc = IB_PGUDP(chn);
+		ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
+		if (ret < 0) {
+			ipath_dev_err(dd, "Failed read in resync\n");
+			continue;
+		}
+		if (ret != 0xF0 && ret != 0x55 && tries == 0)
+			ipath_dev_err(dd, "unexpected pattern in resync\n");
+		pat = ret ^ 0xA5; /* alternate F0 and 55 */
+		ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, pat, 0xFF);
+		if (ret < 0) {
+			ipath_dev_err(dd, "Failed write in resync\n");
+			continue;
+		}
+		ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
+		if (ret < 0) {
+			ipath_dev_err(dd, "Failed re-read in resync\n");
+			continue;
+		}
+		if (ret != pat) {
+			ipath_dev_err(dd, "Failed compare1 in resync\n");
+			continue;
+		}
+		loc = IB_CMUDONE(chn);
+		ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
+		if (ret < 0) {
+			ipath_dev_err(dd, "Failed CMUDONE rd in resync\n");
+			continue;
+		}
+		if ((ret & 0x70) != ((chn << 4) | 0x40)) {
+			ipath_dev_err(dd, "Bad CMUDONE value %02X, chn %d\n",
+				ret, chn);
+			continue;
+		}
+		if (++chn == 4)
+			break;  /* Success */
+	}
+	ipath_cdbg(VERBOSE, "Resync in %d tries\n", tries);
+	return (ret > 0) ? 0 : ret;
+}
+
+/*
+ * Localize everything needed to assert or de-assert the IB uC reset.
+ * Returns <0 on error.
+ */
+static int ipath_ibsd_reset(struct ipath_devdata *dd, int assert_rst)
+{
+	u64 rst_val;
+	int ret = 0;
+	unsigned long flags;
+
+	rst_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibserdesctrl);
+	if (assert_rst) {
+		/*
+		 * Vendor recommends "interrupting" uC before reset, to
+		 * minimize possible glitches.
+		 */
+		spin_lock_irqsave(&dd->ipath_sdepb_lock, flags);
+		epb_access(dd, IB_7220_SERDES, 1);
+		rst_val |= 1ULL;
+		/* Squelch possible parity error from _asserting_ reset */
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
+			dd->ipath_hwerrmask &
+			~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR);
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibserdesctrl, rst_val);
+		/* flush write, delay to ensure it took effect */
+		ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+		udelay(2);
+		/* once it's reset, can remove interrupt */
+		epb_access(dd, IB_7220_SERDES, -1);
+		spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags);
+	} else {
+		/*
+		 * Before we de-assert reset, we need to deal with
+		 * possible glitch on the Parity-error line.
+		 * Suppress it around the reset, both in chip-level
+		 * hwerrmask and in IB uC control reg. uC will allow
+		 * it again during startup.
+		 */
+		u64 val;
+		rst_val &= ~(1ULL);
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
+			dd->ipath_hwerrmask &
+			~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR);
+
+		ret = ipath_resync_ibepb(dd);
+		if (ret < 0)
+			ipath_dev_err(dd, "unable to re-sync IB EPB\n");
+
+		/* set uC control regs to suppress parity errs */
+		ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG5, 1, 1);
+		if (ret < 0)
+			goto bail;
+		/* IB uC code past Version 1.32.17 allows suppression of wdog */
+		ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80,
+			0x80);
+		if (ret < 0) {
+			ipath_dev_err(dd, "Failed to set WDOG disable\n");
+			goto bail;
+		}
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibserdesctrl, rst_val);
+		/* flush write, delay for startup */
+		ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+		udelay(1);
+		/* clear, then re-enable parity errs */
+		ipath_sd7220_clr_ibpar(dd);
+		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
+		if (val & INFINIPATH_HWE_IB_UC_MEMORYPARITYERR) {
+			ipath_dev_err(dd, "IBUC Parity still set after RST\n");
+			dd->ipath_hwerrmask &=
+				~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR;
+		}
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
+			dd->ipath_hwerrmask);
+	}
+
+bail:
+	return ret;
+}
+
+static void ipath_sd_trimdone_monitor(struct ipath_devdata *dd,
+       const char *where)
+{
+	int ret, chn, baduns;
+	u64 val;
+
+	if (!where)
+		where = "?";
+
+	/* give time for reset to settle out in EPB */
+	udelay(2);
+
+	ret = ipath_resync_ibepb(dd);
+	if (ret < 0)
+		ipath_dev_err(dd, "not able to re-sync IB EPB (%s)\n", where);
+
+	/* Do "sacrificial read" to get EPB in sane state after reset */
+	ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_CTRL2(0), 0, 0);
+	if (ret < 0)
+		ipath_dev_err(dd, "Failed TRIMDONE 1st read, (%s)\n", where);
+
+	/* Check/show "summary" Trim-done bit in IBCStatus */
+	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
+	if (val & (1ULL << 11))
+		ipath_cdbg(VERBOSE, "IBCS TRIMDONE set (%s)\n", where);
+	else
+		ipath_dev_err(dd, "IBCS TRIMDONE clear (%s)\n", where);
+
+	udelay(2);
+
+	ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80, 0x80);
+	if (ret < 0)
+		ipath_dev_err(dd, "Failed Dummy RMW, (%s)\n", where);
+	udelay(10);
+
+	baduns = 0;
+
+	for (chn = 3; chn >= 0; --chn) {
+		/* Read CTRL reg for each channel to check TRIMDONE */
+		ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
+			IB_CTRL2(chn), 0, 0);
+		if (ret < 0)
+			ipath_dev_err(dd, "Failed checking TRIMDONE, chn %d"
+				" (%s)\n", chn, where);
+
+		if (!(ret & 0x10)) {
+			int probe;
+			baduns |= (1 << chn);
+			ipath_dev_err(dd, "TRIMDONE cleared on chn %d (%02X)."
+				" (%s)\n", chn, ret, where);
+			probe = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
+				IB_PGUDP(0), 0, 0);
+			ipath_dev_err(dd, "probe is %d (%02X)\n",
+				probe, probe);
+			probe = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
+				IB_CTRL2(chn), 0, 0);
+			ipath_dev_err(dd, "re-read: %d (%02X)\n",
+				probe, probe);
+			ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
+				IB_CTRL2(chn), 0x10, 0x10);
+			if (ret < 0)
+				ipath_dev_err(dd,
+					"Err on TRIMDONE rewrite1\n");
+		}
+	}
+	for (chn = 3; chn >= 0; --chn) {
+		/* Read CTRL reg for each channel to check TRIMDONE */
+		if (baduns & (1 << chn)) {
+			ipath_dev_err(dd,
+				"Resetting TRIMDONE on chn %d (%s)\n",
+				chn, where);
+			ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
+				IB_CTRL2(chn), 0x10, 0x10);
+			if (ret < 0)
+				ipath_dev_err(dd, "Failed re-setting "
+					"TRIMDONE, chn %d (%s)\n",
+					chn, where);
+		}
+	}
+}
+
+/*
+ * Below is the portion of the IBA7220-specific bringup_serdes() that
+ * deals with registers and memory within the SerDes itself.
+ * Post IB uC code version 1.32.17, was_reset being 1 is not really
+ * informative, so we double-check.
+ */
+int ipath_sd7220_init(struct ipath_devdata *dd, int was_reset)
+{
+	int ret = 1; /* default to failure */
+	int first_reset;
+	int val_stat;
+
+	if (!was_reset) {
+		/* entered with reset not asserted, we need to do it */
+		ipath_ibsd_reset(dd, 1);
+		ipath_sd_trimdone_monitor(dd, "Driver-reload");
+	}
+
+	/* Substitute our deduced value for was_reset */
+	ret = ipath_ibsd_ucode_loaded(dd);
+	if (ret < 0) {
+		ret = 1;
+		goto done;
+	}
+	first_reset = !ret; /* First reset if IBSD uCode not yet loaded */
+
+	/*
+	 * Alter some regs per vendor latest doc, reset-defaults
+	 * are not right for IB.
+	 */
+	ret = ipath_sd_early(dd);
+	if (ret < 0) {
+		ipath_dev_err(dd, "Failed to set IB SERDES early defaults\n");
+		ret = 1;
+		goto done;
+	}
+
+	/*
+	 * Set DAC manual trim IB.
+	 * We only do this once after chip has been reset (usually
+	 * same as once per system boot).
+	 */
+	if (first_reset) {
+		ret = ipath_sd_dactrim(dd);
+		if (ret < 0) {
+			ipath_dev_err(dd, "Failed IB SERDES DAC trim\n");
+			ret = 1;
+			goto done;
+		}
+	}
+
+	/*
+	 * Set various registers (DDS and RXEQ) that will be
+	 * controlled by IBC (in 1.2 mode) to reasonable preset values.
+	 * Calling the "internal" version avoids the "check for needed"
+	 * and "trimdone monitor" that might be counter-productive.
+	 */
+	ret = ipath_internal_presets(dd);
+	if (ret < 0) {
+		ipath_dev_err(dd, "Failed to set IB SERDES presets\n");
+		ret = 1;
+		goto done;
+	}
+	ret = ipath_sd_trimself(dd, 0x80);
+	if (ret < 0) {
+		ipath_dev_err(dd, "Failed to set IB SERDES TRIMSELF\n");
+		ret = 1;
+		goto done;
+	}
+
+	/* Load image, then try to verify */
+	ret = 0;	/* Assume success */
+	if (first_reset) {
+		int vfy;
+		int trim_done;
+		ipath_dbg("SerDes uC was reset, reloading PRAM\n");
+		ret = ipath_sd7220_ib_load(dd);
+		if (ret < 0) {
+			ipath_dev_err(dd, "Failed to load IB SERDES image\n");
+			ret = 1;
+			goto done;
+		}
+
+		/* Loaded image, try to verify */
+		vfy = ipath_sd7220_ib_vfy(dd);
+		if (vfy != ret) {
+			ipath_dev_err(dd, "SERDES PRAM VFY failed\n");
+			ret = 1;
+			goto done;
+		}
+		/*
+		 * Loaded and verified. Almost good...
+		 * hold "success" in ret
+		 */
+		ret = 0;
+
+		/*
+		 * Prev steps all worked, continue bringup
+		 * De-assert RESET to uC, only in first reset, to allow
+		 * trimming.
+		 *
+		 * Since our default setup sets START_EQ1 to
+		 * PRESET, we need to clear that for this very first run.
+		 */
+		ret = ibsd_mod_allchnls(dd, START_EQ1(0), 0, 0x38);
+		if (ret < 0) {
+			ipath_dev_err(dd, "Failed clearing START_EQ1\n");
+			ret = 1;
+			goto done;
+		}
+
+		ipath_ibsd_reset(dd, 0);
+		/*
+		 * If this is not the first reset, trimdone should be set
+		 * already.
+		 */
+		trim_done = ipath_sd_trimdone_poll(dd);
+		/*
+		 * Whether or not trimdone succeeded, we need to put the
+		 * uC back into reset to avoid a possible fight with the
+		 * IBC state-machine.
+		 */
+		ipath_ibsd_reset(dd, 1);
+
+		if (!trim_done) {
+			ipath_dev_err(dd, "No TRIMDONE seen\n");
+			ret = 1;
+			goto done;
+		}
+
+		ipath_sd_trimdone_monitor(dd, "First-reset");
+		/* Remember so we do not re-do the load, dactrim, etc. */
+		dd->serdes_first_init_done = 1;
+	}
+	/*
+	 * Setup for channel training and load values for
+	 * RxEq and DDS in tables used by IBC in IB1.2 mode.
+	 */
+	val_stat = ipath_sd_setvals(dd);
+	if (val_stat < 0)
+		ret = 1;
+done:
+	/* start relock timer regardless, but start at 1 second */
+	ipath_set_relock_poll(dd, -1);
+	return ret;
+}
+
+#define EPB_ACC_REQ 1
+#define EPB_ACC_GNT 0x100
+#define EPB_DATA_MASK 0xFF
+#define EPB_RD (1ULL << 24)
+#define EPB_TRANS_RDY (1ULL << 31)
+#define EPB_TRANS_ERR (1ULL << 30)
+#define EPB_TRANS_TRIES 5
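+
+/*
+ * Layout note (as implied by the masks above): an EPB transaction word
+ * carries data in bits 0-7 (EPB_DATA_MASK), the packed address from
+ * bit 8 upward (EPB_ADDR_SHF), and the read flag at bit 24; the chip
+ * reports error and ready status in bits 30 and 31.
+ */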
+
+/*
+ * query, claim, release ownership of the EPB (External Parallel Bus)
+ * for a specified SERDES.
+ * the "claim" parameter is >0 to claim, <0 to release, 0 to query.
+ * Returns <0 for errors, >0 if we had ownership, else 0.
+ */
+static int epb_access(struct ipath_devdata *dd, int sdnum, int claim)
+{
+	u16 acc;
+	u64 accval;
+	int owned = 0;
+	u64 oct_sel = 0;
+
+	switch (sdnum) {
+	case IB_7220_SERDES :
+		/*
+		 * The IB SERDES "ownership" is fairly simple: a single
+		 * request/grant pair.
+		 */
+		acc = dd->ipath_kregs->kr_ib_epbacc;
+		break;
+	case PCIE_SERDES0 :
+	case PCIE_SERDES1 :
+		/* PCIe SERDES has two "octants", need to select which */
+		acc = dd->ipath_kregs->kr_pcie_epbacc;
+		oct_sel = (2 << (sdnum - PCIE_SERDES0));
+		break;
+	default :
+		return 0;
+	}
+
+	/* Make sure any outstanding transaction was seen */
+	ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+	udelay(15);
+
+	accval = ipath_read_kreg32(dd, acc);
+
+	owned = !!(accval & EPB_ACC_GNT);
+	if (claim < 0) {
+		/* Need to release */
+		u64 pollval;
+		/*
+		 * The only writeable bits are the request and CS.
+		 * Both should be clear
+		 */
+		u64 newval = 0;
+		ipath_write_kreg(dd, acc, newval);
+		/* First read after write is not trustworthy */
+		pollval = ipath_read_kreg32(dd, acc);
+		udelay(5);
+		pollval = ipath_read_kreg32(dd, acc);
+		if (pollval & EPB_ACC_GNT)
+			owned = -1;
+	} else if (claim > 0) {
+		/* Need to claim */
+		u64 pollval;
+		u64 newval = EPB_ACC_REQ | oct_sel;
+		ipath_write_kreg(dd, acc, newval);
+		/* First read after write is not trustworthy */
+		pollval = ipath_read_kreg32(dd, acc);
+		udelay(5);
+		pollval = ipath_read_kreg32(dd, acc);
+		if (!(pollval & EPB_ACC_GNT))
+			owned = -1;
+	}
+	return owned;
+}
+
+/*
+ * Lemma to deal with race condition of write..read to epb regs
+ */
+static int epb_trans(struct ipath_devdata *dd, u16 reg, u64 i_val, u64 *o_vp)
+{
+	int tries;
+	u64 transval;
+
+	ipath_write_kreg(dd, reg, i_val);
+	/* Throw away first read, as RDY bit may be stale */
+	transval = ipath_read_kreg64(dd, reg);
+
+	for (tries = EPB_TRANS_TRIES; tries; --tries) {
+		transval = ipath_read_kreg32(dd, reg);
+		if (transval & EPB_TRANS_RDY)
+			break;
+		udelay(5);
+	}
+	if (transval & EPB_TRANS_ERR)
+		return -1;
+	if (tries > 0 && o_vp)
+		*o_vp = transval;
+	return tries;
+}
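+
+/*
+ * Return convention (derived from the code above): epb_trans() returns
+ * the number of polling tries remaining (> 0 on success), 0 if RDY
+ * never returned, or -1 on a transaction error, which is why callers
+ * treat "tries <= 0" as failure.
+ */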
+
+/**
+ * ipath_sd7220_reg_mod - modify SERDES register
+ * @dd: the infinipath device
+ * @sdnum: which SERDES to access
+ * @loc: location - channel, element, register, as packed by EPB_LOC() macro.
+ * @wd: Write Data - value to set in register
+ * @mask: ones where data should be spliced into reg.
+ *
+ * Basic register read/modify/write, with unneeded accesses elided. That is,
+ * a mask of zero will prevent the write, while a mask of 0xFF will prevent
+ * the read.
+ * Returns the current (presumed, if a write was done) contents of the
+ * selected register, or <0 on error.
+ */
+static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc,
+				u32 wd, u32 mask)
+{
+	u16 trans;
+	u64 transval;
+	int owned;
+	int tries, ret;
+	unsigned long flags;
+
+	switch (sdnum) {
+	case IB_7220_SERDES :
+		trans = dd->ipath_kregs->kr_ib_epbtrans;
+		break;
+	case PCIE_SERDES0 :
+	case PCIE_SERDES1 :
+		trans = dd->ipath_kregs->kr_pcie_epbtrans;
+		break;
+	default :
+		return -1;
+	}
+
+	/*
+	 * All access is locked in software (vs other host threads) and
+	 * hardware (vs uC access).
+	 */
+	spin_lock_irqsave(&dd->ipath_sdepb_lock, flags);
+
+	owned = epb_access(dd, sdnum, 1);
+	if (owned < 0) {
+		spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags);
+		return -1;
+	}
+	ret = 0;
+	for (tries = EPB_TRANS_TRIES; tries; --tries) {
+		transval = ipath_read_kreg32(dd, trans);
+		if (transval & EPB_TRANS_RDY)
+			break;
+		udelay(5);
+	}
+
+	if (tries > 0) {
+		tries = 1;	/* to make read-skip work */
+		if (mask != 0xFF) {
+			/*
+			 * Not a pure write, so need to read.
+			 * loc encodes chip-select as well as address
+			 */
+			transval = loc | EPB_RD;
+			tries = epb_trans(dd, trans, transval, &transval);
+		}
+		if (tries > 0 && mask != 0) {
+			/*
+			 * Not a pure read, so need to write.
+			 */
+			wd = (wd & mask) | (transval & ~mask);
+			transval = loc | (wd & EPB_DATA_MASK);
+			tries = epb_trans(dd, trans, transval, &transval);
+		}
+	}
+	/* else, failed to see ready, what error-handling? */
+
+	/*
+	 * Release bus. Failure is an error.
+	 */
+	if (epb_access(dd, sdnum, -1) < 0)
+		ret = -1;
+	else
+		ret = transval & EPB_DATA_MASK;
+
+	spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags);
+	if (tries <= 0)
+		ret = -1;
+	return ret;
+}
+
+#define EPB_ROM_R (2)
+#define EPB_ROM_W (1)
+/*
+ * Below, all uC-related, use appropriate UC_CS, depending
+ * on which SerDes is used.
+ */
+#define EPB_UC_CTL EPB_LOC(6, 0, 0)
+#define EPB_MADDRL EPB_LOC(6, 0, 2)
+#define EPB_MADDRH EPB_LOC(6, 0, 3)
+#define EPB_ROMDATA EPB_LOC(6, 0, 4)
+#define EPB_RAMDATA EPB_LOC(6, 0, 5)
+
+/* Transfer data to/from uC Program RAM of IB or PCIe SerDes */
+static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc,
+			       u8 *buf, int cnt, int rd_notwr)
+{
+	u16 trans;
+	u64 transval;
+	u64 csbit;
+	int owned;
+	int tries;
+	int sofar;
+	int addr;
+	int ret;
+	unsigned long flags;
+	const char *op;
+
+	/* Pick appropriate transaction reg and "Chip select" for this serdes */
+	switch (sdnum) {
+	case IB_7220_SERDES :
+		csbit = 1ULL << EPB_IB_UC_CS_SHF;
+		trans = dd->ipath_kregs->kr_ib_epbtrans;
+		break;
+	case PCIE_SERDES0 :
+	case PCIE_SERDES1 :
+		/* PCIe SERDES has uC "chip select" in different bit, too */
+		csbit = 1ULL << EPB_PCIE_UC_CS_SHF;
+		trans = dd->ipath_kregs->kr_pcie_epbtrans;
+		break;
+	default :
+		return -1;
+	}
+
+	op = rd_notwr ? "Rd" : "Wr";
+	spin_lock_irqsave(&dd->ipath_sdepb_lock, flags);
+
+	owned = epb_access(dd, sdnum, 1);
+	if (owned < 0) {
+		spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags);
+		ipath_dbg("Could not get %s access to %s EPB: %X, loc %X\n",
+			op, (sdnum == IB_7220_SERDES) ? "IB" : "PCIe",
+			owned, loc);
+		return -1;
+	}
+
+	/*
+	 * In future code, we may need to distinguish several address ranges,
+	 * and select various memories based on this. For now, just trim
+	 * "loc" (location including address and memory select) to
+	 * "addr" (address within memory). For now we only support PRAM;
+	 * the memory is 8KB.
+	 */
+	addr = loc & 0x1FFF;
+	for (tries = EPB_TRANS_TRIES; tries; --tries) {
+		transval = ipath_read_kreg32(dd, trans);
+		if (transval & EPB_TRANS_RDY)
+			break;
+		udelay(5);
+	}
+
+	sofar = 0;
+	if (tries <= 0)
+		ipath_dbg("No initial RDY on EPB access request\n");
+	else {
+		/*
+		 * Every "memory" access is doubly-indirect.
+		 * We set two bytes of address, then read/write
+		 * one or more bytes of data.
+		 */
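+		/*
+		 * Concretely (mirroring the code below): set EPB_UC_CTL
+		 * for read or write, load EPB_MADDRH/EPB_MADDRL at the
+		 * start of the transfer, stream bytes through
+		 * EPB_ROMDATA, then clear EPB_UC_CTL again.
+		 */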
+
+		/* First, we set control to "Read" or "Write" */
+		transval = csbit | EPB_UC_CTL |
+			(rd_notwr ? EPB_ROM_R : EPB_ROM_W);
+		tries = epb_trans(dd, trans, transval, &transval);
+		if (tries <= 0)
+			ipath_dbg("No EPB response to uC %s cmd\n", op);
+		while (tries > 0 && sofar < cnt) {
+			if (!sofar) {
+				/* Only set address at start of chunk */
+				int addrbyte = (addr + sofar) >> 8;
+				transval = csbit | EPB_MADDRH | addrbyte;
+				tries = epb_trans(dd, trans, transval,
+						  &transval);
+				if (tries <= 0) {
+					ipath_dbg("No EPB response ADDRH\n");
+					break;
+				}
+				addrbyte = (addr + sofar) & 0xFF;
+				transval = csbit | EPB_MADDRL | addrbyte;
+				tries = epb_trans(dd, trans, transval,
+						 &transval);
+				if (tries <= 0) {
+					ipath_dbg("No EPB response ADDRL\n");
+					break;
+				}
+			}
+
+			if (rd_notwr)
+				transval = csbit | EPB_ROMDATA | EPB_RD;
+			else
+				transval = csbit | EPB_ROMDATA | buf[sofar];
+			tries = epb_trans(dd, trans, transval, &transval);
+			if (tries <= 0) {
+				ipath_dbg("No EPB response DATA\n");
+				break;
+			}
+			if (rd_notwr)
+				buf[sofar] = transval & EPB_DATA_MASK;
+			++sofar;
+		}
+		/* Finally, clear control-bit for Read or Write */
+		transval = csbit | EPB_UC_CTL;
+		tries = epb_trans(dd, trans, transval, &transval);
+		if (tries <= 0)
+			ipath_dbg("No EPB response to drop of uC %s cmd\n", op);
+	}
+
+	ret = sofar;
+	/* Release bus. Failure is an error */
+	if (epb_access(dd, sdnum, -1) < 0)
+		ret = -1;
+
+	spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags);
+	if (tries <= 0) {
+		ipath_dbg("SERDES PRAM %s failed after %d bytes\n", op, sofar);
+		ret = -1;
+	}
+	return ret;
+}
+
+#define PROG_CHUNK 64
+
+int ipath_sd7220_prog_ld(struct ipath_devdata *dd, int sdnum,
+	u8 *img, int len, int offset)
+{
+	int cnt, sofar, req;
+
+	sofar = 0;
+	while (sofar < len) {
+		req = len - sofar;
+		if (req > PROG_CHUNK)
+			req = PROG_CHUNK;
+		cnt = ipath_sd7220_ram_xfer(dd, sdnum, offset + sofar,
+					  img + sofar, req, 0);
+		if (cnt < req) {
+			sofar = -1;
+			break;
+		}
+		sofar += req;
+	}
+	return sofar;
+}
+
+#define VFY_CHUNK 64
+#define SD_PRAM_ERROR_LIMIT 42
+
+int ipath_sd7220_prog_vfy(struct ipath_devdata *dd, int sdnum,
+	const u8 *img, int len, int offset)
+{
+	int cnt, sofar, req, idx, errors;
+	unsigned char readback[VFY_CHUNK];
+
+	errors = 0;
+	sofar = 0;
+	while (sofar < len) {
+		req = len - sofar;
+		if (req > VFY_CHUNK)
+			req = VFY_CHUNK;
+		cnt = ipath_sd7220_ram_xfer(dd, sdnum, sofar + offset,
+					  readback, req, 1);
+		if (cnt < req) {
+			/* failed in read itself */
+			sofar = -1;
+			break;
+		}
+		for (idx = 0; idx < cnt; ++idx) {
+			if (readback[idx] != img[idx+sofar])
+				++errors;
+		}
+		sofar += cnt;
+	}
+	return errors ? -errors : sofar;
+}
+
+/* IRQ not set up at this point in init, so we poll. */
+#define IB_SERDES_TRIM_DONE (1ULL << 11)
+#define TRIM_TMO (30)
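+/* With msleep(10) per poll below, TRIM_TMO allows roughly 300 msec. */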
+
+static int ipath_sd_trimdone_poll(struct ipath_devdata *dd)
+{
+	int trim_tmo, ret;
+	uint64_t val;
+
+	/*
+	 * Default to failure, so IBC will not start
+	 * without IB_SERDES_TRIM_DONE.
+	 */
+	ret = 0;
+	for (trim_tmo = 0; trim_tmo < TRIM_TMO; ++trim_tmo) {
+		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
+		if (val & IB_SERDES_TRIM_DONE) {
+			ipath_cdbg(VERBOSE, "TRIMDONE after %d\n", trim_tmo);
+			ret = 1;
+			break;
+		}
+		msleep(10);
+	}
+	if (trim_tmo >= TRIM_TMO) {
+		ipath_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo);
+		ret = 0;
+	}
+	return ret;
+}
+
+#define TX_FAST_ELT (9)
+
+/*
+ * Set the "negotiation" values for SERDES. These are used by the IB1.2
+ * link negotiation. The macros below are an attempt to keep the values
+ * a little more human-editable.
+ * First, values related to Drive De-emphasis Settings.
+ */
+
+#define NUM_DDS_REGS 6
+#define DDS_REG_MAP 0x76A910 /* LSB-first list of regs (in elt 9) to mod */
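+/*
+ * Illustrative decode: taken LSB-first, 0x76A910 selects registers
+ * 0, 1, 9, 0xA, 6 and 7 of element 9, in that order; ipath_sd_setvals()
+ * shifts one nibble off the map per register it programs.
+ */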
+
+#define DDS_VAL(amp_d, main_d, ipst_d, ipre_d, amp_s, main_s, ipst_s, ipre_s) \
+	{ { ((amp_d & 0x1F) << 1) | 1, ((amp_s & 0x1F) << 1) | 1, \
+	  (main_d << 3) | 4 | (ipre_d >> 2), \
+	  (main_s << 3) | 4 | (ipre_s >> 2), \
+	  ((ipst_d & 0xF) << 1) | ((ipre_d & 3) << 6) | 0x21, \
+	  ((ipst_s & 0xF) << 1) | ((ipre_s & 3) << 6) | 0x21 } }
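+
+/*
+ * Illustrative expansion: the first 3m row below,
+ * DDS_VAL(31, 19, 12, 0, 29, 22, 9, 0), packs to the six bytes
+ * { 0x3F, 0x3B, 0x9C, 0xB4, 0x39, 0x33 }, DDR values in the even
+ * positions and SDR values in the odd.
+ */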
+
+static struct dds_init {
+	uint8_t reg_vals[NUM_DDS_REGS];
+} dds_init_vals[] = {
+	/*       DDR(FDR)       SDR(HDR)   */
+	/* Vendor recommends below for 3m cable */
+#define DDS_3M 0
+	DDS_VAL(31, 19, 12, 0, 29, 22,  9, 0),
+	DDS_VAL(31, 12, 15, 4, 31, 15, 15, 1),
+	DDS_VAL(31, 13, 15, 3, 31, 16, 15, 0),
+	DDS_VAL(31, 14, 15, 2, 31, 17, 14, 0),
+	DDS_VAL(31, 15, 15, 1, 31, 18, 13, 0),
+	DDS_VAL(31, 16, 15, 0, 31, 19, 12, 0),
+	DDS_VAL(31, 17, 14, 0, 31, 20, 11, 0),
+	DDS_VAL(31, 18, 13, 0, 30, 21, 10, 0),
+	DDS_VAL(31, 20, 11, 0, 28, 23,  8, 0),
+	DDS_VAL(31, 21, 10, 0, 27, 24,  7, 0),
+	DDS_VAL(31, 22,  9, 0, 26, 25,  6, 0),
+	DDS_VAL(30, 23,  8, 0, 25, 26,  5, 0),
+	DDS_VAL(29, 24,  7, 0, 23, 27,  4, 0),
+	/* Vendor recommends below for 1m cable */
+#define DDS_1M 13
+	DDS_VAL(28, 25,  6, 0, 21, 28,  3, 0),
+	DDS_VAL(27, 26,  5, 0, 19, 29,  2, 0),
+	DDS_VAL(25, 27,  4, 0, 17, 30,  1, 0)
+};
+
+/*
+ * Next, values related to Receive Equalization.
+ * In comments, FDR (Full) is IB DDR, HDR (Half) is IB SDR
+ */
+/* Hardware packs an element number and register address thus: */
+#define RXEQ_INIT_RDESC(elt, addr) (((elt) & 0xF) | ((addr) << 4))
+#define RXEQ_VAL(elt, adr, val0, val1, val2, val3) \
+	{RXEQ_INIT_RDESC((elt), (adr)), {(val0), (val1), (val2), (val3)} }
+
+#define RXEQ_VAL_ALL(elt, adr, val)  \
+	{RXEQ_INIT_RDESC((elt), (adr)), {(val), (val), (val), (val)} }
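+
+/*
+ * Illustrative decode: RXEQ_INIT_RDESC(7, 0x27) packs to 0x277, i.e.
+ * element 7 in the low nibble with register address 0x27 above it.
+ */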
+
+#define RXEQ_SDR_DFELTH 0
+#define RXEQ_SDR_TLTH 0
+#define RXEQ_SDR_G1CNT_Z1CNT 0x11
+#define RXEQ_SDR_ZCNT 23
+
+static struct rxeq_init {
+	u16 rdesc;	/* in form used in SerDesDDSRXEQ */
+	u8  rdata[4];
+} rxeq_init_vals[] = {
+	/* Set Rcv Eq. to Preset mode */
+	RXEQ_VAL_ALL(7, 0x27, 0x10),
+	/* Set DFELTHFDR/HDR thresholds */
+	RXEQ_VAL(7, 8,    0, 0, 0, 0), /* FDR */
+	RXEQ_VAL(7, 0x21, 0, 0, 0, 0), /* HDR */
+	/* Set TLTHFDR/HDR threshold */
+	RXEQ_VAL(7, 9,    2, 2, 2, 2), /* FDR */
+	RXEQ_VAL(7, 0x23, 2, 2, 2, 2), /* HDR */
+	/* Set Preamp setting 2 (ZFR/ZCNT) */
+	RXEQ_VAL(7, 0x1B, 12, 12, 12, 12), /* FDR */
+	RXEQ_VAL(7, 0x1C, 12, 12, 12, 12), /* HDR */
+	/* Set Preamp DC gain and Setting 1 (GFR/GHR) */
+	RXEQ_VAL(7, 0x1E, 0x10, 0x10, 0x10, 0x10), /* FDR */
+	RXEQ_VAL(7, 0x1F, 0x10, 0x10, 0x10, 0x10), /* HDR */
+	/* Toggle RELOCK (in VCDL_CTRL0) to lock to data */
+	RXEQ_VAL_ALL(6, 6, 0x20), /* Set D5 High */
+	RXEQ_VAL_ALL(6, 6, 0), /* Set D5 Low */
+};
+
+/* There are 17 values from vendor, but IBC only accesses the first 16 */
+#define DDS_ROWS (16)
+#define RXEQ_ROWS ARRAY_SIZE(rxeq_init_vals)
+
+static int ipath_sd_setvals(struct ipath_devdata *dd)
+{
+	int idx, midx;
+	int min_idx;	 /* Minimum index for this portion of table */
+	uint32_t dds_reg_map;
+	u64 __iomem *taddr, *iaddr;
+	uint64_t data;
+	uint64_t sdctl;
+
+	taddr = dd->ipath_kregbase + KR_IBSerDesMappTable;
+	iaddr = dd->ipath_kregbase + dd->ipath_kregs->kr_ib_ddsrxeq;
+
+	/*
+	 * Init the DDS section of the table.
+	 * Each "row" of the table provokes NUM_DDS_REG writes, to the
+	 * registers indicated in DDS_REG_MAP.
+	 */
+	sdctl = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibserdesctrl);
+	sdctl = (sdctl & ~(0x1f << 8)) | (NUM_DDS_REGS << 8);
+	sdctl = (sdctl & ~(0x1f << 13)) | (RXEQ_ROWS << 13);
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibserdesctrl, sdctl);
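+	/*
+	 * The two 5-bit fields just written advertise the table geometry
+	 * to IBC: DDS register count in bits 8-12 and RXEQ row count in
+	 * bits 13-17, matching the masks used above.
+	 */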
+
+	/*
+	 * Iterate down table within loop for each register to store.
+	 */
+	dds_reg_map = DDS_REG_MAP;
+	for (idx = 0; idx < NUM_DDS_REGS; ++idx) {
+		data = ((dds_reg_map & 0xF) << 4) | TX_FAST_ELT;
+		writeq(data, iaddr + idx);
+		mmiowb();
+		ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+		dds_reg_map >>= 4;
+		for (midx = 0; midx < DDS_ROWS; ++midx) {
+			u64 __iomem *daddr = taddr + ((midx << 4) + idx);
+			data = dds_init_vals[midx].reg_vals[idx];
+			writeq(data, daddr);
+			mmiowb();
+			ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+		} /* End inner for (vals for this reg, each row) */
+	} /* end outer for (regs to be stored) */
+
+	/*
+	 * Init the RXEQ section of the table. As explained in the comment
+	 * above rxeq_init_vals[], this runs in a different order, as the pattern
+	 * of register references is more complex, but there are only
+	 * four "data" values per register.
+	 */
+	min_idx = idx; /* RXEQ indices pick up where DDS left off */
+	taddr += 0x100; /* RXEQ data is in second half of table */
+	/* Iterate through RXEQ register addresses */
+	for (idx = 0; idx < RXEQ_ROWS; ++idx) {
+		int didx; /* "destination" */
+		int vidx;
+
+		/* didx is offset by min_idx to address RXEQ range of regs */
+		didx = idx + min_idx;
+		/* Store the next RXEQ register address */
+		writeq(rxeq_init_vals[idx].rdesc, iaddr + didx);
+		mmiowb();
+		ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+		/* Iterate through RXEQ values */
+		for (vidx = 0; vidx < 4; vidx++) {
+			data = rxeq_init_vals[idx].rdata[vidx];
+			writeq(data, taddr + (vidx << 6) + idx);
+			mmiowb();
+			ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+		}
+	} /* end outer for (Reg-writes for RXEQ) */
+	return 0;
+}
+
+#define CMUCTRL5 EPB_LOC(7, 0, 0x15)
+#define RXHSCTRL0(chan) EPB_LOC(chan, 6, 0)
+#define VCDL_DAC2(chan) EPB_LOC(chan, 6, 5)
+#define VCDL_CTRL0(chan) EPB_LOC(chan, 6, 6)
+#define VCDL_CTRL2(chan) EPB_LOC(chan, 6, 8)
+#define START_EQ2(chan) EPB_LOC(chan, 7, 0x28)
+
+static int ibsd_sto_noisy(struct ipath_devdata *dd, int loc, int val, int mask)
+{
+	int ret = -1;
+	int sloc; /* shifted loc, for messages */
+
+	loc |= (1U << EPB_IB_QUAD0_CS_SHF);
+	sloc = loc >> EPB_ADDR_SHF;
+
+	ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, mask);
+	if (ret < 0)
+		ipath_dev_err(dd, "Write failed: elt %d,"
+			" addr 0x%X, chnl %d, val 0x%02X, mask 0x%02X\n",
+			(sloc & 0xF), (sloc >> 9) & 0x3f, (sloc >> 4) & 7,
+			val & 0xFF, mask & 0xFF);
+	return ret;
+}
+
+/*
+ * Repeat a "store" across all channels of the IB SerDes.
+ * Although nominally it inherits the "read value" of the last
+ * channel it modified, the only really useful return is <0 for
+ * failure, >= 0 for success. The parameter 'loc' is assumed to
+ * be the location for the channel-0 copy of the register to
+ * be modified.
+ */
+static int ibsd_mod_allchnls(struct ipath_devdata *dd, int loc, int val,
+	int mask)
+{
+	int ret = -1;
+	int chnl;
+
+	if (loc & EPB_GLOBAL_WR) {
+		/*
+		 * Our caller has assured us that we can set all four
+		 * channels at once. Trust that. If mask is not 0xFF,
+		 * we will read the _specified_ channel for our starting
+		 * value.
+		 */
+		loc |= (1U << EPB_IB_QUAD0_CS_SHF);
+		chnl = (loc >> (4 + EPB_ADDR_SHF)) & 7;
+		if (mask != 0xFF) {
+			ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
+				loc & ~EPB_GLOBAL_WR, 0, 0);
+			if (ret < 0) {
+				int sloc = loc >> EPB_ADDR_SHF;
+				ipath_dev_err(dd, "pre-read failed: elt %d,"
+					" addr 0x%X, chnl %d\n", (sloc & 0xF),
+					(sloc >> 9) & 0x3f, chnl);
+				return ret;
+			}
+			val = (ret & ~mask) | (val & mask);
+		}
+		loc &= ~(7 << (4+EPB_ADDR_SHF));
+		ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF);
+		if (ret < 0) {
+			int sloc = loc >> EPB_ADDR_SHF;
+			ipath_dev_err(dd, "Global WR failed: elt %d,"
+				" addr 0x%X, val %02X\n",
+				(sloc & 0xF), (sloc >> 9) & 0x3f, val);
+		}
+		return ret;
+	}
+	/* Clear "channel" and set CS so we can simply iterate */
+	loc &= ~(7 << (4+EPB_ADDR_SHF));
+	loc |= (1U << EPB_IB_QUAD0_CS_SHF);
+	for (chnl = 0; chnl < 4; ++chnl) {
+		int cloc;
+		cloc = loc | (chnl << (4+EPB_ADDR_SHF));
+		ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, cloc, val, mask);
+		if (ret < 0) {
+			int sloc = loc >> EPB_ADDR_SHF;
+			ipath_dev_err(dd, "Write failed: elt %d,"
+				" addr 0x%X, chnl %d, val 0x%02X,"
+				" mask 0x%02X\n",
+				(sloc & 0xF), (sloc >> 9) & 0x3f, chnl,
+				val & 0xFF, mask & 0xFF);
+			break;
+		}
+	}
+	return ret;
+}
+
+/*
+ * Set the Tx values normally modified by IBC in IB1.2 mode to default
+ * values, as gotten from first row of init table.
+ */
+static int set_dds_vals(struct ipath_devdata *dd, struct dds_init *ddi)
+{
+	int ret;
+	int idx, reg, data;
+	uint32_t regmap;
+
+	regmap = DDS_REG_MAP;
+	for (idx = 0; idx < NUM_DDS_REGS; ++idx) {
+		reg = (regmap & 0xF);
+		regmap >>= 4;
+		data = ddi->reg_vals[idx];
+		/* Vendor says RMW not needed for these regs, use 0xFF mask */
+		ret = ibsd_mod_allchnls(dd, EPB_LOC(0, 9, reg), data, 0xFF);
+		if (ret < 0)
+			break;
+	}
+	return ret;
+}
+
+/*
+ * Set the Rx values normally modified by IBC in IB1.2 mode to default
+ * values, as gotten from selected column of init table.
+ */
+static int set_rxeq_vals(struct ipath_devdata *dd, int vsel)
+{
+	int ret;
+	int ridx;
+	int cnt = ARRAY_SIZE(rxeq_init_vals);
+
+	for (ridx = 0; ridx < cnt; ++ridx) {
+		int elt, reg, val, loc;
+		elt = rxeq_init_vals[ridx].rdesc & 0xF;
+		reg = rxeq_init_vals[ridx].rdesc >> 4;
+		loc = EPB_LOC(0, elt, reg);
+		val = rxeq_init_vals[ridx].rdata[vsel];
+		/* mask of 0xFF, because hardware does full-byte store. */
+		ret = ibsd_mod_allchnls(dd, loc, val, 0xFF);
+		if (ret < 0)
+			break;
+	}
+	return ret;
+}
+
+/*
+ * Set the default values (row 0) for DDR Driver De-emphasis.
+ * We do this initially and whenever we turn off IB-1.2.
+ * The "default" values for Rx equalization are also stored to
+ * SerDes registers. Historically we used set 2, and it remains the default.
+ * For experimenting with cables and link-partners, we allow changing
+ * that via a module parameter.
+ */
+static unsigned ipath_rxeq_set = 2;
+module_param_named(rxeq_default_set, ipath_rxeq_set, uint,
+	S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(rxeq_default_set,
+	"Which set [0..3] of Rx Equalization values is default");
+
+static int ipath_internal_presets(struct ipath_devdata *dd)
+{
+	int ret = 0;
+
+	ret = set_dds_vals(dd, dds_init_vals + DDS_3M);
+
+	if (ret < 0)
+		ipath_dev_err(dd, "Failed to set default DDS values\n");
+	ret = set_rxeq_vals(dd, ipath_rxeq_set & 3);
+	if (ret < 0)
+		ipath_dev_err(dd, "Failed to set default RXEQ values\n");
+	return ret;
+}
+
+int ipath_sd7220_presets(struct ipath_devdata *dd)
+{
+	int ret = 0;
+
+	if (!dd->ipath_presets_needed)
+		return ret;
+	dd->ipath_presets_needed = 0;
+	/* Assert uC reset, so we don't clash with it. */
+	ipath_ibsd_reset(dd, 1);
+	udelay(2);
+	ipath_sd_trimdone_monitor(dd, "link-down");
+
+	ret = ipath_internal_presets(dd);
+	return ret;
+}
+
+static int ipath_sd_trimself(struct ipath_devdata *dd, int val)
+{
+	return ibsd_sto_noisy(dd, CMUCTRL5, val, 0xFF);
+}
+
+static int ipath_sd_early(struct ipath_devdata *dd)
+{
+	int ret = -1; /* Default failed */
+	int chnl;
+
+	for (chnl = 0; chnl < 4; ++chnl) {
+		ret = ibsd_sto_noisy(dd, RXHSCTRL0(chnl), 0xD4, 0xFF);
+		if (ret < 0)
+			goto bail;
+	}
+	for (chnl = 0; chnl < 4; ++chnl) {
+		ret = ibsd_sto_noisy(dd, VCDL_DAC2(chnl), 0x2D, 0xFF);
+		if (ret < 0)
+			goto bail;
+	}
+	/* more fine-tuning of what will be default */
+	for (chnl = 0; chnl < 4; ++chnl) {
+		ret = ibsd_sto_noisy(dd, VCDL_CTRL2(chnl), 3, 0xF);
+		if (ret < 0)
+			goto bail;
+	}
+	for (chnl = 0; chnl < 4; ++chnl) {
+		ret = ibsd_sto_noisy(dd, START_EQ1(chnl), 0x10, 0xFF);
+		if (ret < 0)
+			goto bail;
+	}
+	for (chnl = 0; chnl < 4; ++chnl) {
+		ret = ibsd_sto_noisy(dd, START_EQ2(chnl), 0x30, 0xFF);
+		if (ret < 0)
+			goto bail;
+	}
+bail:
+	return ret;
+}
+
+#define BACTRL(chnl) EPB_LOC(chnl, 6, 0x0E)
+#define LDOUTCTRL1(chnl) EPB_LOC(chnl, 7, 6)
+#define RXHSSTATUS(chnl) EPB_LOC(chnl, 6, 0xF)
+
+static int ipath_sd_dactrim(struct ipath_devdata *dd)
+{
+	int ret = -1; /* Default failed */
+	int chnl;
+
+	for (chnl = 0; chnl < 4; ++chnl) {
+		ret = ibsd_sto_noisy(dd, BACTRL(chnl), 0x40, 0xFF);
+		if (ret < 0)
+			goto bail;
+	}
+	for (chnl = 0; chnl < 4; ++chnl) {
+		ret = ibsd_sto_noisy(dd, LDOUTCTRL1(chnl), 0x04, 0xFF);
+		if (ret < 0)
+			goto bail;
+	}
+	for (chnl = 0; chnl < 4; ++chnl) {
+		ret = ibsd_sto_noisy(dd, RXHSSTATUS(chnl), 0x04, 0xFF);
+		if (ret < 0)
+			goto bail;
+	}
+	/*
+	 * delay for max possible number of steps, with slop.
+	 * Each step is about 4usec.
+	 */
+	udelay(415);
+	for (chnl = 0; chnl < 4; ++chnl) {
+		ret = ibsd_sto_noisy(dd, LDOUTCTRL1(chnl), 0x00, 0xFF);
+		if (ret < 0)
+			goto bail;
+	}
+bail:
+	return ret;
+}
+
+#define RELOCK_FIRST_MS 3
+#define RXLSPPM(chan) EPB_LOC(chan, 0, 2)
+void ipath_toggle_rclkrls(struct ipath_devdata *dd)
+{
+	int loc = RXLSPPM(0) | EPB_GLOBAL_WR;
+	int ret;
+
+	ret = ibsd_mod_allchnls(dd, loc, 0, 0x80);
+	if (ret < 0)
+		ipath_dev_err(dd, "RCLKRLS failed to clear D7\n");
+	else {
+		udelay(1);
+		ibsd_mod_allchnls(dd, loc, 0x80, 0x80);
+	}
+	/* And again for good measure */
+	udelay(1);
+	ret = ibsd_mod_allchnls(dd, loc, 0, 0x80);
+	if (ret < 0)
+		ipath_dev_err(dd, "RCLKRLS failed to clear D7\n");
+	else {
+		udelay(1);
+		ibsd_mod_allchnls(dd, loc, 0x80, 0x80);
+	}
+	/* Now reset xgxs and IBC to complete the recovery */
+	dd->ipath_f_xgxs_reset(dd);
+}
+
+/*
+ * Shut down the timer that polls for relock occasions, if needed.
+ * This is "hooked" from ipath_7220_quiet_serdes(), which is called
+ * just before ipath_shutdown_device() in ipath_driver.c shuts down all
+ * the other timers.
+ */
+void ipath_shutdown_relock_poll(struct ipath_devdata *dd)
+{
+	struct ipath_relock *irp = &dd->ipath_relock_singleton;
+	if (atomic_read(&irp->ipath_relock_timer_active)) {
+		del_timer_sync(&irp->ipath_relock_timer);
+		atomic_set(&irp->ipath_relock_timer_active, 0);
+	}
+}
+
+static unsigned ipath_relock_by_timer = 1;
+module_param_named(relock_by_timer, ipath_relock_by_timer, uint,
+	S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(relock_by_timer, "Allow relock attempt if link not up");
+
+static void ipath_run_relock(unsigned long opaque)
+{
+	struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
+	struct ipath_relock *irp = &dd->ipath_relock_singleton;
+	u64 val, ltstate;
+
+	if (!(dd->ipath_flags & IPATH_INITTED)) {
+		/* Not yet up, just re-enable the timer for later */
+		irp->ipath_relock_interval = HZ;
+		mod_timer(&irp->ipath_relock_timer, jiffies + HZ);
+		return;
+	}
+
+	/*
+	 * Check link-training state for "stuck" state.
+	 * If found, try relock and schedule another try at an
+	 * exponentially growing delay, maxed at one second.
+	 * If not stuck, our work is done.
+	 */
+	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
+	ltstate = ipath_ib_linktrstate(dd, val);
+
+	if (ltstate <= INFINIPATH_IBCS_LT_STATE_CFGWAITRMT
+		&& ltstate != INFINIPATH_IBCS_LT_STATE_LINKUP) {
+		int timeoff;
+		/* Not up yet. Try again, if allowed by module-param */
+		if (ipath_relock_by_timer) {
+			if (dd->ipath_flags & IPATH_IB_AUTONEG_INPROG)
+				ipath_cdbg(VERBOSE, "Skip RELOCK in AUTONEG\n");
+			else if (!(dd->ipath_flags & IPATH_IB_LINK_DISABLED)) {
+				ipath_cdbg(VERBOSE, "RELOCK\n");
+				ipath_toggle_rclkrls(dd);
+			}
+		}
+		/* re-set timer for next check */
+		timeoff = irp->ipath_relock_interval << 1;
+		if (timeoff > HZ)
+			timeoff = HZ;
+		irp->ipath_relock_interval = timeoff;
+
+		mod_timer(&irp->ipath_relock_timer, jiffies + timeoff);
+	} else {
+		/* Up, so no more need to check so often */
+		mod_timer(&irp->ipath_relock_timer, jiffies + HZ);
+	}
+}
+
+void ipath_set_relock_poll(struct ipath_devdata *dd, int ibup)
+{
+	struct ipath_relock *irp = &dd->ipath_relock_singleton;
+
+	if (ibup > 0) {
+		/* we are now up, so relax timer to 1 second interval */
+		if (atomic_read(&irp->ipath_relock_timer_active))
+			mod_timer(&irp->ipath_relock_timer, jiffies + HZ);
+	} else {
+		/* Transition to down, (re-)set timer to short interval. */
+		int timeout;
+		timeout = (HZ * ((ibup == -1) ? 1000 : RELOCK_FIRST_MS))/1000;
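+		/*
+		 * For example, ibup == -1 yields a full HZ (one second),
+		 * while a real down-transition yields ~3 msec worth of
+		 * jiffies, which often truncates to 0 and is clamped
+		 * below.
+		 */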
+		if (timeout == 0)
+			timeout = 1;
+		/* If timer has not yet been started, do so. */
+		if (atomic_inc_return(&irp->ipath_relock_timer_active) == 1) {
+			init_timer(&irp->ipath_relock_timer);
+			irp->ipath_relock_timer.function = ipath_run_relock;
+			irp->ipath_relock_timer.data = (unsigned long) dd;
+			irp->ipath_relock_interval = timeout;
+			irp->ipath_relock_timer.expires = jiffies + timeout;
+			add_timer(&irp->ipath_relock_timer);
+		} else {
+			irp->ipath_relock_interval = timeout;
+			mod_timer(&irp->ipath_relock_timer, jiffies + timeout);
+			atomic_dec(&irp->ipath_relock_timer_active);
+		}
+	}
+}
+
diff --git a/drivers/infiniband/hw/ipath/ipath_sd7220_img.c b/drivers/infiniband/hw/ipath/ipath_sd7220_img.c
new file mode 100644
index 0000000..5ef59da
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_sd7220_img.c
@@ -0,0 +1,1082 @@
+/*
+ * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * This file contains the memory image from the vendor, to be copied into
+ * the IB SERDES of the IBA7220 during initialization.
+ * The file also includes the two functions which use this image.
+ */
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+#include "ipath_kernel.h"
+#include "ipath_registers.h"
+#include "ipath_7220.h"
+
+static unsigned char ipath_sd7220_ib_img[] = {
+/*0000*/0x02, 0x0A, 0x29, 0x02, 0x0A, 0x87, 0xE5, 0xE6,
+	0x30, 0xE6, 0x04, 0x7F, 0x01, 0x80, 0x02, 0x7F,
+/*0010*/0x00, 0xE5, 0xE2, 0x30, 0xE4, 0x04, 0x7E, 0x01,
+	0x80, 0x02, 0x7E, 0x00, 0xEE, 0x5F, 0x60, 0x08,
+/*0020*/0x53, 0xF9, 0xF7, 0xE4, 0xF5, 0xFE, 0x80, 0x08,
+	0x7F, 0x0A, 0x12, 0x17, 0x31, 0x12, 0x0E, 0xA2,
+/*0030*/0x75, 0xFC, 0x08, 0xE4, 0xF5, 0xFD, 0xE5, 0xE7,
+	0x20, 0xE7, 0x03, 0x43, 0xF9, 0x08, 0x22, 0x00,
+/*0040*/0x01, 0x20, 0x11, 0x00, 0x04, 0x20, 0x00, 0x75,
+	0x51, 0x01, 0xE4, 0xF5, 0x52, 0xF5, 0x53, 0xF5,
+/*0050*/0x52, 0xF5, 0x7E, 0x7F, 0x04, 0x02, 0x04, 0x38,
+	0xC2, 0x36, 0x05, 0x52, 0xE5, 0x52, 0xD3, 0x94,
+/*0060*/0x0C, 0x40, 0x05, 0x75, 0x52, 0x01, 0xD2, 0x36,
+	0x90, 0x07, 0x0C, 0x74, 0x07, 0xF0, 0xA3, 0x74,
+/*0070*/0xFF, 0xF0, 0xE4, 0xF5, 0x0C, 0xA3, 0xF0, 0x90,
+	0x07, 0x14, 0xF0, 0xA3, 0xF0, 0x75, 0x0B, 0x20,
+/*0080*/0xF5, 0x09, 0xE4, 0xF5, 0x08, 0xE5, 0x08, 0xD3,
+	0x94, 0x30, 0x40, 0x03, 0x02, 0x04, 0x04, 0x12,
+/*0090*/0x00, 0x06, 0x15, 0x0B, 0xE5, 0x08, 0x70, 0x04,
+	0x7F, 0x01, 0x80, 0x02, 0x7F, 0x00, 0xE5, 0x09,
+/*00A0*/0x70, 0x04, 0x7E, 0x01, 0x80, 0x02, 0x7E, 0x00,
+	0xEE, 0x5F, 0x60, 0x05, 0x12, 0x18, 0x71, 0xD2,
+/*00B0*/0x35, 0x53, 0xE1, 0xF7, 0xE5, 0x08, 0x45, 0x09,
+	0xFF, 0xE5, 0x0B, 0x25, 0xE0, 0x25, 0xE0, 0x24,
+/*00C0*/0x83, 0xF5, 0x82, 0xE4, 0x34, 0x07, 0xF5, 0x83,
+	0xEF, 0xF0, 0x85, 0xE2, 0x20, 0xE5, 0x52, 0xD3,
+/*00D0*/0x94, 0x01, 0x40, 0x0D, 0x12, 0x19, 0xF3, 0xE0,
+	0x54, 0xA0, 0x64, 0x40, 0x70, 0x03, 0x02, 0x03,
+/*00E0*/0xFB, 0x53, 0xF9, 0xF8, 0x90, 0x94, 0x70, 0xE4,
+	0xF0, 0xE0, 0xF5, 0x10, 0xAF, 0x09, 0x12, 0x1E,
+/*00F0*/0xB3, 0xAF, 0x08, 0xEF, 0x44, 0x08, 0xF5, 0x82,
+	0x75, 0x83, 0x80, 0xE0, 0xF5, 0x29, 0xEF, 0x44,
+/*0100*/0x07, 0x12, 0x1A, 0x3C, 0xF5, 0x22, 0x54, 0x40,
+	0xD3, 0x94, 0x00, 0x40, 0x1E, 0xE5, 0x29, 0x54,
+/*0110*/0xF0, 0x70, 0x21, 0x12, 0x19, 0xF3, 0xE0, 0x44,
+	0x80, 0xF0, 0xE5, 0x22, 0x54, 0x30, 0x65, 0x08,
+/*0120*/0x70, 0x09, 0x12, 0x19, 0xF3, 0xE0, 0x54, 0xBF,
+	0xF0, 0x80, 0x09, 0x12, 0x19, 0xF3, 0x74, 0x40,
+/*0130*/0xF0, 0x02, 0x03, 0xFB, 0x12, 0x1A, 0x12, 0x75,
+	0x83, 0xAE, 0x74, 0xFF, 0xF0, 0xAF, 0x08, 0x7E,
+/*0140*/0x00, 0xEF, 0x44, 0x07, 0xF5, 0x82, 0xE0, 0xFD,
+	0xE5, 0x0B, 0x25, 0xE0, 0x25, 0xE0, 0x24, 0x81,
+/*0150*/0xF5, 0x82, 0xE4, 0x34, 0x07, 0xF5, 0x83, 0xED,
+	0xF0, 0x90, 0x07, 0x0E, 0xE0, 0x04, 0xF0, 0xEF,
+/*0160*/0x44, 0x07, 0xF5, 0x82, 0x75, 0x83, 0x98, 0xE0,
+	0xF5, 0x28, 0x12, 0x1A, 0x23, 0x40, 0x0C, 0x12,
+/*0170*/0x19, 0xF3, 0xE0, 0x44, 0x01, 0x12, 0x1A, 0x32,
+	0x02, 0x03, 0xF6, 0xAF, 0x08, 0x7E, 0x00, 0x74,
+/*0180*/0x80, 0xCD, 0xEF, 0xCD, 0x8D, 0x82, 0xF5, 0x83,
+	0xE0, 0x30, 0xE0, 0x0A, 0x12, 0x19, 0xF3, 0xE0,
+/*0190*/0x44, 0x20, 0xF0, 0x02, 0x03, 0xFB, 0x12, 0x19,
+	0xF3, 0xE0, 0x54, 0xDF, 0xF0, 0xEE, 0x44, 0xAE,
+/*01A0*/0x12, 0x1A, 0x43, 0x30, 0xE4, 0x03, 0x02, 0x03,
+	0xFB, 0x74, 0x9E, 0x12, 0x1A, 0x05, 0x20, 0xE0,
+/*01B0*/0x03, 0x02, 0x03, 0xFB, 0x8F, 0x82, 0x8E, 0x83,
+	0xE0, 0x20, 0xE0, 0x03, 0x02, 0x03, 0xFB, 0x12,
+/*01C0*/0x19, 0xF3, 0xE0, 0x44, 0x10, 0xF0, 0xE5, 0xE3,
+	0x20, 0xE7, 0x08, 0xE5, 0x08, 0x12, 0x1A, 0x3A,
+/*01D0*/0x44, 0x04, 0xF0, 0xAF, 0x08, 0x7E, 0x00, 0xEF,
+	0x12, 0x1A, 0x3A, 0x20, 0xE2, 0x34, 0x12, 0x19,
+/*01E0*/0xF3, 0xE0, 0x44, 0x08, 0xF0, 0xE5, 0xE4, 0x30,
+	0xE6, 0x04, 0x7D, 0x01, 0x80, 0x02, 0x7D, 0x00,
+/*01F0*/0xE5, 0x7E, 0xC3, 0x94, 0x04, 0x50, 0x04, 0x7C,
+	0x01, 0x80, 0x02, 0x7C, 0x00, 0xEC, 0x4D, 0x60,
+/*0200*/0x05, 0xC2, 0x35, 0x02, 0x03, 0xFB, 0xEE, 0x44,
+	0xD2, 0x12, 0x1A, 0x43, 0x44, 0x40, 0xF0, 0x02,
+/*0210*/0x03, 0xFB, 0x12, 0x19, 0xF3, 0xE0, 0x54, 0xF7,
+	0xF0, 0x12, 0x1A, 0x12, 0x75, 0x83, 0xD2, 0xE0,
+/*0220*/0x54, 0xBF, 0xF0, 0x90, 0x07, 0x14, 0xE0, 0x04,
+	0xF0, 0xE5, 0x7E, 0x70, 0x03, 0x75, 0x7E, 0x01,
+/*0230*/0xAF, 0x08, 0x7E, 0x00, 0x12, 0x1A, 0x23, 0x40,
+	0x12, 0x12, 0x19, 0xF3, 0xE0, 0x44, 0x01, 0x12,
+/*0240*/0x19, 0xF2, 0xE0, 0x54, 0x02, 0x12, 0x1A, 0x32,
+	0x02, 0x03, 0xFB, 0x12, 0x19, 0xF3, 0xE0, 0x44,
+/*0250*/0x02, 0x12, 0x19, 0xF2, 0xE0, 0x54, 0xFE, 0xF0,
+	0xC2, 0x35, 0xEE, 0x44, 0x8A, 0x8F, 0x82, 0xF5,
+/*0260*/0x83, 0xE0, 0xF5, 0x17, 0x54, 0x8F, 0x44, 0x40,
+	0xF0, 0x74, 0x90, 0xFC, 0xE5, 0x08, 0x44, 0x07,
+/*0270*/0xFD, 0xF5, 0x82, 0x8C, 0x83, 0xE0, 0x54, 0x3F,
+	0x90, 0x07, 0x02, 0xF0, 0xE0, 0x54, 0xC0, 0x8D,
+/*0280*/0x82, 0x8C, 0x83, 0xF0, 0x74, 0x92, 0x12, 0x1A,
+	0x05, 0x90, 0x07, 0x03, 0x12, 0x1A, 0x19, 0x74,
+/*0290*/0x82, 0x12, 0x1A, 0x05, 0x90, 0x07, 0x04, 0x12,
+	0x1A, 0x19, 0x74, 0xB4, 0x12, 0x1A, 0x05, 0x90,
+/*02A0*/0x07, 0x05, 0x12, 0x1A, 0x19, 0x74, 0x94, 0xFE,
+	0xE5, 0x08, 0x44, 0x06, 0x12, 0x1A, 0x0A, 0xF5,
+/*02B0*/0x10, 0x30, 0xE0, 0x04, 0xD2, 0x37, 0x80, 0x02,
+	0xC2, 0x37, 0xE5, 0x10, 0x54, 0x7F, 0x8F, 0x82,
+/*02C0*/0x8E, 0x83, 0xF0, 0x30, 0x44, 0x30, 0x12, 0x1A,
+	0x03, 0x54, 0x80, 0xD3, 0x94, 0x00, 0x40, 0x04,
+/*02D0*/0xD2, 0x39, 0x80, 0x02, 0xC2, 0x39, 0x8F, 0x82,
+	0x8E, 0x83, 0xE0, 0x44, 0x80, 0xF0, 0x12, 0x1A,
+/*02E0*/0x03, 0x54, 0x40, 0xD3, 0x94, 0x00, 0x40, 0x04,
+	0xD2, 0x3A, 0x80, 0x02, 0xC2, 0x3A, 0x8F, 0x82,
+/*02F0*/0x8E, 0x83, 0xE0, 0x44, 0x40, 0xF0, 0x74, 0x92,
+	0xFE, 0xE5, 0x08, 0x44, 0x06, 0x12, 0x1A, 0x0A,
+/*0300*/0x30, 0xE7, 0x04, 0xD2, 0x38, 0x80, 0x02, 0xC2,
+	0x38, 0x8F, 0x82, 0x8E, 0x83, 0xE0, 0x54, 0x7F,
+/*0310*/0xF0, 0x12, 0x1E, 0x46, 0xE4, 0xF5, 0x0A, 0x20,
+	0x03, 0x02, 0x80, 0x03, 0x30, 0x43, 0x03, 0x12,
+/*0320*/0x19, 0x95, 0x20, 0x02, 0x02, 0x80, 0x03, 0x30,
+	0x42, 0x03, 0x12, 0x0C, 0x8F, 0x30, 0x30, 0x06,
+/*0330*/0x12, 0x19, 0x95, 0x12, 0x0C, 0x8F, 0x12, 0x0D,
+	0x47, 0x12, 0x19, 0xF3, 0xE0, 0x54, 0xFB, 0xF0,
+/*0340*/0xE5, 0x0A, 0xC3, 0x94, 0x01, 0x40, 0x46, 0x43,
+	0xE1, 0x08, 0x12, 0x19, 0xF3, 0xE0, 0x44, 0x04,
+/*0350*/0xF0, 0xE5, 0xE4, 0x20, 0xE7, 0x2A, 0x12, 0x1A,
+	0x12, 0x75, 0x83, 0xD2, 0xE0, 0x54, 0x08, 0xD3,
+/*0360*/0x94, 0x00, 0x40, 0x04, 0x7F, 0x01, 0x80, 0x02,
+	0x7F, 0x00, 0xE5, 0x0A, 0xC3, 0x94, 0x01, 0x40,
+/*0370*/0x04, 0x7E, 0x01, 0x80, 0x02, 0x7E, 0x00, 0xEF,
+	0x5E, 0x60, 0x05, 0x12, 0x1D, 0xD7, 0x80, 0x17,
+/*0380*/0x12, 0x1A, 0x12, 0x75, 0x83, 0xD2, 0xE0, 0x44,
+	0x08, 0xF0, 0x02, 0x03, 0xFB, 0x12, 0x1A, 0x12,
+/*0390*/0x75, 0x83, 0xD2, 0xE0, 0x54, 0xF7, 0xF0, 0x12,
+	0x1E, 0x46, 0x7F, 0x08, 0x12, 0x17, 0x31, 0x74,
+/*03A0*/0x8E, 0xFE, 0x12, 0x1A, 0x12, 0x8E, 0x83, 0xE0,
+	0xF5, 0x10, 0x54, 0xFE, 0xF0, 0xE5, 0x10, 0x44,
+/*03B0*/0x01, 0xFF, 0xE5, 0x08, 0xFD, 0xED, 0x44, 0x07,
+	0xF5, 0x82, 0xEF, 0xF0, 0xE5, 0x10, 0x54, 0xFE,
+/*03C0*/0xFF, 0xED, 0x44, 0x07, 0xF5, 0x82, 0xEF, 0x12,
+	0x1A, 0x11, 0x75, 0x83, 0x86, 0xE0, 0x44, 0x10,
+/*03D0*/0x12, 0x1A, 0x11, 0xE0, 0x44, 0x10, 0xF0, 0x12,
+	0x19, 0xF3, 0xE0, 0x54, 0xFD, 0x44, 0x01, 0xFF,
+/*03E0*/0x12, 0x19, 0xF3, 0xEF, 0x12, 0x1A, 0x32, 0x30,
+	0x32, 0x0C, 0xE5, 0x08, 0x44, 0x08, 0xF5, 0x82,
+/*03F0*/0x75, 0x83, 0x82, 0x74, 0x05, 0xF0, 0xAF, 0x0B,
+	0x12, 0x18, 0xD7, 0x74, 0x10, 0x25, 0x08, 0xF5,
+/*0400*/0x08, 0x02, 0x00, 0x85, 0x05, 0x09, 0xE5, 0x09,
+	0xD3, 0x94, 0x07, 0x50, 0x03, 0x02, 0x00, 0x82,
+/*0410*/0xE5, 0x7E, 0xD3, 0x94, 0x00, 0x40, 0x04, 0x7F,
+	0x01, 0x80, 0x02, 0x7F, 0x00, 0xE5, 0x7E, 0xC3,
+/*0420*/0x94, 0xFA, 0x50, 0x04, 0x7E, 0x01, 0x80, 0x02,
+	0x7E, 0x00, 0xEE, 0x5F, 0x60, 0x02, 0x05, 0x7E,
+/*0430*/0x30, 0x35, 0x0B, 0x43, 0xE1, 0x01, 0x7F, 0x09,
+	0x12, 0x17, 0x31, 0x02, 0x00, 0x58, 0x53, 0xE1,
+/*0440*/0xFE, 0x02, 0x00, 0x58, 0x8E, 0x6A, 0x8F, 0x6B,
+	0x8C, 0x6C, 0x8D, 0x6D, 0x75, 0x6E, 0x01, 0x75,
+/*0450*/0x6F, 0x01, 0x75, 0x70, 0x01, 0xE4, 0xF5, 0x73,
+	0xF5, 0x74, 0xF5, 0x75, 0x90, 0x07, 0x2F, 0xF0,
+/*0460*/0xF5, 0x3C, 0xF5, 0x3E, 0xF5, 0x46, 0xF5, 0x47,
+	0xF5, 0x3D, 0xF5, 0x3F, 0xF5, 0x6F, 0xE5, 0x6F,
+/*0470*/0x70, 0x0F, 0xE5, 0x6B, 0x45, 0x6A, 0x12, 0x07,
+	0x2A, 0x75, 0x83, 0x80, 0x74, 0x3A, 0xF0, 0x80,
+/*0480*/0x09, 0x12, 0x07, 0x2A, 0x75, 0x83, 0x80, 0x74,
+	0x1A, 0xF0, 0xE4, 0xF5, 0x6E, 0xC3, 0x74, 0x3F,
+/*0490*/0x95, 0x6E, 0xFF, 0x12, 0x08, 0x65, 0x75, 0x83,
+	0x82, 0xEF, 0xF0, 0x12, 0x1A, 0x4D, 0x12, 0x08,
+/*04A0*/0xC6, 0xE5, 0x33, 0xF0, 0x12, 0x08, 0xFA, 0x12,
+	0x08, 0xB1, 0x40, 0xE1, 0xE5, 0x6F, 0x70, 0x0B,
+/*04B0*/0x12, 0x07, 0x2A, 0x75, 0x83, 0x80, 0x74, 0x36,
+	0xF0, 0x80, 0x09, 0x12, 0x07, 0x2A, 0x75, 0x83,
+/*04C0*/0x80, 0x74, 0x16, 0xF0, 0x75, 0x6E, 0x01, 0x12,
+	0x07, 0x2A, 0x75, 0x83, 0xB4, 0xE5, 0x6E, 0xF0,
+/*04D0*/0x12, 0x1A, 0x4D, 0x74, 0x3F, 0x25, 0x6E, 0xF5,
+	0x82, 0xE4, 0x34, 0x00, 0xF5, 0x83, 0xE5, 0x33,
+/*04E0*/0xF0, 0x74, 0xBF, 0x25, 0x6E, 0xF5, 0x82, 0xE4,
+	0x34, 0x00, 0x12, 0x08, 0xB1, 0x40, 0xD8, 0xE4,
+/*04F0*/0xF5, 0x70, 0xF5, 0x46, 0xF5, 0x47, 0xF5, 0x6E,
+	0x12, 0x08, 0xFA, 0xF5, 0x83, 0xE0, 0xFE, 0x12,
+/*0500*/0x08, 0xC6, 0xE0, 0x7C, 0x00, 0x24, 0x00, 0xFF,
+	0xEC, 0x3E, 0xFE, 0xAD, 0x3B, 0xD3, 0xEF, 0x9D,
+/*0510*/0xEE, 0x9C, 0x50, 0x04, 0x7B, 0x01, 0x80, 0x02,
+	0x7B, 0x00, 0xE5, 0x70, 0x70, 0x04, 0x7A, 0x01,
+/*0520*/0x80, 0x02, 0x7A, 0x00, 0xEB, 0x5A, 0x60, 0x06,
+	0x85, 0x6E, 0x46, 0x75, 0x70, 0x01, 0xD3, 0xEF,
+/*0530*/0x9D, 0xEE, 0x9C, 0x50, 0x04, 0x7F, 0x01, 0x80,
+	0x02, 0x7F, 0x00, 0xE5, 0x70, 0xB4, 0x01, 0x04,
+/*0540*/0x7E, 0x01, 0x80, 0x02, 0x7E, 0x00, 0xEF, 0x5E,
+	0x60, 0x03, 0x85, 0x6E, 0x47, 0x05, 0x6E, 0xE5,
+/*0550*/0x6E, 0x64, 0x7F, 0x70, 0xA3, 0xE5, 0x46, 0x60,
+	0x05, 0xE5, 0x47, 0xB4, 0x7E, 0x03, 0x85, 0x46,
+/*0560*/0x47, 0xE5, 0x6F, 0x70, 0x08, 0x85, 0x46, 0x76,
+	0x85, 0x47, 0x77, 0x80, 0x0E, 0xC3, 0x74, 0x7F,
+/*0570*/0x95, 0x46, 0xF5, 0x78, 0xC3, 0x74, 0x7F, 0x95,
+	0x47, 0xF5, 0x79, 0xE5, 0x6F, 0x70, 0x37, 0xE5,
+/*0580*/0x46, 0x65, 0x47, 0x70, 0x0C, 0x75, 0x73, 0x01,
+	0x75, 0x74, 0x01, 0xF5, 0x3C, 0xF5, 0x3D, 0x80,
+/*0590*/0x35, 0xE4, 0xF5, 0x4E, 0xC3, 0xE5, 0x47, 0x95,
+	0x46, 0xF5, 0x3C, 0xC3, 0x13, 0xF5, 0x71, 0x25,
+/*05A0*/0x46, 0xF5, 0x72, 0xC3, 0x94, 0x3F, 0x40, 0x05,
+	0xE4, 0xF5, 0x3D, 0x80, 0x40, 0xC3, 0x74, 0x3F,
+/*05B0*/0x95, 0x72, 0xF5, 0x3D, 0x80, 0x37, 0xE5, 0x46,
+	0x65, 0x47, 0x70, 0x0F, 0x75, 0x73, 0x01, 0x75,
+/*05C0*/0x75, 0x01, 0xF5, 0x3E, 0xF5, 0x3F, 0x75, 0x4E,
+	0x01, 0x80, 0x22, 0xE4, 0xF5, 0x4E, 0xC3, 0xE5,
+/*05D0*/0x47, 0x95, 0x46, 0xF5, 0x3E, 0xC3, 0x13, 0xF5,
+	0x71, 0x25, 0x46, 0xF5, 0x72, 0xD3, 0x94, 0x3F,
+/*05E0*/0x50, 0x05, 0xE4, 0xF5, 0x3F, 0x80, 0x06, 0xE5,
+	0x72, 0x24, 0xC1, 0xF5, 0x3F, 0x05, 0x6F, 0xE5,
+/*05F0*/0x6F, 0xC3, 0x94, 0x02, 0x50, 0x03, 0x02, 0x04,
+	0x6E, 0xE5, 0x6D, 0x45, 0x6C, 0x70, 0x02, 0x80,
+/*0600*/0x04, 0xE5, 0x74, 0x45, 0x75, 0x90, 0x07, 0x2F,
+	0xF0, 0x7F, 0x01, 0xE5, 0x3E, 0x60, 0x04, 0xE5,
+/*0610*/0x3C, 0x70, 0x14, 0xE4, 0xF5, 0x3C, 0xF5, 0x3D,
+	0xF5, 0x3E, 0xF5, 0x3F, 0x12, 0x08, 0xD2, 0x70,
+/*0620*/0x04, 0xF0, 0x02, 0x06, 0xA4, 0x80, 0x7A, 0xE5,
+	0x3C, 0xC3, 0x95, 0x3E, 0x40, 0x07, 0xE5, 0x3C,
+/*0630*/0x95, 0x3E, 0xFF, 0x80, 0x06, 0xC3, 0xE5, 0x3E,
+	0x95, 0x3C, 0xFF, 0xE5, 0x76, 0xD3, 0x95, 0x79,
+/*0640*/0x40, 0x05, 0x85, 0x76, 0x7A, 0x80, 0x03, 0x85,
+	0x79, 0x7A, 0xE5, 0x77, 0xC3, 0x95, 0x78, 0x50,
+/*0650*/0x05, 0x85, 0x77, 0x7B, 0x80, 0x03, 0x85, 0x78,
+	0x7B, 0xE5, 0x7B, 0xD3, 0x95, 0x7A, 0x40, 0x30,
+/*0660*/0xE5, 0x7B, 0x95, 0x7A, 0xF5, 0x3C, 0xF5, 0x3E,
+	0xC3, 0xE5, 0x7B, 0x95, 0x7A, 0x90, 0x07, 0x19,
+/*0670*/0xF0, 0xE5, 0x3C, 0xC3, 0x13, 0xF5, 0x71, 0x25,
+	0x7A, 0xF5, 0x72, 0xC3, 0x94, 0x3F, 0x40, 0x05,
+/*0680*/0xE4, 0xF5, 0x3D, 0x80, 0x1F, 0xC3, 0x74, 0x3F,
+	0x95, 0x72, 0xF5, 0x3D, 0xF5, 0x3F, 0x80, 0x14,
+/*0690*/0xE4, 0xF5, 0x3C, 0xF5, 0x3E, 0x90, 0x07, 0x19,
+	0xF0, 0x12, 0x08, 0xD2, 0x70, 0x03, 0xF0, 0x80,
+/*06A0*/0x03, 0x74, 0x01, 0xF0, 0x12, 0x08, 0x65, 0x75,
+	0x83, 0xD0, 0xE0, 0x54, 0x0F, 0xFE, 0xAD, 0x3C,
+/*06B0*/0x70, 0x02, 0x7E, 0x07, 0xBE, 0x0F, 0x02, 0x7E,
+	0x80, 0xEE, 0xFB, 0xEF, 0xD3, 0x9B, 0x74, 0x80,
+/*06C0*/0xF8, 0x98, 0x40, 0x1F, 0xE4, 0xF5, 0x3C, 0xF5,
+	0x3E, 0x12, 0x08, 0xD2, 0x70, 0x03, 0xF0, 0x80,
+/*06D0*/0x12, 0x74, 0x01, 0xF0, 0xE5, 0x08, 0xFB, 0xEB,
+	0x44, 0x07, 0xF5, 0x82, 0x75, 0x83, 0xD2, 0xE0,
+/*06E0*/0x44, 0x10, 0xF0, 0xE5, 0x08, 0xFB, 0xEB, 0x44,
+	0x09, 0xF5, 0x82, 0x75, 0x83, 0x9E, 0xED, 0xF0,
+/*06F0*/0xEB, 0x44, 0x07, 0xF5, 0x82, 0x75, 0x83, 0xCA,
+	0xED, 0xF0, 0x12, 0x08, 0x65, 0x75, 0x83, 0xCC,
+/*0700*/0xEF, 0xF0, 0x22, 0xE5, 0x08, 0x44, 0x07, 0xF5,
+	0x82, 0x75, 0x83, 0xBC, 0xE0, 0x54, 0xF0, 0xF0,
+/*0710*/0xE5, 0x08, 0x44, 0x07, 0xF5, 0x82, 0x75, 0x83,
+	0xBE, 0xE0, 0x54, 0xF0, 0xF0, 0xE5, 0x08, 0x44,
+/*0720*/0x07, 0xF5, 0x82, 0x75, 0x83, 0xC0, 0xE0, 0x54,
+	0xF0, 0xF0, 0xE5, 0x08, 0x44, 0x07, 0xF5, 0x82,
+/*0730*/0x22, 0xF0, 0x90, 0x07, 0x28, 0xE0, 0xFE, 0xA3,
+	0xE0, 0xF5, 0x82, 0x8E, 0x83, 0x22, 0x85, 0x42,
+/*0740*/0x42, 0x85, 0x41, 0x41, 0x85, 0x40, 0x40, 0x74,
+	0xC0, 0x2F, 0xF5, 0x82, 0x74, 0x02, 0x3E, 0xF5,
+/*0750*/0x83, 0xE5, 0x42, 0xF0, 0x74, 0xE0, 0x2F, 0xF5,
+	0x82, 0x74, 0x02, 0x3E, 0xF5, 0x83, 0x22, 0xE5,
+/*0760*/0x42, 0x29, 0xFD, 0xE4, 0x33, 0xFC, 0xE5, 0x3C,
+	0xC3, 0x9D, 0xEC, 0x64, 0x80, 0xF8, 0x74, 0x80,
+/*0770*/0x98, 0x22, 0xF5, 0x83, 0xE0, 0x90, 0x07, 0x22,
+	0x54, 0x1F, 0xFD, 0xE0, 0xFA, 0xA3, 0xE0, 0xF5,
+/*0780*/0x82, 0x8A, 0x83, 0xED, 0xF0, 0x22, 0x90, 0x07,
+	0x22, 0xE0, 0xFC, 0xA3, 0xE0, 0xF5, 0x82, 0x8C,
+/*0790*/0x83, 0x22, 0x90, 0x07, 0x24, 0xFF, 0xED, 0x44,
+	0x07, 0xCF, 0xF0, 0xA3, 0xEF, 0xF0, 0x22, 0x85,
+/*07A0*/0x38, 0x38, 0x85, 0x39, 0x39, 0x85, 0x3A, 0x3A,
+	0x74, 0xC0, 0x2F, 0xF5, 0x82, 0x74, 0x02, 0x3E,
+/*07B0*/0xF5, 0x83, 0x22, 0x90, 0x07, 0x26, 0xFF, 0xED,
+	0x44, 0x07, 0xCF, 0xF0, 0xA3, 0xEF, 0xF0, 0x22,
+/*07C0*/0xF0, 0x74, 0xA0, 0x2F, 0xF5, 0x82, 0x74, 0x02,
+	0x3E, 0xF5, 0x83, 0x22, 0x74, 0xC0, 0x25, 0x11,
+/*07D0*/0xF5, 0x82, 0xE4, 0x34, 0x01, 0xF5, 0x83, 0x22,
+	0x74, 0x00, 0x25, 0x11, 0xF5, 0x82, 0xE4, 0x34,
+/*07E0*/0x02, 0xF5, 0x83, 0x22, 0x74, 0x60, 0x25, 0x11,
+	0xF5, 0x82, 0xE4, 0x34, 0x03, 0xF5, 0x83, 0x22,
+/*07F0*/0x74, 0x80, 0x25, 0x11, 0xF5, 0x82, 0xE4, 0x34,
+	0x03, 0xF5, 0x83, 0x22, 0x74, 0xE0, 0x25, 0x11,
+/*0800*/0xF5, 0x82, 0xE4, 0x34, 0x03, 0xF5, 0x83, 0x22,
+	0x74, 0x40, 0x25, 0x11, 0xF5, 0x82, 0xE4, 0x34,
+/*0810*/0x06, 0xF5, 0x83, 0x22, 0x74, 0x80, 0x2F, 0xF5,
+	0x82, 0x74, 0x02, 0x3E, 0xF5, 0x83, 0x22, 0xAF,
+/*0820*/0x08, 0x7E, 0x00, 0xEF, 0x44, 0x07, 0xF5, 0x82,
+	0x22, 0xF5, 0x83, 0xE5, 0x82, 0x44, 0x07, 0xF5,
+/*0830*/0x82, 0xE5, 0x40, 0xF0, 0x22, 0x74, 0x40, 0x25,
+	0x11, 0xF5, 0x82, 0xE4, 0x34, 0x02, 0xF5, 0x83,
+/*0840*/0x22, 0x74, 0xC0, 0x25, 0x11, 0xF5, 0x82, 0xE4,
+	0x34, 0x03, 0xF5, 0x83, 0x22, 0x74, 0x00, 0x25,
+/*0850*/0x11, 0xF5, 0x82, 0xE4, 0x34, 0x06, 0xF5, 0x83,
+	0x22, 0x74, 0x20, 0x25, 0x11, 0xF5, 0x82, 0xE4,
+/*0860*/0x34, 0x06, 0xF5, 0x83, 0x22, 0xE5, 0x08, 0xFD,
+	0xED, 0x44, 0x07, 0xF5, 0x82, 0x22, 0xE5, 0x41,
+/*0870*/0xF0, 0xE5, 0x65, 0x64, 0x01, 0x45, 0x64, 0x22,
+	0x7E, 0x00, 0xFB, 0x7A, 0x00, 0xFD, 0x7C, 0x00,
+/*0880*/0x22, 0x74, 0x20, 0x25, 0x11, 0xF5, 0x82, 0xE4,
+	0x34, 0x02, 0x22, 0x74, 0xA0, 0x25, 0x11, 0xF5,
+/*0890*/0x82, 0xE4, 0x34, 0x03, 0x22, 0x85, 0x3E, 0x42,
+	0x85, 0x3F, 0x41, 0x8F, 0x40, 0x22, 0x85, 0x3C,
+/*08A0*/0x42, 0x85, 0x3D, 0x41, 0x8F, 0x40, 0x22, 0x75,
+	0x45, 0x3F, 0x90, 0x07, 0x20, 0xE4, 0xF0, 0xA3,
+/*08B0*/0x22, 0xF5, 0x83, 0xE5, 0x32, 0xF0, 0x05, 0x6E,
+	0xE5, 0x6E, 0xC3, 0x94, 0x40, 0x22, 0xF0, 0xE5,
+/*08C0*/0x08, 0x44, 0x06, 0xF5, 0x82, 0x22, 0x74, 0x00,
+	0x25, 0x6E, 0xF5, 0x82, 0xE4, 0x34, 0x00, 0xF5,
+/*08D0*/0x83, 0x22, 0xE5, 0x6D, 0x45, 0x6C, 0x90, 0x07,
+	0x2F, 0x22, 0xE4, 0xF9, 0xE5, 0x3C, 0xD3, 0x95,
+/*08E0*/0x3E, 0x22, 0x74, 0x80, 0x2E, 0xF5, 0x82, 0xE4,
+	0x34, 0x02, 0xF5, 0x83, 0xE0, 0x22, 0x74, 0xA0,
+/*08F0*/0x2E, 0xF5, 0x82, 0xE4, 0x34, 0x02, 0xF5, 0x83,
+	0xE0, 0x22, 0x74, 0x80, 0x25, 0x6E, 0xF5, 0x82,
+/*0900*/0xE4, 0x34, 0x00, 0x22, 0x25, 0x42, 0xFD, 0xE4,
+	0x33, 0xFC, 0x22, 0x85, 0x42, 0x42, 0x85, 0x41,
+/*0910*/0x41, 0x85, 0x40, 0x40, 0x22, 0xED, 0x4C, 0x60,
+	0x03, 0x02, 0x09, 0xE5, 0xEF, 0x4E, 0x70, 0x37,
+/*0920*/0x90, 0x07, 0x26, 0x12, 0x07, 0x89, 0xE0, 0xFD,
+	0x12, 0x07, 0xCC, 0xED, 0xF0, 0x90, 0x07, 0x28,
+/*0930*/0x12, 0x07, 0x89, 0xE0, 0xFD, 0x12, 0x07, 0xD8,
+	0xED, 0xF0, 0x12, 0x07, 0x86, 0xE0, 0x54, 0x1F,
+/*0940*/0xFD, 0x12, 0x08, 0x81, 0xF5, 0x83, 0xED, 0xF0,
+	0x90, 0x07, 0x24, 0x12, 0x07, 0x89, 0xE0, 0x54,
+/*0950*/0x1F, 0xFD, 0x12, 0x08, 0x35, 0xED, 0xF0, 0xEF,
+	0x64, 0x04, 0x4E, 0x70, 0x37, 0x90, 0x07, 0x26,
+/*0960*/0x12, 0x07, 0x89, 0xE0, 0xFD, 0x12, 0x07, 0xE4,
+	0xED, 0xF0, 0x90, 0x07, 0x28, 0x12, 0x07, 0x89,
+/*0970*/0xE0, 0xFD, 0x12, 0x07, 0xF0, 0xED, 0xF0, 0x12,
+	0x07, 0x86, 0xE0, 0x54, 0x1F, 0xFD, 0x12, 0x08,
+/*0980*/0x8B, 0xF5, 0x83, 0xED, 0xF0, 0x90, 0x07, 0x24,
+	0x12, 0x07, 0x89, 0xE0, 0x54, 0x1F, 0xFD, 0x12,
+/*0990*/0x08, 0x41, 0xED, 0xF0, 0xEF, 0x64, 0x01, 0x4E,
+	0x70, 0x04, 0x7D, 0x01, 0x80, 0x02, 0x7D, 0x00,
+/*09A0*/0xEF, 0x64, 0x02, 0x4E, 0x70, 0x04, 0x7F, 0x01,
+	0x80, 0x02, 0x7F, 0x00, 0xEF, 0x4D, 0x60, 0x78,
+/*09B0*/0x90, 0x07, 0x26, 0x12, 0x07, 0x35, 0xE0, 0xFF,
+	0x12, 0x07, 0xFC, 0xEF, 0x12, 0x07, 0x31, 0xE0,
+/*09C0*/0xFF, 0x12, 0x08, 0x08, 0xEF, 0xF0, 0x90, 0x07,
+	0x22, 0x12, 0x07, 0x35, 0xE0, 0x54, 0x1F, 0xFF,
+/*09D0*/0x12, 0x08, 0x4D, 0xEF, 0xF0, 0x90, 0x07, 0x24,
+	0x12, 0x07, 0x35, 0xE0, 0x54, 0x1F, 0xFF, 0x12,
+/*09E0*/0x08, 0x59, 0xEF, 0xF0, 0x22, 0x12, 0x07, 0xCC,
+	0xE4, 0xF0, 0x12, 0x07, 0xD8, 0xE4, 0xF0, 0x12,
+/*09F0*/0x08, 0x81, 0xF5, 0x83, 0xE4, 0xF0, 0x12, 0x08,
+	0x35, 0x74, 0x14, 0xF0, 0x12, 0x07, 0xE4, 0xE4,
+/*0A00*/0xF0, 0x12, 0x07, 0xF0, 0xE4, 0xF0, 0x12, 0x08,
+	0x8B, 0xF5, 0x83, 0xE4, 0xF0, 0x12, 0x08, 0x41,
+/*0A10*/0x74, 0x14, 0xF0, 0x12, 0x07, 0xFC, 0xE4, 0xF0,
+	0x12, 0x08, 0x08, 0xE4, 0xF0, 0x12, 0x08, 0x4D,
+/*0A20*/0xE4, 0xF0, 0x12, 0x08, 0x59, 0x74, 0x14, 0xF0,
+	0x22, 0x53, 0xF9, 0xF7, 0x75, 0xFC, 0x10, 0xE4,
+/*0A30*/0xF5, 0xFD, 0x75, 0xFE, 0x30, 0xF5, 0xFF, 0xE5,
+	0xE7, 0x20, 0xE7, 0x03, 0x43, 0xF9, 0x08, 0xE5,
+/*0A40*/0xE6, 0x20, 0xE7, 0x0B, 0x78, 0xFF, 0xE4, 0xF6,
+	0xD8, 0xFD, 0x53, 0xE6, 0xFE, 0x80, 0x09, 0x78,
+/*0A50*/0x08, 0xE4, 0xF6, 0xD8, 0xFD, 0x53, 0xE6, 0xFE,
+	0x75, 0x81, 0x80, 0xE4, 0xF5, 0xA8, 0xD2, 0xA8,
+/*0A60*/0xC2, 0xA9, 0xD2, 0xAF, 0xE5, 0xE2, 0x20, 0xE5,
+	0x05, 0x20, 0xE6, 0x02, 0x80, 0x03, 0x43, 0xE1,
+/*0A70*/0x02, 0xE5, 0xE2, 0x20, 0xE0, 0x0E, 0x90, 0x00,
+	0x00, 0x7F, 0x00, 0x7E, 0x08, 0xE4, 0xF0, 0xA3,
+/*0A80*/0xDF, 0xFC, 0xDE, 0xFA, 0x02, 0x0A, 0xDB, 0x43,
+	0xFA, 0x01, 0xC0, 0xE0, 0xC0, 0xF0, 0xC0, 0x83,
+/*0A90*/0xC0, 0x82, 0xC0, 0xD0, 0x12, 0x1C, 0xE7, 0xD0,
+	0xD0, 0xD0, 0x82, 0xD0, 0x83, 0xD0, 0xF0, 0xD0,
+/*0AA0*/0xE0, 0x53, 0xFA, 0xFE, 0x32, 0x02, 0x1B, 0x55,
+	0xE4, 0x93, 0xA3, 0xF8, 0xE4, 0x93, 0xA3, 0xF6,
+/*0AB0*/0x08, 0xDF, 0xF9, 0x80, 0x29, 0xE4, 0x93, 0xA3,
+	0xF8, 0x54, 0x07, 0x24, 0x0C, 0xC8, 0xC3, 0x33,
+/*0AC0*/0xC4, 0x54, 0x0F, 0x44, 0x20, 0xC8, 0x83, 0x40,
+	0x04, 0xF4, 0x56, 0x80, 0x01, 0x46, 0xF6, 0xDF,
+/*0AD0*/0xE4, 0x80, 0x0B, 0x01, 0x02, 0x04, 0x08, 0x10,
+	0x20, 0x40, 0x80, 0x90, 0x00, 0x3F, 0xE4, 0x7E,
+/*0AE0*/0x01, 0x93, 0x60, 0xC1, 0xA3, 0xFF, 0x54, 0x3F,
+	0x30, 0xE5, 0x09, 0x54, 0x1F, 0xFE, 0xE4, 0x93,
+/*0AF0*/0xA3, 0x60, 0x01, 0x0E, 0xCF, 0x54, 0xC0, 0x25,
+	0xE0, 0x60, 0xAD, 0x40, 0xB8, 0x80, 0xFE, 0x8C,
+/*0B00*/0x64, 0x8D, 0x65, 0x8A, 0x66, 0x8B, 0x67, 0xE4,
+	0xF5, 0x69, 0xEF, 0x4E, 0x70, 0x03, 0x02, 0x1D,
+/*0B10*/0x55, 0xE4, 0xF5, 0x68, 0xE5, 0x67, 0x45, 0x66,
+	0x70, 0x32, 0x12, 0x07, 0x2A, 0x75, 0x83, 0x90,
+/*0B20*/0xE4, 0x12, 0x07, 0x29, 0x75, 0x83, 0xC2, 0xE4,
+	0x12, 0x07, 0x29, 0x75, 0x83, 0xC4, 0xE4, 0x12,
+/*0B30*/0x08, 0x70, 0x70, 0x29, 0x12, 0x07, 0x2A, 0x75,
+	0x83, 0x92, 0xE4, 0x12, 0x07, 0x29, 0x75, 0x83,
+/*0B40*/0xC6, 0xE4, 0x12, 0x07, 0x29, 0x75, 0x83, 0xC8,
+	0xE4, 0xF0, 0x80, 0x11, 0x90, 0x07, 0x26, 0x12,
+/*0B50*/0x07, 0x35, 0xE4, 0x12, 0x08, 0x70, 0x70, 0x05,
+	0x12, 0x07, 0x32, 0xE4, 0xF0, 0x12, 0x1D, 0x55,
+/*0B60*/0x12, 0x1E, 0xBF, 0xE5, 0x67, 0x45, 0x66, 0x70,
+	0x33, 0x12, 0x07, 0x2A, 0x75, 0x83, 0x90, 0xE5,
+/*0B70*/0x41, 0x12, 0x07, 0x29, 0x75, 0x83, 0xC2, 0xE5,
+	0x41, 0x12, 0x07, 0x29, 0x75, 0x83, 0xC4, 0x12,
+/*0B80*/0x08, 0x6E, 0x70, 0x29, 0x12, 0x07, 0x2A, 0x75,
+	0x83, 0x92, 0xE5, 0x40, 0x12, 0x07, 0x29, 0x75,
+/*0B90*/0x83, 0xC6, 0xE5, 0x40, 0x12, 0x07, 0x29, 0x75,
+	0x83, 0xC8, 0x80, 0x0E, 0x90, 0x07, 0x26, 0x12,
+/*0BA0*/0x07, 0x35, 0x12, 0x08, 0x6E, 0x70, 0x06, 0x12,
+	0x07, 0x32, 0xE5, 0x40, 0xF0, 0xAF, 0x69, 0x7E,
+/*0BB0*/0x00, 0xAD, 0x67, 0xAC, 0x66, 0x12, 0x04, 0x44,
+	0x12, 0x07, 0x2A, 0x75, 0x83, 0xCA, 0xE0, 0xD3,
+/*0BC0*/0x94, 0x00, 0x50, 0x0C, 0x05, 0x68, 0xE5, 0x68,
+	0xC3, 0x94, 0x05, 0x50, 0x03, 0x02, 0x0B, 0x14,
+/*0BD0*/0x22, 0x8C, 0x60, 0x8D, 0x61, 0x12, 0x08, 0xDA,
+	0x74, 0x20, 0x40, 0x0D, 0x2F, 0xF5, 0x82, 0x74,
+/*0BE0*/0x03, 0x3E, 0xF5, 0x83, 0xE5, 0x3E, 0xF0, 0x80,
+	0x0B, 0x2F, 0xF5, 0x82, 0x74, 0x03, 0x3E, 0xF5,
+/*0BF0*/0x83, 0xE5, 0x3C, 0xF0, 0xE5, 0x3C, 0xD3, 0x95,
+	0x3E, 0x40, 0x3C, 0xE5, 0x61, 0x45, 0x60, 0x70,
+/*0C00*/0x10, 0xE9, 0x12, 0x09, 0x04, 0xE5, 0x3E, 0x12,
+	0x07, 0x68, 0x40, 0x3B, 0x12, 0x08, 0x95, 0x80,
+/*0C10*/0x18, 0xE5, 0x3E, 0xC3, 0x95, 0x38, 0x40, 0x1D,
+	0x85, 0x3E, 0x38, 0xE5, 0x3E, 0x60, 0x05, 0x85,
+/*0C20*/0x3F, 0x39, 0x80, 0x03, 0x85, 0x39, 0x39, 0x8F,
+	0x3A, 0x12, 0x08, 0x14, 0xE5, 0x3E, 0x12, 0x07,
+/*0C30*/0xC0, 0xE5, 0x3F, 0xF0, 0x22, 0x80, 0x43, 0xE5,
+	0x61, 0x45, 0x60, 0x70, 0x19, 0x12, 0x07, 0x5F,
+/*0C40*/0x40, 0x05, 0x12, 0x08, 0x9E, 0x80, 0x27, 0x12,
+	0x09, 0x0B, 0x12, 0x08, 0x14, 0xE5, 0x42, 0x12,
+/*0C50*/0x07, 0xC0, 0xE5, 0x41, 0xF0, 0x22, 0xE5, 0x3C,
+	0xC3, 0x95, 0x38, 0x40, 0x1D, 0x85, 0x3C, 0x38,
+/*0C60*/0xE5, 0x3C, 0x60, 0x05, 0x85, 0x3D, 0x39, 0x80,
+	0x03, 0x85, 0x39, 0x39, 0x8F, 0x3A, 0x12, 0x08,
+/*0C70*/0x14, 0xE5, 0x3C, 0x12, 0x07, 0xC0, 0xE5, 0x3D,
+	0xF0, 0x22, 0x85, 0x38, 0x38, 0x85, 0x39, 0x39,
+/*0C80*/0x85, 0x3A, 0x3A, 0x12, 0x08, 0x14, 0xE5, 0x38,
+	0x12, 0x07, 0xC0, 0xE5, 0x39, 0xF0, 0x22, 0x7F,
+/*0C90*/0x06, 0x12, 0x17, 0x31, 0x12, 0x1D, 0x23, 0x12,
+	0x0E, 0x04, 0x12, 0x0E, 0x33, 0xE0, 0x44, 0x0A,
+/*0CA0*/0xF0, 0x74, 0x8E, 0xFE, 0x12, 0x0E, 0x04, 0x12,
+	0x0E, 0x0B, 0xEF, 0xF0, 0xE5, 0x28, 0x30, 0xE5,
+/*0CB0*/0x03, 0xD3, 0x80, 0x01, 0xC3, 0x40, 0x05, 0x75,
+	0x14, 0x20, 0x80, 0x03, 0x75, 0x14, 0x08, 0x12,
+/*0CC0*/0x0E, 0x04, 0x75, 0x83, 0x8A, 0xE5, 0x14, 0xF0,
+	0xB4, 0xFF, 0x05, 0x75, 0x12, 0x80, 0x80, 0x06,
+/*0CD0*/0xE5, 0x14, 0xC3, 0x13, 0xF5, 0x12, 0xE4, 0xF5,
+	0x16, 0xF5, 0x7F, 0x12, 0x19, 0x36, 0x12, 0x13,
+/*0CE0*/0xA3, 0xE5, 0x0A, 0xC3, 0x94, 0x01, 0x50, 0x09,
+	0x05, 0x16, 0xE5, 0x16, 0xC3, 0x94, 0x14, 0x40,
+/*0CF0*/0xEA, 0xE5, 0xE4, 0x20, 0xE7, 0x28, 0x12, 0x0E,
+	0x04, 0x75, 0x83, 0xD2, 0xE0, 0x54, 0x08, 0xD3,
+/*0D00*/0x94, 0x00, 0x40, 0x04, 0x7F, 0x01, 0x80, 0x02,
+	0x7F, 0x00, 0xE5, 0x0A, 0xC3, 0x94, 0x01, 0x40,
+/*0D10*/0x04, 0x7E, 0x01, 0x80, 0x02, 0x7E, 0x00, 0xEF,
+	0x5E, 0x60, 0x03, 0x12, 0x1D, 0xD7, 0xE5, 0x7F,
+/*0D20*/0xC3, 0x94, 0x11, 0x40, 0x14, 0x12, 0x0E, 0x04,
+	0x75, 0x83, 0xD2, 0xE0, 0x44, 0x80, 0xF0, 0xE5,
+/*0D30*/0xE4, 0x20, 0xE7, 0x0F, 0x12, 0x1D, 0xD7, 0x80,
+	0x0A, 0x12, 0x0E, 0x04, 0x75, 0x83, 0xD2, 0xE0,
+/*0D40*/0x54, 0x7F, 0xF0, 0x12, 0x1D, 0x23, 0x22, 0x74,
+	0x8A, 0x85, 0x08, 0x82, 0xF5, 0x83, 0xE5, 0x17,
+/*0D50*/0xF0, 0x12, 0x0E, 0x3A, 0xE4, 0xF0, 0x90, 0x07,
+	0x02, 0xE0, 0x12, 0x0E, 0x17, 0x75, 0x83, 0x90,
+/*0D60*/0xEF, 0xF0, 0x74, 0x92, 0xFE, 0xE5, 0x08, 0x44,
+	0x07, 0xFF, 0xF5, 0x82, 0x8E, 0x83, 0xE0, 0x54,
+/*0D70*/0xC0, 0xFD, 0x90, 0x07, 0x03, 0xE0, 0x54, 0x3F,
+	0x4D, 0x8F, 0x82, 0x8E, 0x83, 0xF0, 0x90, 0x07,
+/*0D80*/0x04, 0xE0, 0x12, 0x0E, 0x17, 0x75, 0x83, 0x82,
+	0xEF, 0xF0, 0x90, 0x07, 0x05, 0xE0, 0xFF, 0xED,
+/*0D90*/0x44, 0x07, 0xF5, 0x82, 0x75, 0x83, 0xB4, 0xEF,
+	0x12, 0x0E, 0x03, 0x75, 0x83, 0x80, 0xE0, 0x54,
+/*0DA0*/0xBF, 0xF0, 0x30, 0x37, 0x0A, 0x12, 0x0E, 0x91,
+	0x75, 0x83, 0x94, 0xE0, 0x44, 0x80, 0xF0, 0x30,
+/*0DB0*/0x38, 0x0A, 0x12, 0x0E, 0x91, 0x75, 0x83, 0x92,
+	0xE0, 0x44, 0x80, 0xF0, 0xE5, 0x28, 0x30, 0xE4,
+/*0DC0*/0x1A, 0x20, 0x39, 0x0A, 0x12, 0x0E, 0x04, 0x75,
+	0x83, 0x88, 0xE0, 0x54, 0x7F, 0xF0, 0x20, 0x3A,
+/*0DD0*/0x0A, 0x12, 0x0E, 0x04, 0x75, 0x83, 0x88, 0xE0,
+	0x54, 0xBF, 0xF0, 0x74, 0x8C, 0xFE, 0x12, 0x0E,
+/*0DE0*/0x04, 0x8E, 0x83, 0xE0, 0x54, 0x0F, 0x12, 0x0E,
+	0x03, 0x75, 0x83, 0x86, 0xE0, 0x54, 0xBF, 0xF0,
+/*0DF0*/0xE5, 0x08, 0x44, 0x06, 0x12, 0x0D, 0xFD, 0x75,
+	0x83, 0x8A, 0xE4, 0xF0, 0x22, 0xF5, 0x82, 0x75,
+/*0E00*/0x83, 0x82, 0xE4, 0xF0, 0xE5, 0x08, 0x44, 0x07,
+	0xF5, 0x82, 0x22, 0x8E, 0x83, 0xE0, 0xF5, 0x10,
+/*0E10*/0x54, 0xFE, 0xF0, 0xE5, 0x10, 0x44, 0x01, 0xFF,
+	0xE5, 0x08, 0xFD, 0xED, 0x44, 0x07, 0xF5, 0x82,
+/*0E20*/0x22, 0xE5, 0x15, 0xC4, 0x54, 0x07, 0xFF, 0xE5,
+	0x08, 0xFD, 0xED, 0x44, 0x08, 0xF5, 0x82, 0x75,
+/*0E30*/0x83, 0x82, 0x22, 0x75, 0x83, 0x80, 0xE0, 0x44,
+	0x40, 0xF0, 0xE5, 0x08, 0x44, 0x08, 0xF5, 0x82,
+/*0E40*/0x75, 0x83, 0x8A, 0x22, 0xE5, 0x16, 0x25, 0xE0,
+	0x25, 0xE0, 0x24, 0xAF, 0xF5, 0x82, 0xE4, 0x34,
+/*0E50*/0x1A, 0xF5, 0x83, 0xE4, 0x93, 0xF5, 0x0D, 0x22,
+	0x43, 0xE1, 0x10, 0x43, 0xE1, 0x80, 0x53, 0xE1,
+/*0E60*/0xFD, 0x85, 0xE1, 0x10, 0x22, 0xE5, 0x16, 0x25,
+	0xE0, 0x25, 0xE0, 0x24, 0xB2, 0xF5, 0x82, 0xE4,
+/*0E70*/0x34, 0x1A, 0xF5, 0x83, 0xE4, 0x93, 0x22, 0x85,
+	0x55, 0x82, 0x85, 0x54, 0x83, 0xE5, 0x15, 0xF0,
+/*0E80*/0x22, 0xE5, 0xE2, 0x54, 0x20, 0xD3, 0x94, 0x00,
+	0x22, 0xE5, 0xE2, 0x54, 0x40, 0xD3, 0x94, 0x00,
+/*0E90*/0x22, 0xE5, 0x08, 0x44, 0x06, 0xF5, 0x82, 0x22,
+	0xFD, 0xE5, 0x08, 0xFB, 0xEB, 0x44, 0x07, 0xF5,
+/*0EA0*/0x82, 0x22, 0x53, 0xF9, 0xF7, 0x75, 0xFE, 0x30,
+	0x22, 0xEF, 0x4E, 0x70, 0x26, 0x12, 0x07, 0xCC,
+/*0EB0*/0xE0, 0xFD, 0x90, 0x07, 0x26, 0x12, 0x07, 0x7B,
+	0x12, 0x07, 0xD8, 0xE0, 0xFD, 0x90, 0x07, 0x28,
+/*0EC0*/0x12, 0x07, 0x7B, 0x12, 0x08, 0x81, 0x12, 0x07,
+	0x72, 0x12, 0x08, 0x35, 0xE0, 0x90, 0x07, 0x24,
+/*0ED0*/0x12, 0x07, 0x78, 0xEF, 0x64, 0x04, 0x4E, 0x70,
+	0x29, 0x12, 0x07, 0xE4, 0xE0, 0xFD, 0x90, 0x07,
+/*0EE0*/0x26, 0x12, 0x07, 0x7B, 0x12, 0x07, 0xF0, 0xE0,
+	0xFD, 0x90, 0x07, 0x28, 0x12, 0x07, 0x7B, 0x12,
+/*0EF0*/0x08, 0x8B, 0x12, 0x07, 0x72, 0x12, 0x08, 0x41,
+	0xE0, 0x54, 0x1F, 0xFD, 0x90, 0x07, 0x24, 0x12,
+/*0F00*/0x07, 0x7B, 0xEF, 0x64, 0x01, 0x4E, 0x70, 0x04,
+	0x7D, 0x01, 0x80, 0x02, 0x7D, 0x00, 0xEF, 0x64,
+/*0F10*/0x02, 0x4E, 0x70, 0x04, 0x7F, 0x01, 0x80, 0x02,
+	0x7F, 0x00, 0xEF, 0x4D, 0x60, 0x35, 0x12, 0x07,
+/*0F20*/0xFC, 0xE0, 0xFF, 0x90, 0x07, 0x26, 0x12, 0x07,
+	0x89, 0xEF, 0xF0, 0x12, 0x08, 0x08, 0xE0, 0xFF,
+/*0F30*/0x90, 0x07, 0x28, 0x12, 0x07, 0x89, 0xEF, 0xF0,
+	0x12, 0x08, 0x4D, 0xE0, 0x54, 0x1F, 0xFF, 0x12,
+/*0F40*/0x07, 0x86, 0xEF, 0xF0, 0x12, 0x08, 0x59, 0xE0,
+	0x54, 0x1F, 0xFF, 0x90, 0x07, 0x24, 0x12, 0x07,
+/*0F50*/0x89, 0xEF, 0xF0, 0x22, 0xE4, 0xF5, 0x53, 0x12,
+	0x0E, 0x81, 0x40, 0x04, 0x7F, 0x01, 0x80, 0x02,
+/*0F60*/0x7F, 0x00, 0x12, 0x0E, 0x89, 0x40, 0x04, 0x7E,
+	0x01, 0x80, 0x02, 0x7E, 0x00, 0xEE, 0x4F, 0x70,
+/*0F70*/0x03, 0x02, 0x0F, 0xF6, 0x85, 0xE1, 0x10, 0x43,
+	0xE1, 0x02, 0x53, 0xE1, 0x0F, 0x85, 0xE1, 0x10,
+/*0F80*/0xE4, 0xF5, 0x51, 0xE5, 0xE3, 0x54, 0x3F, 0xF5,
+	0x52, 0x12, 0x0E, 0x89, 0x40, 0x1D, 0xAD, 0x52,
+/*0F90*/0xAF, 0x51, 0x12, 0x11, 0x18, 0xEF, 0x60, 0x08,
+	0x85, 0xE1, 0x10, 0x43, 0xE1, 0x40, 0x80, 0x0B,
+/*0FA0*/0x53, 0xE1, 0xBF, 0x12, 0x0E, 0x58, 0x12, 0x00,
+	0x06, 0x80, 0xFB, 0xE5, 0xE3, 0x54, 0x3F, 0xF5,
+/*0FB0*/0x51, 0xE5, 0xE4, 0x54, 0x3F, 0xF5, 0x52, 0x12,
+	0x0E, 0x81, 0x40, 0x1D, 0xAD, 0x52, 0xAF, 0x51,
+/*0FC0*/0x12, 0x11, 0x18, 0xEF, 0x60, 0x08, 0x85, 0xE1,
+	0x10, 0x43, 0xE1, 0x20, 0x80, 0x0B, 0x53, 0xE1,
+/*0FD0*/0xDF, 0x12, 0x0E, 0x58, 0x12, 0x00, 0x06, 0x80,
+	0xFB, 0x12, 0x0E, 0x81, 0x40, 0x04, 0x7F, 0x01,
+/*0FE0*/0x80, 0x02, 0x7F, 0x00, 0x12, 0x0E, 0x89, 0x40,
+	0x04, 0x7E, 0x01, 0x80, 0x02, 0x7E, 0x00, 0xEE,
+/*0FF0*/0x4F, 0x60, 0x03, 0x12, 0x0E, 0x5B, 0x22, 0x12,
+	0x0E, 0x21, 0xEF, 0xF0, 0x12, 0x10, 0x91, 0x22,
+/*1000*/0x02, 0x11, 0x00, 0x02, 0x10, 0x40, 0x02, 0x10,
+	0x90, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/*1010*/0x01, 0x20, 0x01, 0x20, 0xE4, 0xF5, 0x57, 0x12,
+	0x16, 0xBD, 0x12, 0x16, 0x44, 0xE4, 0x12, 0x10,
+/*1020*/0x56, 0x12, 0x14, 0xB7, 0x90, 0x07, 0x26, 0x12,
+	0x07, 0x35, 0xE4, 0x12, 0x07, 0x31, 0xE4, 0xF0,
+/*1030*/0x12, 0x10, 0x56, 0x12, 0x14, 0xB7, 0x90, 0x07,
+	0x26, 0x12, 0x07, 0x35, 0xE5, 0x41, 0x12, 0x07,
+/*1040*/0x31, 0xE5, 0x40, 0xF0, 0xAF, 0x57, 0x7E, 0x00,
+	0xAD, 0x56, 0x7C, 0x00, 0x12, 0x04, 0x44, 0xAF,
+/*1050*/0x56, 0x7E, 0x00, 0x02, 0x11, 0xEE, 0xFF, 0x90,
+	0x07, 0x20, 0xA3, 0xE0, 0xFD, 0xE4, 0xF5, 0x56,
+/*1060*/0xF5, 0x40, 0xFE, 0xFC, 0xAB, 0x56, 0xFA, 0x12,
+	0x11, 0x51, 0x7F, 0x0F, 0x7D, 0x18, 0xE4, 0xF5,
+/*1070*/0x56, 0xF5, 0x40, 0xFE, 0xFC, 0xAB, 0x56, 0xFA,
+	0x12, 0x15, 0x41, 0xAF, 0x56, 0x7E, 0x00, 0x12,
+/*1080*/0x1A, 0xFF, 0xE4, 0xFF, 0xF5, 0x56, 0x7D, 0x1F,
+	0xF5, 0x40, 0xFE, 0xFC, 0xAB, 0x56, 0xFA, 0x22,
+/*1090*/0x22, 0xE4, 0xF5, 0x55, 0xE5, 0x08, 0xFD, 0x74,
+	0xA0, 0xF5, 0x56, 0xED, 0x44, 0x07, 0xF5, 0x57,
+/*10A0*/0xE5, 0x28, 0x30, 0xE5, 0x03, 0xD3, 0x80, 0x01,
+	0xC3, 0x40, 0x05, 0x7F, 0x28, 0xEF, 0x80, 0x04,
+/*10B0*/0x7F, 0x14, 0xEF, 0xC3, 0x13, 0xF5, 0x54, 0xE4,
+	0xF9, 0x12, 0x0E, 0x18, 0x75, 0x83, 0x8E, 0xE0,
+/*10C0*/0xF5, 0x10, 0xCE, 0xEF, 0xCE, 0xEE, 0xD3, 0x94,
+	0x00, 0x40, 0x26, 0xE5, 0x10, 0x54, 0xFE, 0x12,
+/*10D0*/0x0E, 0x98, 0x75, 0x83, 0x8E, 0xED, 0xF0, 0xE5,
+	0x10, 0x44, 0x01, 0xFD, 0xEB, 0x44, 0x07, 0xF5,
+/*10E0*/0x82, 0xED, 0xF0, 0x85, 0x57, 0x82, 0x85, 0x56,
+	0x83, 0xE0, 0x30, 0xE3, 0x01, 0x09, 0x1E, 0x80,
+/*10F0*/0xD4, 0xC2, 0x34, 0xE9, 0xC3, 0x95, 0x54, 0x40,
+	0x02, 0xD2, 0x34, 0x22, 0x02, 0x00, 0x06, 0x22,
+/*1100*/0x30, 0x30, 0x11, 0x90, 0x10, 0x00, 0xE4, 0x93,
+	0xF5, 0x10, 0x90, 0x10, 0x10, 0xE4, 0x93, 0xF5,
+/*1110*/0x10, 0x12, 0x10, 0x90, 0x12, 0x11, 0x50, 0x22,
+	0xE4, 0xFC, 0xC3, 0xED, 0x9F, 0xFA, 0xEF, 0xF5,
+/*1120*/0x83, 0x75, 0x82, 0x00, 0x79, 0xFF, 0xE4, 0x93,
+	0xCC, 0x6C, 0xCC, 0xA3, 0xD9, 0xF8, 0xDA, 0xF6,
+/*1130*/0xE5, 0xE2, 0x30, 0xE4, 0x02, 0x8C, 0xE5, 0xED,
+	0x24, 0xFF, 0xFF, 0xEF, 0x75, 0x82, 0xFF, 0xF5,
+/*1140*/0x83, 0xE4, 0x93, 0x6C, 0x70, 0x03, 0x7F, 0x01,
+	0x22, 0x7F, 0x00, 0x22, 0x22, 0x11, 0x00, 0x00,
+/*1150*/0x22, 0x8E, 0x58, 0x8F, 0x59, 0x8C, 0x5A, 0x8D,
+	0x5B, 0x8A, 0x5C, 0x8B, 0x5D, 0x75, 0x5E, 0x01,
+/*1160*/0xE4, 0xF5, 0x5F, 0xF5, 0x60, 0xF5, 0x62, 0x12,
+	0x07, 0x2A, 0x75, 0x83, 0xD0, 0xE0, 0xFF, 0xC4,
+/*1170*/0x54, 0x0F, 0xF5, 0x61, 0x12, 0x1E, 0xA5, 0x85,
+	0x59, 0x5E, 0xD3, 0xE5, 0x5E, 0x95, 0x5B, 0xE5,
+/*1180*/0x5A, 0x12, 0x07, 0x6B, 0x50, 0x4B, 0x12, 0x07,
+	0x03, 0x75, 0x83, 0xBC, 0xE0, 0x45, 0x5E, 0x12,
+/*1190*/0x07, 0x29, 0x75, 0x83, 0xBE, 0xE0, 0x45, 0x5E,
+	0x12, 0x07, 0x29, 0x75, 0x83, 0xC0, 0xE0, 0x45,
+/*11A0*/0x5E, 0xF0, 0xAF, 0x5F, 0xE5, 0x60, 0x12, 0x08,
+	0x78, 0x12, 0x0A, 0xFF, 0xAF, 0x62, 0x7E, 0x00,
+/*11B0*/0xAD, 0x5D, 0xAC, 0x5C, 0x12, 0x04, 0x44, 0xE5,
+	0x61, 0xAF, 0x5E, 0x7E, 0x00, 0xB4, 0x03, 0x05,
+/*11C0*/0x12, 0x1E, 0x21, 0x80, 0x07, 0xAD, 0x5D, 0xAC,
+	0x5C, 0x12, 0x13, 0x17, 0x05, 0x5E, 0x02, 0x11,
+/*11D0*/0x7A, 0x12, 0x07, 0x03, 0x75, 0x83, 0xBC, 0xE0,
+	0x45, 0x40, 0x12, 0x07, 0x29, 0x75, 0x83, 0xBE,
+/*11E0*/0xE0, 0x45, 0x40, 0x12, 0x07, 0x29, 0x75, 0x83,
+	0xC0, 0xE0, 0x45, 0x40, 0xF0, 0x22, 0x8E, 0x58,
+/*11F0*/0x8F, 0x59, 0x75, 0x5A, 0x01, 0x79, 0x01, 0x75,
+	0x5B, 0x01, 0xE4, 0xFB, 0x12, 0x07, 0x2A, 0x75,
+/*1200*/0x83, 0xAE, 0xE0, 0x54, 0x1A, 0xFF, 0x12, 0x08,
+	0x65, 0xE0, 0xC4, 0x13, 0x54, 0x07, 0xFE, 0xEF,
+/*1210*/0x70, 0x0C, 0xEE, 0x65, 0x35, 0x70, 0x07, 0x90,
+	0x07, 0x2F, 0xE0, 0xB4, 0x01, 0x0D, 0xAF, 0x35,
+/*1220*/0x7E, 0x00, 0x12, 0x0E, 0xA9, 0xCF, 0xEB, 0xCF,
+	0x02, 0x1E, 0x60, 0xE5, 0x59, 0x64, 0x02, 0x45,
+/*1230*/0x58, 0x70, 0x04, 0x7F, 0x01, 0x80, 0x02, 0x7F,
+	0x00, 0xE5, 0x59, 0x45, 0x58, 0x70, 0x04, 0x7E,
+/*1240*/0x01, 0x80, 0x02, 0x7E, 0x00, 0xEE, 0x4F, 0x60,
+	0x23, 0x85, 0x41, 0x49, 0x85, 0x40, 0x4B, 0xE5,
+/*1250*/0x59, 0x45, 0x58, 0x70, 0x2C, 0xAF, 0x5A, 0xFE,
+	0xCD, 0xE9, 0xCD, 0xFC, 0xAB, 0x59, 0xAA, 0x58,
+/*1260*/0x12, 0x0A, 0xFF, 0xAF, 0x5B, 0x7E, 0x00, 0x12,
+	0x1E, 0x60, 0x80, 0x15, 0xAF, 0x5B, 0x7E, 0x00,
+/*1270*/0x12, 0x1E, 0x60, 0x90, 0x07, 0x26, 0x12, 0x07,
+	0x35, 0xE5, 0x49, 0x12, 0x07, 0x31, 0xE5, 0x4B,
+/*1280*/0xF0, 0xE4, 0xFD, 0xAF, 0x35, 0xFE, 0xFC, 0x12,
+	0x09, 0x15, 0x22, 0x8C, 0x64, 0x8D, 0x65, 0x12,
+/*1290*/0x08, 0xDA, 0x40, 0x3C, 0xE5, 0x65, 0x45, 0x64,
+	0x70, 0x10, 0x12, 0x09, 0x04, 0xC3, 0xE5, 0x3E,
+/*12A0*/0x12, 0x07, 0x69, 0x40, 0x3B, 0x12, 0x08, 0x95,
+	0x80, 0x18, 0xE5, 0x3E, 0xC3, 0x95, 0x38, 0x40,
+/*12B0*/0x1D, 0x85, 0x3E, 0x38, 0xE5, 0x3E, 0x60, 0x05,
+	0x85, 0x3F, 0x39, 0x80, 0x03, 0x85, 0x39, 0x39,
+/*12C0*/0x8F, 0x3A, 0x12, 0x07, 0xA8, 0xE5, 0x3E, 0x12,
+	0x07, 0x53, 0xE5, 0x3F, 0xF0, 0x22, 0x80, 0x3B,
+/*12D0*/0xE5, 0x65, 0x45, 0x64, 0x70, 0x11, 0x12, 0x07,
+	0x5F, 0x40, 0x05, 0x12, 0x08, 0x9E, 0x80, 0x1F,
+/*12E0*/0x12, 0x07, 0x3E, 0xE5, 0x41, 0xF0, 0x22, 0xE5,
+	0x3C, 0xC3, 0x95, 0x38, 0x40, 0x1D, 0x85, 0x3C,
+/*12F0*/0x38, 0xE5, 0x3C, 0x60, 0x05, 0x85, 0x3D, 0x39,
+	0x80, 0x03, 0x85, 0x39, 0x39, 0x8F, 0x3A, 0x12,
+/*1300*/0x07, 0xA8, 0xE5, 0x3C, 0x12, 0x07, 0x53, 0xE5,
+	0x3D, 0xF0, 0x22, 0x12, 0x07, 0x9F, 0xE5, 0x38,
+/*1310*/0x12, 0x07, 0x53, 0xE5, 0x39, 0xF0, 0x22, 0x8C,
+	0x63, 0x8D, 0x64, 0x12, 0x08, 0xDA, 0x40, 0x3C,
+/*1320*/0xE5, 0x64, 0x45, 0x63, 0x70, 0x10, 0x12, 0x09,
+	0x04, 0xC3, 0xE5, 0x3E, 0x12, 0x07, 0x69, 0x40,
+/*1330*/0x3B, 0x12, 0x08, 0x95, 0x80, 0x18, 0xE5, 0x3E,
+	0xC3, 0x95, 0x38, 0x40, 0x1D, 0x85, 0x3E, 0x38,
+/*1340*/0xE5, 0x3E, 0x60, 0x05, 0x85, 0x3F, 0x39, 0x80,
+	0x03, 0x85, 0x39, 0x39, 0x8F, 0x3A, 0x12, 0x07,
+/*1350*/0xA8, 0xE5, 0x3E, 0x12, 0x07, 0x53, 0xE5, 0x3F,
+	0xF0, 0x22, 0x80, 0x3B, 0xE5, 0x64, 0x45, 0x63,
+/*1360*/0x70, 0x11, 0x12, 0x07, 0x5F, 0x40, 0x05, 0x12,
+	0x08, 0x9E, 0x80, 0x1F, 0x12, 0x07, 0x3E, 0xE5,
+/*1370*/0x41, 0xF0, 0x22, 0xE5, 0x3C, 0xC3, 0x95, 0x38,
+	0x40, 0x1D, 0x85, 0x3C, 0x38, 0xE5, 0x3C, 0x60,
+/*1380*/0x05, 0x85, 0x3D, 0x39, 0x80, 0x03, 0x85, 0x39,
+	0x39, 0x8F, 0x3A, 0x12, 0x07, 0xA8, 0xE5, 0x3C,
+/*1390*/0x12, 0x07, 0x53, 0xE5, 0x3D, 0xF0, 0x22, 0x12,
+	0x07, 0x9F, 0xE5, 0x38, 0x12, 0x07, 0x53, 0xE5,
+/*13A0*/0x39, 0xF0, 0x22, 0xE5, 0x0D, 0xFE, 0xE5, 0x08,
+	0x8E, 0x54, 0x44, 0x05, 0xF5, 0x55, 0x75, 0x15,
+/*13B0*/0x0F, 0xF5, 0x82, 0x12, 0x0E, 0x7A, 0x12, 0x17,
+	0xA3, 0x20, 0x31, 0x05, 0x75, 0x15, 0x03, 0x80,
+/*13C0*/0x03, 0x75, 0x15, 0x0B, 0xE5, 0x0A, 0xC3, 0x94,
+	0x01, 0x50, 0x38, 0x12, 0x14, 0x20, 0x20, 0x31,
+/*13D0*/0x06, 0x05, 0x15, 0x05, 0x15, 0x80, 0x04, 0x15,
+	0x15, 0x15, 0x15, 0xE5, 0x0A, 0xC3, 0x94, 0x01,
+/*13E0*/0x50, 0x21, 0x12, 0x14, 0x20, 0x20, 0x31, 0x04,
+	0x05, 0x15, 0x80, 0x02, 0x15, 0x15, 0xE5, 0x0A,
+/*13F0*/0xC3, 0x94, 0x01, 0x50, 0x0E, 0x12, 0x0E, 0x77,
+	0x12, 0x17, 0xA3, 0x20, 0x31, 0x05, 0x05, 0x15,
+/*1400*/0x12, 0x0E, 0x77, 0xE5, 0x15, 0xB4, 0x08, 0x04,
+	0x7F, 0x01, 0x80, 0x02, 0x7F, 0x00, 0xE5, 0x15,
+/*1410*/0xB4, 0x07, 0x04, 0x7E, 0x01, 0x80, 0x02, 0x7E,
+	0x00, 0xEE, 0x4F, 0x60, 0x02, 0x05, 0x7F, 0x22,
+/*1420*/0x85, 0x55, 0x82, 0x85, 0x54, 0x83, 0xE5, 0x15,
+	0xF0, 0x12, 0x17, 0xA3, 0x22, 0x12, 0x07, 0x2A,
+/*1430*/0x75, 0x83, 0xAE, 0x74, 0xFF, 0x12, 0x07, 0x29,
+	0xE0, 0x54, 0x1A, 0xF5, 0x34, 0xE0, 0xC4, 0x13,
+/*1440*/0x54, 0x07, 0xF5, 0x35, 0x24, 0xFE, 0x60, 0x24,
+	0x24, 0xFE, 0x60, 0x3C, 0x24, 0x04, 0x70, 0x63,
+/*1450*/0x75, 0x31, 0x2D, 0xE5, 0x08, 0xFD, 0x74, 0xB6,
+	0x12, 0x07, 0x92, 0x74, 0xBC, 0x90, 0x07, 0x22,
+/*1460*/0x12, 0x07, 0x95, 0x74, 0x90, 0x12, 0x07, 0xB3,
+	0x74, 0x92, 0x80, 0x3C, 0x75, 0x31, 0x3A, 0xE5,
+/*1470*/0x08, 0xFD, 0x74, 0xBA, 0x12, 0x07, 0x92, 0x74,
+	0xC0, 0x90, 0x07, 0x22, 0x12, 0x07, 0xB6, 0x74,
+/*1480*/0xC4, 0x12, 0x07, 0xB3, 0x74, 0xC8, 0x80, 0x20,
+	0x75, 0x31, 0x35, 0xE5, 0x08, 0xFD, 0x74, 0xB8,
+/*1490*/0x12, 0x07, 0x92, 0x74, 0xBE, 0xFF, 0xED, 0x44,
+	0x07, 0x90, 0x07, 0x22, 0xCF, 0xF0, 0xA3, 0xEF,
+/*14A0*/0xF0, 0x74, 0xC2, 0x12, 0x07, 0xB3, 0x74, 0xC6,
+	0xFF, 0xED, 0x44, 0x07, 0xA3, 0xCF, 0xF0, 0xA3,
+/*14B0*/0xEF, 0xF0, 0x22, 0x75, 0x34, 0x01, 0x22, 0x8E,
+	0x58, 0x8F, 0x59, 0x8C, 0x5A, 0x8D, 0x5B, 0x8A,
+/*14C0*/0x5C, 0x8B, 0x5D, 0x75, 0x5E, 0x01, 0xE4, 0xF5,
+	0x5F, 0x12, 0x1E, 0xA5, 0x85, 0x59, 0x5E, 0xD3,
+/*14D0*/0xE5, 0x5E, 0x95, 0x5B, 0xE5, 0x5A, 0x12, 0x07,
+	0x6B, 0x50, 0x57, 0xE5, 0x5D, 0x45, 0x5C, 0x70,
+/*14E0*/0x30, 0x12, 0x07, 0x2A, 0x75, 0x83, 0x92, 0xE5,
+	0x5E, 0x12, 0x07, 0x29, 0x75, 0x83, 0xC6, 0xE5,
+/*14F0*/0x5E, 0x12, 0x07, 0x29, 0x75, 0x83, 0xC8, 0xE5,
+	0x5E, 0x12, 0x07, 0x29, 0x75, 0x83, 0x90, 0xE5,
+/*1500*/0x5E, 0x12, 0x07, 0x29, 0x75, 0x83, 0xC2, 0xE5,
+	0x5E, 0x12, 0x07, 0x29, 0x75, 0x83, 0xC4, 0x80,
+/*1510*/0x03, 0x12, 0x07, 0x32, 0xE5, 0x5E, 0xF0, 0xAF,
+	0x5F, 0x7E, 0x00, 0xAD, 0x5D, 0xAC, 0x5C, 0x12,
+/*1520*/0x04, 0x44, 0xAF, 0x5E, 0x7E, 0x00, 0xAD, 0x5D,
+	0xAC, 0x5C, 0x12, 0x0B, 0xD1, 0x05, 0x5E, 0x02,
+/*1530*/0x14, 0xCF, 0xAB, 0x5D, 0xAA, 0x5C, 0xAD, 0x5B,
+	0xAC, 0x5A, 0xAF, 0x59, 0xAE, 0x58, 0x02, 0x1B,
+/*1540*/0xFB, 0x8C, 0x5C, 0x8D, 0x5D, 0x8A, 0x5E, 0x8B,
+	0x5F, 0x75, 0x60, 0x01, 0xE4, 0xF5, 0x61, 0xF5,
+/*1550*/0x62, 0xF5, 0x63, 0x12, 0x1E, 0xA5, 0x8F, 0x60,
+	0xD3, 0xE5, 0x60, 0x95, 0x5D, 0xE5, 0x5C, 0x12,
+/*1560*/0x07, 0x6B, 0x50, 0x61, 0xE5, 0x5F, 0x45, 0x5E,
+	0x70, 0x27, 0x12, 0x07, 0x2A, 0x75, 0x83, 0xB6,
+/*1570*/0xE5, 0x60, 0x12, 0x07, 0x29, 0x75, 0x83, 0xB8,
+	0xE5, 0x60, 0x12, 0x07, 0x29, 0x75, 0x83, 0xBA,
+/*1580*/0xE5, 0x60, 0xF0, 0xAF, 0x61, 0x7E, 0x00, 0xE5,
+	0x62, 0x12, 0x08, 0x7A, 0x12, 0x0A, 0xFF, 0x80,
+/*1590*/0x19, 0x90, 0x07, 0x24, 0x12, 0x07, 0x35, 0xE5,
+	0x60, 0x12, 0x07, 0x29, 0x75, 0x83, 0x8E, 0xE4,
+/*15A0*/0x12, 0x07, 0x29, 0x74, 0x01, 0x12, 0x07, 0x29,
+	0xE4, 0xF0, 0xAF, 0x63, 0x7E, 0x00, 0xAD, 0x5F,
+/*15B0*/0xAC, 0x5E, 0x12, 0x04, 0x44, 0xAF, 0x60, 0x7E,
+	0x00, 0xAD, 0x5F, 0xAC, 0x5E, 0x12, 0x12, 0x8B,
+/*15C0*/0x05, 0x60, 0x02, 0x15, 0x58, 0x22, 0x90, 0x11,
+	0x4D, 0xE4, 0x93, 0x90, 0x07, 0x2E, 0xF0, 0x12,
+/*15D0*/0x08, 0x1F, 0x75, 0x83, 0xAE, 0xE0, 0x54, 0x1A,
+	0xF5, 0x34, 0x70, 0x67, 0xEF, 0x44, 0x07, 0xF5,
+/*15E0*/0x82, 0x75, 0x83, 0xCE, 0xE0, 0xFF, 0x13, 0x13,
+	0x13, 0x54, 0x07, 0xF5, 0x36, 0x54, 0x0F, 0xD3,
+/*15F0*/0x94, 0x00, 0x40, 0x06, 0x12, 0x14, 0x2D, 0x12,
+	0x1B, 0xA9, 0xE5, 0x36, 0x54, 0x0F, 0x24, 0xFE,
+/*1600*/0x60, 0x0C, 0x14, 0x60, 0x0C, 0x14, 0x60, 0x19,
+	0x24, 0x03, 0x70, 0x37, 0x80, 0x10, 0x02, 0x1E,
+/*1610*/0x91, 0x12, 0x1E, 0x91, 0x12, 0x07, 0x2A, 0x75,
+	0x83, 0xCE, 0xE0, 0x54, 0xEF, 0xF0, 0x02, 0x1D,
+/*1620*/0xAE, 0x12, 0x10, 0x14, 0xE4, 0xF5, 0x55, 0x12,
+	0x1D, 0x85, 0x05, 0x55, 0xE5, 0x55, 0xC3, 0x94,
+/*1630*/0x05, 0x40, 0xF4, 0x12, 0x07, 0x2A, 0x75, 0x83,
+	0xCE, 0xE0, 0x54, 0xC7, 0x12, 0x07, 0x29, 0xE0,
+/*1640*/0x44, 0x08, 0xF0, 0x22, 0xE4, 0xF5, 0x58, 0xF5,
+	0x59, 0xAF, 0x08, 0xEF, 0x44, 0x07, 0xF5, 0x82,
+/*1650*/0x75, 0x83, 0xD0, 0xE0, 0xFD, 0xC4, 0x54, 0x0F,
+	0xF5, 0x5A, 0xEF, 0x44, 0x07, 0xF5, 0x82, 0x75,
+/*1660*/0x83, 0x80, 0x74, 0x01, 0xF0, 0x12, 0x08, 0x21,
+	0x75, 0x83, 0x82, 0xE5, 0x45, 0xF0, 0xEF, 0x44,
+/*1670*/0x07, 0xF5, 0x82, 0x75, 0x83, 0x8A, 0x74, 0xFF,
+	0xF0, 0x12, 0x1A, 0x4D, 0x12, 0x07, 0x2A, 0x75,
+/*1680*/0x83, 0xBC, 0xE0, 0x54, 0xEF, 0x12, 0x07, 0x29,
+	0x75, 0x83, 0xBE, 0xE0, 0x54, 0xEF, 0x12, 0x07,
+/*1690*/0x29, 0x75, 0x83, 0xC0, 0xE0, 0x54, 0xEF, 0x12,
+	0x07, 0x29, 0x75, 0x83, 0xBC, 0xE0, 0x44, 0x10,
+/*16A0*/0x12, 0x07, 0x29, 0x75, 0x83, 0xBE, 0xE0, 0x44,
+	0x10, 0x12, 0x07, 0x29, 0x75, 0x83, 0xC0, 0xE0,
+/*16B0*/0x44, 0x10, 0xF0, 0xAF, 0x58, 0xE5, 0x59, 0x12,
+	0x08, 0x78, 0x02, 0x0A, 0xFF, 0xE4, 0xF5, 0x58,
+/*16C0*/0x7D, 0x01, 0xF5, 0x59, 0xAF, 0x35, 0xFE, 0xFC,
+	0x12, 0x09, 0x15, 0x12, 0x07, 0x2A, 0x75, 0x83,
+/*16D0*/0xB6, 0x74, 0x10, 0x12, 0x07, 0x29, 0x75, 0x83,
+	0xB8, 0x74, 0x10, 0x12, 0x07, 0x29, 0x75, 0x83,
+/*16E0*/0xBA, 0x74, 0x10, 0x12, 0x07, 0x29, 0x75, 0x83,
+	0xBC, 0x74, 0x10, 0x12, 0x07, 0x29, 0x75, 0x83,
+/*16F0*/0xBE, 0x74, 0x10, 0x12, 0x07, 0x29, 0x75, 0x83,
+	0xC0, 0x74, 0x10, 0x12, 0x07, 0x29, 0x75, 0x83,
+/*1700*/0x90, 0xE4, 0x12, 0x07, 0x29, 0x75, 0x83, 0xC2,
+	0xE4, 0x12, 0x07, 0x29, 0x75, 0x83, 0xC4, 0xE4,
+/*1710*/0x12, 0x07, 0x29, 0x75, 0x83, 0x92, 0xE4, 0x12,
+	0x07, 0x29, 0x75, 0x83, 0xC6, 0xE4, 0x12, 0x07,
+/*1720*/0x29, 0x75, 0x83, 0xC8, 0xE4, 0xF0, 0xAF, 0x58,
+	0xFE, 0xE5, 0x59, 0x12, 0x08, 0x7A, 0x02, 0x0A,
+/*1730*/0xFF, 0xE5, 0xE2, 0x30, 0xE4, 0x6C, 0xE5, 0xE7,
+	0x54, 0xC0, 0x64, 0x40, 0x70, 0x64, 0xE5, 0x09,
+/*1740*/0xC4, 0x54, 0x30, 0xFE, 0xE5, 0x08, 0x25, 0xE0,
+	0x25, 0xE0, 0x54, 0xC0, 0x4E, 0xFE, 0xEF, 0x54,
+/*1750*/0x3F, 0x4E, 0xFD, 0xE5, 0x2B, 0xAE, 0x2A, 0x78,
+	0x02, 0xC3, 0x33, 0xCE, 0x33, 0xCE, 0xD8, 0xF9,
+/*1760*/0xF5, 0x82, 0x8E, 0x83, 0xED, 0xF0, 0xE5, 0x2B,
+	0xAE, 0x2A, 0x78, 0x02, 0xC3, 0x33, 0xCE, 0x33,
+/*1770*/0xCE, 0xD8, 0xF9, 0xFF, 0xF5, 0x82, 0x8E, 0x83,
+	0xA3, 0xE5, 0xFE, 0xF0, 0x8F, 0x82, 0x8E, 0x83,
+/*1780*/0xA3, 0xA3, 0xE5, 0xFD, 0xF0, 0x8F, 0x82, 0x8E,
+	0x83, 0xA3, 0xA3, 0xA3, 0xE5, 0xFC, 0xF0, 0xC3,
+/*1790*/0xE5, 0x2B, 0x94, 0xFA, 0xE5, 0x2A, 0x94, 0x00,
+	0x50, 0x08, 0x05, 0x2B, 0xE5, 0x2B, 0x70, 0x02,
+/*17A0*/0x05, 0x2A, 0x22, 0xE4, 0xFF, 0xE4, 0xF5, 0x58,
+	0xF5, 0x56, 0xF5, 0x57, 0x74, 0x82, 0xFC, 0x12,
+/*17B0*/0x0E, 0x04, 0x8C, 0x83, 0xE0, 0xF5, 0x10, 0x54,
+	0x7F, 0xF0, 0xE5, 0x10, 0x44, 0x80, 0x12, 0x0E,
+/*17C0*/0x98, 0xED, 0xF0, 0x7E, 0x0A, 0x12, 0x0E, 0x04,
+	0x75, 0x83, 0xA0, 0xE0, 0x20, 0xE0, 0x26, 0xDE,
+/*17D0*/0xF4, 0x05, 0x57, 0xE5, 0x57, 0x70, 0x02, 0x05,
+	0x56, 0xE5, 0x14, 0x24, 0x01, 0xFD, 0xE4, 0x33,
+/*17E0*/0xFC, 0xD3, 0xE5, 0x57, 0x9D, 0xE5, 0x56, 0x9C,
+	0x40, 0xD9, 0xE5, 0x0A, 0x94, 0x20, 0x50, 0x02,
+/*17F0*/0x05, 0x0A, 0x43, 0xE1, 0x08, 0xC2, 0x31, 0x12,
+	0x0E, 0x04, 0x75, 0x83, 0xA6, 0xE0, 0x55, 0x12,
+/*1800*/0x65, 0x12, 0x70, 0x03, 0xD2, 0x31, 0x22, 0xC2,
+	0x31, 0x22, 0x90, 0x07, 0x26, 0xE0, 0xFA, 0xA3,
+/*1810*/0xE0, 0xF5, 0x82, 0x8A, 0x83, 0xE0, 0xF5, 0x41,
+	0xE5, 0x39, 0xC3, 0x95, 0x41, 0x40, 0x26, 0xE5,
+/*1820*/0x39, 0x95, 0x41, 0xC3, 0x9F, 0xEE, 0x12, 0x07,
+	0x6B, 0x40, 0x04, 0x7C, 0x01, 0x80, 0x02, 0x7C,
+/*1830*/0x00, 0xE5, 0x41, 0x64, 0x3F, 0x60, 0x04, 0x7B,
+	0x01, 0x80, 0x02, 0x7B, 0x00, 0xEC, 0x5B, 0x60,
+/*1840*/0x29, 0x05, 0x41, 0x80, 0x28, 0xC3, 0xE5, 0x41,
+	0x95, 0x39, 0xC3, 0x9F, 0xEE, 0x12, 0x07, 0x6B,
+/*1850*/0x40, 0x04, 0x7F, 0x01, 0x80, 0x02, 0x7F, 0x00,
+	0xE5, 0x41, 0x60, 0x04, 0x7E, 0x01, 0x80, 0x02,
+/*1860*/0x7E, 0x00, 0xEF, 0x5E, 0x60, 0x04, 0x15, 0x41,
+	0x80, 0x03, 0x85, 0x39, 0x41, 0x85, 0x3A, 0x40,
+/*1870*/0x22, 0xE5, 0xE2, 0x30, 0xE4, 0x60, 0xE5, 0xE1,
+	0x30, 0xE2, 0x5B, 0xE5, 0x09, 0x70, 0x04, 0x7F,
+/*1880*/0x01, 0x80, 0x02, 0x7F, 0x00, 0xE5, 0x08, 0x70,
+	0x04, 0x7E, 0x01, 0x80, 0x02, 0x7E, 0x00, 0xEE,
+/*1890*/0x5F, 0x60, 0x43, 0x53, 0xF9, 0xF8, 0xE5, 0xE2,
+	0x30, 0xE4, 0x3B, 0xE5, 0xE1, 0x30, 0xE2, 0x2E,
+/*18A0*/0x43, 0xFA, 0x02, 0x53, 0xFA, 0xFB, 0xE4, 0xF5,
+	0x10, 0x90, 0x94, 0x70, 0xE5, 0x10, 0xF0, 0xE5,
+/*18B0*/0xE1, 0x30, 0xE2, 0xE7, 0x90, 0x94, 0x70, 0xE0,
+	0x65, 0x10, 0x60, 0x03, 0x43, 0xFA, 0x04, 0x05,
+/*18C0*/0x10, 0x90, 0x94, 0x70, 0xE5, 0x10, 0xF0, 0x70,
+	0xE6, 0x12, 0x00, 0x06, 0x80, 0xE1, 0x53, 0xFA,
+/*18D0*/0xFD, 0x53, 0xFA, 0xFB, 0x80, 0xC0, 0x22, 0x8F,
+	0x54, 0x12, 0x00, 0x06, 0xE5, 0xE1, 0x30, 0xE0,
+/*18E0*/0x04, 0x7F, 0x01, 0x80, 0x02, 0x7F, 0x00, 0xE5,
+	0x7E, 0xD3, 0x94, 0x05, 0x40, 0x04, 0x7E, 0x01,
+/*18F0*/0x80, 0x02, 0x7E, 0x00, 0xEE, 0x4F, 0x60, 0x3D,
+	0x85, 0x54, 0x11, 0xE5, 0xE2, 0x20, 0xE1, 0x32,
+/*1900*/0x74, 0xCE, 0x12, 0x1A, 0x05, 0x30, 0xE7, 0x04,
+	0x7D, 0x01, 0x80, 0x02, 0x7D, 0x00, 0x8F, 0x82,
+/*1910*/0x8E, 0x83, 0xE0, 0x30, 0xE6, 0x04, 0x7F, 0x01,
+	0x80, 0x02, 0x7F, 0x00, 0xEF, 0x5D, 0x70, 0x15,
+/*1920*/0x12, 0x15, 0xC6, 0x74, 0xCE, 0x12, 0x1A, 0x05,
+	0x30, 0xE6, 0x07, 0xE0, 0x44, 0x80, 0xF0, 0x43,
+/*1930*/0xF9, 0x80, 0x12, 0x18, 0x71, 0x22, 0x12, 0x0E,
+	0x44, 0xE5, 0x16, 0x25, 0xE0, 0x25, 0xE0, 0x24,
+/*1940*/0xB0, 0xF5, 0x82, 0xE4, 0x34, 0x1A, 0xF5, 0x83,
+	0xE4, 0x93, 0xF5, 0x0F, 0xE5, 0x16, 0x25, 0xE0,
+/*1950*/0x25, 0xE0, 0x24, 0xB1, 0xF5, 0x82, 0xE4, 0x34,
+	0x1A, 0xF5, 0x83, 0xE4, 0x93, 0xF5, 0x0E, 0x12,
+/*1960*/0x0E, 0x65, 0xF5, 0x10, 0xE5, 0x0F, 0x54, 0xF0,
+	0x12, 0x0E, 0x17, 0x75, 0x83, 0x8C, 0xEF, 0xF0,
+/*1970*/0xE5, 0x0F, 0x30, 0xE0, 0x0C, 0x12, 0x0E, 0x04,
+	0x75, 0x83, 0x86, 0xE0, 0x44, 0x40, 0xF0, 0x80,
+/*1980*/0x0A, 0x12, 0x0E, 0x04, 0x75, 0x83, 0x86, 0xE0,
+	0x54, 0xBF, 0xF0, 0x12, 0x0E, 0x91, 0x75, 0x83,
+/*1990*/0x82, 0xE5, 0x0E, 0xF0, 0x22, 0x7F, 0x05, 0x12,
+	0x17, 0x31, 0x12, 0x0E, 0x04, 0x12, 0x0E, 0x33,
+/*19A0*/0x74, 0x02, 0xF0, 0x74, 0x8E, 0xFE, 0x12, 0x0E,
+	0x04, 0x12, 0x0E, 0x0B, 0xEF, 0xF0, 0x75, 0x15,
+/*19B0*/0x70, 0x12, 0x0F, 0xF7, 0x20, 0x34, 0x05, 0x75,
+	0x15, 0x10, 0x80, 0x03, 0x75, 0x15, 0x50, 0x12,
+/*19C0*/0x0F, 0xF7, 0x20, 0x34, 0x04, 0x74, 0x10, 0x80,
+	0x02, 0x74, 0xF0, 0x25, 0x15, 0xF5, 0x15, 0x12,
+/*19D0*/0x0E, 0x21, 0xEF, 0xF0, 0x12, 0x10, 0x91, 0x20,
+	0x34, 0x17, 0xE5, 0x15, 0x64, 0x30, 0x60, 0x0C,
+/*19E0*/0x74, 0x10, 0x25, 0x15, 0xF5, 0x15, 0xB4, 0x80,
+	0x03, 0xE4, 0xF5, 0x15, 0x12, 0x0E, 0x21, 0xEF,
+/*19F0*/0xF0, 0x22, 0xF0, 0xE5, 0x0B, 0x25, 0xE0, 0x25,
+	0xE0, 0x24, 0x82, 0xF5, 0x82, 0xE4, 0x34, 0x07,
+/*1A00*/0xF5, 0x83, 0x22, 0x74, 0x88, 0xFE, 0xE5, 0x08,
+	0x44, 0x07, 0xFF, 0xF5, 0x82, 0x8E, 0x83, 0xE0,
+/*1A10*/0x22, 0xF0, 0xE5, 0x08, 0x44, 0x07, 0xF5, 0x82,
+	0x22, 0xF0, 0xE0, 0x54, 0xC0, 0x8F, 0x82, 0x8E,
+/*1A20*/0x83, 0xF0, 0x22, 0xEF, 0x44, 0x07, 0xF5, 0x82,
+	0x75, 0x83, 0x86, 0xE0, 0x54, 0x10, 0xD3, 0x94,
+/*1A30*/0x00, 0x22, 0xF0, 0x90, 0x07, 0x15, 0xE0, 0x04,
+	0xF0, 0x22, 0x44, 0x06, 0xF5, 0x82, 0x75, 0x83,
+/*1A40*/0x9E, 0xE0, 0x22, 0xFE, 0xEF, 0x44, 0x07, 0xF5,
+	0x82, 0x8E, 0x83, 0xE0, 0x22, 0xE4, 0x90, 0x07,
+/*1A50*/0x2A, 0xF0, 0xA3, 0xF0, 0x12, 0x07, 0x2A, 0x75,
+	0x83, 0x82, 0xE0, 0x54, 0x7F, 0x12, 0x07, 0x29,
+/*1A60*/0xE0, 0x44, 0x80, 0xF0, 0x12, 0x10, 0xFC, 0x12,
+	0x08, 0x1F, 0x75, 0x83, 0xA0, 0xE0, 0x20, 0xE0,
+/*1A70*/0x1A, 0x90, 0x07, 0x2B, 0xE0, 0x04, 0xF0, 0x70,
+	0x06, 0x90, 0x07, 0x2A, 0xE0, 0x04, 0xF0, 0x90,
+/*1A80*/0x07, 0x2A, 0xE0, 0xB4, 0x10, 0xE1, 0xA3, 0xE0,
+	0xB4, 0x00, 0xDC, 0xEE, 0x44, 0xA6, 0xFC, 0xEF,
+/*1A90*/0x44, 0x07, 0xF5, 0x82, 0x8C, 0x83, 0xE0, 0xF5,
+	0x32, 0xEE, 0x44, 0xA8, 0xFE, 0xEF, 0x44, 0x07,
+/*1AA0*/0xF5, 0x82, 0x8E, 0x83, 0xE0, 0xF5, 0x33, 0x22,
+	0x01, 0x20, 0x11, 0x00, 0x04, 0x20, 0x00, 0x90,
+/*1AB0*/0x00, 0x20, 0x0F, 0x92, 0x00, 0x21, 0x0F, 0x94,
+	0x00, 0x22, 0x0F, 0x96, 0x00, 0x23, 0x0F, 0x98,
+/*1AC0*/0x00, 0x24, 0x0F, 0x9A, 0x00, 0x25, 0x0F, 0x9C,
+	0x00, 0x26, 0x0F, 0x9E, 0x00, 0x27, 0x0F, 0xA0,
+/*1AD0*/0x01, 0x20, 0x01, 0xA2, 0x01, 0x21, 0x01, 0xA4,
+	0x01, 0x22, 0x01, 0xA6, 0x01, 0x23, 0x01, 0xA8,
+/*1AE0*/0x01, 0x24, 0x01, 0xAA, 0x01, 0x25, 0x01, 0xAC,
+	0x01, 0x26, 0x01, 0xAE, 0x01, 0x27, 0x01, 0xB0,
+/*1AF0*/0x01, 0x28, 0x01, 0xB4, 0x00, 0x28, 0x0F, 0xB6,
+	0x40, 0x28, 0x0F, 0xB8, 0x61, 0x28, 0x01, 0xCB,
+/*1B00*/0xEF, 0xCB, 0xCA, 0xEE, 0xCA, 0x7F, 0x01, 0xE4,
+	0xFD, 0xEB, 0x4A, 0x70, 0x24, 0xE5, 0x08, 0xF5,
+/*1B10*/0x82, 0x74, 0xB6, 0x12, 0x08, 0x29, 0xE5, 0x08,
+	0xF5, 0x82, 0x74, 0xB8, 0x12, 0x08, 0x29, 0xE5,
+/*1B20*/0x08, 0xF5, 0x82, 0x74, 0xBA, 0x12, 0x08, 0x29,
+	0x7E, 0x00, 0x7C, 0x00, 0x12, 0x0A, 0xFF, 0x80,
+/*1B30*/0x12, 0x90, 0x07, 0x26, 0x12, 0x07, 0x35, 0xE5,
+	0x41, 0xF0, 0x90, 0x07, 0x24, 0x12, 0x07, 0x35,
+/*1B40*/0xE5, 0x40, 0xF0, 0x12, 0x07, 0x2A, 0x75, 0x83,
+	0x8E, 0xE4, 0x12, 0x07, 0x29, 0x74, 0x01, 0x12,
+/*1B50*/0x07, 0x29, 0xE4, 0xF0, 0x22, 0xE4, 0xF5, 0x26,
+	0xF5, 0x27, 0x53, 0xE1, 0xFE, 0xF5, 0x2A, 0x75,
+/*1B60*/0x2B, 0x01, 0xF5, 0x08, 0x7F, 0x01, 0x12, 0x17,
+	0x31, 0x30, 0x30, 0x1C, 0x90, 0x1A, 0xA9, 0xE4,
+/*1B70*/0x93, 0xF5, 0x10, 0x90, 0x1F, 0xF9, 0xE4, 0x93,
+	0xF5, 0x10, 0x90, 0x00, 0x41, 0xE4, 0x93, 0xF5,
+/*1B80*/0x10, 0x90, 0x1E, 0xCA, 0xE4, 0x93, 0xF5, 0x10,
+	0x7F, 0x02, 0x12, 0x17, 0x31, 0x12, 0x0F, 0x54,
+/*1B90*/0x7F, 0x03, 0x12, 0x17, 0x31, 0x12, 0x00, 0x06,
+	0xE5, 0xE2, 0x30, 0xE7, 0x09, 0x12, 0x10, 0x00,
+/*1BA0*/0x30, 0x30, 0x03, 0x12, 0x11, 0x00, 0x02, 0x00,
+	0x47, 0x12, 0x08, 0x1F, 0x75, 0x83, 0xD0, 0xE0,
+/*1BB0*/0xC4, 0x54, 0x0F, 0xFD, 0x75, 0x43, 0x01, 0x75,
+	0x44, 0xFF, 0x12, 0x08, 0xAA, 0x74, 0x04, 0xF0,
+/*1BC0*/0x75, 0x3B, 0x01, 0xED, 0x14, 0x60, 0x0C, 0x14,
+	0x60, 0x0B, 0x14, 0x60, 0x0F, 0x24, 0x03, 0x70,
+/*1BD0*/0x0B, 0x80, 0x09, 0x80, 0x00, 0x12, 0x08, 0xA7,
+	0x04, 0xF0, 0x80, 0x06, 0x12, 0x08, 0xA7, 0x74,
+/*1BE0*/0x04, 0xF0, 0xEE, 0x44, 0x82, 0xFE, 0xEF, 0x44,
+	0x07, 0xF5, 0x82, 0x8E, 0x83, 0xE5, 0x45, 0x12,
+/*1BF0*/0x08, 0xBE, 0x75, 0x83, 0x82, 0xE5, 0x31, 0xF0,
+	0x02, 0x11, 0x4C, 0x8E, 0x60, 0x8F, 0x61, 0x12,
+/*1C00*/0x1E, 0xA5, 0xE4, 0xFF, 0xCE, 0xED, 0xCE, 0xEE,
+	0xD3, 0x95, 0x61, 0xE5, 0x60, 0x12, 0x07, 0x6B,
+/*1C10*/0x40, 0x39, 0x74, 0x20, 0x2E, 0xF5, 0x82, 0xE4,
+	0x34, 0x03, 0xF5, 0x83, 0xE0, 0x70, 0x03, 0xFF,
+/*1C20*/0x80, 0x26, 0x12, 0x08, 0xE2, 0xFD, 0xC3, 0x9F,
+	0x40, 0x1E, 0xCF, 0xED, 0xCF, 0xEB, 0x4A, 0x70,
+/*1C30*/0x0B, 0x8D, 0x42, 0x12, 0x08, 0xEE, 0xF5, 0x41,
+	0x8E, 0x40, 0x80, 0x0C, 0x12, 0x08, 0xE2, 0xF5,
+/*1C40*/0x38, 0x12, 0x08, 0xEE, 0xF5, 0x39, 0x8E, 0x3A,
+	0x1E, 0x80, 0xBC, 0x22, 0x75, 0x58, 0x01, 0xE5,
+/*1C50*/0x35, 0x70, 0x0C, 0x12, 0x07, 0xCC, 0xE0, 0xF5,
+	0x4A, 0x12, 0x07, 0xD8, 0xE0, 0xF5, 0x4C, 0xE5,
+/*1C60*/0x35, 0xB4, 0x04, 0x0C, 0x12, 0x07, 0xE4, 0xE0,
+	0xF5, 0x4A, 0x12, 0x07, 0xF0, 0xE0, 0xF5, 0x4C,
+/*1C70*/0xE5, 0x35, 0xB4, 0x01, 0x04, 0x7F, 0x01, 0x80,
+	0x02, 0x7F, 0x00, 0xE5, 0x35, 0xB4, 0x02, 0x04,
+/*1C80*/0x7E, 0x01, 0x80, 0x02, 0x7E, 0x00, 0xEE, 0x4F,
+	0x60, 0x0C, 0x12, 0x07, 0xFC, 0xE0, 0xF5, 0x4A,
+/*1C90*/0x12, 0x08, 0x08, 0xE0, 0xF5, 0x4C, 0x85, 0x41,
+	0x49, 0x85, 0x40, 0x4B, 0x22, 0x75, 0x5B, 0x01,
+/*1CA0*/0x90, 0x07, 0x24, 0x12, 0x07, 0x35, 0xE0, 0x54,
+	0x1F, 0xFF, 0xD3, 0x94, 0x02, 0x50, 0x04, 0x8F,
+/*1CB0*/0x58, 0x80, 0x05, 0xEF, 0x24, 0xFE, 0xF5, 0x58,
+	0xEF, 0xC3, 0x94, 0x18, 0x40, 0x05, 0x75, 0x59,
+/*1CC0*/0x18, 0x80, 0x04, 0xEF, 0x04, 0xF5, 0x59, 0x85,
+	0x43, 0x5A, 0xAF, 0x58, 0x7E, 0x00, 0xAD, 0x59,
+/*1CD0*/0x7C, 0x00, 0xAB, 0x5B, 0x7A, 0x00, 0x12, 0x15,
+	0x41, 0xAF, 0x5A, 0x7E, 0x00, 0x12, 0x18, 0x0A,
+/*1CE0*/0xAF, 0x5B, 0x7E, 0x00, 0x02, 0x1A, 0xFF, 0xE5,
+	0xE2, 0x30, 0xE7, 0x0E, 0x12, 0x10, 0x03, 0xC2,
+/*1CF0*/0x30, 0x30, 0x30, 0x03, 0x12, 0x10, 0xFF, 0x20,
+	0x33, 0x28, 0xE5, 0xE7, 0x30, 0xE7, 0x05, 0x12,
+/*1D00*/0x0E, 0xA2, 0x80, 0x0D, 0xE5, 0xFE, 0xC3, 0x94,
+	0x20, 0x50, 0x06, 0x12, 0x0E, 0xA2, 0x43, 0xF9,
+/*1D10*/0x08, 0xE5, 0xF2, 0x30, 0xE7, 0x03, 0x53, 0xF9,
+	0x7F, 0xE5, 0xF1, 0x54, 0x70, 0xD3, 0x94, 0x00,
+/*1D20*/0x50, 0xD8, 0x22, 0x12, 0x0E, 0x04, 0x75, 0x83,
+	0x80, 0xE4, 0xF0, 0xE5, 0x08, 0x44, 0x07, 0x12,
+/*1D30*/0x0D, 0xFD, 0x75, 0x83, 0x84, 0x12, 0x0E, 0x02,
+	0x75, 0x83, 0x86, 0x12, 0x0E, 0x02, 0x75, 0x83,
+/*1D40*/0x8C, 0xE0, 0x54, 0xF3, 0x12, 0x0E, 0x03, 0x75,
+	0x83, 0x8E, 0x12, 0x0E, 0x02, 0x75, 0x83, 0x94,
+/*1D50*/0xE0, 0x54, 0xFB, 0xF0, 0x22, 0x12, 0x07, 0x2A,
+	0x75, 0x83, 0x8E, 0xE4, 0x12, 0x07, 0x29, 0x74,
+/*1D60*/0x01, 0x12, 0x07, 0x29, 0xE4, 0x12, 0x08, 0xBE,
+	0x75, 0x83, 0x8C, 0xE0, 0x44, 0x20, 0x12, 0x08,
+/*1D70*/0xBE, 0xE0, 0x54, 0xDF, 0xF0, 0x74, 0x84, 0x85,
+	0x08, 0x82, 0xF5, 0x83, 0xE0, 0x54, 0x7F, 0xF0,
+/*1D80*/0xE0, 0x44, 0x80, 0xF0, 0x22, 0x75, 0x56, 0x01,
+	0xE4, 0xFD, 0xF5, 0x57, 0xAF, 0x35, 0xFE, 0xFC,
+/*1D90*/0x12, 0x09, 0x15, 0x12, 0x1C, 0x9D, 0x12, 0x1E,
+	0x7A, 0x12, 0x1C, 0x4C, 0xAF, 0x57, 0x7E, 0x00,
+/*1DA0*/0xAD, 0x56, 0x7C, 0x00, 0x12, 0x04, 0x44, 0xAF,
+	0x56, 0x7E, 0x00, 0x02, 0x11, 0xEE, 0x75, 0x56,
+/*1DB0*/0x01, 0xE4, 0xFD, 0xF5, 0x57, 0xAF, 0x35, 0xFE,
+	0xFC, 0x12, 0x09, 0x15, 0x12, 0x1C, 0x9D, 0x12,
+/*1DC0*/0x1E, 0x7A, 0x12, 0x1C, 0x4C, 0xAF, 0x57, 0x7E,
+	0x00, 0xAD, 0x56, 0x7C, 0x00, 0x12, 0x04, 0x44,
+/*1DD0*/0xAF, 0x56, 0x7E, 0x00, 0x02, 0x11, 0xEE, 0xE4,
+	0xF5, 0x16, 0x12, 0x0E, 0x44, 0xFE, 0xE5, 0x08,
+/*1DE0*/0x44, 0x05, 0xFF, 0x12, 0x0E, 0x65, 0x8F, 0x82,
+	0x8E, 0x83, 0xF0, 0x05, 0x16, 0xE5, 0x16, 0xC3,
+/*1DF0*/0x94, 0x14, 0x40, 0xE6, 0xE5, 0x08, 0x12, 0x0E,
+	0x2B, 0xE4, 0xF0, 0x22, 0xE4, 0xF5, 0x58, 0xF5,
+/*1E00*/0x59, 0xF5, 0x5A, 0xFF, 0xFE, 0xAD, 0x58, 0xFC,
+	0x12, 0x09, 0x15, 0x7F, 0x04, 0x7E, 0x00, 0xAD,
+/*1E10*/0x58, 0x7C, 0x00, 0x12, 0x09, 0x15, 0x7F, 0x02,
+	0x7E, 0x00, 0xAD, 0x58, 0x7C, 0x00, 0x02, 0x09,
+/*1E20*/0x15, 0xE5, 0x3C, 0x25, 0x3E, 0xFC, 0xE5, 0x42,
+	0x24, 0x00, 0xFB, 0xE4, 0x33, 0xFA, 0xEC, 0xC3,
+/*1E30*/0x9B, 0xEA, 0x12, 0x07, 0x6B, 0x40, 0x0B, 0x8C,
+	0x42, 0xE5, 0x3D, 0x25, 0x3F, 0xF5, 0x41, 0x8F,
+/*1E40*/0x40, 0x22, 0x12, 0x09, 0x0B, 0x22, 0x74, 0x84,
+	0xF5, 0x18, 0x85, 0x08, 0x19, 0x85, 0x19, 0x82,
+/*1E50*/0x85, 0x18, 0x83, 0xE0, 0x54, 0x7F, 0xF0, 0xE0,
+	0x44, 0x80, 0xF0, 0xE0, 0x44, 0x80, 0xF0, 0x22,
+/*1E60*/0xEF, 0x4E, 0x70, 0x0B, 0x12, 0x07, 0x2A, 0x75,
+	0x83, 0xD2, 0xE0, 0x54, 0xDF, 0xF0, 0x22, 0x12,
+/*1E70*/0x07, 0x2A, 0x75, 0x83, 0xD2, 0xE0, 0x44, 0x20,
+	0xF0, 0x22, 0x75, 0x58, 0x01, 0x90, 0x07, 0x26,
+/*1E80*/0x12, 0x07, 0x35, 0xE0, 0x54, 0x3F, 0xF5, 0x41,
+	0x12, 0x07, 0x32, 0xE0, 0x54, 0x3F, 0xF5, 0x40,
+/*1E90*/0x22, 0x75, 0x56, 0x02, 0xE4, 0xF5, 0x57, 0x12,
+	0x1D, 0xFC, 0xAF, 0x57, 0x7E, 0x00, 0xAD, 0x56,
+/*1EA0*/0x7C, 0x00, 0x02, 0x04, 0x44, 0xE4, 0xF5, 0x42,
+	0xF5, 0x41, 0xF5, 0x40, 0xF5, 0x38, 0xF5, 0x39,
+/*1EB0*/0xF5, 0x3A, 0x22, 0xEF, 0x54, 0x07, 0xFF, 0xE5,
+	0xF9, 0x54, 0xF8, 0x4F, 0xF5, 0xF9, 0x22, 0x7F,
+/*1EC0*/0x01, 0xE4, 0xFE, 0x0F, 0x0E, 0xBE, 0xFF, 0xFB,
+	0x22, 0x01, 0x20, 0x00, 0x01, 0x04, 0x20, 0x00,
+/*1ED0*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/*1EE0*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/*1EF0*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/*1F00*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/*1F10*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/*1F20*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/*1F30*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/*1F40*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/*1F50*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/*1F60*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/*1F70*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/*1F80*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/*1F90*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/*1FA0*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/*1FB0*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/*1FC0*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/*1FD0*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/*1FE0*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/*1FF0*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x01, 0x20, 0x11, 0x00, 0x04, 0x20, 0x00, 0x81
+};
+
+int ipath_sd7220_ib_load(struct ipath_devdata *dd)
+{
+	return ipath_sd7220_prog_ld(dd, IB_7220_SERDES, ipath_sd7220_ib_img,
+		sizeof(ipath_sd7220_ib_img), 0);
+}
+
+int ipath_sd7220_ib_vfy(struct ipath_devdata *dd)
+{
+	return ipath_sd7220_prog_vfy(dd, IB_7220_SERDES, ipath_sd7220_ib_img,
+		sizeof(ipath_sd7220_ib_img), 0);
+}
diff --git a/drivers/infiniband/hw/ipath/ipath_sdma.c b/drivers/infiniband/hw/ipath/ipath_sdma.c
new file mode 100644
index 0000000..1974df7
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_sdma.c
@@ -0,0 +1,790 @@
+/*
+ * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/jiffies.h>
+#include <linux/spinlock.h>
+
+#include "ipath_kernel.h"
+#include "ipath_verbs.h"
+#include "ipath_common.h"
+
+#define SDMA_DESCQ_SZ PAGE_SIZE /* 256 entries per 4KB page */
+
+static void vl15_watchdog_enq(struct ipath_devdata *dd)
+{
+	/* ipath_sdma_lock must already be held */
+	if (atomic_inc_return(&dd->ipath_sdma_vl15_count) == 1) {
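+		/* ~50 msec watchdog interval, rounded up to >= 1 jiffy */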
+		unsigned long interval = (HZ + 19) / 20;
+		dd->ipath_sdma_vl15_timer.expires = jiffies + interval;
+		add_timer(&dd->ipath_sdma_vl15_timer);
+	}
+}
+
+static void vl15_watchdog_deq(struct ipath_devdata *dd)
+{
+	/* ipath_sdma_lock must already be held */
+	if (atomic_dec_return(&dd->ipath_sdma_vl15_count) != 0) {
+		unsigned long interval = (HZ + 19) / 20;
+		mod_timer(&dd->ipath_sdma_vl15_timer, jiffies + interval);
+	} else {
+		del_timer(&dd->ipath_sdma_vl15_timer);
+	}
+}
+
+static void vl15_watchdog_timeout(unsigned long opaque)
+{
+	struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
+
+	if (atomic_read(&dd->ipath_sdma_vl15_count) != 0) {
+		ipath_dbg("vl15 watchdog timeout - clearing\n");
+		ipath_cancel_sends(dd, 1);
+		ipath_hol_down(dd);
+	} else {
+		ipath_dbg("vl15 watchdog timeout - "
+			  "condition already cleared\n");
+	}
+}
+
+static void unmap_desc(struct ipath_devdata *dd, unsigned head)
+{
+	__le64 *descqp = &dd->ipath_sdma_descq[head].qw[0];
+	u64 desc[2];
+	dma_addr_t addr;
+	size_t len;
+
+	desc[0] = le64_to_cpu(descqp[0]);
+	desc[1] = le64_to_cpu(descqp[1]);
+
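+	/*
+	 * As encoded by the shifts below: qw0[63:32] holds the low 32
+	 * address bits and qw1[31:0] the high 32; qw0[26:16] is the
+	 * length in dwords, so masking with 0x7ff << 2 yields bytes.
+	 */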
+	addr = (desc[1] << 32) | (desc[0] >> 32);
+	len = (desc[0] >> 14) & (0x7ffULL << 2);
+	dma_unmap_single(&dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
+}
+
+/*
+ * ipath_sdma_lock must already be held when calling this.
+ */
+int ipath_sdma_make_progress(struct ipath_devdata *dd)
+{
+	struct list_head *lp = NULL;
+	struct ipath_sdma_txreq *txp = NULL;
+	u16 dmahead;
+	u16 start_idx = 0;
+	int progress = 0;
+
+	if (!list_empty(&dd->ipath_sdma_activelist)) {
+		lp = dd->ipath_sdma_activelist.next;
+		txp = list_entry(lp, struct ipath_sdma_txreq, list);
+		start_idx = txp->start_idx;
+	}
+
+	/*
+	 * Read the SDMA head register in order to know that the
+	 * interrupt clear has been written to the chip.
+	 * Otherwise, we may not get an interrupt for the last
+	 * descriptor in the queue.
+	 */
+	dmahead = (u16)ipath_read_kreg32(dd, dd->ipath_kregs->kr_senddmahead);
+	/* sanity check return value for error handling (chip reset, etc.) */
+	if (dmahead >= dd->ipath_sdma_descq_cnt)
+		goto done;
+
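+	/* retire descriptors the chip has consumed, up to dmahead */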
+	while (dd->ipath_sdma_descq_head != dmahead) {
+		if (txp && txp->flags & IPATH_SDMA_TXREQ_F_FREEDESC &&
+		    dd->ipath_sdma_descq_head == start_idx) {
+			unmap_desc(dd, dd->ipath_sdma_descq_head);
+			start_idx++;
+			if (start_idx == dd->ipath_sdma_descq_cnt)
+				start_idx = 0;
+		}
+
+		/* increment free count and head */
+		dd->ipath_sdma_descq_removed++;
+		if (++dd->ipath_sdma_descq_head == dd->ipath_sdma_descq_cnt)
+			dd->ipath_sdma_descq_head = 0;
+
+		if (txp && txp->next_descq_idx == dd->ipath_sdma_descq_head) {
+			/* move to notify list */
+			if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
+				vl15_watchdog_deq(dd);
+			list_move_tail(lp, &dd->ipath_sdma_notifylist);
+			if (!list_empty(&dd->ipath_sdma_activelist)) {
+				lp = dd->ipath_sdma_activelist.next;
+				txp = list_entry(lp, struct ipath_sdma_txreq,
+						 list);
+				start_idx = txp->start_idx;
+			} else {
+				lp = NULL;
+				txp = NULL;
+			}
+		}
+		progress = 1;
+	}
+
+	if (progress)
+		tasklet_hi_schedule(&dd->ipath_sdma_notify_task);
+
+done:
+	return progress;
+}
+
+static void ipath_sdma_notify(struct ipath_devdata *dd, struct list_head *list)
+{
+	struct ipath_sdma_txreq *txp, *txp_next;
+
+	list_for_each_entry_safe(txp, txp_next, list, list) {
+		list_del_init(&txp->list);
+
+		if (txp->callback)
+			(*txp->callback)(txp->callback_cookie,
+					 txp->callback_status);
+	}
+}
+
+static void sdma_notify_taskbody(struct ipath_devdata *dd)
+{
+	unsigned long flags;
+	struct list_head list;
+
+	INIT_LIST_HEAD(&list);
+
+	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
+
+	list_splice_init(&dd->ipath_sdma_notifylist, &list);
+
+	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
+
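+	/* run the callbacks from the private list, without the sdma lock */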
+	ipath_sdma_notify(dd, &list);
+
+	/*
+	 * The IB verbs layer needs to see the callback before getting
+	 * the call to ipath_ib_piobufavail() because the callback
+	 * handles releasing resources the next send will need.
+	 * Otherwise, we could do these calls in
+	 * ipath_sdma_make_progress().
+	 */
+	ipath_ib_piobufavail(dd->verbs_dev);
+}
+
+static void sdma_notify_task(unsigned long opaque)
+{
+	struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
+
+	if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
+		sdma_notify_taskbody(dd);
+}
+
+static void dump_sdma_state(struct ipath_devdata *dd)
+{
+	unsigned long reg;
+
+	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmastatus);
+	ipath_cdbg(VERBOSE, "kr_senddmastatus: 0x%016lx\n", reg);
+
+	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendctrl);
+	ipath_cdbg(VERBOSE, "kr_sendctrl: 0x%016lx\n", reg);
+
+	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask0);
+	ipath_cdbg(VERBOSE, "kr_senddmabufmask0: 0x%016lx\n", reg);
+
+	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask1);
+	ipath_cdbg(VERBOSE, "kr_senddmabufmask1: 0x%016lx\n", reg);
+
+	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask2);
+	ipath_cdbg(VERBOSE, "kr_senddmabufmask2: 0x%016lx\n", reg);
+
+	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmatail);
+	ipath_cdbg(VERBOSE, "kr_senddmatail: 0x%016lx\n", reg);
+
+	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmahead);
+	ipath_cdbg(VERBOSE, "kr_senddmahead: 0x%016lx\n", reg);
+}
+
+static void sdma_abort_task(unsigned long opaque)
+{
+	struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
+	u64 status;
+	unsigned long flags;
+
+	if (test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
+		return;
+
+	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
+
+	status = dd->ipath_sdma_status & IPATH_SDMA_ABORT_MASK;
+
+	/* nothing to do */
+	if (status == IPATH_SDMA_ABORT_NONE)
+		goto unlock;
+
+	/* ipath_sdma_abort() is done, waiting for interrupt */
+	if (status == IPATH_SDMA_ABORT_DISARMED) {
+		if (time_before(jiffies, dd->ipath_sdma_abort_intr_timeout))
+			goto resched_noprint;
+		/* give up, intr got lost somewhere */
+		ipath_dbg("give up waiting for SDMADISABLED intr\n");
+		__set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
+		status = IPATH_SDMA_ABORT_ABORTED;
+	}
+
+	/* everything is stopped, time to clean up and restart */
+	if (status == IPATH_SDMA_ABORT_ABORTED) {
+		struct ipath_sdma_txreq *txp, *txpnext;
+		u64 hwstatus;
+		int notify = 0;
+
+		hwstatus = ipath_read_kreg64(dd,
+				dd->ipath_kregs->kr_senddmastatus);
+
+		if (/* ScoreBoardDrainInProg */
+		    test_bit(63, &hwstatus) ||
+		    /* AbortInProg */
+		    test_bit(62, &hwstatus) ||
+		    /* InternalSDmaEnable */
+		    test_bit(61, &hwstatus) ||
+		    /* ScbEmpty */
+		    !test_bit(30, &hwstatus)) {
+			if (dd->ipath_sdma_reset_wait > 0) {
+				/* not done shutting down sdma */
+				--dd->ipath_sdma_reset_wait;
+				goto resched;
+			}
+			ipath_cdbg(VERBOSE, "gave up waiting for quiescent "
+				"status after SDMA reset, continuing\n");
+			dump_sdma_state(dd);
+		}
+
+		/* dequeue all "sent" requests */
+		list_for_each_entry_safe(txp, txpnext,
+					 &dd->ipath_sdma_activelist, list) {
+			txp->callback_status = IPATH_SDMA_TXREQ_S_ABORTED;
+			if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
+				vl15_watchdog_deq(dd);
+			list_move_tail(&txp->list, &dd->ipath_sdma_notifylist);
+			notify = 1;
+		}
+		if (notify)
+			tasklet_hi_schedule(&dd->ipath_sdma_notify_task);
+
+		/* reset our notion of head and tail */
+		dd->ipath_sdma_descq_tail = 0;
+		dd->ipath_sdma_descq_head = 0;
+		dd->ipath_sdma_head_dma[0] = 0;
+		dd->ipath_sdma_generation = 0;
+		dd->ipath_sdma_descq_removed = dd->ipath_sdma_descq_added;
+
+		/* Reset SendDmaLenGen */
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen,
+			(u64) dd->ipath_sdma_descq_cnt | (1ULL << 18));
+
+		/* done with sdma state for a bit */
+		spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
+
+		/*
+		 * Don't restart sdma here. Wait until link is up to ACTIVE.
+		 * VL15 MADs used to bring the link up use PIO, and multiple
+		 * link transitions otherwise cause the sdma engine to be
+		 * stopped and started multiple times.
+		 * The disable is done here, including the shadow, so the
+		 * state is kept consistent.
+		 * See ipath_restart_sdma() for the actual starting of sdma.
+		 */
+		spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+		dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+				 dd->ipath_sendctrl);
+		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
+
+		/* make sure I see next message */
+		dd->ipath_sdma_abort_jiffies = 0;
+
+		goto done;
+	}
+
+resched:
+	/*
+	 * for now, keep spinning
+	 * JAG - it is bad for the default to be a loop without a
+	 * state change
+	 */
+	if (time_after(jiffies, dd->ipath_sdma_abort_jiffies)) {
+		ipath_dbg("looping with status 0x%016llx\n",
+			  (unsigned long long) dd->ipath_sdma_status);
+		dd->ipath_sdma_abort_jiffies = jiffies + 5 * HZ;
+	}
+resched_noprint:
+	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
+	if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
+		tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
+	return;
+
+unlock:
+	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
+done:
+	return;
+}
+
+/*
+ * This is called from interrupt context.
+ */
+void ipath_sdma_intr(struct ipath_devdata *dd)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
+
+	(void) ipath_sdma_make_progress(dd);
+
+	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
+}
+
+static int alloc_sdma(struct ipath_devdata *dd)
+{
+	int ret = 0;
+
+	/* Allocate memory for SendDMA descriptor FIFO */
+	dd->ipath_sdma_descq = dma_alloc_coherent(&dd->pcidev->dev,
+		SDMA_DESCQ_SZ, &dd->ipath_sdma_descq_phys, GFP_KERNEL);
+
+	if (!dd->ipath_sdma_descq) {
+		ipath_dev_err(dd, "failed to allocate SendDMA descriptor "
+			"FIFO memory\n");
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	dd->ipath_sdma_descq_cnt =
+		SDMA_DESCQ_SZ / sizeof(struct ipath_sdma_desc);
+
+	/* Allocate memory for DMA of head register to memory */
+	dd->ipath_sdma_head_dma = dma_alloc_coherent(&dd->pcidev->dev,
+		PAGE_SIZE, &dd->ipath_sdma_head_phys, GFP_KERNEL);
+	if (!dd->ipath_sdma_head_dma) {
+		ipath_dev_err(dd, "failed to allocate SendDMA head memory\n");
+		ret = -ENOMEM;
+		goto cleanup_descq;
+	}
+	dd->ipath_sdma_head_dma[0] = 0;
+
+	init_timer(&dd->ipath_sdma_vl15_timer);
+	dd->ipath_sdma_vl15_timer.function = vl15_watchdog_timeout;
+	dd->ipath_sdma_vl15_timer.data = (unsigned long)dd;
+	atomic_set(&dd->ipath_sdma_vl15_count, 0);
+
+	goto done;
+
+cleanup_descq:
+	dma_free_coherent(&dd->pcidev->dev, SDMA_DESCQ_SZ,
+		(void *)dd->ipath_sdma_descq, dd->ipath_sdma_descq_phys);
+	dd->ipath_sdma_descq = NULL;
+	dd->ipath_sdma_descq_phys = 0;
+done:
+	return ret;
+}
+
+int setup_sdma(struct ipath_devdata *dd)
+{
+	int ret = 0;
+	unsigned i, n;
+	u64 tmp64;
+	u64 senddmabufmask[3] = { 0 };
+	unsigned long flags;
+
+	ret = alloc_sdma(dd);
+	if (ret)
+		goto done;
+
+	if (!dd->ipath_sdma_descq) {
+		ipath_dev_err(dd, "SendDMA memory not allocated\n");
+		goto done;
+	}
+
+	dd->ipath_sdma_status = 0;
+	dd->ipath_sdma_abort_jiffies = 0;
+	dd->ipath_sdma_generation = 0;
+	dd->ipath_sdma_descq_tail = 0;
+	dd->ipath_sdma_descq_head = 0;
+	dd->ipath_sdma_descq_removed = 0;
+	dd->ipath_sdma_descq_added = 0;
+
+	/* Set SendDmaBase */
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabase,
+			 dd->ipath_sdma_descq_phys);
+	/* Set SendDmaLenGen */
+	tmp64 = dd->ipath_sdma_descq_cnt;
+	tmp64 |= 1<<18; /* enable generation checking */
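+	/*
+	 * With generation checking enabled, the 2-bit SDmaGeneration
+	 * field written into each descriptor (see make_sdma_desc())
+	 * lets stale descriptors from an earlier pass around the ring
+	 * be detected.
+	 */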
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen, tmp64);
+	/* Set SendDmaTail */
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail,
+			 dd->ipath_sdma_descq_tail);
+	/* Set SendDmaHeadAddr */
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr,
+			 dd->ipath_sdma_head_phys);
+
+	/* Reserve all the former "kernel" piobufs */
+	n = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k - dd->ipath_pioreserved;
+	for (i = dd->ipath_lastport_piobuf; i < n; ++i) {
+		unsigned word = i / 64;
+		unsigned bit = i & 63;
+		BUG_ON(word >= 3);
+		senddmabufmask[word] |= 1ULL << bit;
+	}
+	ipath_chg_pioavailkernel(dd, dd->ipath_lastport_piobuf,
+		n - dd->ipath_lastport_piobuf, 0);
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask0,
+			 senddmabufmask[0]);
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask1,
+			 senddmabufmask[1]);
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask2,
+			 senddmabufmask[2]);
+
+	INIT_LIST_HEAD(&dd->ipath_sdma_activelist);
+	INIT_LIST_HEAD(&dd->ipath_sdma_notifylist);
+
+	tasklet_init(&dd->ipath_sdma_notify_task, sdma_notify_task,
+		     (unsigned long) dd);
+	tasklet_init(&dd->ipath_sdma_abort_task, sdma_abort_task,
+		     (unsigned long) dd);
+
+	/*
+	 * No point turning on SDMA here, as the link is probably not ACTIVE.
+	 * Just mark it RUNNING and enable the interrupt, and let
+	 * ipath_restart_sdma() on the link transition to ACTIVE actually
+	 * enable it.
+	 */
+	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+	dd->ipath_sendctrl |= INFINIPATH_S_SDMAINTENABLE;
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
+	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+	__set_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status);
+	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
+
+done:
+	return ret;
+}
+
+void teardown_sdma(struct ipath_devdata *dd)
+{
+	struct ipath_sdma_txreq *txp, *txpnext;
+	unsigned long flags;
+	dma_addr_t sdma_head_phys = 0;
+	dma_addr_t sdma_descq_phys = 0;
+	void *sdma_descq = NULL;
+	void *sdma_head_dma = NULL;
+
+	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
+	__clear_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status);
+	__set_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
+	__set_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status);
+	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
+
+	tasklet_kill(&dd->ipath_sdma_abort_task);
+	tasklet_kill(&dd->ipath_sdma_notify_task);
+
+	/* turn off sdma */
+	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+	dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+		dd->ipath_sendctrl);
+	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
+
+	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
+	/* dequeue all "sent" requests */
+	list_for_each_entry_safe(txp, txpnext, &dd->ipath_sdma_activelist,
+				 list) {
+		txp->callback_status = IPATH_SDMA_TXREQ_S_SHUTDOWN;
+		if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
+			vl15_watchdog_deq(dd);
+		list_move_tail(&txp->list, &dd->ipath_sdma_notifylist);
+	}
+	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
+
+	sdma_notify_taskbody(dd);
+
+	del_timer_sync(&dd->ipath_sdma_vl15_timer);
+
+	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
+
+	dd->ipath_sdma_abort_jiffies = 0;
+
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabase, 0);
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen, 0);
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, 0);
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr, 0);
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask0, 0);
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask1, 0);
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask2, 0);
+
+	if (dd->ipath_sdma_head_dma) {
+		sdma_head_dma = (void *) dd->ipath_sdma_head_dma;
+		sdma_head_phys = dd->ipath_sdma_head_phys;
+		dd->ipath_sdma_head_dma = NULL;
+		dd->ipath_sdma_head_phys = 0;
+	}
+
+	if (dd->ipath_sdma_descq) {
+		sdma_descq = dd->ipath_sdma_descq;
+		sdma_descq_phys = dd->ipath_sdma_descq_phys;
+		dd->ipath_sdma_descq = NULL;
+		dd->ipath_sdma_descq_phys = 0;
+	}
+
+	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
+
+	if (sdma_head_dma)
+		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
+				  sdma_head_dma, sdma_head_phys);
+
+	if (sdma_descq)
+		dma_free_coherent(&dd->pcidev->dev, SDMA_DESCQ_SZ,
+				  sdma_descq, sdma_descq_phys);
+}
+
+/*
+ * [Re]start SDMA, if we use it, and it's not already OK.
+ * This is called on transition to link ACTIVE, either the first or
+ * subsequent times.
+ */
+void ipath_restart_sdma(struct ipath_devdata *dd)
+{
+	unsigned long flags;
+	int needed = 1;
+
+	if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA))
+		goto bail;
+
+	/*
+	 * First, make sure we should restart at all: check that we are
+	 * "RUNNING" (not in teardown) and not "SHUTDOWN".
+	 */
+	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
+	if (!test_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status)
+	    || test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
+		needed = 0;
+	else {
+		__clear_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
+		__clear_bit(IPATH_SDMA_DISARMED, &dd->ipath_sdma_status);
+		__clear_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
+	}
+	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
+	if (!needed) {
+		ipath_dbg("invalid attempt to restart SDMA, status 0x%016llx\n",
+			  (unsigned long long) dd->ipath_sdma_status);
+		goto bail;
+	}
+	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+	/*
+	 * First clear, just to be safe. Enable is only done
+	 * in chip on 0->1 transition
+	 */
+	dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
+	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+	dd->ipath_sendctrl |= INFINIPATH_S_SDMAENABLE;
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
+	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
+
+bail:
+	return;
+}
+
+static inline void make_sdma_desc(struct ipath_devdata *dd,
+	u64 *sdmadesc, u64 addr, u64 dwlen, u64 dwoffset)
+{
+	WARN_ON(addr & 3);
+	/* SDmaPhyAddr[47:32] */
+	sdmadesc[1] = addr >> 32;
+	/* SDmaPhyAddr[31:0] */
+	sdmadesc[0] = (addr & 0xfffffffcULL) << 32;
+	/* SDmaGeneration[1:0] */
+	sdmadesc[0] |= (dd->ipath_sdma_generation & 3ULL) << 30;
+	/* SDmaDwordCount[10:0] */
+	sdmadesc[0] |= (dwlen & 0x7ffULL) << 16;
+	/* SDmaBufOffset[12:2] */
+	sdmadesc[0] |= dwoffset & 0x7ffULL;
+}
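+/*
+ * Bits 11-15 of descriptor qword 0 are flag bits, filled in by the
+ * callers below as needed: 11 SDmaLastDesc, 12 SDmaFirstDesc,
+ * 13 head update to host memory, 14 SDmaUseLargeBuf, 15 SDmaIntReq.
+ */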
+
+/*
+ * This function queues one IB packet onto the send DMA queue per call.
+ * The caller is responsible for checking:
+ * 1) The number of send DMA descriptor entries is less than the size of
+ *    the descriptor queue.
+ * 2) The IB SGE addresses and lengths are 32-bit aligned
+ *    (except possibly the last SGE's length)
+ * 3) The SGE addresses are suitable for passing to dma_map_single().
+ */
+int ipath_sdma_verbs_send(struct ipath_devdata *dd,
+	struct ipath_sge_state *ss, u32 dwords,
+	struct ipath_verbs_txreq *tx)
+{
+	unsigned long flags;
+	struct ipath_sge *sge;
+	int ret = 0;
+	u16 tail;
+	__le64 *descqp;
+	u64 sdmadesc[2];
+	u32 dwoffset;
+	dma_addr_t addr;
+
+	if ((tx->map_len + (dwords<<2)) > dd->ipath_ibmaxlen) {
+		ipath_dbg("packet size %X > ibmax %X, fail\n",
+			tx->map_len + (dwords<<2), dd->ipath_ibmaxlen);
+		ret = -EMSGSIZE;
+		goto fail;
+	}
+
+	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
+
+retry:
+	if (unlikely(test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status))) {
+		ret = -EBUSY;
+		goto unlock;
+	}
+
+	if (tx->txreq.sg_count > ipath_sdma_descq_freecnt(dd)) {
+		if (ipath_sdma_make_progress(dd))
+			goto retry;
+		ret = -ENOBUFS;
+		goto unlock;
+	}
+
+	addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
+			      tx->map_len, DMA_TO_DEVICE);
+	if (dma_mapping_error(addr)) {
+		ret = -EIO;
+		goto unlock;
+	}
+
+	dwoffset = tx->map_len >> 2;
+	make_sdma_desc(dd, sdmadesc, (u64) addr, dwoffset, 0);
+
+	/* SDmaFirstDesc */
+	sdmadesc[0] |= 1ULL << 12;
+	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_USELARGEBUF)
+		sdmadesc[0] |= 1ULL << 14;	/* SDmaUseLargeBuf */
+
+	/* write to the descq */
+	tail = dd->ipath_sdma_descq_tail;
+	descqp = &dd->ipath_sdma_descq[tail].qw[0];
+	*descqp++ = cpu_to_le64(sdmadesc[0]);
+	*descqp++ = cpu_to_le64(sdmadesc[1]);
+
+	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEDESC)
+		tx->txreq.start_idx = tail;
+
+	/* increment the tail */
+	if (++tail == dd->ipath_sdma_descq_cnt) {
+		tail = 0;
+		descqp = &dd->ipath_sdma_descq[0].qw[0];
+		++dd->ipath_sdma_generation;
+	}
+
+	sge = &ss->sge;
+	while (dwords) {
+		u32 dw;
+		u32 len;
+
+		len = dwords << 2;
+		if (len > sge->length)
+			len = sge->length;
+		if (len > sge->sge_length)
+			len = sge->sge_length;
+		BUG_ON(len == 0);
+		dw = (len + 3) >> 2;
+		addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,
+				      DMA_TO_DEVICE);
+		make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset);
+		/* SDmaUseLargeBuf has to be set in every descriptor */
+		if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_USELARGEBUF)
+			sdmadesc[0] |= 1ULL << 14;
+		/* write to the descq */
+		*descqp++ = cpu_to_le64(sdmadesc[0]);
+		*descqp++ = cpu_to_le64(sdmadesc[1]);
+
+		/* increment the tail */
+		if (++tail == dd->ipath_sdma_descq_cnt) {
+			tail = 0;
+			descqp = &dd->ipath_sdma_descq[0].qw[0];
+			++dd->ipath_sdma_generation;
+		}
+		sge->vaddr += len;
+		sge->length -= len;
+		sge->sge_length -= len;
+		if (sge->sge_length == 0) {
+			if (--ss->num_sge)
+				*sge = *ss->sg_list++;
+		} else if (sge->length == 0 && sge->mr != NULL) {
+			if (++sge->n >= IPATH_SEGSZ) {
+				if (++sge->m >= sge->mr->mapsz)
+					break;
+				sge->n = 0;
+			}
+			sge->vaddr =
+				sge->mr->map[sge->m]->segs[sge->n].vaddr;
+			sge->length =
+				sge->mr->map[sge->m]->segs[sge->n].length;
+		}
+
+		dwoffset += dw;
+		dwords -= dw;
+	}
+
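+	/*
+	 * Point descqp back at the last descriptor written, accounting
+	 * for a wrap of the tail back to the start of the ring.
+	 */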
+	if (!tail)
+		descqp = &dd->ipath_sdma_descq[dd->ipath_sdma_descq_cnt].qw[0];
+	descqp -= 2;
+	/* SDmaLastDesc */
+	descqp[0] |= __constant_cpu_to_le64(1ULL << 11);
+	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_INTREQ) {
+		/* SDmaIntReq */
+		descqp[0] |= __constant_cpu_to_le64(1ULL << 15);
+	}
+
+	/* Commit writes to memory and advance the tail on the chip */
+	wmb();
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, tail);
+
+	tx->txreq.next_descq_idx = tail;
+	tx->txreq.callback_status = IPATH_SDMA_TXREQ_S_OK;
+	dd->ipath_sdma_descq_tail = tail;
+	dd->ipath_sdma_descq_added += tx->txreq.sg_count;
+	list_add_tail(&tx->txreq.list, &dd->ipath_sdma_activelist);
+	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_VL15)
+		vl15_watchdog_enq(dd);
+
+unlock:
+	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
+fail:
+	return ret;
+}
diff --git a/drivers/infiniband/hw/ipath/ipath_srq.c b/drivers/infiniband/hw/ipath/ipath_srq.c
index f772102..e3d80ca 100644
--- a/drivers/infiniband/hw/ipath/ipath_srq.c
+++ b/drivers/infiniband/hw/ipath/ipath_srq.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -245,7 +245,8 @@
 						 sizeof(offset_addr));
 			if (ret)
 				goto bail_free;
-			udata->outbuf = (void __user *) offset_addr;
+			udata->outbuf =
+				(void __user *) (unsigned long) offset_addr;
 			ret = ib_copy_to_udata(udata, &offset,
 					       sizeof(offset));
 			if (ret)
diff --git a/drivers/infiniband/hw/ipath/ipath_stats.c b/drivers/infiniband/hw/ipath/ipath_stats.c
index d2725cd..c8e3d65 100644
--- a/drivers/infiniband/hw/ipath/ipath_stats.c
+++ b/drivers/infiniband/hw/ipath/ipath_stats.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -136,6 +136,7 @@
 	struct ipath_portdata *pd = dd->ipath_pd[0];
 	size_t blen = 0;
 	char buf[128];
+	u32 hdrqtail;
 
 	*buf = 0;
 	if (pd->port_hdrqfull != dd->ipath_p0_hdrqfull) {
@@ -174,17 +175,18 @@
 	if (blen)
 		ipath_dbg("%s\n", buf);
 
-	if (pd->port_head != (u32)
-	    le64_to_cpu(*dd->ipath_hdrqtailptr)) {
+	hdrqtail = ipath_get_hdrqtail(pd);
+	if (pd->port_head != hdrqtail) {
 		if (dd->ipath_lastport0rcv_cnt ==
 		    ipath_stats.sps_port0pkts) {
 			ipath_cdbg(PKT, "missing rcv interrupts? "
-				   "port0 hd=%llx tl=%x; port0pkts %llx\n",
-				   (unsigned long long)
-				   le64_to_cpu(*dd->ipath_hdrqtailptr),
-				   pd->port_head,
+				   "port0 hd=%x tl=%x; port0pkts %llx; write"
+				   " hd (w/intr)\n",
+				   pd->port_head, hdrqtail,
 				   (unsigned long long)
 				   ipath_stats.sps_port0pkts);
+			ipath_write_ureg(dd, ur_rcvhdrhead, hdrqtail |
+				dd->ipath_rhdrhead_intr_off, pd->port_port);
 		}
 		dd->ipath_lastport0rcv_cnt = ipath_stats.sps_port0pkts;
 	}
@@ -290,11 +292,11 @@
 	    && time_after(jiffies, dd->ipath_unmasktime)) {
 		char ebuf[256];
 		int iserr;
-		iserr = ipath_decode_err(ebuf, sizeof ebuf,
-			dd->ipath_maskederrs);
+		iserr = ipath_decode_err(dd, ebuf, sizeof ebuf,
+					 dd->ipath_maskederrs);
 		if (dd->ipath_maskederrs &
-				~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
-				INFINIPATH_E_PKTERRS ))
+		    ~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
+		      INFINIPATH_E_PKTERRS))
 			ipath_dev_err(dd, "Re-enabling masked errors "
 				      "(%s)\n", ebuf);
 		else {
@@ -306,17 +308,18 @@
 			 * level.
 			 */
 			if (iserr)
-					ipath_dbg("Re-enabling queue full errors (%s)\n",
-							ebuf);
+				ipath_dbg(
+					"Re-enabling queue full errors (%s)\n",
+					ebuf);
 			else
 				ipath_cdbg(ERRPKT, "Re-enabling packet"
-						" problem interrupt (%s)\n", ebuf);
+					" problem interrupt (%s)\n", ebuf);
 		}
 
 		/* re-enable masked errors */
 		dd->ipath_errormask |= dd->ipath_maskederrs;
 		ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
-			dd->ipath_errormask);
+				 dd->ipath_errormask);
 		dd->ipath_maskederrs = 0;
 	}
 
diff --git a/drivers/infiniband/hw/ipath/ipath_sysfs.c b/drivers/infiniband/hw/ipath/ipath_sysfs.c
index 56dfc8a..a6c8efb 100644
--- a/drivers/infiniband/hw/ipath/ipath_sysfs.c
+++ b/drivers/infiniband/hw/ipath/ipath_sysfs.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
  * Copyright (c) 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -34,6 +34,7 @@
 #include <linux/ctype.h>
 
 #include "ipath_kernel.h"
+#include "ipath_verbs.h"
 #include "ipath_common.h"
 
 /**
@@ -163,6 +164,15 @@
 	return scnprintf(buf, PAGE_SIZE, "%s", dd->ipath_boardversion);
 }
 
+static ssize_t show_localbus_info(struct device *dev,
+			       struct device_attribute *attr,
+			       char *buf)
+{
+	struct ipath_devdata *dd = dev_get_drvdata(dev);
+	/* The string printed here is already newline-terminated. */
+	return scnprintf(buf, PAGE_SIZE, "%s", dd->ipath_lbus_info);
+}
+
 static ssize_t show_lmc(struct device *dev,
 			struct device_attribute *attr,
 			char *buf)
@@ -311,6 +321,8 @@
 
 	dd->ipath_guid = new_guid;
 	dd->ipath_nguid = 1;
+	if (dd->verbs_dev)
+		dd->verbs_dev->ibdev.node_guid = new_guid;
 
 	ret = strlen(buf);
 	goto bail;
@@ -919,21 +931,21 @@
 	u16 val;
 
 	ret = ipath_parse_ushort(buf, &val);
-	if (ret < 0 || val > 1)
-		goto invalid;
-
-	r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_RXPOL_ENB, val);
-	if (r < 0) {
-		ret = r;
+	if (ret >= 0 && val > 1) {
+		ipath_dev_err(dd,
+			"attempt to set invalid Rx Polarity (enable)\n");
+		ret = -EINVAL;
 		goto bail;
 	}
 
-	goto bail;
-invalid:
-	ipath_dev_err(dd, "attempt to set invalid Rx Polarity (enable)\n");
+	r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_RXPOL_ENB, val);
+	if (r < 0)
+		ret = r;
+
 bail:
 	return ret;
 }
+
 /*
  * Get/Set RX lane-reversal enable. 0=no, 1=yes.
  */
@@ -988,6 +1000,75 @@
 	.attrs = driver_attributes
 };
 
+static ssize_t store_tempsense(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf,
+			       size_t count)
+{
+	struct ipath_devdata *dd = dev_get_drvdata(dev);
+	int ret, stat;
+	u16 val;
+
+	ret = ipath_parse_ushort(buf, &val);
+	if (ret <= 0) {
+		ipath_dev_err(dd, "attempt to set invalid tempsense config\n");
+		goto bail;
+	}
+	/* If anything but the highest limit, enable T_CRIT_A "interrupt" */
+	stat = ipath_tempsense_write(dd, 9, (val == 0x7f7f) ? 0x80 : 0);
+	if (stat) {
+		ipath_dev_err(dd, "Unable to set tempsense config\n");
+		ret = -EIO;
+		goto bail;
+	}
+	stat = ipath_tempsense_write(dd, 0xB, (u8) (val & 0xFF));
+	if (stat) {
+		ipath_dev_err(dd, "Unable to set local Tcrit\n");
+		ret = -EIO;
+		goto bail;
+	}
+	stat = ipath_tempsense_write(dd, 0xD, (u8) (val >> 8));
+	if (stat) {
+		ipath_dev_err(dd, "Unable to set remote Tcrit\n");
+		ret = -EIO;
+		goto bail;
+	}
+
+bail:
+	return ret;
+}
+
+/*
+ * dump tempsense regs, mostly in decimal, to ease shell-scripts.
+ */
+static ssize_t show_tempsense(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf)
+{
+	struct ipath_devdata *dd = dev_get_drvdata(dev);
+	int ret;
+	int idx;
+	u8 regvals[8];
+
+	ret = -ENXIO;
+	for (idx = 0; idx < 8; ++idx) {
+		if (idx == 6)
+			continue;
+		ret = ipath_tempsense_read(dd, idx);
+		if (ret < 0)
+			break;
+		regvals[idx] = ret;
+	}
+	if (idx == 8)
+		ret = scnprintf(buf, PAGE_SIZE, "%d %d %02X %02X %d %d\n",
+			*(signed char *)(regvals),
+			*(signed char *)(regvals + 1),
+			regvals[2], regvals[3],
+			*(signed char *)(regvals + 5),
+			*(signed char *)(regvals + 7));
+	return ret;
+}
+
 struct attribute_group *ipath_driver_attr_groups[] = {
 	&driver_attr_group,
 	NULL,
@@ -1011,10 +1092,13 @@
 static DEVICE_ATTR(rx_pol_inv, S_IWUSR, NULL, store_rx_pol_inv);
 static DEVICE_ATTR(led_override, S_IWUSR, NULL, store_led_override);
 static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL);
+static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL);
 static DEVICE_ATTR(jint_max_packets, S_IWUSR | S_IRUGO,
 		   show_jint_max_packets, store_jint_max_packets);
 static DEVICE_ATTR(jint_idle_ticks, S_IWUSR | S_IRUGO,
 		   show_jint_idle_ticks, store_jint_idle_ticks);
+static DEVICE_ATTR(tempsense, S_IWUSR | S_IRUGO,
+		   show_tempsense, store_tempsense);
 
 static struct attribute *dev_attributes[] = {
 	&dev_attr_guid.attr,
@@ -1034,6 +1118,8 @@
 	&dev_attr_rx_pol_inv.attr,
 	&dev_attr_led_override.attr,
 	&dev_attr_logged_errors.attr,
+	&dev_attr_tempsense.attr,
+	&dev_attr_localbus_info.attr,
 	NULL
 };
 
diff --git a/drivers/infiniband/hw/ipath/ipath_uc.c b/drivers/infiniband/hw/ipath/ipath_uc.c
index 2dd8de2..bfe8926 100644
--- a/drivers/infiniband/hw/ipath/ipath_uc.c
+++ b/drivers/infiniband/hw/ipath/ipath_uc.c
@@ -94,7 +94,7 @@
 				qp->s_state =
 					OP(SEND_ONLY_WITH_IMMEDIATE);
 				/* Immediate data comes after the BTH */
-				ohdr->u.imm_data = wqe->wr.imm_data;
+				ohdr->u.imm_data = wqe->wr.ex.imm_data;
 				hwords += 1;
 			}
 			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
@@ -123,7 +123,7 @@
 				qp->s_state =
 					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
 				/* Immediate data comes after the RETH */
-				ohdr->u.rc.imm_data = wqe->wr.imm_data;
+				ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
 				hwords += 1;
 				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
 					bth0 |= 1 << 23;
@@ -152,7 +152,7 @@
 		else {
 			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
 			/* Immediate data comes after the BTH */
-			ohdr->u.imm_data = wqe->wr.imm_data;
+			ohdr->u.imm_data = wqe->wr.ex.imm_data;
 			hwords += 1;
 		}
 		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
@@ -177,7 +177,7 @@
 			qp->s_state =
 				OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
 			/* Immediate data comes after the BTH */
-			ohdr->u.imm_data = wqe->wr.imm_data;
+			ohdr->u.imm_data = wqe->wr.ex.imm_data;
 			hwords += 1;
 			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
 				bth0 |= 1 << 23;
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
index de67eed..8b6a261 100644
--- a/drivers/infiniband/hw/ipath/ipath_ud.c
+++ b/drivers/infiniband/hw/ipath/ipath_ud.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -95,7 +95,7 @@
 
 	if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
 		wc.wc_flags = IB_WC_WITH_IMM;
-		wc.imm_data = swqe->wr.imm_data;
+		wc.imm_data = swqe->wr.ex.imm_data;
 	} else {
 		wc.wc_flags = 0;
 		wc.imm_data = 0;
@@ -303,6 +303,7 @@
 	qp->s_hdrwords = 7;
 	qp->s_cur_size = wqe->length;
 	qp->s_cur_sge = &qp->s_sge;
+	qp->s_dmult = ah_attr->static_rate;
 	qp->s_wqe = wqe;
 	qp->s_sge.sge = wqe->sg_list[0];
 	qp->s_sge.sg_list = wqe->sg_list + 1;
@@ -326,7 +327,7 @@
 	}
 	if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
 		qp->s_hdrwords++;
-		ohdr->u.ud.imm_data = wqe->wr.imm_data;
+		ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
 		bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
 	} else
 		bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
diff --git a/drivers/infiniband/hw/ipath/ipath_user_sdma.c b/drivers/infiniband/hw/ipath/ipath_user_sdma.c
new file mode 100644
index 0000000..86e0169
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_user_sdma.c
@@ -0,0 +1,879 @@
+/*
+ * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/uio.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+
+#include "ipath_kernel.h"
+#include "ipath_user_sdma.h"
+
+/* minimum size of header */
+#define IPATH_USER_SDMA_MIN_HEADER_LENGTH	64
+/* expected size of headers (for dma_pool) */
+#define IPATH_USER_SDMA_EXP_HEADER_LENGTH	64
+/* length mask in PBC (lower 11 bits) */
+#define IPATH_PBC_LENGTH_MASK			((1 << 11) - 1)
+
+struct ipath_user_sdma_pkt {
+	u8 naddr;		/* dimension of addr (1..4) ... */
+	u32 counter;		/* sdma pkts queued counter for this entry */
+	u64 added;		/* ipath_sdma_descq_added count when queued */
+
+	struct {
+		u32 offset;			/* offset for kvaddr, addr */
+		u32 length;			/* length in page */
+		u8  put_page;			/* should we put_page? */
+		u8  dma_mapped;			/* is page dma_mapped? */
+		struct page *page;		/* may be NULL (coherent mem) */
+		void *kvaddr;			/* FIXME: only for pio hack */
+		dma_addr_t addr;
+	} addr[4];   /* max pages, any more and we coalesce */
+	struct list_head list;	/* list element */
+};
+
+struct ipath_user_sdma_queue {
+	/*
+	 * pkts sent to the dma engine are queued on this
+	 * list head.  the elements of this list are
+	 * struct ipath_user_sdma_pkt...
+	 */
+	struct list_head sent;
+
+	/* headers with expected length are allocated from here... */
+	char header_cache_name[64];
+	struct dma_pool *header_cache;
+
+	/* packets are allocated from the slab cache... */
+	char pkt_slab_name[64];
+	struct kmem_cache *pkt_slab;
+
+	/* as packets are queued, they are counted... */
+	u32 counter;
+	u32 sent_counter;
+
+	/* dma page table */
+	struct rb_root dma_pages_root;
+
+	/* protect everything above... */
+	struct mutex lock;
+};
+
+struct ipath_user_sdma_queue *
+ipath_user_sdma_queue_create(struct device *dev, int unit, int port, int sport)
+{
+	struct ipath_user_sdma_queue *pq =
+		kmalloc(sizeof(struct ipath_user_sdma_queue), GFP_KERNEL);
+
+	if (!pq)
+		goto done;
+
+	pq->counter = 0;
+	pq->sent_counter = 0;
+	INIT_LIST_HEAD(&pq->sent);
+
+	mutex_init(&pq->lock);
+
+	snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
+		 "ipath-user-sdma-pkts-%u-%02u.%02u", unit, port, sport);
+	pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name,
+					 sizeof(struct ipath_user_sdma_pkt),
+					 0, 0, NULL);
+
+	if (!pq->pkt_slab)
+		goto err_kfree;
+
+	snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
+		 "ipath-user-sdma-headers-%u-%02u.%02u", unit, port, sport);
+	pq->header_cache = dma_pool_create(pq->header_cache_name,
+					   dev,
+					   IPATH_USER_SDMA_EXP_HEADER_LENGTH,
+					   4, 0);
+	if (!pq->header_cache)
+		goto err_slab;
+
+	pq->dma_pages_root = RB_ROOT;
+
+	goto done;
+
+err_slab:
+	kmem_cache_destroy(pq->pkt_slab);
+err_kfree:
+	kfree(pq);
+	pq = NULL;
+
+done:
+	return pq;
+}
+
+static void ipath_user_sdma_init_frag(struct ipath_user_sdma_pkt *pkt,
+				      int i, size_t offset, size_t len,
+				      int put_page, int dma_mapped,
+				      struct page *page,
+				      void *kvaddr, dma_addr_t dma_addr)
+{
+	pkt->addr[i].offset = offset;
+	pkt->addr[i].length = len;
+	pkt->addr[i].put_page = put_page;
+	pkt->addr[i].dma_mapped = dma_mapped;
+	pkt->addr[i].page = page;
+	pkt->addr[i].kvaddr = kvaddr;
+	pkt->addr[i].addr = dma_addr;
+}
+
+static void ipath_user_sdma_init_header(struct ipath_user_sdma_pkt *pkt,
+					u32 counter, size_t offset,
+					size_t len, int dma_mapped,
+					struct page *page,
+					void *kvaddr, dma_addr_t dma_addr)
+{
+	pkt->naddr = 1;
+	pkt->counter = counter;
+	ipath_user_sdma_init_frag(pkt, 0, offset, len, 0, dma_mapped, page,
+				  kvaddr, dma_addr);
+}
+
+/* we have too many pages in the iovec, coalesce to a single page */
+static int ipath_user_sdma_coalesce(const struct ipath_devdata *dd,
+				    struct ipath_user_sdma_pkt *pkt,
+				    const struct iovec *iov,
+				    unsigned long niov)
+{
+	int ret = 0;
+	struct page *page = alloc_page(GFP_KERNEL);
+	void *mpage_save;
+	char *mpage;
+	int i;
+	int len = 0;
+	dma_addr_t dma_addr;
+
+	if (!page) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	mpage = kmap(page);
+	mpage_save = mpage;
+	for (i = 0; i < niov; i++) {
+		int cfur;
+
+		cfur = copy_from_user(mpage,
+				      iov[i].iov_base, iov[i].iov_len);
+		if (cfur) {
+			ret = -EFAULT;
+			goto free_unmap;
+		}
+
+		mpage += iov[i].iov_len;
+		len += iov[i].iov_len;
+	}
+
+	dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
+				DMA_TO_DEVICE);
+	if (dma_mapping_error(dma_addr)) {
+		ret = -ENOMEM;
+		goto free_unmap;
+	}
+
+	ipath_user_sdma_init_frag(pkt, 1, 0, len, 0, 1, page, mpage_save,
+				  dma_addr);
+	pkt->naddr = 2;
+
+	goto done;
+
+free_unmap:
+	kunmap(page);
+	__free_page(page);
+done:
+	return ret;
+}
+
+/* how many pages in this iovec element? */
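+/* (e.g. a 16 byte iovec starting 8 bytes before a page boundary spans 2) */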
+static int ipath_user_sdma_num_pages(const struct iovec *iov)
+{
+	const unsigned long addr  = (unsigned long) iov->iov_base;
+	const unsigned long  len  = iov->iov_len;
+	const unsigned long spage = addr & PAGE_MASK;
+	const unsigned long epage = (addr + len - 1) & PAGE_MASK;
+
+	return 1 + ((epage - spage) >> PAGE_SHIFT);
+}
+
+/* truncate length to page boundary */
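+/* (with 4 KB pages, e.g. 0x40 bytes at page offset 0xff0 truncate to 0x10) */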
+static int ipath_user_sdma_page_length(unsigned long addr, unsigned long len)
+{
+	const unsigned long offset = addr & ~PAGE_MASK;
+
+	return ((offset + len) > PAGE_SIZE) ? (PAGE_SIZE - offset) : len;
+}
+
+static void ipath_user_sdma_free_pkt_frag(struct device *dev,
+					  struct ipath_user_sdma_queue *pq,
+					  struct ipath_user_sdma_pkt *pkt,
+					  int frag)
+{
+	const int i = frag;
+
+	if (pkt->addr[i].page) {
+		if (pkt->addr[i].dma_mapped)
+			dma_unmap_page(dev,
+				       pkt->addr[i].addr,
+				       pkt->addr[i].length,
+				       DMA_TO_DEVICE);
+
+		if (pkt->addr[i].kvaddr)
+			kunmap(pkt->addr[i].page);
+
+		if (pkt->addr[i].put_page)
+			put_page(pkt->addr[i].page);
+		else
+			__free_page(pkt->addr[i].page);
+	} else if (pkt->addr[i].kvaddr)
+		/* free coherent mem from cache... */
+		dma_pool_free(pq->header_cache,
+			      pkt->addr[i].kvaddr, pkt->addr[i].addr);
+}
+
+/* return number of pages pinned... */
+static int ipath_user_sdma_pin_pages(const struct ipath_devdata *dd,
+				     struct ipath_user_sdma_pkt *pkt,
+				     unsigned long addr, int tlen, int npages)
+{
+	struct page *pages[2];
+	int j;
+	int ret;
+
+	ret = get_user_pages(current, current->mm, addr,
+			     npages, 0, 1, pages, NULL);
+
+	if (ret != npages) {
+		int i;
+
+		for (i = 0; i < ret; i++)
+			put_page(pages[i]);
+
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	for (j = 0; j < npages; j++) {
+		/* map the pages... */
+		const int flen =
+			ipath_user_sdma_page_length(addr, tlen);
+		dma_addr_t dma_addr =
+			dma_map_page(&dd->pcidev->dev,
+				     pages[j], 0, flen, DMA_TO_DEVICE);
+		unsigned long fofs = addr & ~PAGE_MASK;
+
+		if (dma_mapping_error(dma_addr)) {
+			ret = -ENOMEM;
+			goto done;
+		}
+
+		ipath_user_sdma_init_frag(pkt, pkt->naddr, fofs, flen, 1, 1,
+					  pages[j], kmap(pages[j]),
+					  dma_addr);
+
+		pkt->naddr++;
+		addr += flen;
+		tlen -= flen;
+	}
+
+done:
+	return ret;
+}
+
+static int ipath_user_sdma_pin_pkt(const struct ipath_devdata *dd,
+				   struct ipath_user_sdma_queue *pq,
+				   struct ipath_user_sdma_pkt *pkt,
+				   const struct iovec *iov,
+				   unsigned long niov)
+{
+	int ret = 0;
+	unsigned long idx;
+
+	for (idx = 0; idx < niov; idx++) {
+		const int npages = ipath_user_sdma_num_pages(iov + idx);
+		const unsigned long addr = (unsigned long) iov[idx].iov_base;
+
+		ret = ipath_user_sdma_pin_pages(dd, pkt,
+						addr, iov[idx].iov_len,
+						npages);
+		if (ret < 0)
+			goto free_pkt;
+	}
+
+	goto done;
+
+free_pkt:
+	for (idx = 0; idx < pkt->naddr; idx++)
+		ipath_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);
+
+done:
+	return ret;
+}
+
+static int ipath_user_sdma_init_payload(const struct ipath_devdata *dd,
+					struct ipath_user_sdma_queue *pq,
+					struct ipath_user_sdma_pkt *pkt,
+					const struct iovec *iov,
+					unsigned long niov, int npages)
+{
+	int ret = 0;
+
+	if (npages >= ARRAY_SIZE(pkt->addr))
+		ret = ipath_user_sdma_coalesce(dd, pkt, iov, niov);
+	else
+		ret = ipath_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);
+
+	return ret;
+}
+
+/* free a packet list */
+static void ipath_user_sdma_free_pkt_list(struct device *dev,
+					  struct ipath_user_sdma_queue *pq,
+					  struct list_head *list)
+{
+	struct ipath_user_sdma_pkt *pkt, *pkt_next;
+
+	list_for_each_entry_safe(pkt, pkt_next, list, list) {
+		int i;
+
+		for (i = 0; i < pkt->naddr; i++)
+			ipath_user_sdma_free_pkt_frag(dev, pq, pkt, i);
+
+		kmem_cache_free(pq->pkt_slab, pkt);
+	}
+}
+
+/*
+ * copy headers, coalesce etc -- pq->lock must be held
+ *
+ * we queue all the packets on list, returning the number of
+ * iovec elements consumed.  list must be empty initially, since
+ * we clean it if there is an error...
+ */
+static int ipath_user_sdma_queue_pkts(const struct ipath_devdata *dd,
+				      struct ipath_user_sdma_queue *pq,
+				      struct list_head *list,
+				      const struct iovec *iov,
+				      unsigned long niov,
+				      int maxpkts)
+{
+	unsigned long idx = 0;
+	int ret = 0;
+	int npkts = 0;
+	struct page *page = NULL;
+	__le32 *pbc;
+	dma_addr_t dma_addr;
+	struct ipath_user_sdma_pkt *pkt = NULL;
+	size_t len;
+	size_t nw;
+	u32 counter = pq->counter;
+	int dma_mapped = 0;
+
+	while (idx < niov && npkts < maxpkts) {
+		const unsigned long addr = (unsigned long) iov[idx].iov_base;
+		const unsigned long idx_save = idx;
+		unsigned pktnw;
+		unsigned pktnwc;
+		int nfrags = 0;
+		int npages = 0;
+		int cfur;
+
+		dma_mapped = 0;
+		len = iov[idx].iov_len;
+		nw = len >> 2;
+		page = NULL;
+
+		pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
+		if (!pkt) {
+			ret = -ENOMEM;
+			goto free_list;
+		}
+
+		if (len < IPATH_USER_SDMA_MIN_HEADER_LENGTH ||
+		    len > PAGE_SIZE || len & 3 || addr & 3) {
+			ret = -EINVAL;
+			goto free_pkt;
+		}
+
+		if (len == IPATH_USER_SDMA_EXP_HEADER_LENGTH)
+			pbc = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
+					     &dma_addr);
+		else
+			pbc = NULL;
+
+		if (!pbc) {
+			page = alloc_page(GFP_KERNEL);
+			if (!page) {
+				ret = -ENOMEM;
+				goto free_pkt;
+			}
+			pbc = kmap(page);
+		}
+
+		cfur = copy_from_user(pbc, iov[idx].iov_base, len);
+		if (cfur) {
+			ret = -EFAULT;
+			goto free_pbc;
+		}
+
+		/*
+		 * this assignment is a bit strange.  it's because the
+		 * pbc counts the number of 32 bit words in the full
+		 * packet _except_ the first word of the pbc itself...
+		 */
+		pktnwc = nw - 1;
+
+		/*
+		 * pktnw computation yields the number of 32 bit words
+		 * that the caller has indicated in the PBC.  note that
+		 * this is one less than the total number of words that
+		 * goes to the send DMA engine as the first 32 bit word
+		 * of the PBC itself is not counted.  Armed with this count,
+		 * we can verify that the packet is consistent with the
+		 * iovec lengths.
+		 */
+		pktnw = le32_to_cpu(*pbc) & IPATH_PBC_LENGTH_MASK;
+		if (pktnw < pktnwc || pktnw > pktnwc + (PAGE_SIZE >> 2)) {
+			ret = -EINVAL;
+			goto free_pbc;
+		}
+
+		idx++;
+		while (pktnwc < pktnw && idx < niov) {
+			const size_t slen = iov[idx].iov_len;
+			const unsigned long faddr =
+				(unsigned long) iov[idx].iov_base;
+
+			if (slen & 3 || faddr & 3 || !slen ||
+			    slen > PAGE_SIZE) {
+				ret = -EINVAL;
+				goto free_pbc;
+			}
+
+			npages++;
+			if ((faddr & PAGE_MASK) !=
+			    ((faddr + slen - 1) & PAGE_MASK))
+				npages++;
+
+			pktnwc += slen >> 2;
+			idx++;
+			nfrags++;
+		}
+
+		if (pktnwc != pktnw) {
+			ret = -EINVAL;
+			goto free_pbc;
+		}
+
+		if (page) {
+			dma_addr = dma_map_page(&dd->pcidev->dev,
+						page, 0, len, DMA_TO_DEVICE);
+			if (dma_mapping_error(dma_addr)) {
+				ret = -ENOMEM;
+				goto free_pbc;
+			}
+
+			dma_mapped = 1;
+		}
+
+		ipath_user_sdma_init_header(pkt, counter, 0, len, dma_mapped,
+					    page, pbc, dma_addr);
+
+		if (nfrags) {
+			ret = ipath_user_sdma_init_payload(dd, pq, pkt,
+							   iov + idx_save + 1,
+							   nfrags, npages);
+			if (ret < 0)
+				goto free_pbc_dma;
+		}
+
+		counter++;
+		npkts++;
+
+		list_add_tail(&pkt->list, list);
+	}
+
+	ret = idx;
+	goto done;
+
+free_pbc_dma:
+	if (dma_mapped)
+		dma_unmap_page(&dd->pcidev->dev, dma_addr, len, DMA_TO_DEVICE);
+free_pbc:
+	if (page) {
+		kunmap(page);
+		__free_page(page);
+	} else
+		dma_pool_free(pq->header_cache, pbc, dma_addr);
+free_pkt:
+	kmem_cache_free(pq->pkt_slab, pkt);
+free_list:
+	ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
+done:
+	return ret;
+}
+
+static void ipath_user_sdma_set_complete_counter(struct ipath_user_sdma_queue *pq,
+						 u32 c)
+{
+	pq->sent_counter = c;
+}
+
+/* try to clean out queue -- needs pq->lock */
+static int ipath_user_sdma_queue_clean(const struct ipath_devdata *dd,
+				       struct ipath_user_sdma_queue *pq)
+{
+	struct list_head free_list;
+	struct ipath_user_sdma_pkt *pkt;
+	struct ipath_user_sdma_pkt *pkt_prev;
+	int ret = 0;
+
+	INIT_LIST_HEAD(&free_list);
+
+	list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
+		s64 descd = dd->ipath_sdma_descq_removed - pkt->added;
+
+		if (descd < 0)
+			break;
+
+		list_move_tail(&pkt->list, &free_list);
+
+		/* one more packet cleaned */
+		ret++;
+	}
+
+	if (!list_empty(&free_list)) {
+		u32 counter;
+
+		pkt = list_entry(free_list.prev,
+				 struct ipath_user_sdma_pkt, list);
+		counter = pkt->counter;
+
+		ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
+		ipath_user_sdma_set_complete_counter(pq, counter);
+	}
+
+	return ret;
+}
+
+void ipath_user_sdma_queue_destroy(struct ipath_user_sdma_queue *pq)
+{
+	if (!pq)
+		return;
+
+	kmem_cache_destroy(pq->pkt_slab);
+	dma_pool_destroy(pq->header_cache);
+	kfree(pq);
+}
+
+/* clean descriptor queue, returns > 0 if some elements cleaned */
+static int ipath_user_sdma_hwqueue_clean(struct ipath_devdata *dd)
+{
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
+	ret = ipath_sdma_make_progress(dd);
+	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
+
+	return ret;
+}
+
+/* we're in close, drain packets so that we can clean up successfully... */
+void ipath_user_sdma_queue_drain(struct ipath_devdata *dd,
+				 struct ipath_user_sdma_queue *pq)
+{
+	int i;
+
+	if (!pq)
+		return;
+
+	for (i = 0; i < 100; i++) {
+		mutex_lock(&pq->lock);
+		if (list_empty(&pq->sent)) {
+			mutex_unlock(&pq->lock);
+			break;
+		}
+		ipath_user_sdma_hwqueue_clean(dd);
+		ipath_user_sdma_queue_clean(dd, pq);
+		mutex_unlock(&pq->lock);
+		msleep(10);
+	}
+
+	if (!list_empty(&pq->sent)) {
+		struct list_head free_list;
+
+		printk(KERN_INFO "drain: lists not empty: forcing!\n");
+		INIT_LIST_HEAD(&free_list);
+		mutex_lock(&pq->lock);
+		list_splice_init(&pq->sent, &free_list);
+		ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
+		mutex_unlock(&pq->lock);
+	}
+}
+
+static inline __le64 ipath_sdma_make_desc0(struct ipath_devdata *dd,
+					   u64 addr, u64 dwlen, u64 dwoffset)
+{
+	return cpu_to_le64(/* SDmaPhyAddr[31:0] */
+			   ((addr & 0xfffffffcULL) << 32) |
+			   /* SDmaGeneration[1:0] */
+			   ((dd->ipath_sdma_generation & 3ULL) << 30) |
+			   /* SDmaDwordCount[10:0] */
+			   ((dwlen & 0x7ffULL) << 16) |
+			   /* SDmaBufOffset[12:2] */
+			   (dwoffset & 0x7ffULL));
+}
+
+static inline __le64 ipath_sdma_make_first_desc0(__le64 descq)
+{
+	return descq | __constant_cpu_to_le64(1ULL << 12);
+}
+
+static inline __le64 ipath_sdma_make_last_desc0(__le64 descq)
+{
+					      /* last */  /* dma head */
+	return descq | __constant_cpu_to_le64(1ULL << 11 | 1ULL << 13);
+}
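+/*
+ * Bit 13 ("dma head") evidently requests the head write-back to host
+ * memory at the SendDmaHeadAddr address set up in setup_sdma(); see
+ * also the IPATH_SDMA_TXREQ_F_HEADTOHOST flag.
+ */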
+
+static inline __le64 ipath_sdma_make_desc1(u64 addr)
+{
+	/* SDmaPhyAddr[47:32] */
+	return cpu_to_le64(addr >> 32);
+}
+
+static void ipath_user_sdma_send_frag(struct ipath_devdata *dd,
+				      struct ipath_user_sdma_pkt *pkt, int idx,
+				      unsigned ofs, u16 tail)
+{
+	const u64 addr = (u64) pkt->addr[idx].addr +
+		(u64) pkt->addr[idx].offset;
+	const u64 dwlen = (u64) pkt->addr[idx].length / 4;
+	__le64 *descqp;
+	__le64 descq0;
+
+	descqp = &dd->ipath_sdma_descq[tail].qw[0];
+
+	descq0 = ipath_sdma_make_desc0(dd, addr, dwlen, ofs);
+	if (idx == 0)
+		descq0 = ipath_sdma_make_first_desc0(descq0);
+	if (idx == pkt->naddr - 1)
+		descq0 = ipath_sdma_make_last_desc0(descq0);
+
+	descqp[0] = descq0;
+	descqp[1] = ipath_sdma_make_desc1(addr);
+}
+
+/* pq->lock must be held, get packets on the wire... */
+static int ipath_user_sdma_push_pkts(struct ipath_devdata *dd,
+				     struct ipath_user_sdma_queue *pq,
+				     struct list_head *pktlist)
+{
+	int ret = 0;
+	unsigned long flags;
+	u16 tail;
+
+	if (list_empty(pktlist))
+		return 0;
+
+	if (unlikely(!(dd->ipath_flags & IPATH_LINKACTIVE)))
+		return -ECOMM;
+
+	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
+
+	if (unlikely(dd->ipath_sdma_status & IPATH_SDMA_ABORT_MASK)) {
+		ret = -ECOMM;
+		goto unlock;
+	}
+
+	tail = dd->ipath_sdma_descq_tail;
+	while (!list_empty(pktlist)) {
+		struct ipath_user_sdma_pkt *pkt =
+			list_entry(pktlist->next, struct ipath_user_sdma_pkt,
+				   list);
+		int i;
+		unsigned ofs = 0;
+		u16 dtail = tail;
+
+		if (pkt->naddr > ipath_sdma_descq_freecnt(dd))
+			goto unlock_check_tail;
+
+		for (i = 0; i < pkt->naddr; i++) {
+			ipath_user_sdma_send_frag(dd, pkt, i, ofs, tail);
+			ofs += pkt->addr[i].length >> 2;
+
+			if (++tail == dd->ipath_sdma_descq_cnt) {
+				tail = 0;
+				++dd->ipath_sdma_generation;
+			}
+		}
+
+		if ((ofs<<2) > dd->ipath_ibmaxlen) {
+			ipath_dbg("packet size %X > ibmax %X, fail\n",
+				ofs<<2, dd->ipath_ibmaxlen);
+			ret = -EMSGSIZE;
+			goto unlock;
+		}
+
+		/*
+		 * if the packet is >= 2KB mtu equivalent, we have to use
+		 * the large buffers, and have to mark each descriptor as
+		 * part of a large buffer packet.
+		 */
+		if (ofs >= IPATH_SMALLBUF_DWORDS) {
+			for (i = 0; i < pkt->naddr; i++) {
+				dd->ipath_sdma_descq[dtail].qw[0] |=
+					__constant_cpu_to_le64(1ULL << 14);
+				if (++dtail == dd->ipath_sdma_descq_cnt)
+					dtail = 0;
+			}
+		}
+
+		dd->ipath_sdma_descq_added += pkt->naddr;
+		pkt->added = dd->ipath_sdma_descq_added;
+		list_move_tail(&pkt->list, &pq->sent);
+		ret++;
+	}
+
+unlock_check_tail:
+	/* advance the tail on the chip if necessary */
+	if (dd->ipath_sdma_descq_tail != tail) {
+		wmb();
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, tail);
+		dd->ipath_sdma_descq_tail = tail;
+	}
+
+unlock:
+	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
+
+	return ret;
+}
+
+int ipath_user_sdma_writev(struct ipath_devdata *dd,
+			   struct ipath_user_sdma_queue *pq,
+			   const struct iovec *iov,
+			   unsigned long dim)
+{
+	int ret = 0;
+	struct list_head list;
+	int npkts = 0;
+
+	INIT_LIST_HEAD(&list);
+
+	mutex_lock(&pq->lock);
+
+	if (dd->ipath_sdma_descq_added != dd->ipath_sdma_descq_removed) {
+		ipath_user_sdma_hwqueue_clean(dd);
+		ipath_user_sdma_queue_clean(dd, pq);
+	}
+
+	while (dim) {
+		const int mxp = 8;
+
+		down_write(&current->mm->mmap_sem);
+		ret = ipath_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);
+		up_write(&current->mm->mmap_sem);
+
+		if (ret <= 0)
+			goto done_unlock;
+		else {
+			dim -= ret;
+			iov += ret;
+		}
+
+		/* force packets onto the sdma hw queue... */
+		if (!list_empty(&list)) {
+			/*
+			 * lazily clean hw queue.  the 4 is a guess of about
+			 * how many sdma descriptors a packet will take (it
+			 * doesn't have to be perfect).
+			 */
+			if (ipath_sdma_descq_freecnt(dd) < ret * 4) {
+				ipath_user_sdma_hwqueue_clean(dd);
+				ipath_user_sdma_queue_clean(dd, pq);
+			}
+
+			ret = ipath_user_sdma_push_pkts(dd, pq, &list);
+			if (ret < 0)
+				goto done_unlock;
+			else {
+				npkts += ret;
+				pq->counter += ret;
+
+				if (!list_empty(&list))
+					goto done_unlock;
+			}
+		}
+	}
+
+done_unlock:
+	if (!list_empty(&list))
+		ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
+	mutex_unlock(&pq->lock);
+
+	return (ret < 0) ? ret : npkts;
+}
+
+int ipath_user_sdma_make_progress(struct ipath_devdata *dd,
+				  struct ipath_user_sdma_queue *pq)
+{
+	int ret = 0;
+
+	mutex_lock(&pq->lock);
+	ipath_user_sdma_hwqueue_clean(dd);
+	ret = ipath_user_sdma_queue_clean(dd, pq);
+	mutex_unlock(&pq->lock);
+
+	return ret;
+}
+
+u32 ipath_user_sdma_complete_counter(const struct ipath_user_sdma_queue *pq)
+{
+	return pq->sent_counter;
+}
+
+u32 ipath_user_sdma_inflight_counter(struct ipath_user_sdma_queue *pq)
+{
+	return pq->counter;
+}
diff --git a/drivers/infiniband/hw/ipath/ipath_user_sdma.h b/drivers/infiniband/hw/ipath/ipath_user_sdma.h
new file mode 100644
index 0000000..e70946c
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_user_sdma.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/device.h>
+
+struct ipath_user_sdma_queue;
+
+struct ipath_user_sdma_queue *
+ipath_user_sdma_queue_create(struct device *dev, int unit, int port, int sport);
+void ipath_user_sdma_queue_destroy(struct ipath_user_sdma_queue *pq);
+
+int ipath_user_sdma_writev(struct ipath_devdata *dd,
+			   struct ipath_user_sdma_queue *pq,
+			   const struct iovec *iov,
+			   unsigned long dim);
+
+int ipath_user_sdma_make_progress(struct ipath_devdata *dd,
+				  struct ipath_user_sdma_queue *pq);
+
+int ipath_user_sdma_pkt_sent(const struct ipath_user_sdma_queue *pq,
+			     u32 counter);
+void ipath_user_sdma_queue_drain(struct ipath_devdata *dd,
+				 struct ipath_user_sdma_queue *pq);
+
+u32 ipath_user_sdma_complete_counter(const struct ipath_user_sdma_queue *pq);
+u32 ipath_user_sdma_inflight_counter(struct ipath_user_sdma_queue *pq);
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 32d8f88..320a6d0 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -242,6 +242,93 @@
 	ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
 }
 
+/*
+ * Count the number of DMA descriptors needed to send length bytes of data.
+ * Don't modify the ipath_sge_state to get the count.
+ * Return zero if any of the segments is not aligned.
+ */
+static u32 ipath_count_sge(struct ipath_sge_state *ss, u32 length)
+{
+	struct ipath_sge *sg_list = ss->sg_list;
+	struct ipath_sge sge = ss->sge;
+	u8 num_sge = ss->num_sge;
+	u32 ndesc = 1;	/* count the header */
+
+	while (length) {
+		u32 len = sge.length;
+
+		if (len > length)
+			len = length;
+		if (len > sge.sge_length)
+			len = sge.sge_length;
+		BUG_ON(len == 0);
+		if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
+		    (len != length && (len & (sizeof(u32) - 1)))) {
+			ndesc = 0;
+			break;
+		}
+		ndesc++;
+		sge.vaddr += len;
+		sge.length -= len;
+		sge.sge_length -= len;
+		if (sge.sge_length == 0) {
+			if (--num_sge)
+				sge = *sg_list++;
+		} else if (sge.length == 0 && sge.mr != NULL) {
+			if (++sge.n >= IPATH_SEGSZ) {
+				if (++sge.m >= sge.mr->mapsz)
+					break;
+				sge.n = 0;
+			}
+			sge.vaddr =
+				sge.mr->map[sge.m]->segs[sge.n].vaddr;
+			sge.length =
+				sge.mr->map[sge.m]->segs[sge.n].length;
+		}
+		length -= len;
+	}
+	return ndesc;
+}
+
+/*
+ * Copy from the SGEs to the data buffer.
+ */
+static void ipath_copy_from_sge(void *data, struct ipath_sge_state *ss,
+				u32 length)
+{
+	struct ipath_sge *sge = &ss->sge;
+
+	while (length) {
+		u32 len = sge->length;
+
+		if (len > length)
+			len = length;
+		if (len > sge->sge_length)
+			len = sge->sge_length;
+		BUG_ON(len == 0);
+		memcpy(data, sge->vaddr, len);
+		sge->vaddr += len;
+		sge->length -= len;
+		sge->sge_length -= len;
+		if (sge->sge_length == 0) {
+			if (--ss->num_sge)
+				*sge = *ss->sg_list++;
+		} else if (sge->length == 0 && sge->mr != NULL) {
+			if (++sge->n >= IPATH_SEGSZ) {
+				if (++sge->m >= sge->mr->mapsz)
+					break;
+				sge->n = 0;
+			}
+			sge->vaddr =
+				sge->mr->map[sge->m]->segs[sge->n].vaddr;
+			sge->length =
+				sge->mr->map[sge->m]->segs[sge->n].length;
+		}
+		data += len;
+		length -= len;
+	}
+}
+
 /**
  * ipath_post_one_send - post one RC, UC, or UD send work request
  * @qp: the QP to post on
@@ -866,27 +953,257 @@
 		__raw_writel(last, piobuf);
 }
 
-static int ipath_verbs_send_pio(struct ipath_qp *qp, u32 *hdr, u32 hdrwords,
+/*
+ * Convert IB rate to delay multiplier.
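+ * (The multiplier is relative to the fastest supported rate, 4X DDR;
+ * one tick at 4xDDR is 8 ticks at 1xSDR.)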
+ */
+unsigned ipath_ib_rate_to_mult(enum ib_rate rate)
+{
+	switch (rate) {
+	case IB_RATE_2_5_GBPS: return 8;
+	case IB_RATE_5_GBPS:   return 4;
+	case IB_RATE_10_GBPS:  return 2;
+	case IB_RATE_20_GBPS:  return 1;
+	default:	       return 0;
+	}
+}
+
+/*
+ * Convert delay multiplier to IB rate
+ */
+static enum ib_rate ipath_mult_to_ib_rate(unsigned mult)
+{
+	switch (mult) {
+	case 8:  return IB_RATE_2_5_GBPS;
+	case 4:  return IB_RATE_5_GBPS;
+	case 2:  return IB_RATE_10_GBPS;
+	case 1:  return IB_RATE_20_GBPS;
+	default: return IB_RATE_PORT_CURRENT;
+	}
+}
+
+static inline struct ipath_verbs_txreq *get_txreq(struct ipath_ibdev *dev)
+{
+	struct ipath_verbs_txreq *tx = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->pending_lock, flags);
+	if (!list_empty(&dev->txreq_free)) {
+		struct list_head *l = dev->txreq_free.next;
+
+		list_del(l);
+		tx = list_entry(l, struct ipath_verbs_txreq, txreq.list);
+	}
+	spin_unlock_irqrestore(&dev->pending_lock, flags);
+	return tx;
+}
+
+static inline void put_txreq(struct ipath_ibdev *dev,
+			     struct ipath_verbs_txreq *tx)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->pending_lock, flags);
+	list_add(&tx->txreq.list, &dev->txreq_free);
+	spin_unlock_irqrestore(&dev->pending_lock, flags);
+}
+
+static void sdma_complete(void *cookie, int status)
+{
+	struct ipath_verbs_txreq *tx = cookie;
+	struct ipath_qp *qp = tx->qp;
+	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
+
+	/* Generate a completion queue entry if needed */
+	if (qp->ibqp.qp_type != IB_QPT_RC && tx->wqe) {
+		enum ib_wc_status ibs = status == IPATH_SDMA_TXREQ_S_OK ?
+			IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR;
+
+		ipath_send_complete(qp, tx->wqe, ibs);
+	}
+
+	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
+		kfree(tx->txreq.map_addr);
+	put_txreq(dev, tx);
+
+	if (atomic_dec_and_test(&qp->refcount))
+		wake_up(&qp->wait);
+}
+
+/*
+ * Compute the number of clock cycles of delay before sending the next packet.
+ * The multipliers reflect the number of clocks for the fastest rate so
+ * one tick at 4xDDR is 8 ticks at 1xSDR.
+ * If the destination port will take longer to receive a packet than
+ * the outgoing link can send it, we need to delay sending the next packet
+ * by the difference in time it takes the receiver to receive and the sender
+ * to send this packet.
+ * Note that this delay is always correct for UC and RC but not always
+ * optimal for UD. For UD, the destination HCA can be different for each
+ * packet, in which case, we could send packets to a different destination
+ * while "waiting" for the delay. The overhead for doing this without
+ * HW support is more than just paying the cost of delaying some packets
+ * unnecessarily.
+ */
+static inline unsigned ipath_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult)
+{
+	return (rcv_mult > snd_mult) ?
+		(plen * (rcv_mult - snd_mult) + 1) >> 1 : 0;
+}
+
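+/*
+ * Example: a 512-dword packet sent from a 4x DDR port (snd_mult 1) to
+ * a 1x SDR destination (rcv_mult 8) gets (512 * 7 + 1) >> 1 = 1792
+ * delay ticks, i.e. half a fastest-rate tick per dword per unit of
+ * multiplier mismatch, rounded up.
+ */
+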
+static int ipath_verbs_send_dma(struct ipath_qp *qp,
+				struct ipath_ib_header *hdr, u32 hdrwords,
+				struct ipath_sge_state *ss, u32 len,
+				u32 plen, u32 dwords)
+{
+	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
+	struct ipath_devdata *dd = dev->dd;
+	struct ipath_verbs_txreq *tx;
+	u32 *piobuf;
+	u32 control;
+	u32 ndesc;
+	int ret;
+
+	tx = qp->s_tx;
+	if (tx) {
+		qp->s_tx = NULL;
+		/* resend previously constructed packet */
+		ret = ipath_sdma_verbs_send(dd, tx->ss, tx->len, tx);
+		if (ret)
+			qp->s_tx = tx;
+		goto bail;
+	}
+
+	tx = get_txreq(dev);
+	if (!tx) {
+		ret = -EBUSY;
+		goto bail;
+	}
+
+	/*
+	 * Get the saved delay count we computed for the previous packet
+	 * and save the delay count for this packet to be used next time
+	 * we get here.
+	 */
+	control = qp->s_pkt_delay;
+	qp->s_pkt_delay = ipath_pkt_delay(plen, dd->delay_mult, qp->s_dmult);
+
+	tx->qp = qp;
+	atomic_inc(&qp->refcount);
+	tx->wqe = qp->s_wqe;
+	tx->txreq.callback = sdma_complete;
+	tx->txreq.callback_cookie = tx;
+	tx->txreq.flags = IPATH_SDMA_TXREQ_F_HEADTOHOST |
+		IPATH_SDMA_TXREQ_F_INTREQ | IPATH_SDMA_TXREQ_F_FREEDESC;
+	if (plen + 1 >= IPATH_SMALLBUF_DWORDS)
+		tx->txreq.flags |= IPATH_SDMA_TXREQ_F_USELARGEBUF;
+
+	/* VL15 packets bypass credit check */
+	if ((be16_to_cpu(hdr->lrh[0]) >> 12) == 15) {
+		control |= 1ULL << 31;
+		tx->txreq.flags |= IPATH_SDMA_TXREQ_F_VL15;
+	}
+
+	if (len) {
+		/*
+		 * Don't try to DMA if it takes more descriptors than
+		 * the queue holds.
+		 */
+		ndesc = ipath_count_sge(ss, len);
+		if (ndesc >= dd->ipath_sdma_descq_cnt)
+			ndesc = 0;
+	} else
+		ndesc = 1;
+	if (ndesc) {
+		tx->hdr.pbc[0] = cpu_to_le32(plen);
+		tx->hdr.pbc[1] = cpu_to_le32(control);
+		memcpy(&tx->hdr.hdr, hdr, hdrwords << 2);
+		tx->txreq.sg_count = ndesc;
+		tx->map_len = (hdrwords + 2) << 2;
+		tx->txreq.map_addr = &tx->hdr;
+		ret = ipath_sdma_verbs_send(dd, ss, dwords, tx);
+		if (ret) {
+			/* save ss and length in dwords */
+			tx->ss = ss;
+			tx->len = dwords;
+			qp->s_tx = tx;
+		}
+		goto bail;
+	}
+
+	/* Allocate a buffer and copy the header and payload to it. */
+	tx->map_len = (plen + 1) << 2;
+	piobuf = kmalloc(tx->map_len, GFP_ATOMIC);
+	if (unlikely(piobuf == NULL)) {
+		ret = -EBUSY;
+		goto err_tx;
+	}
+	tx->txreq.map_addr = piobuf;
+	tx->txreq.flags |= IPATH_SDMA_TXREQ_F_FREEBUF;
+	tx->txreq.sg_count = 1;
+
+	*piobuf++ = (__force u32) cpu_to_le32(plen);
+	*piobuf++ = (__force u32) cpu_to_le32(control);
+	memcpy(piobuf, hdr, hdrwords << 2);
+	ipath_copy_from_sge(piobuf + hdrwords, ss, len);
+
+	ret = ipath_sdma_verbs_send(dd, NULL, 0, tx);
+	/*
+	 * If we couldn't queue the DMA request, save the info
+	 * and try again later rather than destroying the
+	 * buffer and undoing the side effects of the copy.
+	 */
+	if (ret) {
+		tx->ss = NULL;
+		tx->len = 0;
+		qp->s_tx = tx;
+	}
+	dev->n_unaligned++;
+	goto bail;
+
+err_tx:
+	if (atomic_dec_and_test(&qp->refcount))
+		wake_up(&qp->wait);
+	put_txreq(dev, tx);
+bail:
+	return ret;
+}
+
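+/*
+ * To summarize the paths above: a previously built but unqueued packet
+ * is resent first; otherwise the payload is handed to the SDMA engine
+ * by reference when it fits in the descriptor ring, and only as a last
+ * resort is it copied through a kmalloc'ed bounce buffer (counted in
+ * n_unaligned) that sdma_complete() later frees via the FREEBUF flag.
+ */
+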
+static int ipath_verbs_send_pio(struct ipath_qp *qp,
+				struct ipath_ib_header *ibhdr, u32 hdrwords,
 				struct ipath_sge_state *ss, u32 len,
 				u32 plen, u32 dwords)
 {
 	struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;
+	u32 *hdr = (u32 *) ibhdr;
 	u32 __iomem *piobuf;
 	unsigned flush_wc;
+	u32 control;
 	int ret;
 
-	piobuf = ipath_getpiobuf(dd, NULL);
+	piobuf = ipath_getpiobuf(dd, plen, NULL);
 	if (unlikely(piobuf == NULL)) {
 		ret = -EBUSY;
 		goto bail;
 	}
 
 	/*
-	 * Write len to control qword, no flags.
+	 * Get the saved delay count we computed for the previous packet
+	 * and save the delay count for this packet to be used next time
+	 * we get here.
+	 */
+	control = qp->s_pkt_delay;
+	qp->s_pkt_delay = ipath_pkt_delay(plen, dd->delay_mult, qp->s_dmult);
+
+	/* VL15 packets bypass credit check */
+	if ((be16_to_cpu(ibhdr->lrh[0]) >> 12) == 15)
+		control |= 1ULL << 31;
+
+	/*
+	 * Write the length to the control qword plus any needed flags.
 	 * We have to flush after the PBC for correctness on some cpus
 	 * or WC buffer can be written out of order.
 	 */
-	writeq(plen, piobuf);
+	writeq(((u64) control << 32) | plen, piobuf);
 	piobuf += 2;
 
 	flush_wc = dd->ipath_flags & IPATH_PIO_FLUSH_WC;
@@ -961,15 +1278,25 @@
 	 */
 	plen = hdrwords + dwords + 1;
 
-	/* Drop non-VL15 packets if we are not in the active state */
-	if (!(dd->ipath_flags & IPATH_LINKACTIVE) &&
-	    qp->ibqp.qp_type != IB_QPT_SMI) {
+	/*
+	 * VL15 packets (IB_QPT_SMI) will always use PIO, so we
+	 * can defer SDMA restart until link goes ACTIVE without
+	 * worrying about just how we got there.
+	 */
+	if (qp->ibqp.qp_type == IB_QPT_SMI)
+		ret = ipath_verbs_send_pio(qp, hdr, hdrwords, ss, len,
+					   plen, dwords);
+	/* All non-VL15 packets are dropped if link is not ACTIVE */
+	else if (!(dd->ipath_flags & IPATH_LINKACTIVE)) {
 		if (qp->s_wqe)
 			ipath_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
 		ret = 0;
-	} else
-		ret = ipath_verbs_send_pio(qp, (u32 *) hdr, hdrwords,
-					   ss, len, plen, dwords);
+	} else if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
+		ret = ipath_verbs_send_dma(qp, hdr, hdrwords, ss, len,
+					   plen, dwords);
+	else
+		ret = ipath_verbs_send_pio(qp, hdr, hdrwords, ss, len,
+					   plen, dwords);
 
 	return ret;
 }
@@ -1038,6 +1365,12 @@
 		ipath_snap_cntr(dd, crp->cr_errlpcrccnt) +
 		ipath_snap_cntr(dd, crp->cr_badformatcnt) +
 		dd->ipath_rxfc_unsupvl_errs;
+	if (crp->cr_rxotherlocalphyerrcnt)
+		cntrs->port_rcv_errors +=
+			ipath_snap_cntr(dd, crp->cr_rxotherlocalphyerrcnt);
+	if (crp->cr_rxvlerrcnt)
+		cntrs->port_rcv_errors +=
+			ipath_snap_cntr(dd, crp->cr_rxvlerrcnt);
 	cntrs->port_rcv_remphys_errors =
 		ipath_snap_cntr(dd, crp->cr_rcvebpcnt);
 	cntrs->port_xmit_discards = ipath_snap_cntr(dd, crp->cr_unsupvlcnt);
@@ -1046,9 +1379,16 @@
 	cntrs->port_xmit_packets = ipath_snap_cntr(dd, crp->cr_pktsendcnt);
 	cntrs->port_rcv_packets = ipath_snap_cntr(dd, crp->cr_pktrcvcnt);
 	cntrs->local_link_integrity_errors =
-		(dd->ipath_flags & IPATH_GPIO_ERRINTRS) ?
-		dd->ipath_lli_errs : dd->ipath_lli_errors;
-	cntrs->excessive_buffer_overrun_errors = dd->ipath_overrun_thresh_errs;
+		crp->cr_locallinkintegrityerrcnt ?
+		ipath_snap_cntr(dd, crp->cr_locallinkintegrityerrcnt) :
+		((dd->ipath_flags & IPATH_GPIO_ERRINTRS) ?
+		 dd->ipath_lli_errs : dd->ipath_lli_errors);
+	cntrs->excessive_buffer_overrun_errors =
+		crp->cr_excessbufferovflcnt ?
+		ipath_snap_cntr(dd, crp->cr_excessbufferovflcnt) :
+		dd->ipath_overrun_thresh_errs;
+	cntrs->vl15_dropped = crp->cr_vl15droppedpktcnt ?
+		ipath_snap_cntr(dd, crp->cr_vl15droppedpktcnt) : 0;
 
 	ret = 0;
 
@@ -1183,7 +1523,9 @@
 	props->sm_lid = dev->sm_lid;
 	props->sm_sl = dev->sm_sl;
 	ibcstat = dd->ipath_lastibcstat;
-	props->state = ((ibcstat >> 4) & 0x3) + 1;
+	/* Map LinkState to IB portinfo values. */
+	props->state = ipath_ib_linkstate(dd, ibcstat) + 1;
+
 	/* See phys_state_show() */
 	props->phys_state = /* MEA: assumes shift == 0 */
 		ipath_cvt_physportstate[dd->ipath_lastibcstat &
@@ -1195,18 +1537,13 @@
 	props->bad_pkey_cntr = ipath_get_cr_errpkey(dd) -
 		dev->z_pkey_violations;
 	props->qkey_viol_cntr = dev->qkey_violations;
-	props->active_width = IB_WIDTH_4X;
+	props->active_width = dd->ipath_link_width_active;
 	/* See rate_show() */
-	props->active_speed = 1;	/* Regular 10Mbs speed. */
+	props->active_speed = dd->ipath_link_speed_active;
 	props->max_vl_num = 1;		/* VLCap = VL0 */
 	props->init_type_reply = 0;
 
-	/*
-	 * Note: the chip supports a maximum MTU of 4096, but the driver
-	 * hasn't implemented this feature yet, so set the maximum value
-	 * to 2048.
-	 */
-	props->max_mtu = IB_MTU_2048;
+	props->max_mtu = ipath_mtu4096 ? IB_MTU_4096 : IB_MTU_2048;
 	switch (dd->ipath_ibmtu) {
 	case 4096:
 		mtu = IB_MTU_4096;
@@ -1399,6 +1736,7 @@
 
 	/* ib_create_ah() will initialize ah->ibah. */
 	ah->attr = *ah_attr;
+	ah->attr.static_rate = ipath_ib_rate_to_mult(ah_attr->static_rate);
 
 	ret = &ah->ibah;
 
@@ -1432,6 +1770,7 @@
 	struct ipath_ah *ah = to_iah(ibah);
 
 	*ah_attr = ah->attr;
+	ah_attr->static_rate = ipath_mult_to_ib_rate(ah->attr.static_rate);
 
 	return 0;
 }
@@ -1581,6 +1920,8 @@
 	struct ipath_verbs_counters cntrs;
 	struct ipath_ibdev *idev;
 	struct ib_device *dev;
+	struct ipath_verbs_txreq *tx;
+	unsigned i;
 	int ret;
 
 	idev = (struct ipath_ibdev *)ib_alloc_device(sizeof *idev);
@@ -1591,6 +1932,17 @@
 
 	dev = &idev->ibdev;
 
+	if (dd->ipath_sdma_descq_cnt) {
+		tx = kmalloc(dd->ipath_sdma_descq_cnt * sizeof *tx,
+			     GFP_KERNEL);
+		if (tx == NULL) {
+			ret = -ENOMEM;
+			goto err_tx;
+		}
+	} else
+		tx = NULL;
+	idev->txreq_bufs = tx;
+
 	/* Only need to initialize non-zero fields. */
 	spin_lock_init(&idev->n_pds_lock);
 	spin_lock_init(&idev->n_ahs_lock);
@@ -1631,15 +1983,17 @@
 	INIT_LIST_HEAD(&idev->pending[2]);
 	INIT_LIST_HEAD(&idev->piowait);
 	INIT_LIST_HEAD(&idev->rnrwait);
+	INIT_LIST_HEAD(&idev->txreq_free);
 	idev->pending_index = 0;
 	idev->port_cap_flags =
 		IB_PORT_SYS_IMAGE_GUID_SUP | IB_PORT_CLIENT_REG_SUP;
+	if (dd->ipath_flags & IPATH_HAS_LINK_LATENCY)
+		idev->port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
 	idev->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
 	idev->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
 	idev->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
 	idev->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
 	idev->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
-	idev->link_width_enabled = 3;	/* 1x or 4x */
 
 	/* Snapshot current HW counters to "clear" them. */
 	ipath_get_counters(dd, &cntrs);
@@ -1661,6 +2015,9 @@
 		cntrs.excessive_buffer_overrun_errors;
 	idev->z_vl15_dropped = cntrs.vl15_dropped;
 
+	for (i = 0; i < dd->ipath_sdma_descq_cnt; i++, tx++)
+		list_add(&tx->txreq.list, &idev->txreq_free);
+
 	/*
 	 * The system image GUID is supposed to be the same for all
 	 * IB HCAs in a single system but since there can be other
@@ -1710,6 +2067,7 @@
 	dev->phys_port_cnt = 1;
 	dev->num_comp_vectors = 1;
 	dev->dma_device = &dd->pcidev->dev;
+	dev->class_dev.dev = dev->dma_device;
 	dev->query_device = ipath_query_device;
 	dev->modify_device = ipath_modify_device;
 	dev->query_port = ipath_query_port;
@@ -1774,6 +2132,8 @@
 err_lk:
 	kfree(idev->qp_table.table);
 err_qp:
+	kfree(idev->txreq_bufs);
+err_tx:
 	ib_dealloc_device(dev);
 	ipath_dev_err(dd, "cannot register verbs: %d!\n", -ret);
 	idev = NULL;
@@ -1808,6 +2168,7 @@
 	ipath_free_all_qps(&dev->qp_table);
 	kfree(dev->qp_table.table);
 	kfree(dev->lk_table.table);
+	kfree(dev->txreq_bufs);
 	ib_dealloc_device(ibdev);
 }
 
@@ -1855,13 +2216,15 @@
 		      "RC stalls   %d\n"
 		      "piobuf wait %d\n"
 		      "no piobuf   %d\n"
+		      "unaligned   %d\n"
 		      "PKT drops   %d\n"
 		      "WQE errs    %d\n",
 		      dev->n_rc_resends, dev->n_rc_qacks, dev->n_rc_acks,
 		      dev->n_seq_naks, dev->n_rdma_seq, dev->n_rnr_naks,
 		      dev->n_other_naks, dev->n_timeouts,
 		      dev->n_rdma_dup_busy, dev->n_rc_stalls, dev->n_piowait,
-		      dev->n_no_piobuf, dev->n_pkt_drops, dev->n_wqe_errs);
+		      dev->n_no_piobuf, dev->n_unaligned,
+		      dev->n_pkt_drops, dev->n_wqe_errs);
 	for (i = 0; i < ARRAY_SIZE(dev->opstats); i++) {
 		const struct ipath_opcode_stats *si = &dev->opstats[i];
 
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index 3d59736..6514aa8 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -138,6 +138,11 @@
 	} u;
 } __attribute__ ((packed));
 
+struct ipath_pio_header {
+	__le32 pbc[2];
+	struct ipath_ib_header hdr;
+} __attribute__ ((packed));
+
 /*
  * There is one struct ipath_mcast for each multicast GID.
  * All attached QPs are then stored as a list of
@@ -319,6 +324,7 @@
 	struct ipath_sge *sg_list;      /* next SGE to be used if any */
 	struct ipath_sge sge;   /* progress state for the current SGE */
 	u8 num_sge;
+	u8 static_rate;
 };
 
 /*
@@ -356,6 +362,7 @@
 	struct tasklet_struct s_task;
 	struct ipath_mmap_info *ip;
 	struct ipath_sge_state *s_cur_sge;
+	struct ipath_verbs_txreq *s_tx;
 	struct ipath_sge_state s_sge;	/* current send request data */
 	struct ipath_ack_entry s_ack_queue[IPATH_MAX_RDMA_ATOMIC + 1];
 	struct ipath_sge_state s_ack_rdma_sge;
@@ -363,7 +370,8 @@
 	struct ipath_sge_state r_sge;	/* current receive data */
 	spinlock_t s_lock;
 	unsigned long s_busy;
-	u32 s_hdrwords;		/* size of s_hdr in 32 bit words */
+	u16 s_pkt_delay;
+	u16 s_hdrwords;		/* size of s_hdr in 32 bit words */
 	u32 s_cur_size;		/* size of send packet in bytes */
 	u32 s_len;		/* total length of s_sge */
 	u32 s_rdma_read_len;	/* total length of s_rdma_read_sge */
@@ -387,7 +395,6 @@
 	u8 r_nak_state;		/* non-zero if NAK is pending */
 	u8 r_min_rnr_timer;	/* retry timeout value for RNR NAKs */
 	u8 r_reuse_sge;		/* for UC receive errors */
-	u8 r_sge_inx;		/* current index into sg_list */
 	u8 r_wrid_valid;	/* r_wrid set but CQ entry not yet made */
 	u8 r_max_rd_atomic;	/* max number of RDMA read/atomic to receive */
 	u8 r_head_ack_queue;	/* index into s_ack_queue[] */
@@ -403,6 +410,7 @@
 	u8 s_num_rd_atomic;	/* number of RDMA read/atomic pending */
 	u8 s_tail_ack_queue;	/* index into s_ack_queue[] */
 	u8 s_flags;
+	u8 s_dmult;
 	u8 timeout;		/* Timeout for this QP */
 	enum ib_mtu path_mtu;
 	u32 remote_qpn;
@@ -510,6 +518,8 @@
 	struct ipath_lkey_table lk_table;
 	struct list_head pending[3];	/* FIFO of QPs waiting for ACKs */
 	struct list_head piowait;	/* list for wait PIO buf */
+	struct list_head txreq_free;
+	void *txreq_bufs;
 	/* list of QPs waiting for RNR timer */
 	struct list_head rnrwait;
 	spinlock_t pending_lock;
@@ -570,6 +580,7 @@
 	u32 n_rdma_dup_busy;
 	u32 n_piowait;
 	u32 n_no_piobuf;
+	u32 n_unaligned;
 	u32 port_cap_flags;
 	u32 pma_sample_start;
 	u32 pma_sample_interval;
@@ -581,7 +592,6 @@
 	u16 pending_index;	/* which pending queue is active */
 	u8 pma_sample_status;
 	u8 subnet_timeout;
-	u8 link_width_enabled;
 	u8 vl_high_limit;
 	struct ipath_opcode_stats opstats[128];
 };
@@ -602,6 +612,16 @@
 	u32 vl15_dropped;
 };
 
+struct ipath_verbs_txreq {
+	struct ipath_qp         *qp;
+	struct ipath_swqe       *wqe;
+	u32                      map_len;
+	u32                      len;
+	struct ipath_sge_state  *ss;
+	struct ipath_pio_header  hdr;
+	struct ipath_sdma_txreq  txreq;
+};
+
 static inline struct ipath_mr *to_imr(struct ib_mr *ibmr)
 {
 	return container_of(ibmr, struct ipath_mr, ibmr);
@@ -694,11 +714,11 @@
 
 void ipath_get_credit(struct ipath_qp *qp, u32 aeth);
 
+unsigned ipath_ib_rate_to_mult(enum ib_rate rate);
+
 int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr,
 		     u32 hdrwords, struct ipath_sge_state *ss, u32 len);
 
-void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig);
-
 void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length);
 
 void ipath_skip_sge(struct ipath_sge_state *ss, u32 length);
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 7360bba..3557e7e 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -85,6 +85,82 @@
 	return get_sw_cqe(cq, cq->mcq.cons_index);
 }
 
+int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
+{
+	struct mlx4_ib_cq *mcq = to_mcq(cq);
+	struct mlx4_ib_dev *dev = to_mdev(cq->device);
+
+	return mlx4_cq_modify(dev->dev, &mcq->mcq, cq_count, cq_period);
+}
+
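+/*
+ * cq_count and cq_period are the completion event moderation
+ * parameters (maximum number of completions and maximum delay before
+ * an event is generated); they are passed through unchanged to the
+ * firmware's MODIFY_CQ command by mlx4_cq_modify().
+ */
+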
+static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent)
+{
+	int err;
+
+	err = mlx4_buf_alloc(dev->dev, nent * sizeof(struct mlx4_cqe),
+			     PAGE_SIZE * 2, &buf->buf);
+
+	if (err)
+		goto out;
+
+	err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
+				    &buf->mtt);
+	if (err)
+		goto err_buf;
+
+	err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
+	if (err)
+		goto err_mtt;
+
+	return 0;
+
+err_mtt:
+	mlx4_mtt_cleanup(dev->dev, &buf->mtt);
+
+err_buf:
+	mlx4_buf_free(dev->dev, nent * sizeof(struct mlx4_cqe),
+			      &buf->buf);
+
+out:
+	return err;
+}
+
+static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
+{
+	mlx4_buf_free(dev->dev, (cqe + 1) * sizeof(struct mlx4_cqe), &buf->buf);
+}
+
+static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context,
+			       struct mlx4_ib_cq_buf *buf, struct ib_umem **umem,
+			       u64 buf_addr, int cqe)
+{
+	int err;
+
+	*umem = ib_umem_get(context, buf_addr, cqe * sizeof (struct mlx4_cqe),
+			    IB_ACCESS_LOCAL_WRITE);
+	if (IS_ERR(*umem))
+		return PTR_ERR(*umem);
+
+	err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),
+			    ilog2((*umem)->page_size), &buf->mtt);
+	if (err)
+		goto err_buf;
+
+	err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);
+	if (err)
+		goto err_mtt;
+
+	return 0;
+
+err_mtt:
+	mlx4_mtt_cleanup(dev->dev, &buf->mtt);
+
+err_buf:
+	ib_umem_release(*umem);
+
+	return err;
+}
+
 struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector,
 				struct ib_ucontext *context,
 				struct ib_udata *udata)
@@ -92,7 +168,6 @@
 	struct mlx4_ib_dev *dev = to_mdev(ibdev);
 	struct mlx4_ib_cq *cq;
 	struct mlx4_uar *uar;
-	int buf_size;
 	int err;
 
 	if (entries < 1 || entries > dev->dev->caps.max_cqes)
@@ -104,8 +179,10 @@
 
 	entries      = roundup_pow_of_two(entries + 1);
 	cq->ibcq.cqe = entries - 1;
-	buf_size     = entries * sizeof (struct mlx4_cqe);
+	mutex_init(&cq->resize_mutex);
 	spin_lock_init(&cq->lock);
+	cq->resize_buf = NULL;
+	cq->resize_umem = NULL;
 
 	if (context) {
 		struct mlx4_ib_create_cq ucmd;
@@ -115,21 +192,10 @@
 			goto err_cq;
 		}
 
-		cq->umem = ib_umem_get(context, ucmd.buf_addr, buf_size,
-				       IB_ACCESS_LOCAL_WRITE);
-		if (IS_ERR(cq->umem)) {
-			err = PTR_ERR(cq->umem);
+		err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem,
+					  ucmd.buf_addr, entries);
+		if (err)
 			goto err_cq;
-		}
-
-		err = mlx4_mtt_init(dev->dev, ib_umem_page_count(cq->umem),
-				    ilog2(cq->umem->page_size), &cq->buf.mtt);
-		if (err)
-			goto err_buf;
-
-		err = mlx4_ib_umem_write_mtt(dev, &cq->buf.mtt, cq->umem);
-		if (err)
-			goto err_mtt;
 
 		err = mlx4_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
 					  &cq->db);
@@ -147,19 +213,9 @@
 		*cq->mcq.set_ci_db = 0;
 		*cq->mcq.arm_db    = 0;
 
-		if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &cq->buf.buf)) {
-			err = -ENOMEM;
+		err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
+		if (err)
 			goto err_db;
-		}
-
-		err = mlx4_mtt_init(dev->dev, cq->buf.buf.npages, cq->buf.buf.page_shift,
-				    &cq->buf.mtt);
-		if (err)
-			goto err_buf;
-
-		err = mlx4_buf_write_mtt(dev->dev, &cq->buf.mtt, &cq->buf.buf);
-		if (err)
-			goto err_mtt;
 
 		uar = &dev->priv_uar;
 	}
@@ -187,12 +243,10 @@
 err_mtt:
 	mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);
 
-err_buf:
 	if (context)
 		ib_umem_release(cq->umem);
 	else
-		mlx4_buf_free(dev->dev, entries * sizeof (struct mlx4_cqe),
-			      &cq->buf.buf);
+		mlx4_ib_free_cq_buf(dev, &cq->buf, entries);
 
 err_db:
 	if (!context)
@@ -204,6 +258,170 @@
 	return ERR_PTR(err);
 }
 
+static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
+				  int entries)
+{
+	int err;
+
+	if (cq->resize_buf)
+		return -EBUSY;
+
+	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
+	if (!cq->resize_buf)
+		return -ENOMEM;
+
+	err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
+	if (err) {
+		kfree(cq->resize_buf);
+		cq->resize_buf = NULL;
+		return err;
+	}
+
+	cq->resize_buf->cqe = entries - 1;
+
+	return 0;
+}
+
+static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
+				   int entries, struct ib_udata *udata)
+{
+	struct mlx4_ib_resize_cq ucmd;
+	int err;
+
+	if (cq->resize_umem)
+		return -EBUSY;
+
+	if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
+		return -EFAULT;
+
+	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
+	if (!cq->resize_buf)
+		return -ENOMEM;
+
+	err = mlx4_ib_get_cq_umem(dev, cq->umem->context, &cq->resize_buf->buf,
+				  &cq->resize_umem, ucmd.buf_addr, entries);
+	if (err) {
+		kfree(cq->resize_buf);
+		cq->resize_buf = NULL;
+		return err;
+	}
+
+	cq->resize_buf->cqe = entries - 1;
+
+	return 0;
+}
+
+static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq)
+{
+	u32 i;
+
+	i = cq->mcq.cons_index;
+	while (get_sw_cqe(cq, i & cq->ibcq.cqe))
+		++i;
+
+	return i - cq->mcq.cons_index;
+}
+
+static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
+{
+	struct mlx4_cqe *cqe;
+	int i;
+
+	i = cq->mcq.cons_index;
+	cqe = get_cqe(cq, i & cq->ibcq.cqe);
+	while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
+		memcpy(get_cqe_from_buf(&cq->resize_buf->buf,
+					(i + 1) & cq->resize_buf->cqe),
+			get_cqe(cq, i & cq->ibcq.cqe), sizeof(struct mlx4_cqe));
+		cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
+	}
+	++cq->mcq.cons_index;
+}
+
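+/*
+ * The copy loop above relies on the hardware writing a CQE with the
+ * special RESIZE opcode at the point where it switched to the new
+ * buffer, so every entry before that marker is a still-unpolled
+ * completion that must be carried over; cons_index is then advanced
+ * past the marker.
+ */
+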
+int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
+{
+	struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
+	struct mlx4_ib_cq *cq = to_mcq(ibcq);
+	int outst_cqe;
+	int err;
+
+	mutex_lock(&cq->resize_mutex);
+
+	if (entries < 1 || entries > dev->dev->caps.max_cqes) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	entries = roundup_pow_of_two(entries + 1);
+	if (entries == ibcq->cqe + 1) {
+		err = 0;
+		goto out;
+	}
+
+	if (ibcq->uobject) {
+		err = mlx4_alloc_resize_umem(dev, cq, entries, udata);
+		if (err)
+			goto out;
+	} else {
+		/* Can't be smaller than the number of outstanding CQEs */
+		outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
+		if (entries < outst_cqe + 1) {
+			err = 0;
+			goto out;
+		}
+
+		err = mlx4_alloc_resize_buf(dev, cq, entries);
+		if (err)
+			goto out;
+	}
+
+	err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
+	if (err)
+		goto err_buf;
+
+	if (ibcq->uobject) {
+		cq->buf      = cq->resize_buf->buf;
+		cq->ibcq.cqe = cq->resize_buf->cqe;
+		ib_umem_release(cq->umem);
+		cq->umem     = cq->resize_umem;
+
+		kfree(cq->resize_buf);
+		cq->resize_buf = NULL;
+		cq->resize_umem = NULL;
+	} else {
+		spin_lock_irq(&cq->lock);
+		if (cq->resize_buf) {
+			mlx4_ib_cq_resize_copy_cqes(cq);
+			mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
+			cq->buf      = cq->resize_buf->buf;
+			cq->ibcq.cqe = cq->resize_buf->cqe;
+
+			kfree(cq->resize_buf);
+			cq->resize_buf = NULL;
+		}
+		spin_unlock_irq(&cq->lock);
+	}
+
+	goto out;
+
+err_buf:
+	if (!ibcq->uobject)
+		mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf,
+				    cq->resize_buf->cqe);
+
+	kfree(cq->resize_buf);
+	cq->resize_buf = NULL;
+
+	if (cq->resize_umem) {
+		ib_umem_release(cq->resize_umem);
+		cq->resize_umem = NULL;
+	}
+
+out:
+	mutex_unlock(&cq->resize_mutex);
+	return err;
+}
+
 int mlx4_ib_destroy_cq(struct ib_cq *cq)
 {
 	struct mlx4_ib_dev *dev = to_mdev(cq->device);
@@ -216,8 +434,7 @@
 		mlx4_ib_db_unmap_user(to_mucontext(cq->uobject->context), &mcq->db);
 		ib_umem_release(mcq->umem);
 	} else {
-		mlx4_buf_free(dev->dev, (cq->cqe + 1) * sizeof (struct mlx4_cqe),
-			      &mcq->buf.buf);
+		mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe + 1);
 		mlx4_ib_db_free(dev, &mcq->db);
 	}
 
@@ -297,6 +514,20 @@
 	wc->vendor_err = cqe->vendor_err_syndrome;
 }
 
+static int mlx4_ib_ipoib_csum_ok(__be32 status, __be16 checksum)
+{
+	return ((status & cpu_to_be32(MLX4_CQE_IPOIB_STATUS_IPV4	|
+				      MLX4_CQE_IPOIB_STATUS_IPV4F	|
+				      MLX4_CQE_IPOIB_STATUS_IPV4OPT	|
+				      MLX4_CQE_IPOIB_STATUS_IPV6	|
+				      MLX4_CQE_IPOIB_STATUS_IPOK)) ==
+		cpu_to_be32(MLX4_CQE_IPOIB_STATUS_IPV4	|
+			    MLX4_CQE_IPOIB_STATUS_IPOK))		&&
+		(status & cpu_to_be32(MLX4_CQE_IPOIB_STATUS_UDP	|
+				      MLX4_CQE_IPOIB_STATUS_TCP))	&&
+		checksum == cpu_to_be16(0xffff);
+}
+
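+/*
+ * In words: the checksum is only trusted for a non-fragmented IPv4
+ * packet with no IP options, whose IP header checksum was good (IPOK),
+ * whose payload is TCP or UDP, and whose 16-bit hardware checksum
+ * folded to 0xffff.
+ */
+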
 static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
 			    struct mlx4_ib_qp **cur_qp,
 			    struct ib_wc *wc)
@@ -310,6 +541,7 @@
 	u32 g_mlpath_rqpn;
 	u16 wqe_ctr;
 
+repoll:
 	cqe = next_cqe_sw(cq);
 	if (!cqe)
 		return -EAGAIN;
@@ -332,6 +564,22 @@
 		return -EINVAL;
 	}
 
+	/* Resize CQ in progress */
+	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
+		if (cq->resize_buf) {
+			struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device);
+
+			mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
+			cq->buf      = cq->resize_buf->buf;
+			cq->ibcq.cqe = cq->resize_buf->cqe;
+
+			kfree(cq->resize_buf);
+			cq->resize_buf = NULL;
+		}
+
+		goto repoll;
+	}
+
 	if (!*cur_qp ||
 	    (be32_to_cpu(cqe->my_qpn) & 0xffffff) != (*cur_qp)->mqp.qpn) {
 		/*
@@ -406,6 +654,9 @@
 		case MLX4_OPCODE_BIND_MW:
 			wc->opcode    = IB_WC_BIND_MW;
 			break;
+		case MLX4_OPCODE_LSO:
+			wc->opcode    = IB_WC_LSO;
+			break;
 		}
 	} else {
 		wc->byte_len = be32_to_cpu(cqe->byte_cnt);
@@ -434,6 +685,8 @@
 		wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
 		wc->wc_flags	  |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
 		wc->pkey_index     = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
+		wc->csum_ok	   = mlx4_ib_ipoib_csum_ok(cqe->ipoib_status,
+							   cqe->checksum);
 	}
 
 	return 0;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 0ed02b7..4c1e72f 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -165,7 +165,7 @@
 			event.device	       = ibdev;
 			event.element.port_num = port_num;
 
-			if(pinfo->clientrereg_resv_subnetto & 0x80)
+			if (pinfo->clientrereg_resv_subnetto & 0x80)
 				event.event    = IB_EVENT_CLIENT_REREGISTER;
 			else
 				event.event    = IB_EVENT_LID_CHANGE;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 96a39b5..136c76c 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -44,8 +44,8 @@
 #include "user.h"
 
 #define DRV_NAME	"mlx4_ib"
-#define DRV_VERSION	"0.01"
-#define DRV_RELDATE	"May 1, 2006"
+#define DRV_VERSION	"1.0"
+#define DRV_RELDATE	"April 4, 2008"
 
 MODULE_AUTHOR("Roland Dreier");
 MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
@@ -99,6 +99,10 @@
 		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
 		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
+	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
+		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
+	if (dev->dev->caps.max_gso_sz)
+		props->device_cap_flags |= IB_DEVICE_UD_TSO;
 
 	props->vendor_id	   = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
 		0xffffff;
@@ -567,6 +571,7 @@
 		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
 		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)	|
 		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
+		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)		|
 		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
 		(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
 		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
@@ -605,6 +610,8 @@
 	ibdev->ib_dev.post_send		= mlx4_ib_post_send;
 	ibdev->ib_dev.post_recv		= mlx4_ib_post_recv;
 	ibdev->ib_dev.create_cq		= mlx4_ib_create_cq;
+	ibdev->ib_dev.modify_cq		= mlx4_ib_modify_cq;
+	ibdev->ib_dev.resize_cq		= mlx4_ib_resize_cq;
 	ibdev->ib_dev.destroy_cq	= mlx4_ib_destroy_cq;
 	ibdev->ib_dev.poll_cq		= mlx4_ib_poll_cq;
 	ibdev->ib_dev.req_notify_cq	= mlx4_ib_arm_cq;
@@ -675,18 +682,20 @@
 }
 
 static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
-			  enum mlx4_dev_event event, int subtype,
-			  int port)
+			  enum mlx4_dev_event event, int port)
 {
 	struct ib_event ibev;
 
 	switch (event) {
-	case MLX4_EVENT_TYPE_PORT_CHANGE:
-		ibev.event = subtype == MLX4_PORT_CHANGE_SUBTYPE_ACTIVE ?
-			IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
+	case MLX4_DEV_EVENT_PORT_UP:
+		ibev.event = IB_EVENT_PORT_ACTIVE;
 		break;
 
-	case MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR:
+	case MLX4_DEV_EVENT_PORT_DOWN:
+		ibev.event = IB_EVENT_PORT_ERR;
+		break;
+
+	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
 		ibev.event = IB_EVENT_DEVICE_FATAL;
 		break;
 
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 3726e45..9e63732 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -78,13 +78,21 @@
 	struct mlx4_mtt		mtt;
 };
 
+struct mlx4_ib_cq_resize {
+	struct mlx4_ib_cq_buf	buf;
+	int			cqe;
+};
+
 struct mlx4_ib_cq {
 	struct ib_cq		ibcq;
 	struct mlx4_cq		mcq;
 	struct mlx4_ib_cq_buf	buf;
+	struct mlx4_ib_cq_resize *resize_buf;
 	struct mlx4_ib_db	db;
 	spinlock_t		lock;
+	struct mutex		resize_mutex;
 	struct ib_umem	       *umem;
+	struct ib_umem	       *resize_umem;
 };
 
 struct mlx4_ib_mr {
@@ -110,6 +118,10 @@
 	unsigned		tail;
 };
 
+enum mlx4_ib_qp_flags {
+	MLX4_IB_QP_LSO		= 1 << 0
+};
+
 struct mlx4_ib_qp {
 	struct ib_qp		ibqp;
 	struct mlx4_qp		mqp;
@@ -129,6 +141,7 @@
 	struct mlx4_mtt		mtt;
 	int			buf_size;
 	struct mutex		mutex;
+	u32			flags;
 	u8			port;
 	u8			alt_port;
 	u8			atomic_rd_en;
@@ -249,6 +262,8 @@
 				  struct ib_udata *udata);
 int mlx4_ib_dereg_mr(struct ib_mr *mr);
 
+int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
+int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
 struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector,
 				struct ib_ucontext *context,
 				struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 958e205..b75efae7 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -71,6 +71,7 @@
 
 static const __be32 mlx4_ib_opcode[] = {
 	[IB_WR_SEND]			= __constant_cpu_to_be32(MLX4_OPCODE_SEND),
+	[IB_WR_LSO]			= __constant_cpu_to_be32(MLX4_OPCODE_LSO),
 	[IB_WR_SEND_WITH_IMM]		= __constant_cpu_to_be32(MLX4_OPCODE_SEND_IMM),
 	[IB_WR_RDMA_WRITE]		= __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
 	[IB_WR_RDMA_WRITE_WITH_IMM]	= __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
@@ -122,7 +123,7 @@
  */
 static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size)
 {
-	u32 *wqe;
+	__be32 *wqe;
 	int i;
 	int s;
 	int ind;
@@ -143,7 +144,7 @@
 		buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
 		for (i = 64; i < s; i += 64) {
 			wqe = buf + i;
-			*wqe = 0xffffffff;
+			*wqe = cpu_to_be32(0xffffffff);
 		}
 	}
 }
@@ -242,7 +243,7 @@
 	}
 }
 
-static int send_wqe_overhead(enum ib_qp_type type)
+static int send_wqe_overhead(enum ib_qp_type type, u32 flags)
 {
 	/*
 	 * UD WQEs must have a datagram segment.
@@ -253,7 +254,8 @@
 	switch (type) {
 	case IB_QPT_UD:
 		return sizeof (struct mlx4_wqe_ctrl_seg) +
-			sizeof (struct mlx4_wqe_datagram_seg);
+			sizeof (struct mlx4_wqe_datagram_seg) +
+			((flags & MLX4_IB_QP_LSO) ? 64 : 0);
 	case IB_QPT_UC:
 		return sizeof (struct mlx4_wqe_ctrl_seg) +
 			sizeof (struct mlx4_wqe_raddr_seg);
@@ -315,7 +317,7 @@
 	/* Sanity check SQ size before proceeding */
 	if (cap->max_send_wr	 > dev->dev->caps.max_wqes  ||
 	    cap->max_send_sge	 > dev->dev->caps.max_sq_sg ||
-	    cap->max_inline_data + send_wqe_overhead(type) +
+	    cap->max_inline_data + send_wqe_overhead(type, qp->flags) +
 	    sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz)
 		return -EINVAL;
 
@@ -329,7 +331,7 @@
 
 	s = max(cap->max_send_sge * sizeof (struct mlx4_wqe_data_seg),
 		cap->max_inline_data + sizeof (struct mlx4_wqe_inline_seg)) +
-		send_wqe_overhead(type);
+		send_wqe_overhead(type, qp->flags);
 
 	/*
 	 * Hermon supports shrinking WQEs, such that a single work
@@ -394,7 +396,8 @@
 	}
 
 	qp->sq.max_gs = ((qp->sq_max_wqes_per_wr << qp->sq.wqe_shift) -
-			 send_wqe_overhead(type)) / sizeof (struct mlx4_wqe_data_seg);
+			 send_wqe_overhead(type, qp->flags)) /
+		sizeof (struct mlx4_wqe_data_seg);
 
 	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
 		(qp->sq.wqe_cnt << qp->sq.wqe_shift);
@@ -503,6 +506,9 @@
 	} else {
 		qp->sq_no_prefetch = 0;
 
+		if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
+			qp->flags |= MLX4_IB_QP_LSO;
+
 		err = set_kernel_sq_size(dev, &init_attr->cap, init_attr->qp_type, qp);
 		if (err)
 			goto err;
@@ -673,6 +679,13 @@
 	struct mlx4_ib_qp *qp;
 	int err;
 
+	/* We only support LSO, and only for kernel UD QPs. */
+	if (init_attr->create_flags & ~IB_QP_CREATE_IPOIB_UD_LSO)
+		return ERR_PTR(-EINVAL);
+	if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO &&
+	    (pd->uobject || init_attr->qp_type != IB_QPT_UD))
+		return ERR_PTR(-EINVAL);
+
 	switch (init_attr->qp_type) {
 	case IB_QPT_RC:
 	case IB_QPT_UC:
@@ -876,10 +889,15 @@
 		}
 	}
 
-	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI ||
-	    ibqp->qp_type == IB_QPT_UD)
+	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
 		context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
-	else if (attr_mask & IB_QP_PATH_MTU) {
+	else if (ibqp->qp_type == IB_QPT_UD) {
+		if (qp->flags & MLX4_IB_QP_LSO)
+			context->mtu_msgmax = (IB_MTU_4096 << 5) |
+					      ilog2(dev->dev->caps.max_gso_sz);
+		else
+			context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
+	} else if (attr_mask & IB_QP_PATH_MTU) {
 		if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
 			printk(KERN_ERR "path MTU (%u) is invalid\n",
 			       attr->path_mtu);
@@ -1182,7 +1200,7 @@
 }
 
 static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
-			    void *wqe)
+			    void *wqe, unsigned *mlx_seg_len)
 {
 	struct ib_device *ib_dev = &to_mdev(sqp->qp.ibqp.device)->ib_dev;
 	struct mlx4_wqe_mlx_seg *mlx = wqe;
@@ -1231,7 +1249,7 @@
 	case IB_WR_SEND_WITH_IMM:
 		sqp->ud_header.bth.opcode	 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
 		sqp->ud_header.immediate_present = 1;
-		sqp->ud_header.immediate_data    = wr->imm_data;
+		sqp->ud_header.immediate_data    = wr->ex.imm_data;
 		break;
 	default:
 		return -EINVAL;
@@ -1303,7 +1321,9 @@
 		i = 2;
 	}
 
-	return ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
+	*mlx_seg_len =
+		ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
+	return 0;
 }
 
 static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
@@ -1396,6 +1416,34 @@
 	dseg->addr       = cpu_to_be64(sg->addr);
 }
 
+static int build_lso_seg(struct mlx4_lso_seg *wqe, struct ib_send_wr *wr,
+			 struct mlx4_ib_qp *qp, unsigned *lso_seg_len)
+{
+	unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16);
+
+	/*
+	 * This is a temporary limitation and will be removed in
+	 * a forthcoming FW release:
+	 */
+	if (unlikely(halign > 64))
+		return -EINVAL;
+
+	if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) &&
+		     wr->num_sge > qp->sq.max_gs - (halign >> 4)))
+		return -EINVAL;
+
+	memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);
+
+	/* make sure LSO header is written before overwriting stamping */
+	wmb();
+
+	wqe->mss_hdr_size = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 |
+					wr->wr.ud.hlen);
+
+	*lso_seg_len = halign;
+	return 0;
+}
+
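+/*
+ * The LSO segment inlines the entire wr->wr.ud.hlen-byte packet header
+ * right after the 4-byte mss/header-length word, padded to a multiple
+ * of 16.  A 44-byte header, for instance, gives halign =
+ * ALIGN(4 + 44, 16) = 48, which the caller accounts for as three
+ * 16-byte WQE units.
+ */
+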
 int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		      struct ib_send_wr **bad_wr)
 {
@@ -1409,6 +1457,7 @@
 	unsigned ind;
 	int uninitialized_var(stamp);
 	int uninitialized_var(size);
+	unsigned seglen;
 	int i;
 
 	spin_lock_irqsave(&qp->sq.lock, flags);
@@ -1436,11 +1485,14 @@
 			 cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) : 0) |
 			(wr->send_flags & IB_SEND_SOLICITED ?
 			 cpu_to_be32(MLX4_WQE_CTRL_SOLICITED) : 0) |
+			((wr->send_flags & IB_SEND_IP_CSUM) ?
+			 cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
+				     MLX4_WQE_CTRL_TCP_UDP_CSUM) : 0) |
 			qp->sq_signal_bits;
 
 		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
 		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
-			ctrl->imm = wr->imm_data;
+			ctrl->imm = wr->ex.imm_data;
 		else
 			ctrl->imm = 0;
 
@@ -1484,19 +1536,27 @@
 			set_datagram_seg(wqe, wr);
 			wqe  += sizeof (struct mlx4_wqe_datagram_seg);
 			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
+
+			if (wr->opcode == IB_WR_LSO) {
+				err = build_lso_seg(wqe, wr, qp, &seglen);
+				if (unlikely(err)) {
+					*bad_wr = wr;
+					goto out;
+				}
+				wqe  += seglen;
+				size += seglen / 16;
+			}
 			break;
 
 		case IB_QPT_SMI:
 		case IB_QPT_GSI:
-			err = build_mlx_header(to_msqp(qp), wr, ctrl);
-			if (err < 0) {
+			err = build_mlx_header(to_msqp(qp), wr, ctrl, &seglen);
+			if (unlikely(err)) {
 				*bad_wr = wr;
 				goto out;
 			}
-			wqe  += err;
-			size += err / 16;
-
-			err = 0;
+			wqe  += seglen;
+			size += seglen / 16;
 			break;
 
 		default:
@@ -1725,7 +1785,9 @@
 	struct mlx4_ib_qp *qp = to_mqp(ibqp);
 	struct mlx4_qp_context context;
 	int mlx4_state;
-	int err;
+	int err = 0;
+
+	mutex_lock(&qp->mutex);
 
 	if (qp->state == IB_QPS_RESET) {
 		qp_attr->qp_state = IB_QPS_RESET;
@@ -1733,12 +1795,15 @@
 	}
 
 	err = mlx4_qp_query(dev->dev, &qp->mqp, &context);
-	if (err)
-		return -EINVAL;
+	if (err) {
+		err = -EINVAL;
+		goto out;
+	}
 
 	mlx4_state = be32_to_cpu(context.flags) >> 28;
 
-	qp_attr->qp_state	     = to_ib_qp_state(mlx4_state);
+	qp->state		     = to_ib_qp_state(mlx4_state);
+	qp_attr->qp_state	     = qp->state;
 	qp_attr->path_mtu	     = context.mtu_msgmax >> 5;
 	qp_attr->path_mig_state	     =
 		to_ib_mig_state((be32_to_cpu(context.flags) >> 11) & 0x3);
@@ -1797,6 +1862,8 @@
 
 	qp_init_attr->cap	     = qp_attr->cap;
 
-	return 0;
+out:
+	mutex_unlock(&qp->mutex);
+	return err;
 }
 
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 09a30dd..54d230e 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -219,7 +219,7 @@
 	__raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT)                |
 					       (1 << HCA_E_BIT)                 |
 					       (op_modifier << HCR_OPMOD_SHIFT) |
-					        op),                      ptr + offs[6]);
+						op),			  ptr + offs[6]);
 	wmb();
 	__raw_writel((__force u32) 0,                                     ptr + offs[7]);
 	wmb();
@@ -1339,6 +1339,10 @@
 	/* Check port for UD address vector: */
 	*(inbox + INIT_HCA_FLAGS2_OFFSET / 4) |= cpu_to_be32(1);
 
+	/* Enable IPoIB checksumming if we can: */
+	if (dev->device_cap_flags & IB_DEVICE_UD_IP_CSUM)
+		*(inbox + INIT_HCA_FLAGS2_OFFSET / 4) |= cpu_to_be32(7 << 3);
+
 	/* We leave wqe_quota, responder_exu, etc as 0 (default) */
 
 	/* QPC/EEC/CQC/EQC/RDB attributes */
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.h b/drivers/infiniband/hw/mthca/mthca_cmd.h
index 2f976f2..8928ca4 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.h
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.h
@@ -103,6 +103,7 @@
 	DEV_LIM_FLAG_RAW_IPV6           = 1 << 4,
 	DEV_LIM_FLAG_RAW_ETHER          = 1 << 5,
 	DEV_LIM_FLAG_SRQ                = 1 << 6,
+	DEV_LIM_FLAG_IPOIB_CSUM		= 1 << 7,
 	DEV_LIM_FLAG_BAD_PKEY_CNTR      = 1 << 8,
 	DEV_LIM_FLAG_BAD_QKEY_CNTR      = 1 << 9,
 	DEV_LIM_FLAG_MW                 = 1 << 16,
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 1e1e336..20401d2 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -119,7 +119,8 @@
 	__be32 my_qpn;
 	__be32 my_ee;
 	__be32 rqpn;
-	__be16 sl_g_mlpath;
+	u8     sl_ipok;
+	u8     g_mlpath;
 	__be16 rlid;
 	__be32 imm_etype_pkey_eec;
 	__be32 byte_cnt;
@@ -493,6 +494,7 @@
 	int is_send;
 	int free_cqe = 1;
 	int err = 0;
+	u16 checksum;
 
 	cqe = next_cqe_sw(cq);
 	if (!cqe)
@@ -635,12 +637,14 @@
 			break;
 		}
 		entry->slid 	   = be16_to_cpu(cqe->rlid);
-		entry->sl   	   = be16_to_cpu(cqe->sl_g_mlpath) >> 12;
+		entry->sl   	   = cqe->sl_ipok >> 4;
 		entry->src_qp 	   = be32_to_cpu(cqe->rqpn) & 0xffffff;
-		entry->dlid_path_bits = be16_to_cpu(cqe->sl_g_mlpath) & 0x7f;
+		entry->dlid_path_bits = cqe->g_mlpath & 0x7f;
 		entry->pkey_index  = be32_to_cpu(cqe->imm_etype_pkey_eec) >> 16;
-		entry->wc_flags   |= be16_to_cpu(cqe->sl_g_mlpath) & 0x80 ?
-					IB_WC_GRH : 0;
+		entry->wc_flags   |= cqe->g_mlpath & 0x80 ? IB_WC_GRH : 0;
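+		/*
+		 * The 16-bit hardware checksum is split across the CQE: its
+		 * low byte arrives in the top byte of rqpn and its high byte
+		 * in the top byte of my_ee.  Bit 0 of sl_ipok reports a good
+		 * IP header; 0xffff means the TCP/UDP checksum also passed.
+		 */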
+		checksum = (be32_to_cpu(cqe->rqpn) >> 24) |
+				((be32_to_cpu(cqe->my_ee) >> 16) & 0xff00);
+		entry->csum_ok = (cqe->sl_ipok & 1 && checksum == 0xffff);
 	}
 
 	entry->status = IB_WC_SUCCESS;
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index 7bbdd1f..0e842e0 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -54,8 +54,8 @@
 
 #define DRV_NAME	"ib_mthca"
 #define PFX		DRV_NAME ": "
-#define DRV_VERSION	"0.08"
-#define DRV_RELDATE	"February 14, 2006"
+#define DRV_VERSION	"1.0"
+#define DRV_RELDATE	"April 4, 2008"
 
 enum {
 	MTHCA_FLAG_DDR_HIDDEN = 1 << 1,
@@ -390,11 +390,11 @@
 	do {                                                          \
 		void *__p = (char *) (source) + (offset);             \
 		switch (sizeof (dest)) {                              \
-			case 1: (dest) = *(u8 *) __p;       break;    \
-			case 2: (dest) = be16_to_cpup(__p); break;    \
-			case 4: (dest) = be32_to_cpup(__p); break;    \
-			case 8: (dest) = be64_to_cpup(__p); break;    \
-			default: __buggy_use_of_MTHCA_GET();          \
+		case 1: (dest) = *(u8 *) __p;       break;	      \
+		case 2: (dest) = be16_to_cpup(__p); break;	      \
+		case 4: (dest) = be32_to_cpup(__p); break;	      \
+		case 8: (dest) = be64_to_cpup(__p); break;	      \
+		default: __buggy_use_of_MTHCA_GET();		      \
 		}                                                     \
 	} while (0)
 
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index b60eb5d..8bde7f9 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -232,9 +232,9 @@
 	return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
 }
 
-static inline struct mthca_eqe* next_eqe_sw(struct mthca_eq *eq)
+static inline struct mthca_eqe *next_eqe_sw(struct mthca_eq *eq)
 {
-	struct mthca_eqe* eqe;
+	struct mthca_eqe *eqe;
 	eqe = get_eqe(eq, eq->cons_index);
 	return (MTHCA_EQ_ENTRY_OWNER_HW & eqe->owner) ? NULL : eqe;
 }
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index acfa41d..8b7e83e 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -125,7 +125,7 @@
 			event.device           = ibdev;
 			event.element.port_num = port_num;
 
-			if(pinfo->clientrereg_resv_subnetto & 0x80)
+			if (pinfo->clientrereg_resv_subnetto & 0x80)
 				event.event    = IB_EVENT_CLIENT_REREGISTER;
 			else
 				event.event    = IB_EVENT_LID_CHANGE;
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index cd3d8ad..9ebadd6 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -267,11 +267,16 @@
 	if (dev_lim->flags & DEV_LIM_FLAG_SRQ)
 		mdev->mthca_flags |= MTHCA_FLAG_SRQ;
 
+	if (mthca_is_memfree(mdev))
+		if (dev_lim->flags & DEV_LIM_FLAG_IPOIB_CSUM)
+			mdev->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
+
 	return 0;
 }
 
 static int mthca_init_tavor(struct mthca_dev *mdev)
 {
+	s64 size;
 	u8 status;
 	int err;
 	struct mthca_dev_lim        dev_lim;
@@ -324,9 +329,11 @@
 	if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
 		profile.num_srq = dev_lim.max_srqs;
 
-	err = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
-	if (err < 0)
+	size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
+	if (size < 0) {
+		err = size;
 		goto err_disable;
+	}
 
 	err = mthca_INIT_HCA(mdev, &init_hca, &status);
 	if (err) {
@@ -605,7 +612,7 @@
 	struct mthca_dev_lim        dev_lim;
 	struct mthca_profile        profile;
 	struct mthca_init_hca_param init_hca;
-	u64 icm_size;
+	s64 icm_size;
 	u8 status;
 	int err;
 
@@ -653,7 +660,7 @@
 		profile.num_srq = dev_lim.max_srqs;
 
 	icm_size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
-	if ((int) icm_size < 0) {
+	if (icm_size < 0) {
 		err = icm_size;
 		goto err_stop_fw;
 	}
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index 252db08..b224079 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -359,12 +359,14 @@
 					      int use_lowmem, int use_coherent)
 {
 	struct mthca_icm_table *table;
+	int obj_per_chunk;
 	int num_icm;
 	unsigned chunk_size;
 	int i;
 	u8 status;
 
-	num_icm = (obj_size * nobj + MTHCA_TABLE_CHUNK_SIZE - 1) / MTHCA_TABLE_CHUNK_SIZE;
+	obj_per_chunk = MTHCA_TABLE_CHUNK_SIZE / obj_size;
+	num_icm = DIV_ROUND_UP(nobj, obj_per_chunk);
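+	/*
+	 * Computing objects per chunk first keeps the intermediate result
+	 * from overflowing on huge tables (obj_size * nobj is gone); it
+	 * matches the old round-up whenever obj_size evenly divides
+	 * MTHCA_TABLE_CHUNK_SIZE.
+	 */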
 
 	table = kmalloc(sizeof *table + num_icm * sizeof *table->icm, GFP_KERNEL);
 	if (!table)
@@ -412,7 +414,7 @@
 		if (table->icm[i]) {
 			mthca_UNMAP_ICM(dev, virt + i * MTHCA_TABLE_CHUNK_SIZE,
 					MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
-				        &status);
+					&status);
 			mthca_free_icm(dev, table->icm[i], table->coherent);
 		}
 
diff --git a/drivers/infiniband/hw/mthca/mthca_profile.c b/drivers/infiniband/hw/mthca/mthca_profile.c
index 26bf86d..605a8d5 100644
--- a/drivers/infiniband/hw/mthca/mthca_profile.c
+++ b/drivers/infiniband/hw/mthca/mthca_profile.c
@@ -63,7 +63,7 @@
 	MTHCA_NUM_PDS = 1 << 15
 };
 
-u64 mthca_make_profile(struct mthca_dev *dev,
+s64 mthca_make_profile(struct mthca_dev *dev,
 		       struct mthca_profile *request,
 		       struct mthca_dev_lim *dev_lim,
 		       struct mthca_init_hca_param *init_hca)
@@ -77,7 +77,7 @@
 	};
 
 	u64 mem_base, mem_avail;
-	u64 total_size = 0;
+	s64 total_size = 0;
 	struct mthca_resource *profile;
 	struct mthca_resource tmp;
 	int i, j;
diff --git a/drivers/infiniband/hw/mthca/mthca_profile.h b/drivers/infiniband/hw/mthca/mthca_profile.h
index 9464180..e76cb62 100644
--- a/drivers/infiniband/hw/mthca/mthca_profile.h
+++ b/drivers/infiniband/hw/mthca/mthca_profile.h
@@ -53,7 +53,7 @@
 	int fmr_reserved_mtts;
 };
 
-u64 mthca_make_profile(struct mthca_dev *mdev,
+s64 mthca_make_profile(struct mthca_dev *mdev,
 		       struct mthca_profile *request,
 		       struct mthca_dev_lim *dev_lim,
 		       struct mthca_init_hca_param *init_hca);
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 9e491df..81b257e 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -60,7 +60,7 @@
 	struct ib_smp *in_mad  = NULL;
 	struct ib_smp *out_mad = NULL;
 	int err = -ENOMEM;
-	struct mthca_dev* mdev = to_mdev(ibdev);
+	struct mthca_dev *mdev = to_mdev(ibdev);
 
 	u8 status;
 
@@ -540,6 +540,9 @@
 	struct mthca_qp *qp;
 	int err;
 
+	if (init_attr->create_flags)
+		return ERR_PTR(-EINVAL);
+
 	switch (init_attr->qp_type) {
 	case IB_QPT_RC:
 	case IB_QPT_UC:
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index db5595b..09dc361 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -437,29 +437,34 @@
 	int mthca_state;
 	u8 status;
 
+	mutex_lock(&qp->mutex);
+
 	if (qp->state == IB_QPS_RESET) {
 		qp_attr->qp_state = IB_QPS_RESET;
 		goto done;
 	}
 
 	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
-	if (IS_ERR(mailbox))
-		return PTR_ERR(mailbox);
+	if (IS_ERR(mailbox)) {
+		err = PTR_ERR(mailbox);
+		goto out;
+	}
 
 	err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox, &status);
 	if (err)
-		goto out;
+		goto out_mailbox;
 	if (status) {
 		mthca_warn(dev, "QUERY_QP returned status %02x\n", status);
 		err = -EINVAL;
-		goto out;
+		goto out_mailbox;
 	}
 
 	qp_param    = mailbox->buf;
 	context     = &qp_param->context;
 	mthca_state = be32_to_cpu(context->flags) >> 28;
 
-	qp_attr->qp_state 	     = to_ib_qp_state(mthca_state);
+	qp->state		     = to_ib_qp_state(mthca_state);
+	qp_attr->qp_state	     = qp->state;
 	qp_attr->path_mtu 	     = context->mtu_msgmax >> 5;
 	qp_attr->path_mig_state      =
 		to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
@@ -506,8 +511,11 @@
 
 	qp_init_attr->cap	     = qp_attr->cap;
 
-out:
+out_mailbox:
 	mthca_free_mailbox(dev, mailbox);
+
+out:
+	mutex_unlock(&qp->mutex);
 	return err;
 }
 
@@ -1532,7 +1540,7 @@
 	case IB_WR_SEND_WITH_IMM:
 		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
 		sqp->ud_header.immediate_present = 1;
-		sqp->ud_header.immediate_data = wr->imm_data;
+		sqp->ud_header.immediate_data = wr->ex.imm_data;
 		break;
 	default:
 		return -EINVAL;
@@ -1679,7 +1687,7 @@
 			cpu_to_be32(1);
 		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
 		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
-			((struct mthca_next_seg *) wqe)->imm = wr->imm_data;
+			((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data;
 
 		wqe += sizeof (struct mthca_next_seg);
 		size = sizeof (struct mthca_next_seg) / 16;
@@ -2015,10 +2023,12 @@
 			 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
 			((wr->send_flags & IB_SEND_SOLICITED) ?
 			 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0)   |
+			((wr->send_flags & IB_SEND_IP_CSUM) ?
+			 cpu_to_be32(MTHCA_NEXT_IP_CSUM | MTHCA_NEXT_TCP_UDP_CSUM) : 0) |
 			cpu_to_be32(1);
 		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
 		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
-			((struct mthca_next_seg *) wqe)->imm = wr->imm_data;
+			((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data;
 
 		wqe += sizeof (struct mthca_next_seg);
 		size = sizeof (struct mthca_next_seg) / 16;
diff --git a/drivers/infiniband/hw/mthca/mthca_wqe.h b/drivers/infiniband/hw/mthca/mthca_wqe.h
index f6a66fe..b3551a8 100644
--- a/drivers/infiniband/hw/mthca/mthca_wqe.h
+++ b/drivers/infiniband/hw/mthca/mthca_wqe.h
@@ -38,14 +38,16 @@
 #include <linux/types.h>
 
 enum {
-	MTHCA_NEXT_DBD       = 1 << 7,
-	MTHCA_NEXT_FENCE     = 1 << 6,
-	MTHCA_NEXT_CQ_UPDATE = 1 << 3,
-	MTHCA_NEXT_EVENT_GEN = 1 << 2,
-	MTHCA_NEXT_SOLICIT   = 1 << 1,
+	MTHCA_NEXT_DBD		= 1 << 7,
+	MTHCA_NEXT_FENCE	= 1 << 6,
+	MTHCA_NEXT_CQ_UPDATE	= 1 << 3,
+	MTHCA_NEXT_EVENT_GEN	= 1 << 2,
+	MTHCA_NEXT_SOLICIT	= 1 << 1,
+	MTHCA_NEXT_IP_CSUM	= 1 << 4,
+	MTHCA_NEXT_TCP_UDP_CSUM = 1 << 5,
 
-	MTHCA_MLX_VL15       = 1 << 17,
-	MTHCA_MLX_SLR        = 1 << 16
+	MTHCA_MLX_VL15		= 1 << 17,
+	MTHCA_MLX_SLR		= 1 << 16
 };
 
 enum {
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index b2112f5..b00b0e3 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -65,7 +65,6 @@
 MODULE_VERSION(DRV_VERSION);
 
 int max_mtu = 9000;
-int nics_per_function = 1;
 int interrupt_mod_interval = 0;
 
 
@@ -93,15 +92,9 @@
 MODULE_PARM_DESC(debug_level, "Enable debug output level");
 
 LIST_HEAD(nes_adapter_list);
-LIST_HEAD(nes_dev_list);
+static LIST_HEAD(nes_dev_list);
 
 atomic_t qps_destroyed;
-atomic_t cqp_reqs_allocated;
-atomic_t cqp_reqs_freed;
-atomic_t cqp_reqs_dynallocated;
-atomic_t cqp_reqs_dynfreed;
-atomic_t cqp_reqs_queued;
-atomic_t cqp_reqs_redriven;
 
 static void nes_print_macaddr(struct net_device *netdev);
 static irqreturn_t nes_interrupt(int, void *);
@@ -310,7 +303,7 @@
 
 	if (atomic_read(&nesqp->refcount) == 0) {
 		printk(KERN_INFO PFX "%s: Reference count already 0 for QP%d, last aeq = 0x%04X.\n",
-				__FUNCTION__, ibqp->qp_num, nesqp->last_aeq);
+				__func__, ibqp->qp_num, nesqp->last_aeq);
 		BUG();
 	}
 
@@ -751,13 +744,13 @@
 
 	list_del(&nesdev->list);
 	nes_destroy_cqp(nesdev);
+
+	free_irq(pcidev->irq, nesdev);
 	tasklet_kill(&nesdev->dpc_tasklet);
 
 	/* Deallocate the Adapter Structure */
 	nes_destroy_adapter(nesdev->nesadapter);
 
-	free_irq(pcidev->irq, nesdev);
-
 	if (nesdev->msi_enabled) {
 		pci_disable_msi(pcidev);
 	}
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index a48b288..1626124 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -143,12 +143,12 @@
 #ifdef CONFIG_INFINIBAND_NES_DEBUG
 #define nes_debug(level, fmt, args...) \
 	if (level & nes_debug_level) \
-		printk(KERN_ERR PFX "%s[%u]: " fmt, __FUNCTION__, __LINE__, ##args)
+		printk(KERN_ERR PFX "%s[%u]: " fmt, __func__, __LINE__, ##args)
 
 #define assert(expr)                                                \
 if (!(expr)) {                                                       \
 	printk(KERN_ERR PFX "Assertion failed! %s, %s, %s, line %d\n",  \
-		   #expr, __FILE__, __FUNCTION__, __LINE__);                \
+		   #expr, __FILE__, __func__, __LINE__);                \
 }
 
 #define NES_EVENT_TIMEOUT   1200000
@@ -166,7 +166,6 @@
 #include "nes_cm.h"
 
 extern int max_mtu;
-extern int nics_per_function;
 #define max_frame_len (max_mtu+ETH_HLEN)
 extern int interrupt_mod_interval;
 extern int nes_if_count;
@@ -177,9 +176,6 @@
 extern unsigned int nes_debug_level;
 
 extern struct list_head nes_adapter_list;
-extern struct list_head nes_dev_list;
-
-extern struct nes_cm_core *g_cm_core;
 
 extern atomic_t cm_connects;
 extern atomic_t cm_accepts;
@@ -209,7 +205,6 @@
 extern atomic_t cm_accel_dropped_pkts;
 extern atomic_t cm_resets_recvd;
 
-extern u32 crit_err_count;
 extern u32 int_mod_timer_init;
 extern u32 int_mod_cq_depth_256;
 extern u32 int_mod_cq_depth_128;
@@ -219,14 +214,6 @@
 extern u32 int_mod_cq_depth_4;
 extern u32 int_mod_cq_depth_1;
 
-extern atomic_t cqp_reqs_allocated;
-extern atomic_t cqp_reqs_freed;
-extern atomic_t cqp_reqs_dynallocated;
-extern atomic_t cqp_reqs_dynfreed;
-extern atomic_t cqp_reqs_queued;
-extern atomic_t cqp_reqs_redriven;
-
-
 struct nes_device {
 	struct nes_adapter	   *nesadapter;
 	void __iomem           *regs;
@@ -412,7 +399,7 @@
 	if (resource_num >= max_resources) {
 		resource_num = find_first_zero_bit(resource_array, max_resources);
 		if (resource_num >= max_resources) {
-			printk(KERN_ERR PFX "%s: No available resourcess.\n", __FUNCTION__);
+			printk(KERN_ERR PFX "%s: No available resources.\n", __func__);
 			spin_unlock_irqrestore(&nesadapter->resource_lock, flags);
 			return -EMFILE;
 		}
@@ -510,9 +497,6 @@
 /* nes_hw.c */
 struct nes_adapter *nes_init_adapter(struct nes_device *, u8);
 void  nes_nic_init_timer_defaults(struct nes_device *, u8);
-unsigned int nes_reset_adapter_ne020(struct nes_device *, u8 *);
-int nes_init_serdes(struct nes_device *, u8, u8, u8);
-void nes_init_csr_ne020(struct nes_device *, u8, u8);
 void nes_destroy_adapter(struct nes_adapter *);
 int nes_init_cqp(struct nes_device *);
 int nes_init_phy(struct nes_device *);
@@ -520,20 +504,12 @@
 void nes_destroy_nic_qp(struct nes_vnic *);
 int nes_napi_isr(struct nes_device *);
 void nes_dpc(unsigned long);
-void nes_process_ceq(struct nes_device *, struct nes_hw_ceq *);
-void nes_process_aeq(struct nes_device *, struct nes_hw_aeq *);
-void nes_process_mac_intr(struct nes_device *, u32);
-void nes_nic_napi_ce_handler(struct nes_device *, struct nes_hw_nic_cq *);
 void nes_nic_ce_handler(struct nes_device *, struct nes_hw_nic_cq *);
-void nes_cqp_ce_handler(struct nes_device *, struct nes_hw_cq *);
-void nes_process_iwarp_aeqe(struct nes_device *, struct nes_hw_aeqe *);
 void nes_iwarp_ce_handler(struct nes_device *, struct nes_hw_cq *);
 int nes_destroy_cqp(struct nes_device *);
 int nes_nic_cm_xmit(struct sk_buff *, struct net_device *);
 
 /* nes_nic.c */
-void nes_netdev_set_multicast_list(struct net_device *);
-void nes_netdev_exit(struct nes_vnic *);
 struct net_device *nes_netdev_init(struct nes_device *, void __iomem *);
 void nes_netdev_destroy(struct net_device *);
 int nes_nic_cm_xmit(struct sk_buff *, struct net_device *);
@@ -544,7 +520,6 @@
 void nes_update_arp(unsigned char *, u32, u32, u16, u16);
 void nes_manage_arp_cache(struct net_device *, unsigned char *, u32, u32);
 void nes_sock_release(struct nes_qp *, unsigned long *);
-struct nes_cm_core *nes_cm_alloc_core(void);
 void flush_wqes(struct nes_device *nesdev, struct nes_qp *, u32, u32);
 int nes_manage_apbvt(struct nes_vnic *, u32, u32, u32);
 int nes_cm_disconn(struct nes_qp *);
@@ -556,7 +531,6 @@
 struct nes_ib_device *nes_init_ofa_device(struct net_device *);
 void nes_destroy_ofa_device(struct nes_ib_device *);
 int nes_register_ofa_device(struct nes_ib_device *);
-void nes_unregister_ofa_device(struct nes_ib_device *);
 
 /* nes_util.c */
 int nes_read_eeprom_values(struct nes_device *, struct nes_adapter *);
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 0bef878..d073862 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -80,7 +80,30 @@
 static int add_ref_cm_node(struct nes_cm_node *);
 static int rem_ref_cm_node(struct nes_cm_core *, struct nes_cm_node *);
 static int mini_cm_del_listen(struct nes_cm_core *, struct nes_cm_listener *);
+static struct sk_buff *form_cm_frame(struct sk_buff *, struct nes_cm_node *,
+				     void *, u32, void *, u32, u8);
+static struct sk_buff *get_free_pkt(struct nes_cm_node *cm_node);
 
+static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *,
+					   struct nes_vnic *,
+					   struct ietf_mpa_frame *,
+					   struct nes_cm_info *);
+static int mini_cm_accept(struct nes_cm_core *, struct ietf_mpa_frame *,
+			  struct nes_cm_node *);
+static int mini_cm_reject(struct nes_cm_core *, struct ietf_mpa_frame *,
+			  struct nes_cm_node *);
+static int mini_cm_close(struct nes_cm_core *, struct nes_cm_node *);
+static int mini_cm_recv_pkt(struct nes_cm_core *, struct nes_vnic *,
+			    struct sk_buff *);
+static int mini_cm_dealloc_core(struct nes_cm_core *);
+static int mini_cm_get(struct nes_cm_core *);
+static int mini_cm_set(struct nes_cm_core *, u32, u32);
+static int nes_cm_disconn_true(struct nes_qp *);
+static int nes_cm_post_event(struct nes_cm_event *event);
+static int nes_disconnect(struct nes_qp *nesqp, int abrupt);
+static void nes_disconnect_worker(struct work_struct *work);
+static int send_ack(struct nes_cm_node *cm_node);
+static int send_fin(struct nes_cm_node *cm_node, struct sk_buff *skb);
 
 /* External CM API Interface */
 /* instance of function pointers for client API */
@@ -99,7 +122,7 @@
 	mini_cm_set
 };
 
-struct nes_cm_core *g_cm_core;
+static struct nes_cm_core *g_cm_core;
 
 atomic_t cm_connects;
 atomic_t cm_accepts;
@@ -149,7 +172,7 @@
 /**
  * send_mpa_request
  */
-int send_mpa_request(struct nes_cm_node *cm_node)
+static int send_mpa_request(struct nes_cm_node *cm_node)
 {
 	struct sk_buff *skb;
 	int ret;
@@ -243,8 +266,9 @@
  * form_cm_frame - get a free packet and build empty frame Use
  * node info to build.
  */
-struct sk_buff *form_cm_frame(struct sk_buff *skb, struct nes_cm_node *cm_node,
-		void *options, u32 optionsize, void *data, u32 datasize, u8 flags)
+static struct sk_buff *form_cm_frame(struct sk_buff *skb, struct nes_cm_node *cm_node,
+				     void *options, u32 optionsize, void *data,
+				     u32 datasize, u8 flags)
 {
 	struct tcphdr *tcph;
 	struct iphdr *iph;
@@ -342,7 +366,6 @@
 	if (!core)
 		return;
 	nes_debug(NES_DBG_CM, "---------------------------------------------\n");
-	nes_debug(NES_DBG_CM, "Session ID    : %u \n", atomic_read(&core->session_id));
 
 	nes_debug(NES_DBG_CM, "State         : %u \n",  core->state);
 
@@ -395,7 +418,7 @@
 	}
 
 	if (type == NES_TIMER_TYPE_SEND) {
-		new_send->seq_num = htonl(tcp_hdr(skb)->seq);
+		new_send->seq_num = ntohl(tcp_hdr(skb)->seq);
 		atomic_inc(&new_send->skb->users);
 
 		ret = nes_nic_cm_xmit(new_send->skb, cm_node->netdev);
@@ -420,7 +443,7 @@
 		spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
 	}
 	if (type == NES_TIMER_TYPE_RECV) {
-		new_send->seq_num = htonl(tcp_hdr(skb)->seq);
+		new_send->seq_num = ntohl(tcp_hdr(skb)->seq);
 		new_send->timetosend = jiffies;
 		spin_lock_irqsave(&cm_node->recv_list_lock, flags);
 		list_add_tail(&new_send->list, &cm_node->recv_list);
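
The two htonl -> ntohl hunks above fix the direction of the byte-order
conversion: tcp_hdr(skb)->seq arrives in network byte order, so turning
it into a host-order sequence number calls for ntohl(). The two helpers
happen to perform the same byte swap on both endiannesses, which is
presumably why this never misbehaved; the change documents the data flow
and keeps __be32 annotations honest. A standalone sketch using the
userspace equivalents:

	#include <arpa/inet.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* as it would sit in a TCP header: network byte order */
		uint32_t wire_seq = htonl(0x12345678u);

		/* wire -> host: ntohl() matches the direction of the data */
		printf("host seq = 0x%08x\n", ntohl(wire_seq));
		return 0;
	}
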
@@ -442,7 +465,7 @@
 /**
  * nes_cm_timer_tick
  */
-void nes_cm_timer_tick(unsigned long pass)
+static void nes_cm_timer_tick(unsigned long pass)
 {
 	unsigned long flags, qplockflags;
 	unsigned long nexttimeout = jiffies + NES_LONG_TIME;
@@ -644,7 +667,7 @@
 /**
  * send_syn
  */
-int send_syn(struct nes_cm_node *cm_node, u32 sendack)
+static int send_syn(struct nes_cm_node *cm_node, u32 sendack)
 {
 	int ret;
 	int flags = SET_SYN;
@@ -710,7 +733,7 @@
 /**
  * send_reset
  */
-int send_reset(struct nes_cm_node *cm_node)
+static int send_reset(struct nes_cm_node *cm_node)
 {
 	int ret;
 	struct sk_buff *skb = get_free_pkt(cm_node);
@@ -732,7 +755,7 @@
 /**
  * send_ack
  */
-int send_ack(struct nes_cm_node *cm_node)
+static int send_ack(struct nes_cm_node *cm_node)
 {
 	int ret;
 	struct sk_buff *skb = get_free_pkt(cm_node);
@@ -752,7 +775,7 @@
 /**
  * send_fin
  */
-int send_fin(struct nes_cm_node *cm_node, struct sk_buff *skb)
+static int send_fin(struct nes_cm_node *cm_node, struct sk_buff *skb)
 {
 	int ret;
 
@@ -775,7 +798,7 @@
 /**
  * get_free_pkt
  */
-struct sk_buff *get_free_pkt(struct nes_cm_node *cm_node)
+static struct sk_buff *get_free_pkt(struct nes_cm_node *cm_node)
 {
 	struct sk_buff *skb, *new_skb;
 
@@ -820,7 +843,6 @@
 {
 	unsigned long flags;
 	u32 hashkey;
-	struct list_head *list_pos;
 	struct list_head *hte;
 	struct nes_cm_node *cm_node;
 
@@ -835,8 +857,7 @@
 
 	/* walk list and find cm_node associated with this session ID */
 	spin_lock_irqsave(&cm_core->ht_lock, flags);
-	list_for_each(list_pos, hte) {
-		cm_node = container_of(list_pos, struct nes_cm_node, list);
+	list_for_each_entry(cm_node, hte, list) {
 		/* compare quad, return node handle if a match */
 		nes_debug(NES_DBG_CM, "finding node %x:%x =? %x:%x ^ %x:%x =? %x:%x\n",
 				cm_node->loc_addr, cm_node->loc_port,
@@ -864,13 +885,11 @@
 		nes_addr_t dst_addr, u16 dst_port, enum nes_cm_listener_state listener_state)
 {
 	unsigned long flags;
-	struct list_head *listen_list;
 	struct nes_cm_listener *listen_node;
 
 	/* walk list and find cm_node associated with this session ID */
 	spin_lock_irqsave(&cm_core->listen_list_lock, flags);
-	list_for_each(listen_list, &cm_core->listen_list.list) {
-		listen_node = container_of(listen_list, struct nes_cm_listener, list);
+	list_for_each_entry(listen_node, &cm_core->listen_list.list, list) {
 		/* compare node pair, return node handle if a match */
 		if (((listen_node->loc_addr == dst_addr) ||
 				listen_node->loc_addr == 0x00000000) &&
@@ -1014,7 +1033,7 @@
 	fl.nl_u.ip4_u.daddr = htonl(dst_ip);
 	if (ip_route_output_key(&init_net, &rt, &fl)) {
 		printk("%s: ip_route_output_key failed for 0x%08X\n",
-				__FUNCTION__, dst_ip);
+				__func__, dst_ip);
 		return;
 	}
 
@@ -1077,8 +1096,6 @@
 	cm_node->tcp_cntxt.rcv_nxt = 0;
 	/* get a unique session ID , add thread_id to an upcounter to handle race */
 	atomic_inc(&cm_core->node_cnt);
-	atomic_inc(&cm_core->session_id);
-	cm_node->session_id = (u32)(atomic_read(&cm_core->session_id) + current->tgid);
 	cm_node->conn_type = cm_info->conn_type;
 	cm_node->apbvt_set = 0;
 	cm_node->accept_pend = 0;
@@ -1239,7 +1256,7 @@
 				continue;
 			case OPTION_NUMBER_MSS:
 				nes_debug(NES_DBG_CM, "%s: MSS Length: %d Offset: %d Size: %d\n",
-						__FUNCTION__,
+						__func__,
 						all_options->as_mss.length, offset, optionsize);
 				got_mss_option = 1;
 				if (all_options->as_mss.length != 4) {
@@ -1272,8 +1289,8 @@
 /**
  * process_packet
  */
-int process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb,
-		struct nes_cm_core *cm_core)
+static int process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb,
+			  struct nes_cm_core *cm_core)
 {
 	int optionsize;
 	int datasize;
@@ -1360,7 +1377,7 @@
 	if (optionsize) {
 		u8 *optionsloc = (u8 *)&tcph[1];
 		if (process_options(cm_node, optionsloc, optionsize, (u32)tcph->syn)) {
-			nes_debug(NES_DBG_CM, "%s: Node %p, Sending RESET\n", __FUNCTION__, cm_node);
+			nes_debug(NES_DBG_CM, "%s: Node %p, Sending RESET\n", __func__, cm_node);
 			send_reset(cm_node);
 			if (cm_node->state != NES_CM_STATE_SYN_SENT)
 			rem_ref_cm_node(cm_core, cm_node);
@@ -1605,9 +1622,7 @@
 	listener->cm_core = cm_core;
 	listener->nesvnic = nesvnic;
 	atomic_inc(&cm_core->node_cnt);
-	atomic_inc(&cm_core->session_id);
 
-	listener->session_id = (u32)(atomic_read(&cm_core->session_id) + current->tgid);
 	listener->conn_type = cm_info->conn_type;
 	listener->backlog = cm_info->backlog;
 	listener->listener_state = NES_CM_LISTENER_ACTIVE_STATE;
@@ -1631,9 +1646,10 @@
 /**
  * mini_cm_connect - make a connection node with params
  */
-struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
-		struct nes_vnic *nesvnic, struct ietf_mpa_frame *mpa_frame,
-		struct nes_cm_info *cm_info)
+static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
+					   struct nes_vnic *nesvnic,
+					   struct ietf_mpa_frame *mpa_frame,
+					   struct nes_cm_info *cm_info)
 {
 	int ret = 0;
 	struct nes_cm_node *cm_node;
@@ -1717,8 +1733,8 @@
  * mini_cm_accept - accept a connection
  * This function is never called
  */
-int mini_cm_accept(struct nes_cm_core *cm_core, struct ietf_mpa_frame *mpa_frame,
-		struct nes_cm_node *cm_node)
+static int mini_cm_accept(struct nes_cm_core *cm_core, struct ietf_mpa_frame *mpa_frame,
+			  struct nes_cm_node *cm_node)
 {
 	return 0;
 }
@@ -1727,9 +1743,9 @@
 /**
  * mini_cm_reject - reject and teardown a connection
  */
-int mini_cm_reject(struct nes_cm_core *cm_core,
-		struct ietf_mpa_frame *mpa_frame,
-		struct nes_cm_node *cm_node)
+static int mini_cm_reject(struct nes_cm_core *cm_core,
+			  struct ietf_mpa_frame *mpa_frame,
+			  struct nes_cm_node *cm_node)
 {
 	int ret = 0;
 	struct sk_buff *skb;
@@ -1761,7 +1777,7 @@
 /**
  * mini_cm_close
  */
-int mini_cm_close(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node)
+static int mini_cm_close(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node)
 {
 	int ret = 0;
 
@@ -1808,8 +1824,8 @@
  * recv_pkt - recv an ETHERNET packet, and process it through CM
  * node state machine
  */
-int mini_cm_recv_pkt(struct nes_cm_core *cm_core, struct nes_vnic *nesvnic,
-		struct sk_buff *skb)
+static int mini_cm_recv_pkt(struct nes_cm_core *cm_core, struct nes_vnic *nesvnic,
+			    struct sk_buff *skb)
 {
 	struct nes_cm_node *cm_node = NULL;
 	struct nes_cm_listener *listener = NULL;
@@ -1898,7 +1914,7 @@
 /**
  * nes_cm_alloc_core - allocate a top level instance of a cm core
  */
-struct nes_cm_core *nes_cm_alloc_core(void)
+static struct nes_cm_core *nes_cm_alloc_core(void)
 {
 	int i;
 
@@ -1919,7 +1935,6 @@
 	cm_core->state = NES_CM_STATE_INITED;
 	cm_core->free_tx_pkt_max = NES_CM_DEFAULT_FREE_PKTS;
 
-	atomic_set(&cm_core->session_id, 0);
 	atomic_set(&cm_core->events_posted, 0);
 
 	/* init the packet lists */
@@ -1958,7 +1973,7 @@
 /**
  * mini_cm_dealloc_core - deallocate a top level instance of a cm core
  */
-int mini_cm_dealloc_core(struct nes_cm_core *cm_core)
+static int mini_cm_dealloc_core(struct nes_cm_core *cm_core)
 {
 	nes_debug(NES_DBG_CM, "De-Alloc CM Core (%p)\n", cm_core);
 
@@ -1983,7 +1998,7 @@
 /**
  * mini_cm_get
  */
-int mini_cm_get(struct nes_cm_core *cm_core)
+static int mini_cm_get(struct nes_cm_core *cm_core)
 {
 	return cm_core->state;
 }
@@ -1992,7 +2007,7 @@
 /**
  * mini_cm_set
  */
-int mini_cm_set(struct nes_cm_core *cm_core, u32 type, u32 value)
+static int mini_cm_set(struct nes_cm_core *cm_core, u32 type, u32 value)
 {
 	int ret = 0;
 
@@ -2109,7 +2124,7 @@
 /**
  * nes_disconnect_worker
  */
-void nes_disconnect_worker(struct work_struct *work)
+static void nes_disconnect_worker(struct work_struct *work)
 {
 	struct nes_qp *nesqp = container_of(work, struct nes_qp, disconn_work);
 
@@ -2122,7 +2137,7 @@
 /**
  * nes_cm_disconn_true
  */
-int nes_cm_disconn_true(struct nes_qp *nesqp)
+static int nes_cm_disconn_true(struct nes_qp *nesqp)
 {
 	unsigned long flags;
 	int ret = 0;
@@ -2265,7 +2280,7 @@
 /**
  * nes_disconnect
  */
-int nes_disconnect(struct nes_qp *nesqp, int abrupt)
+static int nes_disconnect(struct nes_qp *nesqp, int abrupt)
 {
 	int ret = 0;
 	struct nes_vnic *nesvnic;
@@ -2482,7 +2497,7 @@
 	}
 	if (ret)
 		printk("%s[%u] OFA CM event_handler returned, ret=%d\n",
-				__FUNCTION__, __LINE__, ret);
+				__func__, __LINE__, ret);
 
 	return 0;
 }
@@ -2650,7 +2665,7 @@
 	cm_node = g_cm_core->api->listen(g_cm_core, nesvnic, &cm_info);
 	if (!cm_node) {
 		printk("%s[%u] Error returned from listen API call\n",
-				__FUNCTION__, __LINE__);
+				__func__, __LINE__);
 		return -ENOMEM;
 	}
 
@@ -2740,7 +2755,7 @@
  * cm_event_connected
  * handle a connected event, setup QPs and HW
  */
-void cm_event_connected(struct nes_cm_event *event)
+static void cm_event_connected(struct nes_cm_event *event)
 {
 	u64 u64temp;
 	struct nes_qp *nesqp;
@@ -2864,7 +2879,7 @@
 
 	if (ret)
 		printk("%s[%u] OFA CM event_handler returned, ret=%d\n",
-				__FUNCTION__, __LINE__, ret);
+				__func__, __LINE__, ret);
 	nes_debug(NES_DBG_CM, "Exiting connect thread for QP%u. jiffies = %lu\n",
 			nesqp->hwqp.qp_id, jiffies );
 
@@ -2877,7 +2892,7 @@
 /**
  * cm_event_connect_error
  */
-void cm_event_connect_error(struct nes_cm_event *event)
+static void cm_event_connect_error(struct nes_cm_event *event)
 {
 	struct nes_qp *nesqp;
 	struct iw_cm_id *cm_id;
@@ -2919,7 +2934,7 @@
 	nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);
 	if (ret)
 		printk("%s[%u] OFA CM event_handler returned, ret=%d\n",
-				__FUNCTION__, __LINE__, ret);
+				__func__, __LINE__, ret);
 	nes_rem_ref(&nesqp->ibqp);
 		cm_id->rem_ref(cm_id);
 
@@ -2930,7 +2945,7 @@
 /**
  * cm_event_reset
  */
-void cm_event_reset(struct nes_cm_event *event)
+static void cm_event_reset(struct nes_cm_event *event)
 {
 	struct nes_qp *nesqp;
 	struct iw_cm_id *cm_id;
@@ -2973,7 +2988,7 @@
 /**
  * cm_event_mpa_req
  */
-void cm_event_mpa_req(struct nes_cm_event *event)
+static void cm_event_mpa_req(struct nes_cm_event *event)
 {
 	struct iw_cm_id   *cm_id;
 	struct iw_cm_event cm_event;
@@ -3007,7 +3022,7 @@
 	ret = cm_id->event_handler(cm_id, &cm_event);
 	if (ret)
 		printk("%s[%u] OFA CM event_handler returned, ret=%d\n",
-				__FUNCTION__, __LINE__, ret);
+				__func__, __LINE__, ret);
 
 	return;
 }
@@ -3019,7 +3034,7 @@
  * nes_cm_post_event
  * post an event to the cm event handler
  */
-int nes_cm_post_event(struct nes_cm_event *event)
+static int nes_cm_post_event(struct nes_cm_event *event)
 {
 	atomic_inc(&event->cm_node->cm_core->events_posted);
 	add_ref_cm_node(event->cm_node);
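
Two hunks in nes_cm.c above replace the open-coded list_for_each() plus
container_of() pair with list_for_each_entry(), which folds the
container_of() into the iterator and removes the scratch struct
list_head pointer. A reduced sketch of the conversion (the struct is
hypothetical):

	#include <linux/list.h>

	struct node {
		int val;
		struct list_head list;
	};

	static int sum_old(struct list_head *head)
	{
		struct list_head *pos;
		struct node *n;
		int sum = 0;

		list_for_each(pos, head) {
			n = container_of(pos, struct node, list);
			sum += n->val;
		}
		return sum;
	}

	static int sum_new(struct list_head *head)
	{
		struct node *n;
		int sum = 0;

		/* the iterator yields the containing struct directly */
		list_for_each_entry(n, head, list)
			sum += n->val;
		return sum;
	}
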
diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h
index a59f0a7..7717cb2 100644
--- a/drivers/infiniband/hw/nes/nes_cm.h
+++ b/drivers/infiniband/hw/nes/nes_cm.h
@@ -225,7 +225,6 @@
 
 struct nes_cm_listener {
 	struct list_head           list;
-	u64                        session_id;
 	struct nes_cm_core         *cm_core;
 	u8                         loc_mac[ETH_ALEN];
 	nes_addr_t                 loc_addr;
@@ -242,7 +241,6 @@
 
 /* per connection node and node state information */
 struct nes_cm_node {
-	u64                       session_id;
 	u32                       hashkey;
 
 	nes_addr_t                loc_addr, rem_addr;
@@ -327,7 +325,6 @@
 
 struct nes_cm_core {
 	enum nes_cm_node_state  state;
-	atomic_t                session_id;
 
 	atomic_t                listen_node_cnt;
 	struct nes_cm_node      listen_list;
@@ -383,35 +380,10 @@
 	int (*set)(struct nes_cm_core *, u32, u32);
 };
 
-
-int send_mpa_request(struct nes_cm_node *);
-struct sk_buff *form_cm_frame(struct sk_buff *, struct nes_cm_node *,
-		void *, u32, void *, u32, u8);
 int schedule_nes_timer(struct nes_cm_node *, struct sk_buff *,
 		enum nes_timer_type, int, int);
-void nes_cm_timer_tick(unsigned long);
-int send_syn(struct nes_cm_node *, u32);
-int send_reset(struct nes_cm_node *);
-int send_ack(struct nes_cm_node *);
-int send_fin(struct nes_cm_node *, struct sk_buff *);
-struct sk_buff *get_free_pkt(struct nes_cm_node *);
-int process_packet(struct nes_cm_node *, struct sk_buff *, struct nes_cm_core *);
-
-struct nes_cm_node * mini_cm_connect(struct nes_cm_core *,
-		struct nes_vnic *, struct ietf_mpa_frame *, struct nes_cm_info *);
-int mini_cm_accept(struct nes_cm_core *, struct ietf_mpa_frame *, struct nes_cm_node *);
-int mini_cm_reject(struct nes_cm_core *, struct ietf_mpa_frame *, struct nes_cm_node *);
-int mini_cm_close(struct nes_cm_core *, struct nes_cm_node *);
-int mini_cm_recv_pkt(struct nes_cm_core *, struct nes_vnic *, struct sk_buff *);
-struct nes_cm_core *mini_cm_alloc_core(struct nes_cm_info *);
-int mini_cm_dealloc_core(struct nes_cm_core *);
-int mini_cm_get(struct nes_cm_core *);
-int mini_cm_set(struct nes_cm_core *, u32, u32);
 
 int nes_cm_disconn(struct nes_qp *);
-void nes_disconnect_worker(struct work_struct *);
-int nes_cm_disconn_true(struct nes_qp *);
-int nes_disconnect(struct nes_qp *, int);
 
 int nes_accept(struct iw_cm_id *, struct iw_cm_conn_param *);
 int nes_reject(struct iw_cm_id *, const void *, u8);
@@ -423,11 +395,4 @@
 int nes_cm_start(void);
 int nes_cm_stop(void);
 
-/* CM event handler functions */
-void cm_event_connected(struct nes_cm_event *);
-void cm_event_connect_error(struct nes_cm_event *);
-void cm_event_reset(struct nes_cm_event *);
-void cm_event_mpa_req(struct nes_cm_event *);
-int nes_cm_post_event(struct nes_cm_event *);
-
 #endif			/* NES_CM_H */
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 49e53e4..aa53aab 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -41,7 +41,7 @@
 
 #include "nes.h"
 
-u32 crit_err_count = 0;
+static u32 crit_err_count;
 u32 int_mod_timer_init;
 u32 int_mod_cq_depth_256;
 u32 int_mod_cq_depth_128;
@@ -53,6 +53,17 @@
 
 #include "nes_cm.h"
 
+static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq);
+static void nes_init_csr_ne020(struct nes_device *nesdev, u8 hw_rev, u8 port_count);
+static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count,
+			   u8 OneG_Mode);
+static void nes_nic_napi_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq);
+static void nes_process_aeq(struct nes_device *nesdev, struct nes_hw_aeq *aeq);
+static void nes_process_ceq(struct nes_device *nesdev, struct nes_hw_ceq *ceq);
+static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
+				   struct nes_hw_aeqe *aeqe);
+static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number);
+static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode);
 
 #ifdef CONFIG_INFINIBAND_NES_DEBUG
 static unsigned char *nes_iwarp_state_str[] = {
@@ -370,7 +381,7 @@
 		nesadapter->et_use_adaptive_rx_coalesce = 1;
 		nesadapter->timer_int_limit = NES_TIMER_INT_LIMIT_DYNAMIC;
 		nesadapter->et_rx_coalesce_usecs_irq = 0;
-		printk(PFX "%s: Using Adaptive Interrupt Moderation\n", __FUNCTION__);
+		printk(PFX "%s: Using Adaptive Interrupt Moderation\n", __func__);
 	}
 	/* Setup and enable the periodic timer */
 	if (nesadapter->et_rx_coalesce_usecs_irq)
@@ -382,7 +393,7 @@
 	nesadapter->base_pd = 1;
 
 	nesadapter->device_cap_flags =
-			IB_DEVICE_ZERO_STAG | IB_DEVICE_SEND_W_INV | IB_DEVICE_MEM_WINDOW;
+		IB_DEVICE_ZERO_STAG | IB_DEVICE_MEM_WINDOW;
 
 	nesadapter->allocated_qps = (unsigned long *)&(((unsigned char *)nesadapter)
 			[(sizeof(struct nes_adapter)+(sizeof(unsigned long)-1))&(~(sizeof(unsigned long)-1))]);
@@ -572,7 +583,7 @@
 		if (vendor_id == 0xffff)
 			break;
 	}
-	nes_debug(NES_DBG_INIT, "%s %d functions found for %s.\n", __FUNCTION__,
+	nes_debug(NES_DBG_INIT, "%s %d functions found for %s.\n", __func__,
 		func_index, pci_name(nesdev->pcidev));
 	nesadapter->adapter_fcn_count = func_index;
 
@@ -583,7 +594,7 @@
 /**
  * nes_reset_adapter_ne020
  */
-unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode)
+static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode)
 {
 	u32 port_count;
 	u32 u32temp;
@@ -691,7 +702,8 @@
 /**
  * nes_init_serdes
  */
-int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count, u8  OneG_Mode)
+static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count,
+			   u8 OneG_Mode)
 {
 	int i;
 	u32 u32temp;
@@ -739,7 +751,7 @@
 				& 0x0000000f)) != 0x0000000f) && (i++ < 5000))
 				mdelay(1);
 			if (i >= 5000) {
-				printk("%s: Init: serdes 1 not ready, status=%x\n", __FUNCTION__, u32temp);
+				printk("%s: Init: serdes 1 not ready, status=%x\n", __func__, u32temp);
 				/* return 1; */
 			}
 			nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP1, 0x000bdef7);
@@ -760,7 +772,7 @@
  * nes_init_csr_ne020
  * Initialize registers for ne020 hardware
  */
-void nes_init_csr_ne020(struct nes_device *nesdev, u8 hw_rev, u8 port_count)
+static void nes_init_csr_ne020(struct nes_device *nesdev, u8 hw_rev, u8 port_count)
 {
 	u32 u32temp;
 
@@ -1204,7 +1216,7 @@
 	if (nesadapter->OneG_Mode) {
 		nes_debug(NES_DBG_PHY, "1G PHY, mac_index = %d.\n", mac_index);
 		if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_1G) {
-			printk(PFX "%s: Programming mdc config for 1G\n", __FUNCTION__);
+			printk(PFX "%s: Programming mdc config for 1G\n", __func__);
 			tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG);
 			tx_config |= 0x04;
 			nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
@@ -1358,7 +1370,7 @@
 static void nes_rq_wqes_timeout(unsigned long parm)
 {
 	struct nes_vnic *nesvnic = (struct nes_vnic *)parm;
-	printk("%s: Timer fired.\n", __FUNCTION__);
+	printk("%s: Timer fired.\n", __func__);
 	atomic_set(&nesvnic->rx_skb_timer_running, 0);
 	if (atomic_read(&nesvnic->rx_skbs_needed))
 		nes_replenish_nic_rq(nesvnic);
@@ -1909,7 +1921,7 @@
 /**
  * nes_process_ceq
  */
-void nes_process_ceq(struct nes_device *nesdev, struct nes_hw_ceq *ceq)
+static void nes_process_ceq(struct nes_device *nesdev, struct nes_hw_ceq *ceq)
 {
 	u64 u64temp;
 	struct nes_hw_cq *cq;
@@ -1949,7 +1961,7 @@
 /**
  * nes_process_aeq
  */
-void nes_process_aeq(struct nes_device *nesdev, struct nes_hw_aeq *aeq)
+static void nes_process_aeq(struct nes_device *nesdev, struct nes_hw_aeq *aeq)
 {
 //	u64 u64temp;
 	u32 head;
@@ -2060,7 +2072,7 @@
 /**
  * nes_process_mac_intr
  */
-void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
+static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
 {
 	unsigned long flags;
 	u32 pcs_control_status;
@@ -2163,7 +2175,7 @@
 				temp_phy_data = phy_data;
 			} while (1);
 			nes_debug(NES_DBG_PHY, "%s: Phy data = 0x%04X, link was %s.\n",
-				__FUNCTION__, phy_data, nesadapter->mac_link_down ? "DOWN" : "UP");
+				__func__, phy_data, nesadapter->mac_link_down ? "DOWN" : "UP");
 
 		} else {
 			phy_data = (0x0f0f0000 == (pcs_control_status & 0x0f1f0000)) ? 4 : 0;
@@ -2205,7 +2217,7 @@
 
 
 
-void nes_nic_napi_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
+static void nes_nic_napi_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
 {
 	struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq);
 
@@ -2428,7 +2440,7 @@
 /**
  * nes_cqp_ce_handler
  */
-void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq)
+static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq)
 {
 	u64 u64temp;
 	unsigned long flags;
@@ -2567,7 +2579,8 @@
 /**
  * nes_process_iwarp_aeqe
  */
-void nes_process_iwarp_aeqe(struct nes_device *nesdev, struct nes_hw_aeqe *aeqe)
+static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
+				   struct nes_hw_aeqe *aeqe)
 {
 	u64 context;
 	u64 aeqe_context = 0;
@@ -2819,7 +2832,7 @@
 					le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]));
 			if (resource_allocated) {
 				printk(KERN_ERR PFX "%s: Processing an NES_AEQE_AEID_CQ_OPERATION_ERROR event on CQ%u\n",
-						__FUNCTION__, le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]));
+						__func__, le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]));
 			}
 			break;
 		case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index eee77da..34166641 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -802,7 +802,7 @@
 
 	memcpy(netdev->dev_addr, mac_addr->sa_data, netdev->addr_len);
 	printk(PFX "%s: Address length = %d, Address = %02X%02X%02X%02X%02X%02X..\n",
-		   __FUNCTION__, netdev->addr_len,
+		   __func__, netdev->addr_len,
 		   mac_addr->sa_data[0], mac_addr->sa_data[1],
 		   mac_addr->sa_data[2], mac_addr->sa_data[3],
 		   mac_addr->sa_data[4], mac_addr->sa_data[5]);
@@ -832,7 +832,7 @@
 /**
  * nes_netdev_set_multicast_list
  */
-void nes_netdev_set_multicast_list(struct net_device *netdev)
+static void nes_netdev_set_multicast_list(struct net_device *netdev)
 {
 	struct nes_vnic *nesvnic = netdev_priv(netdev);
 	struct nes_device *nesdev = nesvnic->nesdev;
@@ -947,28 +947,6 @@
 	return ret;
 }
 
-
-/**
- * nes_netdev_exit - destroy network device
- */
-void nes_netdev_exit(struct nes_vnic *nesvnic)
-{
-	struct net_device *netdev = nesvnic->netdev;
-	struct nes_ib_device *nesibdev = nesvnic->nesibdev;
-
-	nes_debug(NES_DBG_SHUTDOWN, "\n");
-
-	// destroy the ibdevice if RDMA enabled
-	if ((nesvnic->rdma_enabled)&&(nesvnic->of_device_registered)) {
-		nes_destroy_ofa_device( nesibdev );
-		nesvnic->of_device_registered = 0;
-		nesvnic->nesibdev = NULL;
-	}
-	unregister_netdev(netdev);
-	nes_debug(NES_DBG_SHUTDOWN, "\n");
-}
-
-
 #define NES_ETHTOOL_STAT_COUNT 55
 static const char nes_ethtool_stringset[NES_ETHTOOL_STAT_COUNT][ETH_GSTRING_LEN] = {
 	"Link Change Interrupts",
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c
index c4ec6ac..f9db07c 100644
--- a/drivers/infiniband/hw/nes/nes_utils.c
+++ b/drivers/infiniband/hw/nes/nes_utils.c
@@ -566,7 +566,7 @@
 				cqp_request);
 	} else
 		printk(KERN_ERR PFX "%s: Could not allocated a CQP request.\n",
-			   __FUNCTION__);
+			   __func__);
 
 	return cqp_request;
 }
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index a651e9d..7c27420 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -49,6 +49,7 @@
 atomic_t qps_created;
 atomic_t sw_qps_destroyed;
 
+static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
 
 /**
  * nes_alloc_mw
@@ -1043,10 +1044,10 @@
 	u8 sq_pbl_entries;
 
 	pbl_entries = nespbl->pbl_size >> 3;
-	nes_debug(NES_DBG_QP, "Userspace PBL, pbl_size=%u, pbl_entries = %d pbl_vbase=%p, pbl_pbase=%p\n",
+	nes_debug(NES_DBG_QP, "Userspace PBL, pbl_size=%u, pbl_entries = %d pbl_vbase=%p, pbl_pbase=%lx\n",
 			nespbl->pbl_size, pbl_entries,
 			(void *)nespbl->pbl_vbase,
-			(void *)nespbl->pbl_pbase);
+			(unsigned long) nespbl->pbl_pbase);
 	pbl = (__le64 *) nespbl->pbl_vbase; /* points to first pbl entry */
 	/* now lets set the sq_vbase as well as rq_vbase addrs we will assign */
 	/* the first pbl to be fro the rq_vbase... */
@@ -1074,9 +1075,9 @@
 	/* nesqp->hwqp.rq_vbase = bus_to_virt(*pbl); */
 	/*nesqp->hwqp.rq_vbase = phys_to_virt(*pbl); */
 
-	nes_debug(NES_DBG_QP, "QP sq_vbase= %p sq_pbase=%p rq_vbase=%p rq_pbase=%p\n",
-			nesqp->hwqp.sq_vbase, (void *)nesqp->hwqp.sq_pbase,
-			nesqp->hwqp.rq_vbase, (void *)nesqp->hwqp.rq_pbase);
+	nes_debug(NES_DBG_QP, "QP sq_vbase= %p sq_pbase=%lx rq_vbase=%p rq_pbase=%lx\n",
+		  nesqp->hwqp.sq_vbase, (unsigned long) nesqp->hwqp.sq_pbase,
+		  nesqp->hwqp.rq_vbase, (unsigned long) nesqp->hwqp.rq_pbase);
 	spin_lock_irqsave(&nesadapter->pbl_lock, flags);
 	if (!nesadapter->free_256pbl) {
 		pci_free_consistent(nesdev->pcidev, nespbl->pbl_size, nespbl->pbl_vbase,
@@ -1251,6 +1252,9 @@
 	u8 rq_encoded_size;
 	/* int counter; */
 
+	if (init_attr->create_flags)
+		return ERR_PTR(-EINVAL);
+
 	atomic_inc(&qps_created);
 	switch (init_attr->qp_type) {
 		case IB_QPT_RC:
@@ -1908,13 +1912,13 @@
 		nesadapter->free_256pbl++;
 		if (nesadapter->free_256pbl > nesadapter->max_256pbl) {
 			printk(KERN_ERR PFX "%s: free 256B PBLs(%u) has exceeded the max(%u)\n",
-					__FUNCTION__, nesadapter->free_256pbl, nesadapter->max_256pbl);
+					__func__, nesadapter->free_256pbl, nesadapter->max_256pbl);
 		}
 	} else if (nescq->virtual_cq == 2) {
 		nesadapter->free_4kpbl++;
 		if (nesadapter->free_4kpbl > nesadapter->max_4kpbl) {
 			printk(KERN_ERR PFX "%s: free 4K PBLs(%u) has exceeded the max(%u)\n",
-					__FUNCTION__, nesadapter->free_4kpbl, nesadapter->max_4kpbl);
+					__func__, nesadapter->free_4kpbl, nesadapter->max_4kpbl);
 		}
 		opcode |= NES_CQP_CQ_4KB_CHUNK;
 	}
@@ -2653,10 +2657,10 @@
 
 			nespbl->pbl_vbase = (u64 *)pbl;
 			nespbl->user_base = start;
-			nes_debug(NES_DBG_MR, "Allocated PBL memory, %u bytes, pbl_pbase=%p,"
+			nes_debug(NES_DBG_MR, "Allocated PBL memory, %u bytes, pbl_pbase=%lx,"
 					" pbl_vbase=%p user_base=0x%lx\n",
-					nespbl->pbl_size, (void *)nespbl->pbl_pbase,
-					(void*)nespbl->pbl_vbase, nespbl->user_base);
+				  nespbl->pbl_size, (unsigned long) nespbl->pbl_pbase,
+				  (void *) nespbl->pbl_vbase, nespbl->user_base);
 
 			list_for_each_entry(chunk, &region->chunk_list, list) {
 				for (nmap_index = 0; nmap_index < chunk->nmap; ++nmap_index) {
@@ -3895,14 +3899,11 @@
 /**
  * nes_unregister_ofa_device
  */
-void nes_unregister_ofa_device(struct nes_ib_device *nesibdev)
+static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev)
 {
 	struct nes_vnic *nesvnic = nesibdev->nesvnic;
 	int i;
 
-	if (nesibdev == NULL)
-		return;
-
 	for (i = 0; i < ARRAY_SIZE(nes_class_attributes); ++i) {
 		class_device_remove_file(&nesibdev->ibdev.class_dev, nes_class_attributes[i]);
 	}
diff --git a/drivers/infiniband/ulp/ipoib/Makefile b/drivers/infiniband/ulp/ipoib/Makefile
index 98ee38e..3090100 100644
--- a/drivers/infiniband/ulp/ipoib/Makefile
+++ b/drivers/infiniband/ulp/ipoib/Makefile
@@ -4,7 +4,8 @@
 						   ipoib_ib.o \
 						   ipoib_multicast.o \
 						   ipoib_verbs.o \
-						   ipoib_vlan.o
+						   ipoib_vlan.o \
+						   ipoib_ethtool.o
 ib_ipoib-$(CONFIG_INFINIBAND_IPOIB_CM)		+= ipoib_cm.o
 ib_ipoib-$(CONFIG_INFINIBAND_IPOIB_DEBUG)	+= ipoib_fs.o
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 054fab8..73b2b17 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -87,6 +87,7 @@
 	IPOIB_MCAST_STARTED	  = 8,
 	IPOIB_FLAG_ADMIN_CM	  = 9,
 	IPOIB_FLAG_UMCAST	  = 10,
+	IPOIB_FLAG_CSUM		  = 11,
 
 	IPOIB_MAX_BACKOFF_SECONDS = 16,
 
@@ -241,6 +242,11 @@
 	int			num_frags;
 };
 
+struct ipoib_ethtool_st {
+	u16     coalesce_usecs;
+	u16     max_coalesced_frames;
+};
+
 /*
  * Device private locking: tx_lock protects members used in TX fast
  * path (and we use LLTX so upper layers don't do extra locking).
@@ -318,6 +324,8 @@
 	struct dentry *mcg_dentry;
 	struct dentry *path_dentry;
 #endif
+	int	hca_caps;
+	struct ipoib_ethtool_st ethtool;
 };
 
 struct ipoib_ah {
@@ -458,6 +466,8 @@
 int ipoib_pkey_dev_delay_open(struct net_device *dev);
 void ipoib_drain_cq(struct net_device *dev);
 
+void ipoib_set_ethtool_ops(struct net_device *dev);
+
 #ifdef CONFIG_INFINIBAND_IPOIB_CM
 
 #define IPOIB_FLAGS_RC		0x80
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 2490b2d..9db7b0b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1007,9 +1007,9 @@
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ib_qp_attr qp_attr;
 	int qp_attr_mask, ret;
-	ret = ib_find_cached_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index);
+	ret = ib_find_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index);
 	if (ret) {
-		ipoib_warn(priv, "pkey 0x%x not in cache: %d\n", priv->pkey, ret);
+		ipoib_warn(priv, "pkey 0x%x not found: %d\n", priv->pkey, ret);
 		return ret;
 	}
 
@@ -1383,6 +1383,10 @@
 		set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
 		ipoib_warn(priv, "enabling connected mode "
 			   "will cause multicast packet drops\n");
+
+		dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO);
+		priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
+
 		ipoib_flush_paths(dev);
 		return count;
 	}
@@ -1391,6 +1395,13 @@
 		clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
 		dev->mtu = min(priv->mcast_mtu, dev->mtu);
 		ipoib_flush_paths(dev);
+
+		if (test_bit(IPOIB_FLAG_CSUM, &priv->flags)) {
+			dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
+			if (priv->hca_caps & IB_DEVICE_UD_TSO)
+				dev->features |= NETIF_F_TSO;
+		}
+
 		return count;
 	}
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
new file mode 100644
index 0000000..9a47428
--- /dev/null
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+
+#include "ipoib.h"
+
+static void ipoib_get_drvinfo(struct net_device *netdev,
+			      struct ethtool_drvinfo *drvinfo)
+{
+	strncpy(drvinfo->driver, "ipoib", sizeof(drvinfo->driver) - 1);
+}
+
+static int ipoib_get_coalesce(struct net_device *dev,
+			      struct ethtool_coalesce *coal)
+{
+	struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+	coal->rx_coalesce_usecs = priv->ethtool.coalesce_usecs;
+	coal->tx_coalesce_usecs = priv->ethtool.coalesce_usecs;
+	coal->rx_max_coalesced_frames = priv->ethtool.max_coalesced_frames;
+	coal->tx_max_coalesced_frames = priv->ethtool.max_coalesced_frames;
+
+	return 0;
+}
+
+static int ipoib_set_coalesce(struct net_device *dev,
+			      struct ethtool_coalesce *coal)
+{
+	struct ipoib_dev_priv *priv = netdev_priv(dev);
+	int ret;
+
+	/*
+	 * Since IPoIB uses a single CQ for both rx and tx, we assume
+	 * that rx params dictate the configuration.  These values are
+	 * saved in the private data and returned when ipoib_get_coalesce()
+	 * is called.
+	 */
+	if (coal->rx_coalesce_usecs       > 0xffff ||
+	    coal->rx_max_coalesced_frames > 0xffff)
+		return -EINVAL;
+
+	ret = ib_modify_cq(priv->cq, coal->rx_max_coalesced_frames,
+			   coal->rx_coalesce_usecs);
+	if (ret && ret != -ENOSYS) {
+		ipoib_warn(priv, "failed modifying CQ (%d)\n", ret);
+		return ret;
+	}
+
+	coal->tx_coalesce_usecs       = coal->rx_coalesce_usecs;
+	coal->tx_max_coalesced_frames = coal->rx_max_coalesced_frames;
+	priv->ethtool.coalesce_usecs       = coal->rx_coalesce_usecs;
+	priv->ethtool.max_coalesced_frames = coal->rx_max_coalesced_frames;
+
+	return 0;
+}
+
+static const struct ethtool_ops ipoib_ethtool_ops = {
+	.get_drvinfo		= ipoib_get_drvinfo,
+	.get_tso		= ethtool_op_get_tso,
+	.get_coalesce		= ipoib_get_coalesce,
+	.set_coalesce		= ipoib_set_coalesce,
+};
+
+void ipoib_set_ethtool_ops(struct net_device *dev)
+{
+	SET_ETHTOOL_OPS(dev, &ipoib_ethtool_ops);
+}
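
ipoib_set_coalesce() above honors only the rx parameters and mirrors
them into the tx fields, since the driver drives both directions off a
single CQ; values above 0xffff are rejected because they are stored in
u16 fields of struct ipoib_ethtool_st. From userspace this surfaces
through the standard coalescing interface. A sketch of driving it via
the SIOCETHTOOL ioctl (interface name hypothetical, error handling
trimmed):

	#include <linux/ethtool.h>
	#include <linux/sockios.h>
	#include <net/if.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <unistd.h>

	int set_ipoib_coalesce(const char *ifname, __u32 usecs, __u32 frames)
	{
		struct ethtool_coalesce coal = { .cmd = ETHTOOL_GCOALESCE };
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (char *)&coal;
		ioctl(fd, SIOCETHTOOL, &ifr);	/* read current values */

		coal.cmd = ETHTOOL_SCOALESCE;
		coal.rx_coalesce_usecs = usecs;		/* tx fields ignored:  */
		coal.rx_max_coalesced_frames = frames;	/* driver mirrors rx   */
		ioctl(fd, SIOCETHTOOL, &ifr);

		close(fd);
		return 0;
	}
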
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 08c4396..0205eb7 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -39,6 +39,8 @@
 #include <linux/dma-mapping.h>
 
 #include <rdma/ib_cache.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
 
 #include "ipoib.h"
 
@@ -231,6 +233,10 @@
 	skb->dev = dev;
 	/* XXX get correct PACKET_ type here */
 	skb->pkt_type = PACKET_HOST;
+
+	if (test_bit(IPOIB_FLAG_CSUM, &priv->flags) && likely(wc->csum_ok))
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+
 	netif_receive_skb(skb);
 
 repost:
@@ -245,29 +251,37 @@
 	struct sk_buff *skb = tx_req->skb;
 	u64 *mapping = tx_req->mapping;
 	int i;
+	int off;
 
-	mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
-				       DMA_TO_DEVICE);
-	if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
-		return -EIO;
+	if (skb_headlen(skb)) {
+		mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
+					       DMA_TO_DEVICE);
+		if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
+			return -EIO;
+
+		off = 1;
+	} else
+		off = 0;
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-		mapping[i + 1] = ib_dma_map_page(ca, frag->page,
+		mapping[i + off] = ib_dma_map_page(ca, frag->page,
 						 frag->page_offset, frag->size,
 						 DMA_TO_DEVICE);
-		if (unlikely(ib_dma_mapping_error(ca, mapping[i + 1])))
+		if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))
 			goto partial_error;
 	}
 	return 0;
 
 partial_error:
-	ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
-
 	for (; i > 0; --i) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
-		ib_dma_unmap_page(ca, mapping[i], frag->size, DMA_TO_DEVICE);
+		ib_dma_unmap_page(ca, mapping[i - !off], frag->size, DMA_TO_DEVICE);
 	}
+
+	if (off)
+		ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
+
 	return -EIO;
 }
 
@@ -277,12 +291,17 @@
 	struct sk_buff *skb = tx_req->skb;
 	u64 *mapping = tx_req->mapping;
 	int i;
+	int off;
 
-	ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
+	if (skb_headlen(skb)) {
+		ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
+		off = 1;
+	} else
+		off = 0;
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-		ib_dma_unmap_page(ca, mapping[i + 1], frag->size,
+		ib_dma_unmap_page(ca, mapping[i + off], frag->size,
 				  DMA_TO_DEVICE);
 	}
 }
@@ -388,24 +407,40 @@
 static inline int post_send(struct ipoib_dev_priv *priv,
 			    unsigned int wr_id,
 			    struct ib_ah *address, u32 qpn,
-			    u64 *mapping, int headlen,
-			    skb_frag_t *frags,
-			    int nr_frags)
+			    struct ipoib_tx_buf *tx_req,
+			    void *head, int hlen)
 {
 	struct ib_send_wr *bad_wr;
-	int i;
+	int i, off;
+	struct sk_buff *skb = tx_req->skb;
+	skb_frag_t *frags = skb_shinfo(skb)->frags;
+	int nr_frags = skb_shinfo(skb)->nr_frags;
+	u64 *mapping = tx_req->mapping;
 
-	priv->tx_sge[0].addr	     = mapping[0];
-	priv->tx_sge[0].length       = headlen;
+	if (skb_headlen(skb)) {
+		priv->tx_sge[0].addr         = mapping[0];
+		priv->tx_sge[0].length       = skb_headlen(skb);
+		off = 1;
+	} else
+		off = 0;
+
 	for (i = 0; i < nr_frags; ++i) {
-		priv->tx_sge[i + 1].addr = mapping[i + 1];
-		priv->tx_sge[i + 1].length = frags[i].size;
+		priv->tx_sge[i + off].addr = mapping[i + off];
+		priv->tx_sge[i + off].length = frags[i].size;
 	}
-	priv->tx_wr.num_sge	     = nr_frags + 1;
+	priv->tx_wr.num_sge	     = nr_frags + off;
 	priv->tx_wr.wr_id 	     = wr_id;
 	priv->tx_wr.wr.ud.remote_qpn = qpn;
 	priv->tx_wr.wr.ud.ah 	     = address;
 
+	if (head) {
+		priv->tx_wr.wr.ud.mss	 = skb_shinfo(skb)->gso_size;
+		priv->tx_wr.wr.ud.header = head;
+		priv->tx_wr.wr.ud.hlen	 = hlen;
+		priv->tx_wr.opcode	 = IB_WR_LSO;
+	} else
+		priv->tx_wr.opcode	 = IB_WR_SEND;
+
 	return ib_post_send(priv->qp, &priv->tx_wr, &bad_wr);
 }
 
@@ -414,14 +449,30 @@
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_tx_buf *tx_req;
+	int hlen;
+	void *phead;
 
-	if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
-		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
-			   skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
-		++dev->stats.tx_dropped;
-		++dev->stats.tx_errors;
-		ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
-		return;
+	if (skb_is_gso(skb)) {
+		hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		phead = skb->data;
+		if (unlikely(!skb_pull(skb, hlen))) {
+			ipoib_warn(priv, "linear data too small\n");
+			++dev->stats.tx_dropped;
+			++dev->stats.tx_errors;
+			dev_kfree_skb_any(skb);
+			return;
+		}
+	} else {
+		if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
+			ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
+				   skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
+			++dev->stats.tx_dropped;
+			++dev->stats.tx_errors;
+			ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
+			return;
+		}
+		phead = NULL;
+		hlen  = 0;
 	}
 
 	ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n",
@@ -442,10 +493,13 @@
 		return;
 	}
 
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		priv->tx_wr.send_flags |= IB_SEND_IP_CSUM;
+	else
+		priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
+
 	if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
-			       address->ah, qpn,
-			       tx_req->mapping, skb_headlen(skb),
-			       skb_shinfo(skb)->frags, skb_shinfo(skb)->nr_frags))) {
+			       address->ah, qpn, tx_req, phead, hlen))) {
 		ipoib_warn(priv, "post_send failed\n");
 		++dev->stats.tx_errors;
 		ipoib_dma_unmap_tx(priv->ca, tx_req);
@@ -540,7 +594,7 @@
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	u16 pkey_index = 0;
 
-	if (ib_find_cached_pkey(priv->ca, priv->port, priv->pkey, &pkey_index))
+	if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &pkey_index))
 		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
 	else
 		set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
@@ -781,13 +835,13 @@
 			clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
 			ipoib_ib_dev_down(dev, 0);
 			ipoib_ib_dev_stop(dev, 0);
-			ipoib_pkey_dev_delay_open(dev);
-			return;
+			if (ipoib_pkey_dev_delay_open(dev))
+				return;
 		}
-		set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
 
 		/* restart QP only if P_Key index is changed */
-		if (new_index == priv->pkey_index) {
+		if (test_and_set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
+		    new_index == priv->pkey_index) {
 			ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
 			return;
 		}
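
The ipoib_ib.c send path above gains LSO support: for a GSO skb the
TCP/IP headers are pulled out of the linear area and handed to the HCA
separately as wr.ud.header, which can leave skb_headlen() == 0. The new
'off' variable keeps the scatter/gather list dense in that case, so the
fragments start at sge 0 instead of sge 1. A reduced sketch of the
indexing with the IB calls stubbed out (types simplified):

	struct sge { unsigned long addr; unsigned int length; };

	static int pack_sges(struct sge *sge, const unsigned long *mapping,
			     unsigned int headlen,
			     const unsigned int *frag_sizes, int nr_frags)
	{
		int i, off;

		if (headlen) {		/* linear data: sge[0] covers it */
			sge[0].addr = mapping[0];
			sge[0].length = headlen;
			off = 1;
		} else {		/* LSO consumed the whole header */
			off = 0;
		}

		for (i = 0; i < nr_frags; ++i) {
			sge[i + off].addr = mapping[i + off];
			sge[i + off].length = frag_sizes[i];
		}
		return nr_frags + off;	/* becomes tx_wr.num_sge */
	}
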
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 5728204..bd07f02 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -359,8 +359,7 @@
 	spin_lock_irq(&priv->tx_lock);
 	spin_lock(&priv->lock);
 
-	list_splice(&priv->path_list, &remove_list);
-	INIT_LIST_HEAD(&priv->path_list);
+	list_splice_init(&priv->path_list, &remove_list);
 
 	list_for_each_entry(path, &remove_list, list)
 		rb_erase(&path->rb_node, &priv->path_tree);
@@ -952,6 +951,8 @@
 	dev->set_multicast_list	 = ipoib_set_mcast_list;
 	dev->neigh_setup	 = ipoib_neigh_setup_dev;
 
+	ipoib_set_ethtool_ops(dev);
+
 	netif_napi_add(dev, &priv->napi, ipoib_poll, 100);
 
 	dev->watchdog_timeo	 = HZ;
@@ -1105,6 +1106,7 @@
 					 struct ib_device *hca, u8 port)
 {
 	struct ipoib_dev_priv *priv;
+	struct ib_device_attr *device_attr;
 	int result = -ENOMEM;
 
 	priv = ipoib_intf_alloc(format);
@@ -1120,6 +1122,29 @@
 		goto device_init_failed;
 	}
 
+	device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
+	if (!device_attr) {
+		printk(KERN_WARNING "%s: allocation of %zu bytes failed\n",
+		       hca->name, sizeof *device_attr);
+		goto device_init_failed;
+	}
+
+	result = ib_query_device(hca, device_attr);
+	if (result) {
+		printk(KERN_WARNING "%s: ib_query_device failed (ret = %d)\n",
+		       hca->name, result);
+		kfree(device_attr);
+		goto device_init_failed;
+	}
+	priv->hca_caps = device_attr->device_cap_flags;
+
+	kfree(device_attr);
+
+	if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
+		set_bit(IPOIB_FLAG_CSUM, &priv->flags);
+		priv->dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+	}
+
 	/*
 	 * Set the full membership bit, so that we join the right
 	 * broadcast group, etc.
@@ -1137,7 +1162,6 @@
 	} else
 		memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));
 
-
 	result = ipoib_dev_init(priv->dev, hca, port);
 	if (result < 0) {
 		printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
@@ -1155,6 +1179,9 @@
 		goto event_failed;
 	}
 
+	if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO)
+		priv->dev->features |= NETIF_F_TSO;
+
 	result = register_netdev(priv->dev);
 	if (result) {
 		printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index a3aeb91..8a20e37 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -192,6 +192,9 @@
 	init_attr.send_cq = priv->cq;
 	init_attr.recv_cq = priv->cq;
 
+	if (priv->hca_caps & IB_DEVICE_UD_TSO)
+		init_attr.create_flags = IB_QP_CREATE_IPOIB_UD_LSO;
+
 	if (dev->features & NETIF_F_SG)
 		init_attr.cap.max_send_sge = MAX_SKB_FRAGS + 1;
 
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 83247f1..08dc81c 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -405,7 +405,7 @@
 	struct iser_dto *send_dto = NULL;
 	unsigned long buf_offset;
 	unsigned long data_seg_len;
-	unsigned int itt;
+	uint32_t itt;
 	int err = 0;
 
 	if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
@@ -416,7 +416,7 @@
 	if (iser_check_xmit(conn, ctask))
 		return -ENOBUFS;
 
-	itt = ntohl(hdr->itt);
+	itt = (__force uint32_t)hdr->itt;
 	data_seg_len = ntoh24(hdr->dlength);
 	buf_offset   = ntohl(hdr->offset);
 
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 993f0a8..d19cfe6 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -473,11 +473,8 @@
 		iser_connect_error(cma_id);
 		break;
 	case RDMA_CM_EVENT_DISCONNECTED:
-		iser_disconnected_handler(cma_id);
-		break;
 	case RDMA_CM_EVENT_DEVICE_REMOVAL:
-		iser_err("Device removal is currently unsupported\n");
-		BUG();
+		iser_disconnected_handler(cma_id);
 		break;
 	default:
 		iser_err("Unexpected RDMA CM event (%d)\n", event->event);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index fd4a49f..125765a 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -68,7 +68,7 @@
 
 module_param(srp_sg_tablesize, int, 0444);
 MODULE_PARM_DESC(srp_sg_tablesize,
-		 "Max number of gather/scatter entries per I/O (default is 12)");
+		 "Max number of gather/scatter entries per I/O (default is 12, max 255)");
 
 static int topspin_workarounds = 1;
 
@@ -2138,6 +2138,11 @@
 {
 	int ret;
 
+	if (srp_sg_tablesize > 255) {
+		printk(KERN_WARNING PFX "Clamping srp_sg_tablesize to 255\n");
+		srp_sg_tablesize = 255;
+	}
+
 	ib_srp_transport_template =
 		srp_attach_transport(&ib_srp_transport_functions);
 	if (!ib_srp_transport_template)
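
srp_sg_tablesize is now clamped once at module load; the 255 ceiling
noted in the updated parameter description suggests the scatter/gather
entry count travels in a single byte of the SRP request, though the
wire-format detail is an assumption here. The general shape of
validating a module parameter in the init path (names hypothetical):

	static int tablesize = 12;	/* exposed via module_param(..., int, 0444) */

	static int __init example_init(void)
	{
		if (tablesize > 255) {	/* assumed 8-bit on-the-wire field */
			printk(KERN_WARNING "clamping tablesize to 255\n");
			tablesize = 255;
		}
		return 0;
	}
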
diff --git a/drivers/macintosh/mediabay.c b/drivers/macintosh/mediabay.c
index bd8a1d1..82add26 100644
--- a/drivers/macintosh/mediabay.c
+++ b/drivers/macintosh/mediabay.c
@@ -79,6 +79,7 @@
 	int				sleeping;
 	struct semaphore		lock;
 #ifdef CONFIG_BLK_DEV_IDE_PMAC
+	ide_hwif_t			*cd_port;
 	void __iomem			*cd_base;
 	int				cd_irq;
 	int				cd_retry;
@@ -448,7 +449,7 @@
 }
 
 int media_bay_set_ide_infos(struct device_node* which_bay, unsigned long base,
-			    int irq, int index)
+			    int irq, ide_hwif_t *hwif)
 {
 	int	i;
 
@@ -456,10 +457,11 @@
 		struct media_bay_info* bay = &media_bays[i];
 
 		if (bay->mdev && which_bay == bay->mdev->ofdev.node) {
-			int timeout = 5000;
+			int timeout = 5000, index = hwif->index;
 			
 			down(&bay->lock);
 
+			bay->cd_port	= hwif;
  			bay->cd_base	= (void __iomem *) base;
 			bay->cd_irq	= irq;
 
@@ -551,15 +553,10 @@
 			bay->timer = 0;
 			bay->state = mb_up;
 			if (bay->cd_index < 0) {
-				hw_regs_t hw;
-
 				printk("mediabay %d, registering IDE...\n", i);
 				pmu_suspend();
-				ide_init_hwif_ports(&hw, (unsigned long) bay->cd_base, (unsigned long) 0, NULL);
-				hw.irq = bay->cd_irq;
-				hw.chipset = ide_pmac;
-				bay->cd_index =
-					ide_register_hw(&hw, NULL, NULL);
+				ide_port_scan(bay->cd_port);
+				bay->cd_index = bay->cd_port->index;
 				pmu_resume();
 			}
 			if (bay->cd_index == -1) {
@@ -589,7 +586,7 @@
     	        if (bay->cd_index >= 0) {
 			printk(KERN_DEBUG "Unregistering mb %d ide, index:%d\n", i,
 			       bay->cd_index);
-			ide_unregister(bay->cd_index, 1, 1);
+			ide_port_unregister_devices(bay->cd_port);
 			bay->cd_index = -1;
 		}
 	    	if (bay->cd_retry) {
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index c6be6eb..db3c892 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -79,7 +79,7 @@
 /*
  *  cmd line parameters
  */
-static int mpt_msi_enable;
+static int mpt_msi_enable = -1;
 module_param(mpt_msi_enable, int, 0);
 MODULE_PARM_DESC(mpt_msi_enable, " MSI Support Enable (default=0)");
 
@@ -1686,6 +1686,11 @@
 		ioc->bus_type = SAS;
 	}
 
+	if (ioc->bus_type == SAS && mpt_msi_enable == -1)
+		ioc->msi_enable = 1;
+	else
+		ioc->msi_enable = mpt_msi_enable;
+
 	if (ioc->errata_flag_1064)
 		pci_disable_io_access(pdev);
 
@@ -1831,7 +1836,7 @@
 	CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
 
 	free_irq(ioc->pci_irq, ioc);
-	if (mpt_msi_enable)
+	if (ioc->msi_enable)
 		pci_disable_msi(ioc->pcidev);
 	ioc->pci_irq = -1;
 	pci_save_state(pdev);
@@ -2057,15 +2062,17 @@
 	if ((ret == 0) && (reason == MPT_HOSTEVENT_IOC_BRINGUP)) {
 		ioc->pci_irq = -1;
 		if (ioc->pcidev->irq) {
-			if (mpt_msi_enable && !pci_enable_msi(ioc->pcidev))
+			if (ioc->msi_enable && !pci_enable_msi(ioc->pcidev))
 				printk(MYIOC_s_INFO_FMT "PCI-MSI enabled\n",
 				    ioc->name);
+			else
+				ioc->msi_enable = 0;
 			rc = request_irq(ioc->pcidev->irq, mpt_interrupt,
 			    IRQF_SHARED, ioc->name, ioc);
 			if (rc < 0) {
 				printk(MYIOC_s_ERR_FMT "Unable to allocate "
 				    "interrupt %d!\n", ioc->name, ioc->pcidev->irq);
-				if (mpt_msi_enable)
+				if (ioc->msi_enable)
 					pci_disable_msi(ioc->pcidev);
 				return -EBUSY;
 			}
@@ -2173,7 +2180,7 @@
 		/*
 		 * Initalize link list for inactive raid volumes.
 		 */
-		init_MUTEX(&ioc->raid_data.inactive_list_mutex);
+		mutex_init(&ioc->raid_data.inactive_list_mutex);
 		INIT_LIST_HEAD(&ioc->raid_data.inactive_list);
 
 		if (ioc->bus_type == SAS) {
@@ -2261,7 +2268,7 @@
  out:
 	if ((ret != 0) && irq_allocated) {
 		free_irq(ioc->pci_irq, ioc);
-		if (mpt_msi_enable)
+		if (ioc->msi_enable)
 			pci_disable_msi(ioc->pcidev);
 	}
 	return ret;
@@ -2443,7 +2450,7 @@
 
 	if (ioc->pci_irq != -1) {
 		free_irq(ioc->pci_irq, ioc);
-		if (mpt_msi_enable)
+		if (ioc->msi_enable)
 			pci_disable_msi(ioc->pcidev);
 		ioc->pci_irq = -1;
 	}
@@ -5159,13 +5166,13 @@
 	if (list_empty(&ioc->raid_data.inactive_list))
 		return;
 
-	down(&ioc->raid_data.inactive_list_mutex);
+	mutex_lock(&ioc->raid_data.inactive_list_mutex);
 	list_for_each_entry_safe(component_info, pNext,
 	    &ioc->raid_data.inactive_list, list) {
 		list_del(&component_info->list);
 		kfree(component_info);
 	}
-	up(&ioc->raid_data.inactive_list_mutex);
+	mutex_unlock(&ioc->raid_data.inactive_list_mutex);
 }
 
 /**
@@ -5224,7 +5231,7 @@
 	if (!handle_inactive_volumes)
 		goto out;
 
-	down(&ioc->raid_data.inactive_list_mutex);
+	mutex_lock(&ioc->raid_data.inactive_list_mutex);
 	for (i = 0; i < buffer->NumPhysDisks; i++) {
 		if(mpt_raid_phys_disk_pg0(ioc,
 		    buffer->PhysDisk[i].PhysDiskNum, &phys_disk) != 0)
@@ -5244,7 +5251,7 @@
 		list_add_tail(&component_info->list,
 		    &ioc->raid_data.inactive_list);
 	}
-	up(&ioc->raid_data.inactive_list_mutex);
+	mutex_unlock(&ioc->raid_data.inactive_list_mutex);
 
  out:
 	if (buffer)
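
mpt_msi_enable changes from a plain boolean (default 0) to a tri-state:
-1 now means "not set by the user", and a per-adapter ioc->msi_enable is
derived from it so that SAS controllers default to MSI on while other
bus types keep the old off default. The pattern in isolation (names
hypothetical):

	static int param = -1;	/* -1 = auto, 0 = off, 1 = on */

	static void apply_param(int is_sas, int *effective)
	{
		if (param == -1)
			*effective = is_sas ? 1 : 0;	/* per-type default  */
		else
			*effective = param;		/* explicit user choice */
	}
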
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index caadc68..a8f6174 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -51,6 +51,7 @@
 
 #include <linux/kernel.h>
 #include <linux/pci.h>
+#include <linux/mutex.h>
 
 #include "lsi/mpi_type.h"
 #include "lsi/mpi.h"		/* Fusion MPI(nterface) basic defs */
@@ -531,7 +532,7 @@
 typedef	struct _RaidCfgData {
 	IOCPage2_t	*pIocPg2;		/* table of Raid Volumes */
 	IOCPage3_t	*pIocPg3;		/* table of physical disks */
-	struct semaphore	inactive_list_mutex;
+	struct mutex	inactive_list_mutex;
 	struct list_head	inactive_list; /* link list for physical
 						disk that belong in
 						inactive volumes */
@@ -630,6 +631,7 @@
 	int			 mtrr_reg;
 	struct pci_dev		*pcidev;	/* struct pci_dev pointer */
 	int			bars;		/* bitmask of BAR's that must be configured */
+	int			msi_enable;
 	u8			__iomem *memmap;	/* mmap address */
 	struct Scsi_Host	*sh;		/* Scsi Host pointer */
 	SpiCfgData		spi_data;	/* Scsi config. data */
@@ -693,7 +695,6 @@
 	struct mutex		 sas_discovery_mutex;
 	u8			 sas_discovery_runtime;
 	u8			 sas_discovery_ignore_events;
-	u16			 handle;
 	int			 sas_index; /* index refrencing */
 	MPT_SAS_MGMT		 sas_mgmt;
 	struct work_struct	 sas_persist_task;
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 78734e2..4684807 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -230,6 +230,20 @@
 	return ((MPT_SCSI_HOST *)shost->hostdata)->ioc;
 }
 
+static struct mptsas_portinfo *
+mptsas_get_hba_portinfo(MPT_ADAPTER *ioc)
+{
+	struct list_head	*head = &ioc->sas_topology;
+	struct mptsas_portinfo	*pi = NULL;
+
+	/* always the first entry on sas_topology list */
+
+	if (!list_empty(head))
+		pi = list_entry(head->next, struct mptsas_portinfo, list);
+
+	return pi;
+}
+
 /*
  * mptsas_find_portinfo_by_handle
  *
@@ -1290,7 +1304,7 @@
 		struct mptsas_portinfo *port_info;
 
 		mutex_lock(&ioc->sas_topology_mutex);
-		port_info = mptsas_find_portinfo_by_handle(ioc, ioc->handle);
+		port_info = mptsas_get_hba_portinfo(ioc);
 		if (port_info && port_info->phy_info)
 			sas_address =
 				port_info->phy_info[0].phy->identify.sas_address;
@@ -2028,8 +2042,7 @@
 			int i;
 
 			mutex_lock(&ioc->sas_topology_mutex);
-			port_info = mptsas_find_portinfo_by_handle(ioc,
-								   ioc->handle);
+			port_info = mptsas_get_hba_portinfo(ioc);
 			mutex_unlock(&ioc->sas_topology_mutex);
 
 			for (i = 0; i < port_info->num_phys; i++)
@@ -2099,8 +2112,7 @@
 
 	mptsas_sas_io_unit_pg1(ioc);
 	mutex_lock(&ioc->sas_topology_mutex);
-	ioc->handle = hba->phy_info[0].handle;
-	port_info = mptsas_find_portinfo_by_handle(ioc, ioc->handle);
+	port_info = mptsas_get_hba_portinfo(ioc);
 	if (!port_info) {
 		port_info = hba;
 		list_add_tail(&port_info->list, &ioc->sas_topology);
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index c207bda..89c6314 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -2304,14 +2304,14 @@
 	if (list_empty(&ioc->raid_data.inactive_list))
 		goto out;
 
-	down(&ioc->raid_data.inactive_list_mutex);
+	mutex_lock(&ioc->raid_data.inactive_list_mutex);
 	list_for_each_entry(component_info, &ioc->raid_data.inactive_list,
 	    list) {
 		if ((component_info->d.PhysDiskID == id) &&
 		    (component_info->d.PhysDiskBus == channel))
 			rc = 1;
 	}
-	up(&ioc->raid_data.inactive_list_mutex);
+	mutex_unlock(&ioc->raid_data.inactive_list_mutex);
 
  out:
 	return rc;
@@ -2341,14 +2341,14 @@
 	if (list_empty(&ioc->raid_data.inactive_list))
 		goto out;
 
-	down(&ioc->raid_data.inactive_list_mutex);
+	mutex_lock(&ioc->raid_data.inactive_list_mutex);
 	list_for_each_entry(component_info, &ioc->raid_data.inactive_list,
 	    list) {
 		if ((component_info->d.PhysDiskID == id) &&
 		    (component_info->d.PhysDiskBus == channel))
 			rc = component_info->d.PhysDiskNum;
 	}
-	up(&ioc->raid_data.inactive_list_mutex);
+	mutex_unlock(&ioc->raid_data.inactive_list_mutex);
 
  out:
 	return rc;
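
The fusion hunks above convert inactive_list_mutex from a struct
semaphore used as a binary mutex (init_MUTEX/down/up) to a real struct
mutex, which is lighter and gains the kernel's mutex debugging. The
one-for-one mapping, sketched:

	#include <linux/list.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(example_lock);	/* or mutex_init() at runtime */

	static void touch_list(struct list_head *head, struct list_head *item)
	{
		mutex_lock(&example_lock);	/* was: down(&sem) */
		list_add_tail(item, head);
		mutex_unlock(&example_lock);	/* was: up(&sem)   */
	}
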
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 3b12f5d..bbc69fd 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -22,3 +22,4 @@
 obj-$(CONFIG_EEPROM_93CX6)	+= eeprom_93cx6.o
 obj-$(CONFIG_INTEL_MENLOW)	+= intel_menlow.o
 obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
+obj-$(CONFIG_KGDB_TESTS)	+= kgdbts.o
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
new file mode 100644
index 0000000..6d6286c
--- /dev/null
+++ b/drivers/misc/kgdbts.c
@@ -0,0 +1,1090 @@
+/*
+ * kgdbts is a test suite for kgdb whose sole purpose is to validate
+ * that key pieces of the kgdb internals are working properly, such
+ * as HW/SW breakpoints, single stepping, and NMI handling.
+ *
+ * Created by: Jason Wessel <jason.wessel@windriver.com>
+ *
+ * Copyright (c) 2008 Wind River Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/* Information about the kgdb test suite.
+ * -------------------------------------
+ *
+ * The kgdb test suite is designed as a KGDB I/O module which
+ * simulates the communications that a debugger would have with kgdb.
+ * The tests are broken up line by line, where each line is referenced
+ * here as a "get", which is kgdb requesting input, or a "put", which
+ * is kgdb sending a response.
+ *
+ * The kgdb suite can be invoked from the kernel command line
+ * arguments system or executed dynamically at run time.  The test
+ * suite uses the variable "kgdbts" to obtain the information about
+ * which tests to run and to configure the verbosity level.  The
+ * following are the various characters you can use with the kgdbts=
+ * line:
+ *
+ * When using "kgdbts=" you choose only one of the following core
+ * test types:
+ * A = Run all the core tests silently
+ * V1 = Run all the core tests with minimal output
+ * V2 = Run all the core tests in debug mode
+ *
+ * You can also specify optional tests:
+ * N## = Go to sleep with interrupts off for ## seconds
+ *       to test the HW NMI watchdog
+ * F## = Break at do_fork for ## iterations
+ * S## = Break at sys_open for ## iterations
+ *
+ * NOTE: the do_fork and sys_open tests are mutually exclusive.
+ *
+ * To invoke the kgdb test suite from boot you use a kernel start
+ * argument as follows:
+ * 	kgdbts=V1 kgdbwait
+ * Or if you wanted to perform the NMI test for 6 seconds and do_fork
+ * test for 100 forks, you could use:
+ * 	kgdbts=V1N6F100 kgdbwait
+ *
+ * The test suite can also be invoked at run time with:
+ *	echo kgdbts=V1N6F100 > /sys/module/kgdbts/parameters/kgdbts
+ * Or as another example:
+ *	echo kgdbts=V2 > /sys/module/kgdbts/parameters/kgdbts
+ *
+ * When developing a new kgdb arch specific implementation or
+ * using these tests for the purpose of regression testing,
+ * several invocations are required.
+ *
+ * 1) Boot with the test suite enabled by using the kernel arguments
+ *       "kgdbts=V1F100 kgdbwait"
+ *    ## If the kgdb arch specific implementation has NMI support, use
+ *       "kgdbts=V1N6F100 kgdbwait"
+ *
+ * 2) After the system boot run the basic test.
+ * echo kgdbts=V1 > /sys/module/kgdbts/parameters/kgdbts
+ *
+ * 3) Run the concurrency tests.  It is best to use n+1
+ *    while loops where n is the number of cpus you have
+ *    in your system.  The example below uses only two
+ *    loops.
+ *
+ * ## This tests break points on sys_open
+ * while [ 1 ] ; do find / > /dev/null 2>&1 ; done &
+ * while [ 1 ] ; do find / > /dev/null 2>&1 ; done &
+ * echo kgdbts=V1S10000 > /sys/module/kgdbts/parameters/kgdbts
+ * fg # and hit control-c
+ * fg # and hit control-c
+ * ## This tests break points on do_fork
+ * while [ 1 ] ; do date > /dev/null ; done &
+ * while [ 1 ] ; do date > /dev/null ; done &
+ * echo kgdbts=V1F1000 > /sys/module/kgdbts/parameters/kgdbts
+ * fg # and hit control-c
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/kgdb.h>
+#include <linux/ctype.h>
+#include <linux/uaccess.h>
+#include <linux/syscalls.h>
+#include <linux/nmi.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/delay.h>
+
+#define v1printk(a...) do { \
+	if (verbose) \
+		printk(KERN_INFO a); \
+	} while (0)
+#define v2printk(a...) do { \
+	if (verbose > 1) \
+		printk(KERN_INFO a); \
+	touch_nmi_watchdog();	\
+	} while (0)
+#define eprintk(a...) do { \
+		printk(KERN_ERR a); \
+		WARN_ON(1); \
+	} while (0)
+#define MAX_CONFIG_LEN		40
+
+static const char hexchars[] = "0123456789abcdef";
+static struct kgdb_io kgdbts_io_ops;
+static char get_buf[BUFMAX];
+static int get_buf_cnt;
+static char put_buf[BUFMAX];
+static int put_buf_cnt;
+static char scratch_buf[BUFMAX];
+static int verbose;
+static int repeat_test;
+static int test_complete;
+static int send_ack;
+static int final_ack;
+static int hw_break_val;
+static int hw_break_val2;
+#if defined(CONFIG_ARM) || defined(CONFIG_MIPS)
+static int arch_needs_sstep_emulation = 1;
+#else
+static int arch_needs_sstep_emulation;
+#endif
+static unsigned long sstep_addr;
+static int sstep_state;
+
+/* Storage for the registers, in GDB format. */
+static unsigned long kgdbts_gdb_regs[(NUMREGBYTES +
+					sizeof(unsigned long) - 1) /
+					sizeof(unsigned long)];
+static struct pt_regs kgdbts_regs;
+
+/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
+static int configured		= -1;
+
+#ifdef CONFIG_KGDB_TESTS_BOOT_STRING
+static char config[MAX_CONFIG_LEN] = CONFIG_KGDB_TESTS_BOOT_STRING;
+#else
+static char config[MAX_CONFIG_LEN];
+#endif
+static struct kparam_string kps = {
+	.string			= config,
+	.maxlen			= MAX_CONFIG_LEN,
+};
+
+static void fill_get_buf(char *buf);
+
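+/*
+ * One line of a test table: "get" is the packet the simulated
+ * debugger sends to kgdb (or the argument for get_handler), and
+ * "put" is the reply expected back from kgdb (or the argument for
+ * put_handler).
+ */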
+struct test_struct {
+	char *get;
+	char *put;
+	void (*get_handler)(char *);
+	int (*put_handler)(char *, char *);
+};
+
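+/*
+ * State of the currently running test: the table of test lines, the
+ * current index into that table, and the callbacks that drive and
+ * validate the exchange.
+ */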
+struct test_state {
+	char *name;
+	struct test_struct *tst;
+	int idx;
+	int (*run_test) (int, int);
+	int (*validate_put) (char *);
+};
+
+static struct test_state ts;
+
+static int kgdbts_unreg_thread(void *ptr)
+{
+	/* Wait until the tests are complete and then unregister the I/O
+	 * driver.
+	 */
+	while (!final_ack)
+		msleep_interruptible(1500);
+
+	if (configured)
+		kgdb_unregister_io_module(&kgdbts_io_ops);
+	configured = 0;
+
+	return 0;
+}
+
+/* This is noinline so that there is a single, stable address at
+ * which to place a breakpoint
+ */
+static noinline void kgdbts_break_test(void)
+{
+	v2printk("kgdbts: breakpoint complete\n");
+}
+
+/* Lookup symbol info in the kernel */
+static unsigned long lookup_addr(char *arg)
+{
+	unsigned long addr = 0;
+
+	if (!strcmp(arg, "kgdbts_break_test"))
+		addr = (unsigned long)kgdbts_break_test;
+	else if (!strcmp(arg, "sys_open"))
+		addr = (unsigned long)sys_open;
+	else if (!strcmp(arg, "do_fork"))
+		addr = (unsigned long)do_fork;
+	else if (!strcmp(arg, "hw_break_val"))
+		addr = (unsigned long)&hw_break_val;
+	return addr;
+}
+
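+/*
+ * Build a GDB remote protocol breakpoint packet.  The type codes
+ * used below are Z0/z0 (set/clear sw breakpoint), Z1/z1 (hw
+ * breakpoint), Z2/z2 (hw write watchpoint) and Z4/z4 (hw access
+ * watchpoint).
+ */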
+static void break_helper(char *bp_type, char *arg, unsigned long vaddr)
+{
+	unsigned long addr;
+
+	if (arg)
+		addr = lookup_addr(arg);
+	else
+		addr = vaddr;
+
+	sprintf(scratch_buf, "%s,%lx,%i", bp_type, addr,
+		BREAK_INSTR_SIZE);
+	fill_get_buf(scratch_buf);
+}
+
+static void sw_break(char *arg)
+{
+	break_helper("Z0", arg, 0);
+}
+
+static void sw_rem_break(char *arg)
+{
+	break_helper("z0", arg, 0);
+}
+
+static void hw_break(char *arg)
+{
+	break_helper("Z1", arg, 0);
+}
+
+static void hw_rem_break(char *arg)
+{
+	break_helper("z1", arg, 0);
+}
+
+static void hw_write_break(char *arg)
+{
+	break_helper("Z2", arg, 0);
+}
+
+static void hw_rem_write_break(char *arg)
+{
+	break_helper("z2", arg, 0);
+}
+
+static void hw_access_break(char *arg)
+{
+	break_helper("Z4", arg, 0);
+}
+
+static void hw_rem_access_break(char *arg)
+{
+	break_helper("z4", arg, 0);
+}
+
+static void hw_break_val_access(void)
+{
+	hw_break_val2 = hw_break_val;
+}
+
+static void hw_break_val_write(void)
+{
+	hw_break_val++;
+}
+
+static int check_and_rewind_pc(char *put_str, char *arg)
+{
+	unsigned long addr = lookup_addr(arg);
+	int offset = 0;
+
+	kgdb_hex2mem(&put_str[1], (char *)kgdbts_gdb_regs,
+		 NUMREGBYTES);
+	gdb_regs_to_pt_regs(kgdbts_gdb_regs, &kgdbts_regs);
+	v2printk("Stopped at IP: %lx\n", instruction_pointer(&kgdbts_regs));
+#ifdef CONFIG_X86
+	/* On x86 the IP stops one byte past the breakpoint, so decrement it */
+	if (addr + 1 == kgdbts_regs.ip)
+		offset = -1;
+#endif
+	if (strcmp(arg, "silent") &&
+		instruction_pointer(&kgdbts_regs) + offset != addr) {
+		eprintk("kgdbts: BP mismatch %lx expected %lx\n",
+			   instruction_pointer(&kgdbts_regs) + offset, addr);
+		return 1;
+	}
+#ifdef CONFIG_X86
+	/* On x86 adjust the instruction pointer if needed */
+	kgdbts_regs.ip += offset;
+#endif
+	return 0;
+}
+
+static int check_single_step(char *put_str, char *arg)
+{
+	unsigned long addr = lookup_addr(arg);
+	/*
+	 * From an arch independent point of view the instruction pointer
+	 * should be on a different instruction
+	 */
+	kgdb_hex2mem(&put_str[1], (char *)kgdbts_gdb_regs,
+		 NUMREGBYTES);
+	gdb_regs_to_pt_regs(kgdbts_gdb_regs, &kgdbts_regs);
+	v2printk("Singlestep stopped at IP: %lx\n",
+		   instruction_pointer(&kgdbts_regs));
+	if (instruction_pointer(&kgdbts_regs) == addr) {
+		eprintk("kgdbts: SingleStep failed at %lx\n",
+			   instruction_pointer(&kgdbts_regs));
+		return 1;
+	}
+
+	return 0;
+}
+
+static void write_regs(char *arg)
+{
+	memset(scratch_buf, 0, sizeof(scratch_buf));
+	scratch_buf[0] = 'G';
+	pt_regs_to_gdb_regs(kgdbts_gdb_regs, &kgdbts_regs);
+	kgdb_mem2hex((char *)kgdbts_gdb_regs, &scratch_buf[1], NUMREGBYTES);
+	fill_get_buf(scratch_buf);
+}
+
+static void skip_back_repeat_test(char *arg)
+{
+	int go_back = simple_strtol(arg, NULL, 10);
+
+	repeat_test--;
+	if (repeat_test <= 0)
+		ts.idx++;
+	else
+		ts.idx -= go_back;
+	fill_get_buf(ts.tst[ts.idx].get);
+}
+
+static int got_break(char *put_str, char *arg)
+{
+	test_complete = 1;
+	if (!strncmp(put_str+1, arg, 2)) {
+		if (!strncmp(arg, "T0", 2))
+			test_complete = 2;
+		return 0;
+	}
+	return 1;
+}
+
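+/*
+ * Emulate a single step for architectures without hardware single
+ * stepping: read the registers to find the current IP (state 0),
+ * plant a sw breakpoint at IP + BREAK_INSTR_SIZE (state 1), continue
+ * (state 2), and remove the breakpoint again (state 3).
+ */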
+static void emul_sstep_get(char *arg)
+{
+	if (!arch_needs_sstep_emulation) {
+		fill_get_buf(arg);
+		return;
+	}
+	switch (sstep_state) {
+	case 0:
+		v2printk("Emulate single step\n");
+		/* Start by looking at the current PC */
+		fill_get_buf("g");
+		break;
+	case 1:
+		/* set breakpoint */
+		break_helper("Z0", 0, sstep_addr);
+		break;
+	case 2:
+		/* Continue */
+		fill_get_buf("c");
+		break;
+	case 3:
+		/* Clear breakpoint */
+		break_helper("z0", 0, sstep_addr);
+		break;
+	default:
+		eprintk("kgdbts: ERROR failed sstep get emulation\n");
+	}
+	sstep_state++;
+}
+
+static int emul_sstep_put(char *put_str, char *arg)
+{
+	if (!arch_needs_sstep_emulation) {
+		if (!strncmp(put_str+1, arg, 2))
+			return 0;
+		return 1;
+	}
+	switch (sstep_state) {
+	case 1:
+		/* validate the "g" packet to get the IP */
+		kgdb_hex2mem(&put_str[1], (char *)kgdbts_gdb_regs,
+			 NUMREGBYTES);
+		gdb_regs_to_pt_regs(kgdbts_gdb_regs, &kgdbts_regs);
+		v2printk("Stopped at IP: %lx\n",
+			 instruction_pointer(&kgdbts_regs));
+		/* Want to stop at IP + break instruction size by default */
+		sstep_addr = instruction_pointer(&kgdbts_regs) +
+			BREAK_INSTR_SIZE;
+		break;
+	case 2:
+		if (strncmp(put_str, "$OK", 3)) {
+			eprintk("kgdbts: failed sstep break set\n");
+			return 1;
+		}
+		break;
+	case 3:
+		if (strncmp(put_str, "$T0", 3)) {
+			eprintk("kgdbts: failed continue sstep\n");
+			return 1;
+		}
+		break;
+	case 4:
+		if (strncmp(put_str, "$OK", 3)) {
+			eprintk("kgdbts: failed sstep break unset\n");
+			return 1;
+		}
+		/* Single step is complete so continue on! */
+		sstep_state = 0;
+		return 0;
+	default:
+		eprintk("kgdbts: ERROR failed sstep put emulation\n");
+	}
+
+	/* Continue on the same test line until emulation is complete */
+	ts.idx--;
+	return 0;
+}
+
+static int final_ack_set(char *put_str, char *arg)
+{
+	if (strncmp(put_str+1, arg, 2))
+		return 1;
+	final_ack = 1;
+	return 0;
+}
+/*
+ * Test to plant a breakpoint and detach, which should clear out the
+ * breakpoint and restore the original instruction.
+ */
+static struct test_struct plant_and_detach_test[] = {
+	{ "?", "S0*" }, /* Clear break points */
+	{ "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */
+	{ "D", "OK" }, /* Detach */
+	{ "", "" },
+};
+
+/*
+ * Simple test to write in a software breakpoint, check for the
+ * correct stop location and detach.
+ */
+static struct test_struct sw_breakpoint_test[] = {
+	{ "?", "S0*" }, /* Clear break points */
+	{ "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */
+	{ "c", "T0*", }, /* Continue */
+	{ "g", "kgdbts_break_test", 0, check_and_rewind_pc },
+	{ "write", "OK", write_regs },
+	{ "kgdbts_break_test", "OK", sw_rem_break }, /* remove breakpoint */
+	{ "D", "OK" }, /* Detach */
+	{ "D", "OK", 0,  got_break }, /* If the test worked we made it here */
+	{ "", "" },
+};
+
+/*
+ * Test a known bad memory read location to exercise the fault
+ * handler, reading 1 to 8 bytes at the bad address
+ */
+static struct test_struct bad_read_test[] = {
+	{ "?", "S0*" }, /* Clear break points */
+	{ "m0,1", "E*" }, /* read 1 byte at address 0 */
+	{ "m0,2", "E*" }, /* read 2 bytes at address 0 */
+	{ "m0,3", "E*" }, /* read 3 bytes at address 0 */
+	{ "m0,4", "E*" }, /* read 4 bytes at address 0 */
+	{ "m0,5", "E*" }, /* read 5 bytes at address 0 */
+	{ "m0,6", "E*" }, /* read 6 bytes at address 0 */
+	{ "m0,7", "E*" }, /* read 7 bytes at address 0 */
+	{ "m0,8", "E*" }, /* read 8 bytes at address 0 */
+	{ "D", "OK" }, /* Detach which removes all breakpoints and continues */
+	{ "", "" },
+};
+
+/*
+ * Test for hitting a breakpoint, remove it, single step, plant it
+ * again and detach.
+ */
+static struct test_struct singlestep_break_test[] = {
+	{ "?", "S0*" }, /* Clear break points */
+	{ "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */
+	{ "c", "T0*", }, /* Continue */
+	{ "g", "kgdbts_break_test", 0, check_and_rewind_pc },
+	{ "write", "OK", write_regs }, /* Write registers */
+	{ "kgdbts_break_test", "OK", sw_rem_break }, /* remove breakpoint */
+	{ "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */
+	{ "g", "kgdbts_break_test", 0, check_single_step },
+	{ "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */
+	{ "c", "T0*", }, /* Continue */
+	{ "g", "kgdbts_break_test", 0, check_and_rewind_pc },
+	{ "write", "OK", write_regs }, /* Write registers */
+	{ "D", "OK" }, /* Remove all breakpoints and continues */
+	{ "", "" },
+};
+
+/*
+ * Test for hitting a breakpoint at do_fork for however many
+ * iterations are required by the variable repeat_test.
+ */
+static struct test_struct do_fork_test[] = {
+	{ "?", "S0*" }, /* Clear break points */
+	{ "do_fork", "OK", sw_break, }, /* set sw breakpoint */
+	{ "c", "T0*", }, /* Continue */
+	{ "g", "do_fork", 0, check_and_rewind_pc }, /* check location */
+	{ "write", "OK", write_regs }, /* Write registers */
+	{ "do_fork", "OK", sw_rem_break }, /* remove breakpoint */
+	{ "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */
+	{ "g", "do_fork", 0, check_single_step },
+	{ "do_fork", "OK", sw_break, }, /* set sw breakpoint */
+	{ "7", "T0*", skip_back_repeat_test }, /* Loop based on repeat_test */
+	{ "D", "OK", 0, final_ack_set }, /* detach and unregister I/O */
+	{ "", "" },
+};
+
+/* Test for hitting a breakpoint at sys_open for however many
+ * iterations are required by the variable repeat_test.
+ */
+static struct test_struct sys_open_test[] = {
+	{ "?", "S0*" }, /* Clear break points */
+	{ "sys_open", "OK", sw_break, }, /* set sw breakpoint */
+	{ "c", "T0*", }, /* Continue */
+	{ "g", "sys_open", 0, check_and_rewind_pc }, /* check location */
+	{ "write", "OK", write_regs }, /* Write registers */
+	{ "sys_open", "OK", sw_rem_break }, /* remove breakpoint */
+	{ "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */
+	{ "g", "sys_open", 0, check_single_step },
+	{ "sys_open", "OK", sw_break, }, /* set sw breakpoint */
+	{ "7", "T0*", skip_back_repeat_test }, /* Loop based on repeat_test */
+	{ "D", "OK", 0, final_ack_set }, /* detach and unregister I/O */
+	{ "", "" },
+};
+
+/*
+ * Test for hitting a simple hw breakpoint
+ */
+static struct test_struct hw_breakpoint_test[] = {
+	{ "?", "S0*" }, /* Clear break points */
+	{ "kgdbts_break_test", "OK", hw_break, }, /* set hw breakpoint */
+	{ "c", "T0*", }, /* Continue */
+	{ "g", "kgdbts_break_test", 0, check_and_rewind_pc },
+	{ "write", "OK", write_regs },
+	{ "kgdbts_break_test", "OK", hw_rem_break }, /* remove breakpoint */
+	{ "D", "OK" }, /* Detach */
+	{ "D", "OK", 0,  got_break }, /* If the test worked we made it here */
+	{ "", "" },
+};
+
+/*
+ * Test for hitting a hw write breakpoint
+ */
+static struct test_struct hw_write_break_test[] = {
+	{ "?", "S0*" }, /* Clear break points */
+	{ "hw_break_val", "OK", hw_write_break, }, /* set hw breakpoint */
+	{ "c", "T0*", 0, got_break }, /* Continue */
+	{ "g", "silent", 0, check_and_rewind_pc },
+	{ "write", "OK", write_regs },
+	{ "hw_break_val", "OK", hw_rem_write_break }, /* remove breakpoint */
+	{ "D", "OK" }, /* Detach */
+	{ "D", "OK", 0,  got_break }, /* If the test worked we made it here */
+	{ "", "" },
+};
+
+/*
+ * Test for hitting a hw access breakpoint
+ */
+static struct test_struct hw_access_break_test[] = {
+	{ "?", "S0*" }, /* Clear break points */
+	{ "hw_break_val", "OK", hw_access_break, }, /* set hw breakpoint */
+	{ "c", "T0*", 0, got_break }, /* Continue */
+	{ "g", "silent", 0, check_and_rewind_pc },
+	{ "write", "OK", write_regs },
+	{ "hw_break_val", "OK", hw_rem_access_break }, /* remove breakpoint */
+	{ "D", "OK" }, /* Detach */
+	{ "D", "OK", 0,  got_break }, /* If the test worked we made it here */
+	{ "", "" },
+};
+
+/*
+ * Test that an NMI breaks in while sleeping with interrupts disabled
+ */
+static struct test_struct nmi_sleep_test[] = {
+	{ "?", "S0*" }, /* Clear break points */
+	{ "c", "T0*", 0, got_break }, /* Continue */
+	{ "D", "OK" }, /* Detach */
+	{ "D", "OK", 0,  got_break }, /* If the test worked we made it here */
+	{ "", "" },
+};
+
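+/*
+ * Frame a GDB remote protocol packet as $<payload>#<checksum>, where
+ * the checksum is the modulo-256 sum of the payload bytes printed as
+ * two hex digits; e.g. the payload "g" is framed as "$g#67".
+ */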
+static void fill_get_buf(char *buf)
+{
+	unsigned char checksum = 0;
+	int count = 0;
+	char ch;
+
+	strcpy(get_buf, "$");
+	strcat(get_buf, buf);
+	while ((ch = buf[count])) {
+		checksum += ch;
+		count++;
+	}
+	strcat(get_buf, "#");
+	get_buf[count + 2] = hexchars[checksum >> 4];
+	get_buf[count + 3] = hexchars[checksum & 0xf];
+	get_buf[count + 4] = '\0';
+	v2printk("get%i: %s\n", ts.idx, get_buf);
+}
+
+static int validate_simple_test(char *put_str)
+{
+	char *chk_str;
+
+	if (ts.tst[ts.idx].put_handler)
+		return ts.tst[ts.idx].put_handler(put_str,
+			ts.tst[ts.idx].put);
+
+	chk_str = ts.tst[ts.idx].put;
+	if (*put_str == '$')
+		put_str++;
+
+	while (*chk_str != '\0' && *put_str != '\0') {
+		/* If someone does a * to match the rest of the string, allow
+		 * it, or stop if the received string is complete.
+		 */
+		if (*put_str == '#' || *chk_str == '*')
+			return 0;
+		if (*put_str != *chk_str)
+			return 1;
+
+		chk_str++;
+		put_str++;
+	}
+	if (*chk_str == '\0' && (*put_str == '\0' || *put_str == '#'))
+		return 0;
+
+	return 1;
+}
+
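+/*
+ * Single I/O callback for both directions: when is_get_char is set,
+ * kgdb is reading a character from the simulated debugger, served
+ * from get_buf; otherwise kgdb is writing a character, which is
+ * accumulated in put_buf until the trailing "#XX" checksum completes
+ * the packet and it can be validated.
+ */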
+static int run_simple_test(int is_get_char, int chr)
+{
+	int ret = 0;
+	if (is_get_char) {
+		/* If a prior put completed and set the send_ack flag,
+		 * answer this get with an ACK.
+		 */
+		if (send_ack) {
+			send_ack = 0;
+			return '+';
+		}
+		/* On the first get char, fill the transmit buffer and then
+		 * serve characters from it.
+		 */
+		if (get_buf_cnt == 0) {
+			if (ts.tst[ts.idx].get_handler)
+				ts.tst[ts.idx].get_handler(ts.tst[ts.idx].get);
+			else
+				fill_get_buf(ts.tst[ts.idx].get);
+		}
+
+		if (get_buf[get_buf_cnt] == '\0') {
+			eprintk("kgdbts: ERROR GET: EOB on '%s' at %i\n",
+			   ts.name, ts.idx);
+			get_buf_cnt = 0;
+			fill_get_buf("D");
+		}
+		ret = get_buf[get_buf_cnt];
+		get_buf_cnt++;
+		return ret;
+	}
+
+	/* This callback is the put char path, used when kgdb sends data
+	 * to this I/O module.
+	 */
+	if (ts.tst[ts.idx].get[0] == '\0' &&
+		ts.tst[ts.idx].put[0] == '\0') {
+		eprintk("kgdbts: ERROR: beyond end of test on"
+			   " '%s' line %i\n", ts.name, ts.idx);
+		return 0;
+	}
+
+	if (put_buf_cnt >= BUFMAX) {
+		eprintk("kgdbts: ERROR: put buffer overflow on"
+			   " '%s' line %i\n", ts.name, ts.idx);
+		put_buf_cnt = 0;
+		return 0;
+	}
+	/* Ignore everything until the first valid packet start '$' */
+	if (put_buf_cnt == 0 && chr != '$')
+		return 0;
+
+	put_buf[put_buf_cnt] = chr;
+	put_buf_cnt++;
+
+	/* End of packet == #XX so look for the '#' */
+	if (put_buf_cnt > 3 && put_buf[put_buf_cnt - 3] == '#') {
+		put_buf[put_buf_cnt] = '\0';
+		v2printk("put%i: %s\n", ts.idx, put_buf);
+		/* Trigger check here */
+		if (ts.validate_put && ts.validate_put(put_buf)) {
+			eprintk("kgdbts: ERROR PUT: end of test "
+			   "buffer on '%s' line %i expected %s got %s\n",
+			   ts.name, ts.idx, ts.tst[ts.idx].put, put_buf);
+		}
+		ts.idx++;
+		put_buf_cnt = 0;
+		get_buf_cnt = 0;
+		send_ack = 1;
+	}
+	return 0;
+}
+
+static void init_simple_test(void)
+{
+	memset(&ts, 0, sizeof(ts));
+	ts.run_test = run_simple_test;
+	ts.validate_put = validate_simple_test;
+}
+
+static void run_plant_and_detach_test(int is_early)
+{
+	char before[BREAK_INSTR_SIZE];
+	char after[BREAK_INSTR_SIZE];
+
+	probe_kernel_read(before, (char *)kgdbts_break_test,
+	  BREAK_INSTR_SIZE);
+	init_simple_test();
+	ts.tst = plant_and_detach_test;
+	ts.name = "plant_and_detach_test";
+	/* Activate test with initial breakpoint */
+	if (!is_early)
+		kgdb_breakpoint();
+	probe_kernel_read(after, (char *)kgdbts_break_test,
+	  BREAK_INSTR_SIZE);
+	if (memcmp(before, after, BREAK_INSTR_SIZE)) {
+		printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
+		panic("kgdb memory corruption");
+	}
+
+	/* complete the detach test */
+	if (!is_early)
+		kgdbts_break_test();
+}
+
+static void run_breakpoint_test(int is_hw_breakpoint)
+{
+	test_complete = 0;
+	init_simple_test();
+	if (is_hw_breakpoint) {
+		ts.tst = hw_breakpoint_test;
+		ts.name = "hw_breakpoint_test";
+	} else {
+		ts.tst = sw_breakpoint_test;
+		ts.name = "sw_breakpoint_test";
+	}
+	/* Activate test with initial breakpoint */
+	kgdb_breakpoint();
+	/* run code with the break point in it */
+	kgdbts_break_test();
+	kgdb_breakpoint();
+
+	if (test_complete)
+		return;
+
+	eprintk("kgdbts: ERROR %s test failed\n", ts.name);
+}
+
+static void run_hw_break_test(int is_write_test)
+{
+	test_complete = 0;
+	init_simple_test();
+	if (is_write_test) {
+		ts.tst = hw_write_break_test;
+		ts.name = "hw_write_break_test";
+	} else {
+		ts.tst = hw_access_break_test;
+		ts.name = "hw_access_break_test";
+	}
+	/* Activate test with initial breakpoint */
+	kgdb_breakpoint();
+	hw_break_val_access();
+	if (is_write_test) {
+		if (test_complete == 2)
+			eprintk("kgdbts: ERROR %s broke on access\n",
+				ts.name);
+		hw_break_val_write();
+	}
+	kgdb_breakpoint();
+
+	if (test_complete == 1)
+		return;
+
+	eprintk("kgdbts: ERROR %s test failed\n", ts.name);
+}
+
+static void run_nmi_sleep_test(int nmi_sleep)
+{
+	unsigned long flags;
+
+	init_simple_test();
+	ts.tst = nmi_sleep_test;
+	ts.name = "nmi_sleep_test";
+	/* Activate test with initial breakpoint */
+	kgdb_breakpoint();
+	local_irq_save(flags);
+	mdelay(nmi_sleep*1000);
+	touch_nmi_watchdog();
+	local_irq_restore(flags);
+	if (test_complete != 2)
+		eprintk("kgdbts: ERROR nmi_test did not hit nmi\n");
+	kgdb_breakpoint();
+	if (test_complete == 1)
+		return;
+
+	eprintk("kgdbts: ERROR %s test failed\n", ts.name);
+}
+
+static void run_bad_read_test(void)
+{
+	init_simple_test();
+	ts.tst = bad_read_test;
+	ts.name = "bad_read_test";
+	/* Activate test with initial breakpoint */
+	kgdb_breakpoint();
+}
+
+static void run_do_fork_test(void)
+{
+	init_simple_test();
+	ts.tst = do_fork_test;
+	ts.name = "do_fork_test";
+	/* Activate test with initial breakpoint */
+	kgdb_breakpoint();
+}
+
+static void run_sys_open_test(void)
+{
+	init_simple_test();
+	ts.tst = sys_open_test;
+	ts.name = "sys_open_test";
+	/* Activate test with initial breakpoint */
+	kgdb_breakpoint();
+}
+
+static void run_singlestep_break_test(void)
+{
+	init_simple_test();
+	ts.tst = singlestep_break_test;
+	ts.name = "singlestep_breakpoint_test";
+	/* Activate test with initial breakpoint */
+	kgdb_breakpoint();
+	kgdbts_break_test();
+	kgdbts_break_test();
+}
+
+static void kgdbts_run_tests(void)
+{
+	char *ptr;
+	int fork_test = 0;
+	int sys_open_test = 0;
+	int nmi_sleep = 0;
+
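+	/* Parse the optional F##, S## and N## counts out of "config". */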
+	ptr = strstr(config, "F");
+	if (ptr)
+		fork_test = simple_strtol(ptr+1, NULL, 10);
+	ptr = strstr(config, "S");
+	if (ptr)
+		sys_open_test = simple_strtol(ptr+1, NULL, 10);
+	ptr = strstr(config, "N");
+	if (ptr)
+		nmi_sleep = simple_strtol(ptr+1, NULL, 10);
+
+	/* required internal KGDB tests */
+	v1printk("kgdbts:RUN plant and detach test\n");
+	run_plant_and_detach_test(0);
+	v1printk("kgdbts:RUN sw breakpoint test\n");
+	run_breakpoint_test(0);
+	v1printk("kgdbts:RUN bad memory access test\n");
+	run_bad_read_test();
+	v1printk("kgdbts:RUN singlestep breakpoint test\n");
+	run_singlestep_break_test();
+
+	/* ===Optional tests=== */
+
+	/* All HW break point tests */
+	if (arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT) {
+		v1printk("kgdbts:RUN hw breakpoint test\n");
+		run_breakpoint_test(1);
+		v1printk("kgdbts:RUN hw write breakpoint test\n");
+		run_hw_break_test(1);
+		v1printk("kgdbts:RUN hw access breakpoint test\n");
+		run_hw_break_test(0);
+	}
+
+	if (nmi_sleep) {
+		v1printk("kgdbts:RUN NMI sleep %i seconds test\n", nmi_sleep);
+		run_nmi_sleep_test(nmi_sleep);
+	}
+
+	/* If the do_fork test is run it will be the last test that is
+	 * executed because a kernel thread will be spawned at the very
+	 * end to unregister the debug hooks.
+	 */
+	if (fork_test) {
+		repeat_test = fork_test;
+		printk(KERN_INFO "kgdbts:RUN do_fork for %i breakpoints\n",
+			repeat_test);
+		kthread_run(kgdbts_unreg_thread, 0, "kgdbts_unreg");
+		run_do_fork_test();
+		return;
+	}
+
+	/* If the sys_open test is run it will be the last test that is
+	 * executed because a kernel thread will be spawned at the very
+	 * end to unregister the debug hooks.
+	 */
+	if (sys_open_test) {
+		repeat_test = sys_open_test;
+		printk(KERN_INFO "kgdbts:RUN sys_open for %i breakpoints\n",
+			repeat_test);
+		kthread_run(kgdbts_unreg_thread, 0, "kgdbts_unreg");
+		run_sys_open_test();
+		return;
+	}
+	/* Shutdown and unregister */
+	kgdb_unregister_io_module(&kgdbts_io_ops);
+	configured = 0;
+}
+
+static int kgdbts_option_setup(char *opt)
+{
+	if (strlen(opt) > MAX_CONFIG_LEN) {
+		printk(KERN_ERR "kgdbts: config string too long\n");
+		return -ENOSPC;
+	}
+	strcpy(config, opt);
+
+	verbose = 0;
+	if (strstr(config, "V1"))
+		verbose = 1;
+	if (strstr(config, "V2"))
+		verbose = 2;
+
+	return 0;
+}
+
+__setup("kgdbts=", kgdbts_option_setup);
+
+static int configure_kgdbts(void)
+{
+	int err = 0;
+
+	if (!strlen(config) || isspace(config[0]))
+		goto noconfig;
+	err = kgdbts_option_setup(config);
+	if (err)
+		goto noconfig;
+
+	final_ack = 0;
+	run_plant_and_detach_test(1);
+
+	err = kgdb_register_io_module(&kgdbts_io_ops);
+	if (err) {
+		configured = 0;
+		return err;
+	}
+	configured = 1;
+	kgdbts_run_tests();
+
+	return err;
+
+noconfig:
+	config[0] = 0;
+	configured = 0;
+
+	return err;
+}
+
+static int __init init_kgdbts(void)
+{
+	/* Already configured? */
+	if (configured == 1)
+		return 0;
+
+	return configure_kgdbts();
+}
+
+static void cleanup_kgdbts(void)
+{
+	if (configured == 1)
+		kgdb_unregister_io_module(&kgdbts_io_ops);
+}
+
+static int kgdbts_get_char(void)
+{
+	int val = 0;
+
+	if (ts.run_test)
+		val = ts.run_test(1, 0);
+
+	return val;
+}
+
+static void kgdbts_put_char(u8 chr)
+{
+	if (ts.run_test)
+		ts.run_test(0, chr);
+}
+
+static int param_set_kgdbts_var(const char *kmessage, struct kernel_param *kp)
+{
+	int len = strlen(kmessage);
+
+	if (len >= MAX_CONFIG_LEN) {
+		printk(KERN_ERR "kgdbts: config string too long\n");
+		return -ENOSPC;
+	}
+
+	/* Only copy in the string if the init function has not run yet */
+	if (configured < 0) {
+		strcpy(config, kmessage);
+		return 0;
+	}
+
+	if (kgdb_connected) {
+		printk(KERN_ERR
+	       "kgdbts: Cannot reconfigure while KGDB is connected.\n");
+
+		return -EBUSY;
+	}
+
+	strcpy(config, kmessage);
+	/* Chop out \n char as a result of echo */
+	if (config[len - 1] == '\n')
+		config[len - 1] = '\0';
+
+	if (configured == 1)
+		cleanup_kgdbts();
+
+	/* Go and configure with the new params. */
+	return configure_kgdbts();
+}
+
+static void kgdbts_pre_exp_handler(void)
+{
+	/* Increment the module count when the debugger is active */
+	if (!kgdb_connected)
+		try_module_get(THIS_MODULE);
+}
+
+static void kgdbts_post_exp_handler(void)
+{
+	/* decrement the module count when the debugger detaches */
+	if (!kgdb_connected)
+		module_put(THIS_MODULE);
+}
+
+static struct kgdb_io kgdbts_io_ops = {
+	.name			= "kgdbts",
+	.read_char		= kgdbts_get_char,
+	.write_char		= kgdbts_put_char,
+	.pre_exception		= kgdbts_pre_exp_handler,
+	.post_exception		= kgdbts_post_exp_handler,
+};
+
+module_init(init_kgdbts);
+module_exit(cleanup_kgdbts);
+module_param_call(kgdbts, param_set_kgdbts_var, param_get_string, &kps, 0644);
+MODULE_PARM_DESC(kgdbts, "<A|V1|V2>[F#|S#][N#]");
+MODULE_DESCRIPTION("KGDB Test Suite");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Wind River Systems, Inc.");
+
diff --git a/drivers/net/mlx4/catas.c b/drivers/net/mlx4/catas.c
index 6b32ec9..aa95287 100644
--- a/drivers/net/mlx4/catas.c
+++ b/drivers/net/mlx4/catas.c
@@ -69,7 +69,7 @@
 	if (readl(priv->catas_err.map)) {
 		dump_err_buf(dev);
 
-		mlx4_dispatch_event(dev, MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR, 0, 0);
+		mlx4_dispatch_event(dev, MLX4_DEV_EVENT_CATASTROPHIC_ERROR, 0);
 
 		if (internal_err_reset) {
 			spin_lock(&catas_lock);
diff --git a/drivers/net/mlx4/cmd.c b/drivers/net/mlx4/cmd.c
index db49051..70dff94 100644
--- a/drivers/net/mlx4/cmd.c
+++ b/drivers/net/mlx4/cmd.c
@@ -106,7 +106,8 @@
 	u16			token;
 };
 
-static int mlx4_status_to_errno(u8 status) {
+static int mlx4_status_to_errno(u8 status)
+{
 	static const int trans_table[] = {
 		[CMD_STAT_INTERNAL_ERR]	  = -EIO,
 		[CMD_STAT_BAD_OP]	  = -EPERM,
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c
index d4441fe..caa5bcf 100644
--- a/drivers/net/mlx4/cq.c
+++ b/drivers/net/mlx4/cq.c
@@ -38,6 +38,7 @@
 #include <linux/hardirq.h>
 
 #include <linux/mlx4/cmd.h>
+#include <linux/mlx4/cq.h>
 
 #include "mlx4.h"
 #include "icm.h"
@@ -47,21 +48,19 @@
 	u16			reserved1[3];
 	__be16			page_offset;
 	__be32			logsize_usrpage;
-	u8			reserved2;
-	u8			cq_period;
-	u8			reserved3;
-	u8			cq_max_count;
-	u8			reserved4[3];
+	__be16			cq_period;
+	__be16			cq_max_count;
+	u8			reserved2[3];
 	u8			comp_eqn;
 	u8			log_page_size;
-	u8			reserved5[2];
+	u8			reserved3[2];
 	u8			mtt_base_addr_h;
 	__be32			mtt_base_addr_l;
 	__be32			last_notified_index;
 	__be32			solicit_producer_index;
 	__be32			consumer_index;
 	__be32			producer_index;
-	u32			reserved6[2];
+	u32			reserved4[2];
 	__be64			db_rec_addr;
 };
 
@@ -121,6 +120,13 @@
 			MLX4_CMD_TIME_CLASS_A);
 }
 
+static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
+			 int cq_num, u32 opmod)
+{
+	return mlx4_cmd(dev, mailbox->dma, cq_num, opmod, MLX4_CMD_MODIFY_CQ,
+			MLX4_CMD_TIME_CLASS_A);
+}
+
 static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
 			 int cq_num)
 {
@@ -129,6 +135,58 @@
 			    MLX4_CMD_TIME_CLASS_A);
 }
 
+int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
+		   u16 count, u16 period)
+{
+	struct mlx4_cmd_mailbox *mailbox;
+	struct mlx4_cq_context *cq_context;
+	int err;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+
+	cq_context = mailbox->buf;
+	memset(cq_context, 0, sizeof *cq_context);
+
+	cq_context->cq_max_count = cpu_to_be16(count);
+	cq_context->cq_period    = cpu_to_be16(period);
+
+	err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1);
+
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_cq_modify);
+
+int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
+		   int entries, struct mlx4_mtt *mtt)
+{
+	struct mlx4_cmd_mailbox *mailbox;
+	struct mlx4_cq_context *cq_context;
+	u64 mtt_addr;
+	int err;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+
+	cq_context = mailbox->buf;
+	memset(cq_context, 0, sizeof *cq_context);
+
+	cq_context->logsize_usrpage = cpu_to_be32(ilog2(entries) << 24);
+	cq_context->log_page_size   = mtt->page_shift - 12;
+	mtt_addr = mlx4_mtt_addr(dev, mtt);
+	cq_context->mtt_base_addr_h = mtt_addr >> 32;
+	cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
+
+	err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1);
+
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_cq_resize);
+
 int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
 		  struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq)
 {
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index 9c36c20..e141a15 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -202,7 +202,10 @@
 			break;
 
 		case MLX4_EVENT_TYPE_PORT_CHANGE:
-			mlx4_dispatch_event(dev, eqe->type, eqe->subtype,
+			mlx4_dispatch_event(dev,
+					    eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_ACTIVE ?
+					    MLX4_DEV_EVENT_PORT_UP :
+					    MLX4_DEV_EVENT_PORT_DOWN,
 					    be32_to_cpu(eqe->event.port_change.port) >> 28);
 			break;
 
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 61dc495..d82f275 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -133,6 +133,7 @@
 #define QUERY_DEV_CAP_MAX_AV_OFFSET		0x27
 #define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET		0x29
 #define QUERY_DEV_CAP_MAX_RES_QP_OFFSET		0x2b
+#define QUERY_DEV_CAP_MAX_GSO_OFFSET		0x2d
 #define QUERY_DEV_CAP_MAX_RDMA_OFFSET		0x2f
 #define QUERY_DEV_CAP_RSZ_SRQ_OFFSET		0x33
 #define QUERY_DEV_CAP_ACK_DELAY_OFFSET		0x35
@@ -215,6 +216,13 @@
 	dev_cap->max_requester_per_qp = 1 << (field & 0x3f);
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET);
 	dev_cap->max_responder_per_qp = 1 << (field & 0x3f);
+	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET);
+	field &= 0x1f;
+	if (!field)
+		dev_cap->max_gso_sz = 0;
+	else
+		dev_cap->max_gso_sz = 1 << field;
+
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET);
 	dev_cap->max_rdma_global = 1 << (field & 0x3f);
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
@@ -377,6 +385,7 @@
 		 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
 	mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
 		 dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
+	mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
 
 	dump_dev_cap_flags(dev, dev_cap->flags);
 
@@ -696,6 +705,10 @@
 	/* Check port for UD address vector: */
 	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1);
 
+	/* Enable IPoIB checksumming if we can: */
+	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
+		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3);
+
 	/* QPC/EEC/CQC/EQC/RDMARC attributes */
 
 	MLX4_PUT(inbox, param->qpc_base,      INIT_HCA_QPC_BASE_OFFSET);
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
index e16dec8..306cb9b 100644
--- a/drivers/net/mlx4/fw.h
+++ b/drivers/net/mlx4/fw.h
@@ -96,6 +96,7 @@
 	u8  bmme_flags;
 	u32 reserved_lkey;
 	u64 max_icm_sz;
+	int max_gso_sz;
 };
 
 struct mlx4_adapter {
diff --git a/drivers/net/mlx4/intf.c b/drivers/net/mlx4/intf.c
index be5d9e9..4a6c4d5 100644
--- a/drivers/net/mlx4/intf.c
+++ b/drivers/net/mlx4/intf.c
@@ -30,8 +30,6 @@
  * SOFTWARE.
  */
 
-#include <linux/mlx4/driver.h>
-
 #include "mlx4.h"
 
 struct mlx4_device_context {
@@ -113,8 +111,7 @@
 }
 EXPORT_SYMBOL_GPL(mlx4_unregister_interface);
 
-void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_event type,
-			 int subtype, int port)
+void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, int port)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_device_context *dev_ctx;
@@ -124,8 +121,7 @@
 
 	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
 		if (dev_ctx->intf->event)
-			dev_ctx->intf->event(dev, dev_ctx->context, type,
-					     subtype, port);
+			dev_ctx->intf->event(dev, dev_ctx->context, type, port);
 
 	spin_unlock_irqrestore(&priv->ctx_lock, flags);
 }
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 08bfc13..49a4aca 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -76,7 +76,7 @@
 	DRV_VERSION " (" DRV_RELDATE ")\n";
 
 static struct mlx4_profile default_profile = {
-	.num_qp		= 1 << 16,
+	.num_qp		= 1 << 17,
 	.num_srq	= 1 << 16,
 	.rdmarc_per_qp	= 1 << 4,
 	.num_cq		= 1 << 16,
@@ -159,6 +159,7 @@
 	dev->caps.page_size_cap	     = ~(u32) (dev_cap->min_page_sz - 1);
 	dev->caps.flags		     = dev_cap->flags;
 	dev->caps.stat_rate_support  = dev_cap->stat_rate_support;
+	dev->caps.max_gso_sz	     = dev_cap->max_gso_sz;
 
 	return 0;
 }
@@ -735,8 +736,7 @@
 	}
 
 	/*
-	 * Check for BARs.  We expect 0: 1MB, 2: 8MB, 4: DDR (may not
-	 * be present)
+	 * Check for BARs.  We expect 0: 1MB
 	 */
 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
 	    pci_resource_len(pdev, 0) != 1 << 20) {
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c
index a99e772..57f7f1f 100644
--- a/drivers/net/mlx4/mcg.c
+++ b/drivers/net/mlx4/mcg.c
@@ -190,10 +190,6 @@
 		}
 		index += dev->caps.num_mgms;
 
-		err = mlx4_READ_MCG(dev, index, mailbox);
-		if (err)
-			goto out;
-
 		memset(mgm, 0, sizeof *mgm);
 		memcpy(mgm->gid, gid, 16);
 	}
@@ -301,12 +297,10 @@
 	mgm->qp[loc]       = mgm->qp[i - 1];
 	mgm->qp[i - 1]     = 0;
 
-	err = mlx4_WRITE_MCG(dev, index, mailbox);
-	if (err)
+	if (i != 1) {
+		err = mlx4_WRITE_MCG(dev, index, mailbox);
 		goto out;
-
-	if (i != 1)
-		goto out;
+	}
 
 	if (prev == -1) {
 		/* Remove entry from MGM */
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 53a1cdd..7333681 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -42,6 +42,7 @@
 #include <linux/timer.h>
 
 #include <linux/mlx4/device.h>
+#include <linux/mlx4/driver.h>
 #include <linux/mlx4/doorbell.h>
 
 #define DRV_NAME	"mlx4_core"
@@ -313,8 +314,7 @@
 int mlx4_restart_one(struct pci_dev *pdev);
 int mlx4_register_device(struct mlx4_dev *dev);
 void mlx4_unregister_device(struct mlx4_dev *dev);
-void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_event type,
-			 int subtype, int port);
+void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, int port);
 
 struct mlx4_dev_cap;
 struct mlx4_init_hca_param;
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index e879b21..0788319 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -20,6 +20,7 @@
 config DASD
 	tristate "Support for DASD devices"
 	depends on CCW && BLOCK
+	select IOSCHED_DEADLINE
 	help
 	  Enable this option if you want to access DASDs directly utilizing
 	  S/390s channel subsystem commands. This is necessary for running
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index ccf46c9..ac6d4d3 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -980,12 +980,12 @@
 			break;
 		case -ETIMEDOUT:
 			printk(KERN_WARNING"%s(%s): request timed out\n",
-			       __FUNCTION__, cdev->dev.bus_id);
+			       __func__, cdev->dev.bus_id);
 			//FIXME - dasd uses own timeout interface...
 			break;
 		default:
 			printk(KERN_WARNING"%s(%s): unknown error %ld\n",
-			       __FUNCTION__, cdev->dev.bus_id, PTR_ERR(irb));
+			       __func__, cdev->dev.bus_id, PTR_ERR(irb));
 		}
 		return;
 	}
@@ -1956,6 +1956,7 @@
 	block->request_queue->queuedata = block;
 
 	elevator_exit(block->request_queue->elevator);
+	block->request_queue->elevator = NULL;
 	rc = elevator_init(block->request_queue, "deadline");
 	if (rc) {
 		blk_cleanup_queue(block->request_queue);
@@ -2298,9 +2299,8 @@
 	 * in the other openers.
 	 */
 	if (device->block) {
-		struct dasd_block *block = device->block;
-		max_count = block->bdev ? 0 : -1;
-		open_count = (int) atomic_read(&block->open_count);
+		max_count = device->block->bdev ? 0 : -1;
+		open_count = atomic_read(&device->block->open_count);
 		if (open_count > max_count) {
 			if (open_count > 0)
 				printk(KERN_WARNING "Can't offline dasd "
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index b19db20..e6700df 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -1996,6 +1996,36 @@
 }				/* end dasd_3990_erp_compound */
 
 /*
+ * DASD_3990_ERP_HANDLE_SIM
+ *
+ * DESCRIPTION
+ *   inspects the SIM SENSE data and starts an appropriate action
+ *
+ * PARAMETER
+ *   sense	   sense data of the actual error
+ *
+ * RETURN VALUES
+ *   none
+ */
+void
+dasd_3990_erp_handle_sim(struct dasd_device *device, char *sense)
+{
+	/* print message according to log or message to operator mode */
+	if ((sense[24] & DASD_SIM_MSG_TO_OP) || (sense[1] & 0x10)) {
+
+		/* print SIM SRC from RefCode */
+		DEV_MESSAGE(KERN_ERR, device, "SIM - SRC: "
+			    "%02x%02x%02x%02x", sense[22],
+			    sense[23], sense[11], sense[12]);
+	} else if (sense[24] & DASD_SIM_LOG) {
+		/* print SIM SRC Refcode */
+		DEV_MESSAGE(KERN_WARNING, device, "SIM - SRC: "
+			    "%02x%02x%02x%02x", sense[22],
+			    sense[23], sense[11], sense[12]);
+	}
+}
+
+/*
  * DASD_3990_ERP_INSPECT_32
  *
  * DESCRIPTION
@@ -2018,6 +2048,10 @@
 
 	erp->function = dasd_3990_erp_inspect_32;
 
+	/* check for SIM sense data */
+	if ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)
+		dasd_3990_erp_handle_sim(device, sense);
+
 	if (sense[25] & DASD_SENSE_BIT_0) {
 
 		/* compound program action codes (byte25 bit 0 == '1') */
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 3a40bee..2d8df0b 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -745,6 +745,19 @@
 	spin_unlock_irqrestore(&lcu->lock, flags);
 }
 
+static void __stop_device_on_lcu(struct dasd_device *device,
+				 struct dasd_device *pos)
+{
+	/* If pos == device then device is already locked! */
+	if (pos == device) {
+		pos->stopped |= DASD_STOPPED_SU;
+		return;
+	}
+	spin_lock(get_ccwdev_lock(pos->cdev));
+	pos->stopped |= DASD_STOPPED_SU;
+	spin_unlock(get_ccwdev_lock(pos->cdev));
+}
+
 /*
  * This function is called in interrupt context, so the
  * cdev lock for device is already locked!
@@ -755,35 +768,15 @@
 	struct alias_pav_group *pavgroup;
 	struct dasd_device *pos;
 
-	list_for_each_entry(pos, &lcu->active_devices, alias_list) {
-		if (pos != device)
-			spin_lock(get_ccwdev_lock(pos->cdev));
-		pos->stopped |= DASD_STOPPED_SU;
-		if (pos != device)
-			spin_unlock(get_ccwdev_lock(pos->cdev));
-	}
-	list_for_each_entry(pos, &lcu->inactive_devices, alias_list) {
-		if (pos != device)
-			spin_lock(get_ccwdev_lock(pos->cdev));
-		pos->stopped |= DASD_STOPPED_SU;
-		if (pos != device)
-			spin_unlock(get_ccwdev_lock(pos->cdev));
-	}
+	list_for_each_entry(pos, &lcu->active_devices, alias_list)
+		__stop_device_on_lcu(device, pos);
+	list_for_each_entry(pos, &lcu->inactive_devices, alias_list)
+		__stop_device_on_lcu(device, pos);
 	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
-		list_for_each_entry(pos, &pavgroup->baselist, alias_list) {
-			if (pos != device)
-				spin_lock(get_ccwdev_lock(pos->cdev));
-			pos->stopped |= DASD_STOPPED_SU;
-			if (pos != device)
-				spin_unlock(get_ccwdev_lock(pos->cdev));
-		}
-		list_for_each_entry(pos, &pavgroup->aliaslist, alias_list) {
-			if (pos != device)
-				spin_lock(get_ccwdev_lock(pos->cdev));
-			pos->stopped |= DASD_STOPPED_SU;
-			if (pos != device)
-				spin_unlock(get_ccwdev_lock(pos->cdev));
-		}
+		list_for_each_entry(pos, &pavgroup->baselist, alias_list)
+			__stop_device_on_lcu(device, pos);
+		list_for_each_entry(pos, &pavgroup->aliaslist, alias_list)
+			__stop_device_on_lcu(device, pos);
 	}
 }
 
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 61f1693..a0edae0 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1415,6 +1415,13 @@
 		return;
 	}
 
+
+	/* service information message SIM */
+	if ((irb->ecw[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE) {
+		dasd_3990_erp_handle_sim(device, irb->ecw);
+		return;
+	}
+
 	/* just report other unsolicited interrupts */
 	DEV_MESSAGE(KERN_DEBUG, device, "%s",
 		    "unsolicited interrupt received");
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index d13ea05..1166115 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -125,7 +125,8 @@
 
 	private = (struct dasd_fba_private *) device->private;
 	if (private == NULL) {
-		private = kzalloc(sizeof(struct dasd_fba_private), GFP_KERNEL);
+		private = kzalloc(sizeof(struct dasd_fba_private),
+				  GFP_KERNEL | GFP_DMA);
 		if (private == NULL) {
 			DEV_MESSAGE(KERN_WARNING, device, "%s",
 				    "memory allocation failed for private "
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 44b2984..6c624bf 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -72,6 +72,11 @@
 #define DASD_SENSE_BIT_2 0x20
 #define DASD_SENSE_BIT_3 0x10
 
+/* BIT DEFINITIONS FOR SIM SENSE */
+#define DASD_SIM_SENSE 0x0F
+#define DASD_SIM_MSG_TO_OP 0x03
+#define DASD_SIM_LOG 0x0C
+
 /*
  * SECTION: MACROs for klogd and s390 debug feature (dbf)
  */
@@ -621,6 +626,7 @@
 
 /* externals in dasd_3990_erp.c */
 struct dasd_ccw_req *dasd_3990_erp_action(struct dasd_ccw_req *);
+void dasd_3990_erp_handle_sim(struct dasd_device *, char *);
 
 /* externals in dasd_eer.c */
 #ifdef CONFIG_DASD_EER
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index e6c94db..04787ea 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -142,57 +142,6 @@
 	return NULL;
 }
 
-/*
- * print appropriate error message for segment_load()/segment_type()
- * return code
- */
-static void
-dcssblk_segment_warn(int rc, char* seg_name)
-{
-	switch (rc) {
-	case -ENOENT:
-		PRINT_WARN("cannot load/query segment %s, does not exist\n",
-			   seg_name);
-		break;
-	case -ENOSYS:
-		PRINT_WARN("cannot load/query segment %s, not running on VM\n",
-			   seg_name);
-		break;
-	case -EIO:
-		PRINT_WARN("cannot load/query segment %s, hardware error\n",
-			   seg_name);
-		break;
-	case -ENOTSUPP:
-		PRINT_WARN("cannot load/query segment %s, is a multi-part "
-			   "segment\n", seg_name);
-		break;
-	case -ENOSPC:
-		PRINT_WARN("cannot load/query segment %s, overlaps with "
-			   "storage\n", seg_name);
-		break;
-	case -EBUSY:
-		PRINT_WARN("cannot load/query segment %s, overlaps with "
-			   "already loaded dcss\n", seg_name);
-		break;
-	case -EPERM:
-		PRINT_WARN("cannot load/query segment %s, already loaded in "
-			   "incompatible mode\n", seg_name);
-		break;
-	case -ENOMEM:
-		PRINT_WARN("cannot load/query segment %s, out of memory\n",
-			   seg_name);
-		break;
-	case -ERANGE:
-		PRINT_WARN("cannot load/query segment %s, exceeds kernel "
-			   "mapping range\n", seg_name);
-		break;
-	default:
-		PRINT_WARN("cannot load/query segment %s, return value %i\n",
-			   seg_name, rc);
-		break;
-	}
-}
-
 static void dcssblk_unregister_callback(struct device *dev)
 {
 	device_unregister(dev);
@@ -423,7 +372,7 @@
 	rc = segment_load(local_buf, SEGMENT_SHARED,
 				&dev_info->start, &dev_info->end);
 	if (rc < 0) {
-		dcssblk_segment_warn(rc, dev_info->segment_name);
+		segment_warning(rc, dev_info->segment_name);
 		goto dealloc_gendisk;
 	}
 	seg_byte_size = (dev_info->end - dev_info->start + 1);
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 67009bf..1e1f506 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -111,56 +111,6 @@
 	ASCEBC(ebcdic_name, 8);
 }
 
-/*
- * print appropriate error message for segment_load()/segment_type()
- * return code
- */
-static void mon_segment_warn(int rc, char* seg_name)
-{
-	switch (rc) {
-	case -ENOENT:
-		P_WARNING("cannot load/query segment %s, does not exist\n",
-			  seg_name);
-		break;
-	case -ENOSYS:
-		P_WARNING("cannot load/query segment %s, not running on VM\n",
-			  seg_name);
-		break;
-	case -EIO:
-		P_WARNING("cannot load/query segment %s, hardware error\n",
-			  seg_name);
-		break;
-	case -ENOTSUPP:
-		P_WARNING("cannot load/query segment %s, is a multi-part "
-			  "segment\n", seg_name);
-		break;
-	case -ENOSPC:
-		P_WARNING("cannot load/query segment %s, overlaps with "
-			  "storage\n", seg_name);
-		break;
-	case -EBUSY:
-		P_WARNING("cannot load/query segment %s, overlaps with "
-			  "already loaded dcss\n", seg_name);
-		break;
-	case -EPERM:
-		P_WARNING("cannot load/query segment %s, already loaded in "
-			  "incompatible mode\n", seg_name);
-		break;
-	case -ENOMEM:
-		P_WARNING("cannot load/query segment %s, out of memory\n",
-			  seg_name);
-		break;
-	case -ERANGE:
-		P_WARNING("cannot load/query segment %s, exceeds kernel "
-			  "mapping range\n", seg_name);
-		break;
-	default:
-		P_WARNING("cannot load/query segment %s, return value %i\n",
-			  seg_name, rc);
-		break;
-	}
-}
-
 static inline unsigned long mon_mca_start(struct mon_msg *monmsg)
 {
 	return *(u32 *) &monmsg->msg.rmmsg;
@@ -585,7 +535,7 @@
 
 	rc = segment_type(mon_dcss_name);
 	if (rc < 0) {
-		mon_segment_warn(rc, mon_dcss_name);
+		segment_warning(rc, mon_dcss_name);
 		goto out_iucv;
 	}
 	if (rc != SEG_TYPE_SC) {
@@ -598,7 +548,7 @@
 	rc = segment_load(mon_dcss_name, SEGMENT_SHARED,
 			  &mon_dcss_start, &mon_dcss_end);
 	if (rc < 0) {
-		mon_segment_warn(rc, mon_dcss_name);
+		segment_warning(rc, mon_dcss_name);
 		rc = -EINVAL;
 		goto out_iucv;
 	}
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index 2e616e3..e3b3d39 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -332,7 +332,7 @@
 		if (sclp_ttybuf == NULL) {
 			while (list_empty(&sclp_tty_pages)) {
 				spin_unlock_irqrestore(&sclp_tty_lock, flags);
-				if (in_atomic())
+				if (in_interrupt())
 					sclp_sync_wait();
 				else
 					wait_event(sclp_tty_waitq,
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index f7b258d..ed50759 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -383,7 +383,7 @@
  */
 static int
 __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
-		   int convertlf)
+		   int convertlf, int may_schedule)
 {
 	unsigned long flags;
 	void *page;
@@ -398,9 +398,8 @@
 		/* Create a sclp output buffer if none exists yet */
 		if (sclp_vt220_current_request == NULL) {
 			while (list_empty(&sclp_vt220_empty)) {
-				spin_unlock_irqrestore(&sclp_vt220_lock,
-						       flags);
-				if (in_atomic())
+				spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+				if (in_interrupt() || !may_schedule)
 					sclp_sync_wait();
 				else
 					wait_event(sclp_vt220_waitq,
@@ -450,7 +449,7 @@
 static int
 sclp_vt220_write(struct tty_struct *tty, const unsigned char *buf, int count)
 {
-	return __sclp_vt220_write(buf, count, 1, 0);
+	return __sclp_vt220_write(buf, count, 1, 0, 1);
 }
 
 #define SCLP_VT220_SESSION_ENDED	0x01
@@ -529,7 +528,7 @@
 static void
 sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch)
 {
-	__sclp_vt220_write(&ch, 1, 0, 0);
+	__sclp_vt220_write(&ch, 1, 0, 0, 1);
 }
 
 /*
@@ -746,7 +745,7 @@
 static void
 sclp_vt220_con_write(struct console *con, const char *buf, unsigned int count)
 {
-	__sclp_vt220_write((const unsigned char *) buf, count, 1, 1);
+	__sclp_vt220_write((const unsigned char *) buf, count, 1, 1, 0);
 }
 
 static struct tty_driver *
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 5b47e9c..874adf3 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -394,7 +394,7 @@
 			return tape_34xx_erp_failed(request, -ENOSPC);
 		default:
 			PRINT_ERR("Invalid op in %s:%i\n",
-				  __FUNCTION__, __LINE__);
+				  __func__, __LINE__);
 			return tape_34xx_erp_failed(request, 0);
 		}
 	}
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index b830a8c..ebe8406 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -83,9 +83,9 @@
 void
 tapechar_cleanup_device(struct tape_device *device)
 {
-	unregister_tape_dev(device->rt);
+	unregister_tape_dev(&device->cdev->dev, device->rt);
 	device->rt = NULL;
-	unregister_tape_dev(device->nt);
+	unregister_tape_dev(&device->cdev->dev, device->nt);
 	device->nt = NULL;
 }
 
diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c
index aa7f166..6dfdb7c 100644
--- a/drivers/s390/char/tape_class.c
+++ b/drivers/s390/char/tape_class.c
@@ -99,11 +99,10 @@
 }
 EXPORT_SYMBOL(register_tape_dev);
 
-void unregister_tape_dev(struct tape_class_device *tcd)
+void unregister_tape_dev(struct device *device, struct tape_class_device *tcd)
 {
 	if (tcd != NULL && !IS_ERR(tcd)) {
-		sysfs_remove_link(&tcd->class_device->kobj,
-				  tcd->mode_name);
+		sysfs_remove_link(&device->kobj, tcd->mode_name);
 		device_destroy(tape_class, tcd->char_device->dev);
 		cdev_del(tcd->char_device);
 		kfree(tcd);
diff --git a/drivers/s390/char/tape_class.h b/drivers/s390/char/tape_class.h
index e2b5ac9..707b7f4 100644
--- a/drivers/s390/char/tape_class.h
+++ b/drivers/s390/char/tape_class.h
@@ -56,6 +56,6 @@
 	char *			device_name,
 	char *			node_name
 );
-void unregister_tape_dev(struct tape_class_device *tcd);
+void unregister_tape_dev(struct device *device, struct tape_class_device *tcd);
 
 #endif /* __TAPE_CLASS_H__ */
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 7689b50..83ae9a8 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -100,7 +100,8 @@
 	urd->reclen = cdev->id.driver_info;
 	ccw_device_get_id(cdev, &urd->dev_id);
 	mutex_init(&urd->io_mutex);
-	mutex_init(&urd->open_mutex);
+	init_waitqueue_head(&urd->wait);
+	spin_lock_init(&urd->open_lock);
 	atomic_set(&urd->ref_count,  1);
 	urd->cdev = cdev;
 	get_device(&cdev->dev);
@@ -678,17 +679,21 @@
 	if (!urd)
 		return -ENXIO;
 
-	if (file->f_flags & O_NONBLOCK) {
-		if (!mutex_trylock(&urd->open_mutex)) {
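+	/*
+	 * Serialise open(): wait until no other opener holds the device,
+	 * or fail with -EBUSY for O_NONBLOCK opens.
+	 */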
+	spin_lock(&urd->open_lock);
+	while (urd->open_flag) {
+		spin_unlock(&urd->open_lock);
+		if (file->f_flags & O_NONBLOCK) {
 			rc = -EBUSY;
 			goto fail_put;
 		}
-	} else {
-		if (mutex_lock_interruptible(&urd->open_mutex)) {
+		if (wait_event_interruptible(urd->wait, urd->open_flag == 0)) {
 			rc = -ERESTARTSYS;
 			goto fail_put;
 		}
+		spin_lock(&urd->open_lock);
 	}
+	urd->open_flag++;
+	spin_unlock(&urd->open_lock);
 
 	TRACE("ur_open\n");
 
@@ -720,7 +725,9 @@
 fail_urfile_free:
 	urfile_free(urf);
 fail_unlock:
-	mutex_unlock(&urd->open_mutex);
+	spin_lock(&urd->open_lock);
+	urd->open_flag--;
+	spin_unlock(&urd->open_lock);
 fail_put:
 	urdev_put(urd);
 	return rc;
@@ -731,7 +738,10 @@
 	struct urfile *urf = file->private_data;
 
 	TRACE("ur_release\n");
-	mutex_unlock(&urf->urd->open_mutex);
+	spin_lock(&urf->urd->open_lock);
+	urf->urd->open_flag--;
+	spin_unlock(&urf->urd->open_lock);
+	wake_up_interruptible(&urf->urd->wait);
 	urdev_put(urf->urd);
 	urfile_free(urf);
 	return 0;
diff --git a/drivers/s390/char/vmur.h b/drivers/s390/char/vmur.h
index fa95964..fa320ad 100644
--- a/drivers/s390/char/vmur.h
+++ b/drivers/s390/char/vmur.h
@@ -62,7 +62,6 @@
 struct urdev {
 	struct ccw_device *cdev;	/* Backpointer to ccw device */
 	struct mutex io_mutex;		/* Serialises device IO */
-	struct mutex open_mutex;	/* Serialises access to device */
 	struct completion *io_done;	/* do_ur_io waits; irq completes */
 	struct device *device;
 	struct cdev *char_device;
@@ -71,6 +70,9 @@
 	int class;			/* VM device class */
 	int io_request_rc;		/* return code from I/O request */
 	atomic_t ref_count;		/* reference counter */
+	wait_queue_head_t wait;		/* wait queue to serialize open */
+	int open_flag;			/* "urdev is open" flag */
+	spinlock_t open_lock;		/* serialize critical sections */
 };
 
 /*
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c
index 6f40fac..19f8389 100644
--- a/drivers/s390/char/vmwatchdog.c
+++ b/drivers/s390/char/vmwatchdog.c
@@ -96,7 +96,7 @@
 
 	if (ret) {
 		printk(KERN_WARNING "%s: problem setting interval %d, "
-			"cmd %s\n", __FUNCTION__, vmwdt_interval,
+			"cmd %s\n", __func__, vmwdt_interval,
 			vmwdt_cmd);
 	}
 	return ret;
@@ -107,7 +107,7 @@
 	int ret = __diag288(wdt_cancel, 0, "", 0);
 	if (ret) {
 		printk(KERN_WARNING "%s: problem disabling watchdog\n",
-			__FUNCTION__);
+			__func__);
 	}
 	return ret;
 }
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index f523501..bbbd14e 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -224,7 +224,7 @@
 
 	sa = kmalloc(sizeof(*sa), GFP_KERNEL);
 	if (!sa) {
-		ERROR_MSG("kmalloc failed: %s: %i\n",__FUNCTION__, __LINE__);
+		ERROR_MSG("kmalloc failed: %s: %i\n",__func__, __LINE__);
 		return -ENOMEM;
 	}
 	if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) {
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 007aaeb..5de8690 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -217,6 +217,8 @@
 
 	if (chp_get_status(chpid) <= 0)
 		return;
+	/* Wait until previous actions have settled. */
+	css_wait_for_slow_path();
 	for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &chpid);
 }
 
@@ -303,7 +305,8 @@
 		sprintf(dbf_txt, "fla%x", res_data->fla);
 		CIO_TRACE_EVENT( 2, dbf_txt);
 	}
-
+	/* Wait until previous actions have settled. */
+	css_wait_for_slow_path();
 	/*
 	 * I/O resources may have become accessible.
 	 * Scan through all subchannels that may be concerned and
@@ -561,9 +564,12 @@
 	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
 	CIO_TRACE_EVENT(2, dbf_txt);
 
-	if (chp_get_status(chpid) != 0)
+	if (chp_get_status(chpid) != 0) {
+		/* Wait until previous actions have settled. */
+		css_wait_for_slow_path();
 		for_each_subchannel_staged(__chp_add, __chp_add_new_sch,
 					   &chpid);
+	}
 }
 
 static void __s390_subchannel_vary_chpid(struct subchannel *sch,
@@ -650,6 +656,8 @@
  */
 int chsc_chp_vary(struct chp_id chpid, int on)
 {
+	/* Wait until previous actions have settled. */
+	css_wait_for_slow_path();
 	/*
 	 * Redo PathVerification on the devices the chpid connects to
 	 */
@@ -758,7 +766,6 @@
 	if (!secm_area)
 		return -ENOMEM;
 
-	mutex_lock(&css->mutex);
 	if (enable && !css->cm_enabled) {
 		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
 		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
@@ -766,7 +773,6 @@
 			free_page((unsigned long)css->cub_addr1);
 			free_page((unsigned long)css->cub_addr2);
 			free_page((unsigned long)secm_area);
-			mutex_unlock(&css->mutex);
 			return -ENOMEM;
 		}
 	}
@@ -787,7 +793,6 @@
 		free_page((unsigned long)css->cub_addr1);
 		free_page((unsigned long)css->cub_addr2);
 	}
-	mutex_unlock(&css->mutex);
 	free_page((unsigned long)secm_area);
 	return ret;
 }
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 60590a1..23ffcc4 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -24,6 +24,7 @@
 #include <asm/ipl.h>
 #include <asm/chpid.h>
 #include <asm/airq.h>
+#include <asm/cpu.h>
 #include "cio.h"
 #include "css.h"
 #include "chsc.h"
@@ -649,13 +650,10 @@
 
 	old_regs = set_irq_regs(regs);
 	irq_enter();
-	asm volatile ("mc 0,0");
-	if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer)
-		/**
-		 * Make sure that the i/o interrupt did not "overtake"
-		 * the last HZ timer interrupt.
-		 */
-		account_ticks(S390_lowcore.int_clock);
+	s390_idle_check();
+	if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
+		/* Serve timer interrupts first. */
+		clock_comparator_work();
 	/*
 	 * Get interrupt information from lowcore
 	 */
@@ -672,10 +670,14 @@
 			continue;
 		}
 		sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
-		if (sch)
-			spin_lock(sch->lock);
+		if (!sch) {
+			/* Clear pending interrupt condition. */
+			tsch(tpi_info->schid, irb);
+			continue;
+		}
+		spin_lock(sch->lock);
 		/* Store interrupt response block to lowcore. */
-		if (tsch (tpi_info->schid, irb) == 0 && sch) {
+		if (tsch(tpi_info->schid, irb) == 0) {
 			/* Keep subchannel information word up to date. */
 			memcpy (&sch->schib.scsw, &irb->scsw,
 				sizeof (irb->scsw));
@@ -683,8 +685,7 @@
 			if (sch->driver && sch->driver->irq)
 				sch->driver->irq(sch);
 		}
-		if (sch)
-			spin_unlock(sch->lock);
+		spin_unlock(sch->lock);
 		/*
 		 * Are more interrupts pending?
 		 * If so, the tpi instruction will update the lowcore
@@ -710,8 +711,9 @@
 /*
  * busy wait for the next interrupt on the console
  */
-void
-wait_cons_dev (void)
+void wait_cons_dev(void)
+	__releases(console_subchannel.lock)
+	__acquires(console_subchannel.lock)
 {
 	unsigned long cr6      __attribute__ ((aligned (8)));
 	unsigned long save_cr6 __attribute__ ((aligned (8)));
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 52afa4c..08f2235 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -100,6 +100,7 @@
 
 int cio_create_sch_lock(struct subchannel *);
 void do_adapter_IO(void);
+void do_IRQ(struct pt_regs *);
 
 /* Use with care. */
 #ifdef CONFIG_CCW_CONSOLE
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 3b45bbe..c1afab5 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -533,6 +533,12 @@
 	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 }
 
+void css_wait_for_slow_path(void)
+{
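+	/* Wait for queued device notifications and slow-path evaluations. */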
+	flush_workqueue(ccw_device_notify_work);
+	flush_workqueue(slow_path_wq);
+}
+
 /* Reprobe subchannel if unregistered. */
 static int reprobe_subchannel(struct subchannel_id schid, void *data)
 {
@@ -683,10 +689,14 @@
 		   char *buf)
 {
 	struct channel_subsystem *css = to_css(dev);
+	int ret;
 
 	if (!css)
 		return 0;
-	return sprintf(buf, "%x\n", css->cm_enabled);
+	mutex_lock(&css->mutex);
+	ret = sprintf(buf, "%x\n", css->cm_enabled);
+	mutex_unlock(&css->mutex);
+	return ret;
 }
 
 static ssize_t
@@ -696,6 +706,7 @@
 	struct channel_subsystem *css = to_css(dev);
 	int ret;
 
+	mutex_lock(&css->mutex);
 	switch (buf[0]) {
 	case '0':
 		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
@@ -706,6 +717,7 @@
 	default:
 		ret = -EINVAL;
 	}
+	mutex_unlock(&css->mutex);
 	return ret < 0 ? ret : count;
 }
 
@@ -752,9 +764,11 @@
 		struct channel_subsystem *css;
 
 		css = channel_subsystems[i];
+		mutex_lock(&css->mutex);
 		if (css->cm_enabled)
 			if (chsc_secm(css, 0))
 				ret = NOTIFY_BAD;
+		mutex_unlock(&css->mutex);
 	}
 
 	return ret;
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index b705545..e191351 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -144,6 +144,7 @@
 int css_sch_is_valid(struct schib *);
 
 extern struct workqueue_struct *slow_path_wq;
+void css_wait_for_slow_path(void);
 
 extern struct attribute_group *subch_attr_groups[];
 #endif
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index fec004f..e0c7adb 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -577,7 +577,6 @@
 static DEVICE_ATTR(cutype, 0444, cutype_show, NULL);
 static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
 static DEVICE_ATTR(online, 0644, online_show, online_store);
-extern struct device_attribute dev_attr_cmb_enable;
 static DEVICE_ATTR(availability, 0444, available_show, NULL);
 
 static struct attribute * subch_attrs[] = {
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index d40a2ff..cb08092 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -127,4 +127,5 @@
 void retry_set_schib(struct ccw_device *cdev);
 void cmf_retry_copy_block(struct ccw_device *);
 int cmf_reenable(struct ccw_device *);
+extern struct device_attribute dev_attr_cmb_enable;
 #endif
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 49b58eb..a1718a0 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -193,8 +193,15 @@
 			return -EACCES;
 	}
 	ret = cio_start_key (sch, cpa, lpm, key);
-	if (ret == 0)
+	switch (ret) {
+	case 0:
 		cdev->private->intparm = intparm;
+		break;
+	case -EACCES:
+	case -ENODEV:
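+		/* Starting I/O failed, have the FSM re-verify the paths. */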
+		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
+		break;
+	}
 	return ret;
 }
 
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index ebe0848..4a38993 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -62,7 +62,7 @@
 	stsch (sch->schid, &sch->schib);
 
 	CIO_MSG_EVENT(0, "%s(0.%x.%04x) - path(s) %02x are "
-		      "not operational \n", __FUNCTION__,
+		      "not operational \n", __func__,
 		      sch->schid.ssid, sch->schid.sch_no,
 		      sch->schib.pmcw.pnom);
 
@@ -312,6 +312,7 @@
 {
 	struct subchannel *sch;
 	struct ccw1 *sense_ccw;
+	int rc;
 
 	sch = to_subchannel(cdev->dev.parent);
 
@@ -337,7 +338,10 @@
 	/* Reset internal retry indication. */
 	cdev->private->flags.intretry = 0;
 
-	return cio_start(sch, sense_ccw, 0xff);
+	rc = cio_start(sch, sense_ccw, 0xff);
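+	/* A path error while starting the sense I/O triggers verification. */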
+	if (rc == -ENODEV || rc == -EACCES)
+		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
+	return rc;
 }
 
 /*
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index 2b5bfb7..c359386 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -1399,7 +1399,7 @@
 	 * q->dev_st_chg_ind is the indicator, be it shared or not.
 	 * only clear it, if indicator is non-shared
 	 */
-	if (!spare_ind_was_set)
+	if (q->dev_st_chg_ind != &spare_indicator)
 		tiqdio_clear_summary_bit((__u32*)q->dev_st_chg_ind);
 
 	if (q->hydra_gives_outbound_pcis) {
@@ -2217,9 +2217,78 @@
 	return cc;
 }
 
+static int
+qdio_get_ssqd_information(struct subchannel_id *schid,
+			  struct qdio_chsc_ssqd **ssqd_area)
+{
+	int result;
+
+	QDIO_DBF_TEXT0(0, setup, "getssqd");
+	*ssqd_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC);
+	if (!*ssqd_area) {
+		QDIO_PRINT_WARN("Could not get memory for chsc on sch x%x.\n",
+				schid->sch_no);
+		return -ENOMEM;
+	}
+
+	(*ssqd_area)->request = (struct chsc_header) {
+		.length = 0x0010,
+		.code	= 0x0024,
+	};
+	(*ssqd_area)->first_sch = schid->sch_no;
+	(*ssqd_area)->last_sch = schid->sch_no;
+	(*ssqd_area)->ssid = schid->ssid;
+	result = chsc(*ssqd_area);
+
+	if (result) {
+		QDIO_PRINT_WARN("CHSC returned cc %i on sch 0.%x.%x.\n",
+				result, schid->ssid, schid->sch_no);
+		goto out;
+	}
+
+	if ((*ssqd_area)->response.code != QDIO_CHSC_RESPONSE_CODE_OK) {
+		QDIO_PRINT_WARN("CHSC response is 0x%x on sch 0.%x.%x.\n",
+				(*ssqd_area)->response.code,
+				schid->ssid, schid->sch_no);
+		goto out;
+	}
+	if (!((*ssqd_area)->flags & CHSC_FLAG_QDIO_CAPABILITY) ||
+	    !((*ssqd_area)->flags & CHSC_FLAG_VALIDITY) ||
+	    ((*ssqd_area)->sch != schid->sch_no)) {
+		QDIO_PRINT_WARN("huh? problems checking out sch 0.%x.%x... " \
+				"using all SIGAs.\n",
+				schid->ssid, schid->sch_no);
+		goto out;
+	}
+	return 0;
+out:
+	return -EINVAL;
+}
+
+int
+qdio_get_ssqd_pct(struct ccw_device *cdev)
+{
+	struct qdio_chsc_ssqd *ssqd_area;
+	struct subchannel_id schid;
+	char dbf_text[15];
+	int rc;
+	int pct = 0;
+
+	QDIO_DBF_TEXT0(0, setup, "getpct");
+	schid = ccw_device_get_subchannel_id(cdev);
+	rc = qdio_get_ssqd_information(&schid, &ssqd_area);
+	if (!rc)
+		pct = (int)ssqd_area->pct;
+	if (rc != -ENOMEM)
+		mempool_free(ssqd_area, qdio_mempool_scssc);
+	sprintf(dbf_text, "pct: %d", pct);
+	QDIO_DBF_TEXT2(0, setup, dbf_text);
+	return pct;
+}
+EXPORT_SYMBOL(qdio_get_ssqd_pct);
+
 static void
-qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned char qdioac,
-			    unsigned long token)
+qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned long token)
 {
 	struct qdio_q *q;
 	int i;
@@ -2227,7 +2296,7 @@
 	char dbf_text[15];
 
 	/*check if QEBSM is disabled */
-	if (!(irq_ptr->is_qebsm) || !(qdioac & 0x01)) {
+	if (!(irq_ptr->is_qebsm) || !(irq_ptr->qdioac & 0x01)) {
 		irq_ptr->is_qebsm  = 0;
 		irq_ptr->sch_token = 0;
 		irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
@@ -2256,102 +2325,27 @@
 }
 
 static void
-qdio_get_ssqd_information(struct qdio_irq *irq_ptr)
+qdio_get_ssqd_siga(struct qdio_irq *irq_ptr)
 {
-	int result;
-	unsigned char qdioac;
-	struct {
-		struct chsc_header request;
-		u16 reserved1:10;
-		u16 ssid:2;
-		u16 fmt:4;
-		u16 first_sch;
-		u16 reserved2;
-		u16 last_sch;
-		u32 reserved3;
-		struct chsc_header response;
-		u32 reserved4;
-		u8  flags;
-		u8  reserved5;
-		u16 sch;
-		u8  qfmt;
-		u8  parm;
-		u8  qdioac1;
-		u8  sch_class;
-		u8  reserved7;
-		u8  icnt;
-		u8  reserved8;
-		u8  ocnt;
-		u8 reserved9;
-		u8 mbccnt;
-		u16 qdioac2;
-		u64 sch_token;
-	} *ssqd_area;
+	int rc;
+	struct qdio_chsc_ssqd *ssqd_area;
 
 	QDIO_DBF_TEXT0(0,setup,"getssqd");
-	qdioac = 0;
-	ssqd_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC);
-	if (!ssqd_area) {
-	        QDIO_PRINT_WARN("Could not get memory for chsc. Using all " \
-				"SIGAs for sch x%x.\n", irq_ptr->schid.sch_no);
+	irq_ptr->qdioac = 0;
+	rc = qdio_get_ssqd_information(&irq_ptr->schid, &ssqd_area);
+	if (rc) {
+		QDIO_PRINT_WARN("using all SIGAs for sch x%x.n",
+			irq_ptr->schid.sch_no);
 		irq_ptr->qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
 				  CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
 				  CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
 		irq_ptr->is_qebsm = 0;
-		irq_ptr->sch_token = 0;
-		irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
-		return;
-	}
+	} else
+		irq_ptr->qdioac = ssqd_area->qdioac1;
 
-	ssqd_area->request = (struct chsc_header) {
-		.length = 0x0010,
-		.code   = 0x0024,
-	};
-	ssqd_area->first_sch = irq_ptr->schid.sch_no;
-	ssqd_area->last_sch = irq_ptr->schid.sch_no;
-	ssqd_area->ssid = irq_ptr->schid.ssid;
-	result = chsc(ssqd_area);
-
-	if (result) {
-		QDIO_PRINT_WARN("CHSC returned cc %i. Using all " \
-				"SIGAs for sch 0.%x.%x.\n", result,
-				irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
-		qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
-			CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
-			CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
-		irq_ptr->is_qebsm  = 0;
-		goto out;
-	}
-
-	if (ssqd_area->response.code != QDIO_CHSC_RESPONSE_CODE_OK) {
-		QDIO_PRINT_WARN("response upon checking SIGA needs " \
-				"is 0x%x. Using all SIGAs for sch 0.%x.%x.\n",
-				ssqd_area->response.code,
-				irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
-		qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
-			CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
-			CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
-		irq_ptr->is_qebsm  = 0;
-		goto out;
-	}
-	if (!(ssqd_area->flags & CHSC_FLAG_QDIO_CAPABILITY) ||
-	    !(ssqd_area->flags & CHSC_FLAG_VALIDITY) ||
-	    (ssqd_area->sch != irq_ptr->schid.sch_no)) {
-		QDIO_PRINT_WARN("huh? problems checking out sch 0.%x.%x... " \
-				"using all SIGAs.\n",
-				irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
-		qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
-			CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
-			CHSC_FLAG_SIGA_SYNC_NECESSARY; /* worst case */
-		irq_ptr->is_qebsm  = 0;
-		goto out;
-	}
-	qdioac = ssqd_area->qdioac1;
-out:
-	qdio_check_subchannel_qebsm(irq_ptr, qdioac,
-				    ssqd_area->sch_token);
-	mempool_free(ssqd_area, qdio_mempool_scssc);
-	irq_ptr->qdioac = qdioac;
+	if (rc == -ENOMEM) {
+		/* No ssqd_area was allocated, don't dereference it. */
+		qdio_check_subchannel_qebsm(irq_ptr, 0);
+		return;
+	}
+	qdio_check_subchannel_qebsm(irq_ptr, ssqd_area->sch_token);
+	mempool_free(ssqd_area, qdio_mempool_scssc);
 }
 
 static unsigned int
@@ -3227,7 +3221,7 @@
 		return -EIO;
 	}
 
-	qdio_get_ssqd_information(irq_ptr);
+	qdio_get_ssqd_siga(irq_ptr);
 	/* if this gets set once, we're running under VM and can omit SVSes */
 	if (irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY)
 		omit_svs=1;
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index da8a272..c3df6b2 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -406,6 +406,34 @@
 #define CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS 0x08
 #define CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS 0x04
 
+struct qdio_chsc_ssqd {
+	struct chsc_header request;
+	u16 reserved1:10;
+	u16 ssid:2;
+	u16 fmt:4;
+	u16 first_sch;
+	u16 reserved2;
+	u16 last_sch;
+	u32 reserved3;
+	struct chsc_header response;
+	u32 reserved4;
+	u8  flags;
+	u8  reserved5;
+	u16 sch;
+	u8  qfmt;
+	u8  parm;
+	u8  qdioac1;
+	u8  sch_class;
+	u8  pct;
+	u8  icnt;
+	u8  reserved7;
+	u8  ocnt;
+	u8  reserved8;
+	u8  mbccnt;
+	u16 qdioac2;
+	u64 sch_token;
+};
+
 struct qdio_perf_stats {
 #ifdef CONFIG_64BIT
 	atomic64_t tl_runs;
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 7b0b819..a1ab3e3 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -45,7 +45,7 @@
 static void ap_poll_thread_stop(void);
 static void ap_request_timeout(unsigned long);
 
-/**
+/*
  * Module description.
  */
 MODULE_AUTHOR("IBM Corporation");
@@ -53,7 +53,7 @@
 		   "Copyright 2006 IBM Corporation");
 MODULE_LICENSE("GPL");
 
-/**
+/*
  * Module parameter
  */
 int ap_domain_index = -1;	/* Adjunct Processor Domain Index */
@@ -69,7 +69,7 @@
 static DEFINE_SPINLOCK(ap_device_lock);
 static LIST_HEAD(ap_device_list);
 
-/**
+/*
  * Workqueue & timer for bus rescan.
  */
 static struct workqueue_struct *ap_work_queue;
@@ -77,7 +77,7 @@
 static int ap_config_time = AP_CONFIG_TIME;
 static DECLARE_WORK(ap_config_work, ap_scan_bus);
 
-/**
+/*
  * Tasklet & timer for AP request polling.
  */
 static struct timer_list ap_poll_timer = TIMER_INITIALIZER(ap_poll_timeout,0,0);
@@ -88,9 +88,9 @@
 static DEFINE_MUTEX(ap_poll_thread_mutex);
 
 /**
- * Test if ap instructions are available.
+ * ap_instructions_available() - Test if AP instructions are available.
  *
- * Returns 0 if the ap instructions are installed.
+ * Returns 0 if the AP instructions are installed.
  */
 static inline int ap_instructions_available(void)
 {
@@ -108,12 +108,12 @@
 }
 
 /**
- * Test adjunct processor queue.
- * @qid: the ap queue number
- * @queue_depth: pointer to queue depth value
- * @device_type: pointer to device type value
+ * ap_test_queue(): Test adjunct processor queue.
+ * @qid: The AP queue number
+ * @queue_depth: Pointer to queue depth value
+ * @device_type: Pointer to device type value
  *
- * Returns ap queue status structure.
+ * Returns AP queue status structure.
  */
 static inline struct ap_queue_status
 ap_test_queue(ap_qid_t qid, int *queue_depth, int *device_type)
@@ -130,10 +130,10 @@
 }
 
 /**
- * Reset adjunct processor queue.
- * @qid: the ap queue number
+ * ap_reset_queue(): Reset adjunct processor queue.
+ * @qid: The AP queue number
  *
- * Returns ap queue status structure.
+ * Returns AP queue status structure.
  */
 static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid)
 {
@@ -148,16 +148,14 @@
 }
 
 /**
- * Send message to adjunct processor queue.
- * @qid: the ap queue number
- * @psmid: the program supplied message identifier
- * @msg: the message text
- * @length: the message length
+ * __ap_send(): Send message to adjunct processor queue.
+ * @qid: The AP queue number
+ * @psmid: The program supplied message identifier
+ * @msg: The message text
+ * @length: The message length
  *
- * Returns ap queue status structure.
- *
+ * Returns AP queue status structure.
  * Condition code 1 on NQAP can't happen because the L bit is 1.
- *
  * Condition code 2 on NQAP also means the send is incomplete,
  * because a segment boundary was reached. The NQAP is repeated.
  */
@@ -198,23 +196,20 @@
 }
 EXPORT_SYMBOL(ap_send);
 
-/*
- * Receive message from adjunct processor queue.
- * @qid: the ap queue number
- * @psmid: pointer to program supplied message identifier
- * @msg: the message text
- * @length: the message length
+/**
+ * __ap_recv(): Receive message from adjunct processor queue.
+ * @qid: The AP queue number
+ * @psmid: Pointer to program supplied message identifier
+ * @msg: The message text
+ * @length: The message length
  *
- * Returns ap queue status structure.
- *
+ * Returns AP queue status structure.
  * Condition code 1 on DQAP means the receive has taken place
  * but only partially.	The response is incomplete, hence the
  * DQAP is repeated.
- *
  * Condition code 2 on DQAP also means the receive is incomplete,
  * this time because a segment boundary was reached. Again, the
  * DQAP is repeated.
- *
  * Note that gpr2 is used by the DQAP instruction to keep track of
  * any 'residual' length, in case the instruction gets interrupted.
  * Hence it gets zeroed before the instruction.
@@ -263,11 +258,12 @@
 EXPORT_SYMBOL(ap_recv);
 
 /**
- * Check if an AP queue is available. The test is repeated for
- * AP_MAX_RESET times.
- * @qid: the ap queue number
- * @queue_depth: pointer to queue depth value
- * @device_type: pointer to device type value
+ * ap_query_queue(): Check if an AP queue is available.
+ * @qid: The AP queue number
+ * @queue_depth: Pointer to queue depth value
+ * @device_type: Pointer to device type value
+ *
+ * The test is repeated up to AP_MAX_RESET times.
  */
 static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type)
 {
@@ -308,8 +304,10 @@
 }
 
 /**
+ * ap_init_queue(): Reset an AP queue.
+ * @qid: The AP queue number
+ *
  * Reset an AP queue and wait for it to become available again.
- * @qid: the ap queue number
  */
 static int ap_init_queue(ap_qid_t qid)
 {
@@ -346,7 +344,10 @@
 }
 
 /**
- * Arm request timeout if a AP device was idle and a new request is submitted.
+ * ap_increase_queue_count(): Arm request timeout.
+ * @ap_dev: Pointer to an AP device.
+ *
+ * Arm request timeout if an AP device was idle and a new request is submitted.
  */
 static void ap_increase_queue_count(struct ap_device *ap_dev)
 {
@@ -360,7 +361,10 @@
 }
 
 /**
- * AP device is still alive, re-schedule request timeout if there are still
+ * ap_decrease_queue_count(): Decrease queue count.
+ * @ap_dev: Pointer to an AP device.
+ *
+ * If the AP device is still alive, re-schedule the request timeout if there are still
  * pending requests.
  */
 static void ap_decrease_queue_count(struct ap_device *ap_dev)
@@ -371,7 +375,7 @@
 	if (ap_dev->queue_count > 0)
 		mod_timer(&ap_dev->timeout, jiffies + timeout);
 	else
-		/**
+		/*
 		 * The timeout timer should be disabled now - since
 		 * del_timer_sync() is very expensive, we just tell via the
 		 * reset flag to ignore the pending timeout timer.
@@ -379,7 +383,7 @@
 		ap_dev->reset = AP_RESET_IGNORE;
 }
 
-/**
+/*
  * AP device related attributes.
  */
 static ssize_t ap_hwtype_show(struct device *dev,
@@ -433,6 +437,10 @@
 };
 
 /**
+ * ap_bus_match()
+ * @dev: Pointer to device
+ * @drv: Pointer to device_driver
+ *
  * AP bus driver registration/unregistration.
  */
 static int ap_bus_match(struct device *dev, struct device_driver *drv)
@@ -441,7 +449,7 @@
 	struct ap_driver *ap_drv = to_ap_drv(drv);
 	struct ap_device_id *id;
 
-	/**
+	/*
 	 * Compare device type of the device with the list of
 	 * supported types of the device_driver.
 	 */
@@ -455,8 +463,12 @@
 }
 
 /**
- * uevent function for AP devices. It sets up a single environment
- * variable DEV_TYPE which contains the hardware device type.
+ * ap_uevent(): Uevent function for AP devices.
+ * @dev: Pointer to device
+ * @env: Pointer to kobj_uevent_env
+ *
+ * It sets up a single environment variable DEV_TYPE which contains the
+ * hardware device type.
  */
 static int ap_uevent (struct device *dev, struct kobj_uevent_env *env)
 {
@@ -500,8 +512,10 @@
 }
 
 /**
+ * __ap_flush_queue(): Flush requests.
+ * @ap_dev: Pointer to the AP device
+ *
  * Flush all requests from the request/pending queue of an AP device.
- * @ap_dev: pointer to the AP device.
  */
 static void __ap_flush_queue(struct ap_device *ap_dev)
 {
@@ -565,7 +579,7 @@
 }
 EXPORT_SYMBOL(ap_driver_unregister);
 
-/**
+/*
  * AP bus attributes.
  */
 static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
@@ -630,14 +644,16 @@
 };
 
 /**
- * Pick one of the 16 ap domains.
+ * ap_select_domain(): Select an AP domain.
+ *
+ * Pick one of the 16 AP domains.
  */
 static int ap_select_domain(void)
 {
 	int queue_depth, device_type, count, max_count, best_domain;
 	int rc, i, j;
 
-	/**
+	/*
 	 * We want to use a single domain. Either the one specified with
 	 * the "domain=" parameter or the domain with the maximum number
 	 * of devices.
@@ -669,8 +685,10 @@
 }
 
 /**
- * Find the device type if query queue returned a device type of 0.
+ * ap_probe_device_type(): Find the device type of an AP.
  * @ap_dev: pointer to the AP device.
+ *
+ * Find the device type if query queue returned a device type of 0.
  */
 static int ap_probe_device_type(struct ap_device *ap_dev)
 {
@@ -764,7 +782,11 @@
 }
 
 /**
- * Scan the ap bus for new devices.
+ * __ap_scan_bus(): Scan the AP bus.
+ * @dev: Pointer to device
+ * @data: Pointer to data
+ *
+ * Scan the AP bus for new devices.
  */
 static int __ap_scan_bus(struct device *dev, void *data)
 {
@@ -867,6 +889,8 @@
 }
 
 /**
+ * ap_schedule_poll_timer(): Schedule poll timer.
+ *
  * Set up the timer to run the poll tasklet
  */
 static inline void ap_schedule_poll_timer(void)
@@ -877,10 +901,11 @@
 }
 
 /**
- * Receive pending reply messages from an AP device.
+ * ap_poll_read(): Receive pending reply messages from an AP device.
  * @ap_dev: pointer to the AP device
  * @flags: pointer to control flags, bit 2^0 is set if another poll is
  *	   required, bit 2^1 is set if the poll timer needs to get armed
+ *
  * Returns 0 if the device is still present, -ENODEV if not.
  */
 static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
@@ -925,10 +950,11 @@
 }
 
 /**
- * Send messages from the request queue to an AP device.
+ * ap_poll_write(): Send messages from the request queue to an AP device.
  * @ap_dev: pointer to the AP device
  * @flags: pointer to control flags, bit 2^0 is set if another poll is
  *	   required, bit 2^1 is set if the poll timer needs to get armed
+ *
  * Returns 0 if the device is still present, -ENODEV if not.
  */
 static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
@@ -968,11 +994,13 @@
 }
 
 /**
- * Poll AP device for pending replies and send new messages. If either
- * ap_poll_read or ap_poll_write returns -ENODEV unregister the device.
+ * ap_poll_queue(): Poll AP device for pending replies and send new messages.
  * @ap_dev: pointer to the bus device
  * @flags: pointer to control flags, bit 2^0 is set if another poll is
  *	   required, bit 2^1 is set if the poll timer needs to get armed
+ *
+ * Poll AP device for pending replies and send new messages. If either
+ * ap_poll_read or ap_poll_write returns -ENODEV unregister the device.
  * Returns 0.
  */
 static inline int ap_poll_queue(struct ap_device *ap_dev, unsigned long *flags)
@@ -986,9 +1014,11 @@
 }
 
 /**
- * Queue a message to a device.
+ * __ap_queue_message(): Queue a message to a device.
  * @ap_dev: pointer to the AP device
  * @ap_msg: the message to be queued
+ *
+ * Queue a message to a device. Returns 0 if successful.
  */
 static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
 {
@@ -1055,12 +1085,14 @@
 EXPORT_SYMBOL(ap_queue_message);
 
 /**
+ * ap_cancel_message(): Cancel a crypto request.
+ * @ap_dev: The AP device that has the message queued
+ * @ap_msg: The message that is to be removed
+ *
  * Cancel a crypto request. This is done by removing the request
- * from the devive pendingq or requestq queue. Note that the
+ * from the device pending or request queue. Note that the
  * request stays on the AP queue. When it finishes the message
  * reply will be discarded because the psmid can't be found.
- * @ap_dev: AP device that has the message queued
- * @ap_msg: the message that is to be removed
  */
 void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
 {
@@ -1082,7 +1114,10 @@
 EXPORT_SYMBOL(ap_cancel_message);
 
 /**
- * AP receive polling for finished AP requests
+ * ap_poll_timeout(): AP receive polling for finished AP requests.
+ * @unused: Unused variable.
+ *
+ * Schedules the AP tasklet.
  */
 static void ap_poll_timeout(unsigned long unused)
 {
@@ -1090,6 +1125,9 @@
 }
 
 /**
+ * ap_reset(): Reset a not responding AP device.
+ * @ap_dev: Pointer to the AP device
+ *
  * Reset a not responding AP device and move all requests from the
  * pending queue to the request queue.
  */
@@ -1108,11 +1146,6 @@
 		ap_dev->unregistered = 1;
 }
 
-/**
- * Poll all AP devices on the bus in a round robin fashion. Continue
- * polling until bit 2^0 of the control flags is not set. If bit 2^1
- * of the control flags has been set arm the poll timer.
- */
 static int __ap_poll_all(struct ap_device *ap_dev, unsigned long *flags)
 {
 	spin_lock(&ap_dev->lock);
@@ -1126,6 +1159,14 @@
 	return 0;
 }
 
+/**
+ * ap_poll_all(): Poll all AP devices.
+ * @dummy: Unused variable
+ *
+ * Poll all AP devices on the bus in a round robin fashion. Continue
+ * polling until bit 2^0 of the control flags is not set. If bit 2^1
+ * of the control flags has been set arm the poll timer.
+ */
 static void ap_poll_all(unsigned long dummy)
 {
 	unsigned long flags;
@@ -1144,6 +1185,9 @@
 }
 
 /**
+ * ap_poll_thread(): Thread that polls for finished requests.
+ * @data: Unused pointer
+ *
  * AP bus poll thread. The purpose of this thread is to poll for
  * finished requests in a loop if there is a "free" cpu - that is
  * a cpu that doesn't have anything better to do. The polling stops
@@ -1213,7 +1257,10 @@
 }
 
 /**
- * Handling of request timeouts
+ * ap_request_timeout(): Handling of request timeouts
+ * @data: Holds the AP device.
+ *
+ * Handles request timeouts.
  */
 static void ap_request_timeout(unsigned long data)
 {
@@ -1246,7 +1293,9 @@
 };
 
 /**
- * The module initialization code.
+ * ap_module_init(): The module initialization code.
+ *
+ * Initializes the module.
  */
 int __init ap_module_init(void)
 {
@@ -1288,7 +1337,7 @@
 	if (ap_select_domain() == 0)
 		ap_scan_bus(NULL);
 
-	/* Setup the ap bus rescan timer. */
+	/* Setup the AP bus rescan timer. */
 	init_timer(&ap_config_timer);
 	ap_config_timer.function = ap_config_timeout;
 	ap_config_timer.data = 0;
@@ -1325,7 +1374,9 @@
 }
 
 /**
- * The module termination code
+ * ap_module_exit(): The module termination code
+ *
+ * Terminates the module.
  */
 void ap_module_exit(void)
 {
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 87c2d64..c1e1200 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -50,6 +50,15 @@
 #define AP_QID_QUEUE(_qid) ((_qid) & 15)
 
 /**
+ * struct ap_queue_status - Holds the AP queue status.
+ * @queue_empty: Shows if queue is empty
+ * @replies_waiting: Waiting replies
+ * @queue_full: Is 1 if the queue is full
+ * @pad: A 4 bit pad
+ * @int_enabled: Shows if interrupts are enabled for the AP
+ * @response_code: Holds the 8 bit response code
+ * @pad2: A 16 bit pad
+ *
  * The ap queue status word is returned by all three AP functions
  * (PQAP, NQAP and DQAP).  There's a set of flags in the first
  * byte, followed by a 1 byte response code.
@@ -75,7 +84,7 @@
 #define AP_RESPONSE_NO_FIRST_PART	0x13
 #define AP_RESPONSE_MESSAGE_TOO_BIG	0x15
 
-/**
+/*
  * Known device types
  */
 #define AP_DEVICE_TYPE_PCICC	3
@@ -84,7 +93,7 @@
 #define AP_DEVICE_TYPE_CEX2A	6
 #define AP_DEVICE_TYPE_CEX2C	7
 
-/**
+/*
  * AP reset flag states
  */
 #define AP_RESET_IGNORE	0	/* request timeout will be ignored */
@@ -152,7 +161,7 @@
 	.dev_type=(dt),					\
 	.match_flags=AP_DEVICE_ID_MATCH_DEVICE_TYPE,
 
-/**
+/*
  * Note: don't use ap_send/ap_recv after using ap_queue_message
  * for the first time. Otherwise the ap message queue will get
  * confused.
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index e3625a4..4d36e80 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -36,10 +36,11 @@
 #include <linux/compat.h>
 #include <asm/atomic.h>
 #include <asm/uaccess.h>
+#include <linux/hw_random.h>
 
 #include "zcrypt_api.h"
 
-/**
+/*
  * Module description.
  */
 MODULE_AUTHOR("IBM Corporation");
@@ -52,7 +53,10 @@
 static int zcrypt_device_count = 0;
 static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
 
-/**
+static int zcrypt_rng_device_add(void);
+static void zcrypt_rng_device_remove(void);
+
+/*
  * Device attributes common for all crypto devices.
  */
 static ssize_t zcrypt_type_show(struct device *dev,
@@ -99,6 +103,9 @@
 };
 
 /**
+ * __zcrypt_increase_preference(): Increase preference of a crypto device.
+ * @zdev: Pointer to the crypto device
+ *
  * Move the device towards the head of the device list.
  * Need to be called while holding the zcrypt device list lock.
  * Note: cards with speed_rating of 0 are kept at the end of the list.
@@ -125,6 +132,9 @@
 }
 
 /**
+ * __zcrypt_decrease_preference(): Decrease preference of a crypto device.
+ * @zdev: Pointer to a crypto device.
+ *
  * Move the device towards the tail of the device list.
  * Need to be called while holding the zcrypt device list lock.
  * Note: cards with speed_rating of 0 are kept at the end of the list.
@@ -198,7 +208,10 @@
 EXPORT_SYMBOL(zcrypt_device_free);
 
 /**
- * Register a crypto device.
+ * zcrypt_device_register() - Register a crypto device.
+ * @zdev: Pointer to a crypto device
+ *
+ * Register a crypto device. Returns 0 if successful.
  */
 int zcrypt_device_register(struct zcrypt_device *zdev)
 {
@@ -216,16 +229,37 @@
 	__zcrypt_increase_preference(zdev);
 	zcrypt_device_count++;
 	spin_unlock_bh(&zcrypt_device_lock);
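+	/* Devices that can generate random numbers feed the hwrng interface. */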
+	if (zdev->ops->rng) {
+		rc = zcrypt_rng_device_add();
+		if (rc)
+			goto out_unregister;
+	}
+	return 0;
+
+out_unregister:
+	spin_lock_bh(&zcrypt_device_lock);
+	zcrypt_device_count--;
+	list_del_init(&zdev->list);
+	spin_unlock_bh(&zcrypt_device_lock);
+	sysfs_remove_group(&zdev->ap_dev->device.kobj,
+			   &zcrypt_device_attr_group);
+	put_device(&zdev->ap_dev->device);
+	zcrypt_device_put(zdev);
 out:
 	return rc;
 }
 EXPORT_SYMBOL(zcrypt_device_register);
 
 /**
+ * zcrypt_device_unregister(): Unregister a crypto device.
+ * @zdev: Pointer to crypto device
+ *
  * Unregister a crypto device.
  */
 void zcrypt_device_unregister(struct zcrypt_device *zdev)
 {
+	if (zdev->ops->rng)
+		zcrypt_rng_device_remove();
 	spin_lock_bh(&zcrypt_device_lock);
 	zcrypt_device_count--;
 	list_del_init(&zdev->list);
@@ -238,7 +272,9 @@
 EXPORT_SYMBOL(zcrypt_device_unregister);
 
 /**
- * zcrypt_read is not be supported beyond zcrypt 1.3.1
+ * zcrypt_read(): Not supported beyond zcrypt 1.3.1.
+ *
+ * This function is not supported beyond zcrypt 1.3.1.
  */
 static ssize_t zcrypt_read(struct file *filp, char __user *buf,
 			   size_t count, loff_t *f_pos)
@@ -247,6 +283,8 @@
 }
 
 /**
+ * zcrypt_write(): Not allowed.
+ *
 * Write is not allowed
  */
 static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
@@ -256,7 +294,9 @@
 }
 
 /**
- * Device open/close functions to count number of users.
+ * zcrypt_open(): Count number of users.
+ *
+ * Device open function to count number of users.
  */
 static int zcrypt_open(struct inode *inode, struct file *filp)
 {
@@ -264,13 +304,18 @@
 	return 0;
 }
 
+/**
+ * zcrypt_release(): Count number of users.
+ *
+ * Device close function to count number of users.
+ */
 static int zcrypt_release(struct inode *inode, struct file *filp)
 {
 	atomic_dec(&zcrypt_open_count);
 	return 0;
 }
 
-/**
+/*
  * zcrypt ioctls.
  */
 static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
@@ -280,7 +325,7 @@
 
 	if (mex->outputdatalength < mex->inputdatalength)
 		return -EINVAL;
-	/**
+	/*
 	 * As long as outputdatalength is big enough, we can set the
 	 * outputdatalength equal to the inputdatalength, since that is the
 	 * number of bytes we will copy in any case
@@ -326,7 +371,7 @@
 	if (crt->outputdatalength < crt->inputdatalength ||
 	    (crt->inputdatalength & 1))
 		return -EINVAL;
-	/**
+	/*
 	 * As long as outputdatalength is big enough, we can set the
 	 * outputdatalength equal to the inputdatalength, since that is the
 	 * number of bytes we will copy in any case
@@ -343,7 +388,7 @@
 		    zdev->max_mod_size < crt->inputdatalength)
 			continue;
 		if (zdev->short_crt && crt->inputdatalength > 240) {
-			/**
+			/*
 			 * Check inputdata for leading zeros for cards
 			 * that can't handle np_prime, bp_key, or
 			 * u_mult_inv > 128 bytes.
@@ -359,7 +404,7 @@
 				    copy_from_user(&z3, crt->u_mult_inv, len))
 					return -EFAULT;
 				copied = 1;
-				/**
+				/*
 				 * We have to restart device lookup -
 				 * the device list may have changed by now.
 				 */
@@ -427,6 +472,37 @@
 	return -ENODEV;
 }
 
+static long zcrypt_rng(char *buffer)
+{
+	struct zcrypt_device *zdev;
+	int rc;
+
+	spin_lock_bh(&zcrypt_device_lock);
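+	/* Use the first online device that implements the rng operation. */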
+	list_for_each_entry(zdev, &zcrypt_device_list, list) {
+		if (!zdev->online || !zdev->ops->rng)
+			continue;
+		zcrypt_device_get(zdev);
+		get_device(&zdev->ap_dev->device);
+		zdev->request_count++;
+		__zcrypt_decrease_preference(zdev);
+		if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
+			spin_unlock_bh(&zcrypt_device_lock);
+			rc = zdev->ops->rng(zdev, buffer);
+			spin_lock_bh(&zcrypt_device_lock);
+			module_put(zdev->ap_dev->drv->driver.owner);
+		} else
+			rc = -EAGAIN;
+		zdev->request_count--;
+		__zcrypt_increase_preference(zdev);
+		put_device(&zdev->ap_dev->device);
+		zcrypt_device_put(zdev);
+		spin_unlock_bh(&zcrypt_device_lock);
+		return rc;
+	}
+	spin_unlock_bh(&zcrypt_device_lock);
+	return -ENODEV;
+}
+
 static void zcrypt_status_mask(char status[AP_DEVICES])
 {
 	struct zcrypt_device *zdev;
@@ -514,6 +590,8 @@
 }
 
 /**
+ * zcrypt_ica_status(): Old, deprecated combi status call.
+ *
  * Old, deprecated combi status call.
  */
 static long zcrypt_ica_status(struct file *filp, unsigned long arg)
@@ -615,7 +693,7 @@
 				(int __user *) arg);
 	case Z90STAT_DOMAIN_INDEX:
 		return put_user(ap_domain_index, (int __user *) arg);
-	/**
+	/*
 	 * Deprecated ioctls. Don't add another device count ioctl,
 	 * you can count them yourself in the user space with the
 	 * output of the Z90STAT_STATUS_MASK ioctl.
@@ -653,7 +731,7 @@
 }
 
 #ifdef CONFIG_COMPAT
-/**
+/*
  * ioctl32 conversion routines
  */
 struct compat_ica_rsa_modexpo {
@@ -804,7 +882,7 @@
 }
 #endif
 
-/**
+/*
  * Misc device file operations.
  */
 static const struct file_operations zcrypt_fops = {
@@ -819,7 +897,7 @@
 	.release	= zcrypt_release
 };
 
-/**
+/*
  * Misc device.
  */
 static struct miscdevice zcrypt_misc_device = {
@@ -828,7 +906,7 @@
 	.fops	    = &zcrypt_fops,
 };
 
-/**
+/*
  * Deprecated /proc entry support.
  */
 static struct proc_dir_entry *zcrypt_entry;
@@ -1022,7 +1100,7 @@
 	}
 
 	for (j = 0; j < 64 && *ptr; ptr++) {
-		/**
+		/*
 		 * '0' for no device, '1' for PCICA, '2' for PCICC,
 		 * '3' for PCIXCC_MCL2, '4' for PCIXCC_MCL3,
 		 * '5' for CEX2C and '6' for CEX2A'
@@ -1041,7 +1119,76 @@
 	return count;
 }
 
+static int zcrypt_rng_device_count;
+static u32 *zcrypt_rng_buffer;
+static int zcrypt_rng_buffer_index;
+static DEFINE_MUTEX(zcrypt_rng_mutex);
+
+static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
+{
+	int rc;
+
+	/*
+	 * We don't need locking here because the RNG API guarantees serialized
+	 * read method calls.
+	 */
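+	/* Buffer is empty: refill it with random data from a crypto card. */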
+	if (zcrypt_rng_buffer_index == 0) {
+		rc = zcrypt_rng((char *) zcrypt_rng_buffer);
+		if (rc < 0)
+			return -EIO;
+		zcrypt_rng_buffer_index = rc / sizeof *data;
+	}
+	*data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index];
+	return sizeof *data;
+}
+
+static struct hwrng zcrypt_rng_dev = {
+	.name		= "zcrypt",
+	.data_read	= zcrypt_rng_data_read,
+};
+
+static int zcrypt_rng_device_add(void)
+{
+	int rc = 0;
+
+	mutex_lock(&zcrypt_rng_mutex);
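+	/* Only the first user registers the hwrng device; others share it. */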
+	if (zcrypt_rng_device_count == 0) {
+		zcrypt_rng_buffer = (u32 *) get_zeroed_page(GFP_KERNEL);
+		if (!zcrypt_rng_buffer) {
+			rc = -ENOMEM;
+			goto out;
+		}
+		zcrypt_rng_buffer_index = 0;
+		rc = hwrng_register(&zcrypt_rng_dev);
+		if (rc)
+			goto out_free;
+		zcrypt_rng_device_count = 1;
+	} else
+		zcrypt_rng_device_count++;
+	mutex_unlock(&zcrypt_rng_mutex);
+	return 0;
+
+out_free:
+	free_page((unsigned long) zcrypt_rng_buffer);
+out:
+	mutex_unlock(&zcrypt_rng_mutex);
+	return rc;
+}
+
+static void zcrypt_rng_device_remove(void)
+{
+	mutex_lock(&zcrypt_rng_mutex);
+	zcrypt_rng_device_count--;
+	if (zcrypt_rng_device_count == 0) {
+		hwrng_unregister(&zcrypt_rng_dev);
+		free_page((unsigned long) zcrypt_rng_buffer);
+	}
+	mutex_unlock(&zcrypt_rng_mutex);
+}
+
 /**
+ * zcrypt_api_init(): Module initialization.
+ *
  * The module initialization code.
  */
 int __init zcrypt_api_init(void)
@@ -1076,6 +1223,8 @@
 }
 
 /**
+ * zcrypt_api_exit(): Module termination.
+ *
  * The module termination code.
  */
 void zcrypt_api_exit(void)
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index de4877e..5c6e222 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -43,17 +43,17 @@
 #define DEV_NAME	"zcrypt"
 
 #define PRINTK(fmt, args...) \
-	printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
+	printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __func__ , ## args)
 #define PRINTKN(fmt, args...) \
 	printk(KERN_DEBUG DEV_NAME ": " fmt, ## args)
 #define PRINTKW(fmt, args...) \
-	printk(KERN_WARNING DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
+	printk(KERN_WARNING DEV_NAME ": %s -> " fmt, __func__ , ## args)
 #define PRINTKC(fmt, args...) \
-	printk(KERN_CRIT DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
+	printk(KERN_CRIT DEV_NAME ": %s -> " fmt, __func__ , ## args)
 
 #ifdef ZCRYPT_DEBUG
 #define PDEBUG(fmt, args...) \
-	printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
+	printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __func__ , ## args)
 #else
 #define PDEBUG(fmt, args...) do {} while (0)
 #endif
@@ -100,6 +100,13 @@
 #define ZCRYPT_CEX2C		5
 #define ZCRYPT_CEX2A		6
 
+/*
+ * Large random numbers are pulled in 4096 byte chunks from the crypto cards
+ * and stored in a page. Be careful when increasing this buffer due to size
+ * limitations for AP requests.
+ */
+#define ZCRYPT_RNG_BUFFER_SIZE	4096
+
 struct zcrypt_device;
 
 struct zcrypt_ops {
@@ -107,6 +114,7 @@
 	long (*rsa_modexpo_crt)(struct zcrypt_device *,
 				struct ica_rsa_modexpo_crt *);
 	long (*send_cprb)(struct zcrypt_device *, struct ica_xcRB *);
+	long (*rng)(struct zcrypt_device *, char *);
 };
 
 struct zcrypt_device {
diff --git a/drivers/s390/crypto/zcrypt_cca_key.h b/drivers/s390/crypto/zcrypt_cca_key.h
index 8dbcf0e..ed82f2f 100644
--- a/drivers/s390/crypto/zcrypt_cca_key.h
+++ b/drivers/s390/crypto/zcrypt_cca_key.h
@@ -174,7 +174,7 @@
 	key->pvtMeHdr = static_pvt_me_hdr;
 	key->pvtMeSec = static_pvt_me_sec;
 	key->pubMeSec = static_pub_me_sec;
-	/**
+	/*
 	 * In a private key, the modulus doesn't appear in the public
 	 * section. So, an arbitrary public exponent of 0x010001 will be
 	 * used.
@@ -338,7 +338,7 @@
 	pub = (struct cca_public_sec *)(key->key_parts + key_len);
 	*pub = static_cca_pub_sec;
 	pub->modulus_bit_len = 8 * crt->inputdatalength;
-	/**
+	/*
 	 * In a private key, the modulus doesn't appear in the public
 	 * section. So, an arbitrary public exponent of 0x010001 will be
 	 * used.
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
index 2cb616b..3e27fe7 100644
--- a/drivers/s390/crypto/zcrypt_error.h
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -108,7 +108,7 @@
 		return -EINVAL;
 	case REP82_ERROR_MESSAGE_TYPE:
 	//   REP88_ERROR_MESSAGE_TYPE		// '20' CEX2A
-		/**
+		/*
 		 * To sent a message of the wrong type is a bug in the
 		 * device driver. Warn about it, disable the device
 		 * and then repeat the request.
diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c
index d6d59bf..17ea56c 100644
--- a/drivers/s390/crypto/zcrypt_pcicc.c
+++ b/drivers/s390/crypto/zcrypt_pcicc.c
@@ -42,7 +42,7 @@
 #define PCICC_MAX_MOD_SIZE_OLD	128	/* 1024 bits */
 #define PCICC_MAX_MOD_SIZE	256	/* 2048 bits */
 
-/**
+/*
  * PCICC cards need a speed rating of 0. This keeps them at the end of
  * the zcrypt device list (see zcrypt_api.c). PCICC cards are only
  * used if no other cards are present because they are slow and can only
@@ -388,7 +388,7 @@
 	reply_len = le16_to_cpu(msg->length) - 2;
 	if (reply_len > outputdatalength)
 		return -EINVAL;
-	/**
+	/*
 	 * For all encipher requests, the length of the ciphertext (reply_len)
 	 * will always equal the modulus length. For MEX decipher requests
 	 * the output needs to get padded. Minimum pad size is 10.
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index 70b9ddc..0bc9b31 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -356,6 +356,55 @@
 }
 
 /**
+ * rng_type6CPRB_msgX(): Prepare a type6 CPRB message for random number
+ * generation.
+ *
+ * @ap_dev: AP device pointer
+ * @ap_msg: pointer to AP message
+ * @random_number_length: Requested length of the random data
+ */
+static void rng_type6CPRB_msgX(struct ap_device *ap_dev,
+			       struct ap_message *ap_msg,
+			       unsigned random_number_length)
+{
+	struct {
+		struct type6_hdr hdr;
+		struct CPRBX cprbx;
+		char function_code[2];
+		short int rule_length;
+		char rule[8];
+		short int verb_length;
+		short int key_length;
+	} __attribute__((packed)) *msg = ap_msg->message;
+	static struct type6_hdr static_type6_hdrX = {
+		.type		= 0x06,
+		.offset1	= 0x00000058,
+		.agent_id	= {'C', 'A'},
+		.function_code	= {'R', 'L'},
+		.ToCardLen1	= sizeof *msg - sizeof(msg->hdr),
+		.FromCardLen1	= sizeof *msg - sizeof(msg->hdr),
+	};
+	static struct CPRBX static_cprbx = {
+		.cprb_len	= 0x00dc,
+		.cprb_ver_id	= 0x02,
+		.func_id	= {0x54, 0x32},
+		.req_parml	= sizeof *msg - sizeof(msg->hdr) -
+				  sizeof(msg->cprbx),
+		.rpl_msgbl	= sizeof *msg - sizeof(msg->hdr),
+	};
+
+	msg->hdr = static_type6_hdrX;
+	msg->hdr.FromCardLen2 = random_number_length;
+	msg->cprbx = static_cprbx;
+	msg->cprbx.rpl_datal = random_number_length;
+	msg->cprbx.domain = AP_QID_QUEUE(ap_dev->qid);
+	memcpy(msg->function_code, msg->hdr.function_code, 0x02);
+	msg->rule_length = 0x0a;
+	memcpy(msg->rule, "RANDOM  ", 8);
+	msg->verb_length = 0x02;
+	msg->key_length = 0x02;
+	ap_msg->length = sizeof *msg;
+}
+
+/**
  * Copy results from a type 86 ICA reply message back to user space.
  *
  * @zdev: crypto device pointer
@@ -452,7 +501,7 @@
 	reply_len = msg->length - 2;
 	if (reply_len > outputdatalength)
 		return -EINVAL;
-	/**
+	/*
 	 * For all encipher requests, the length of the ciphertext (reply_len)
 	 * will always equal the modulus length. For MEX decipher requests
 	 * the output needs to get padded. Minimum pad size is 10.
@@ -509,6 +558,26 @@
 	return 0;
 }
 
+static int convert_type86_rng(struct zcrypt_device *zdev,
+			  struct ap_message *reply,
+			  char *buffer)
+{
+	struct {
+		struct type86_hdr hdr;
+		struct type86_fmt2_ext fmt2;
+		struct CPRBX cprbx;
+	} __attribute__((packed)) *msg = reply->message;
+	char *data = reply->message;
+
+	if (msg->cprbx.ccp_rtcode != 0 || msg->cprbx.ccp_rscode != 0) {
+		PDEBUG("RNG response error on PCIXCC/CEX2C rc=%hu/rs=%hu\n",
+		       msg->cprbx.ccp_rtcode, msg->cprbx.ccp_rscode);
+		return -EINVAL;
+	}
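+	/* The random data follows the reply CPRB at offset2, count2 bytes. */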
+	memcpy(buffer, data + msg->fmt2.offset2, msg->fmt2.count2);
+	return msg->fmt2.count2;
+}
+
 static int convert_response_ica(struct zcrypt_device *zdev,
 			    struct ap_message *reply,
 			    char __user *outputdata,
@@ -567,6 +636,31 @@
 	}
 }
 
+static int convert_response_rng(struct zcrypt_device *zdev,
+				 struct ap_message *reply,
+				 char *data)
+{
+	struct type86x_reply *msg = reply->message;
+
+	switch (msg->hdr.type) {
+	case TYPE82_RSP_CODE:
+	case TYPE88_RSP_CODE:
+		return -EINVAL;
+	case TYPE86_RSP_CODE:
+		if (msg->hdr.reply_code)
+			return -EINVAL;
+		if (msg->cprbx.cprb_ver_id == 0x02)
+			return convert_type86_rng(zdev, reply, data);
+		/* no break, incorrect cprb version is an unknown response */
+	default: /* Unknown response type, this should NEVER EVER happen */
+		PRINTK("Unrecognized Message Header: %08x%08x\n",
+		       *(unsigned int *) reply->message,
+		       *(unsigned int *) (reply->message+4));
+		zdev->online = 0;
+		return -EAGAIN;	/* repeat the request on a different device. */
+	}
+}
+
 /**
  * This function is called from the AP bus code after a crypto request
  * "msg" has finished with the reply message "reply".
@@ -736,6 +830,42 @@
 }
 
 /**
+ * The request distributor calls this function if it picked the PCIXCC/CEX2C
+ * device to generate random data.
+ * @zdev: pointer to zcrypt_device structure that identifies the
+ *	  PCIXCC/CEX2C device to the request distributor
+ * @buffer: pointer to a memory page to return random data
+ */
+static long zcrypt_pcixcc_rng(struct zcrypt_device *zdev,
+				    char *buffer)
+{
+	struct ap_message ap_msg;
+	struct response_type resp_type = {
+		.type = PCIXCC_RESPONSE_TYPE_XCRB,
+	};
+	int rc;
+
+	ap_msg.message = kmalloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE, GFP_KERNEL);
+	if (!ap_msg.message)
+		return -ENOMEM;
+	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
+				atomic_inc_return(&zcrypt_step);
+	ap_msg.private = &resp_type;
+	rng_type6CPRB_msgX(zdev->ap_dev, &ap_msg, ZCRYPT_RNG_BUFFER_SIZE);
+	init_completion(&resp_type.work);
+	ap_queue_message(zdev->ap_dev, &ap_msg);
+	rc = wait_for_completion_interruptible(&resp_type.work);
+	if (rc == 0)
+		rc = convert_response_rng(zdev, &ap_msg, buffer);
+	else
+		/* Signal pending. */
+		ap_cancel_message(zdev->ap_dev, &ap_msg);
+	kfree(ap_msg.message);
+	return rc;
+}
+
+/**
  * The crypto operations for a PCIXCC/CEX2C card.
  */
 static struct zcrypt_ops zcrypt_pcixcc_ops = {
@@ -744,6 +874,13 @@
 	.send_cprb = zcrypt_pcixcc_send_cprb,
 };
 
+static struct zcrypt_ops zcrypt_pcixcc_with_rng_ops = {
+	.rsa_modexpo = zcrypt_pcixcc_modexpo,
+	.rsa_modexpo_crt = zcrypt_pcixcc_modexpo_crt,
+	.send_cprb = zcrypt_pcixcc_send_cprb,
+	.rng = zcrypt_pcixcc_rng,
+};
+
 /**
  * Micro-code detection function. It sends a message to a pcixcc card
  * to find out the microcode level.
@@ -859,6 +996,58 @@
 }
 
 /**
+ * Large random number detection function. It sends a message to a pcixcc
+ * card to find out if large random numbers are supported.
+ * @ap_dev: pointer to the AP device.
+ *
+ * Returns 1 if large random numbers are supported, 0 if not and < 0 on error.
+ */
+static int zcrypt_pcixcc_rng_supported(struct ap_device *ap_dev)
+{
+	struct ap_message ap_msg;
+	unsigned long long psmid;
+	struct {
+		struct type86_hdr hdr;
+		struct type86_fmt2_ext fmt2;
+		struct CPRBX cprbx;
+	} __attribute__((packed)) *reply;
+	int rc, i;
+
+	ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
+	if (!ap_msg.message)
+		return -ENOMEM;
+
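+	/* Build a minimal (4 byte) random number request as a probe. */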
+	rng_type6CPRB_msgX(ap_dev, &ap_msg, 4);
+	rc = ap_send(ap_dev->qid, 0x0102030405060708ULL, ap_msg.message,
+		     ap_msg.length);
+	if (rc)
+		goto out_free;
+
+	/* Wait for the test message to complete. */
+	for (i = 0; i < 2 * HZ; i++) {
+		msleep(1000 / HZ);
+		rc = ap_recv(ap_dev->qid, &psmid, ap_msg.message, 4096);
+		if (rc == 0 && psmid == 0x0102030405060708ULL)
+			break;
+	}
+
+	if (i >= 2 * HZ) {
+		/* Got no answer. */
+		rc = -ENODEV;
+		goto out_free;
+	}
+
+	reply = ap_msg.message;
+	if (reply->cprbx.ccp_rtcode == 0 && reply->cprbx.ccp_rscode == 0)
+		rc = 1;
+	else
+		rc = 0;
+out_free:
+	free_page((unsigned long) ap_msg.message);
+	return rc;
+}
+
+/**
  * Probe function for PCIXCC/CEX2C cards. It always accepts the AP device
  * since the bus_match already checked the hardware type. The PCIXCC
  * cards come in two flavours: micro code level 2 and micro code level 3.
@@ -874,7 +1063,6 @@
 	if (!zdev)
 		return -ENOMEM;
 	zdev->ap_dev = ap_dev;
-	zdev->ops = &zcrypt_pcixcc_ops;
 	zdev->online = 1;
 	if (ap_dev->device_type == AP_DEVICE_TYPE_PCIXCC) {
 		rc = zcrypt_pcixcc_mcl(ap_dev);
@@ -901,6 +1089,15 @@
 		zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
 		zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
 	}
+	rc = zcrypt_pcixcc_rng_supported(ap_dev);
+	if (rc < 0) {
+		zcrypt_device_free(zdev);
+		return rc;
+	}
+	if (rc)
+		zdev->ops = &zcrypt_pcixcc_with_rng_ops;
+	else
+		zdev->ops = &zcrypt_pcixcc_ops;
 	ap_dev->reply = &zdev->reply;
 	ap_dev->private = zdev;
 	rc = zcrypt_device_register(zdev);
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index d8a5c22..04a1d7b 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -299,7 +299,7 @@
 	struct claw_privbk *privptr=NULL;
 
 #ifdef FUNCTRACE
-	printk(KERN_INFO "%s Enter\n",__FUNCTION__);
+	printk(KERN_INFO "%s Enter\n",__func__);
 #endif
 	CLAW_DBF_TEXT(2,setup,"probe");
 	if (!get_device(&cgdev->dev))
@@ -313,7 +313,7 @@
 		probe_error(cgdev);
 		put_device(&cgdev->dev);
 		printk(KERN_WARNING "Out of memory %s %s Exit Line %d \n",
-			cgdev->cdev[0]->dev.bus_id,__FUNCTION__,__LINE__);
+			cgdev->cdev[0]->dev.bus_id,__func__,__LINE__);
 		CLAW_DBF_TEXT_(2,setup,"probex%d",-ENOMEM);
 		return -ENOMEM;
 	}
@@ -323,7 +323,7 @@
                 probe_error(cgdev);
 		put_device(&cgdev->dev);
 		printk(KERN_WARNING "Out of memory %s %s Exit Line %d \n",
-			cgdev->cdev[0]->dev.bus_id,__FUNCTION__,__LINE__);
+			cgdev->cdev[0]->dev.bus_id,__func__,__LINE__);
 		CLAW_DBF_TEXT_(2,setup,"probex%d",-ENOMEM);
                 return -ENOMEM;
         }
@@ -340,7 +340,7 @@
 		probe_error(cgdev);
 		put_device(&cgdev->dev);
 		printk(KERN_WARNING "add_files failed %s %s Exit Line %d \n",
-			cgdev->cdev[0]->dev.bus_id,__FUNCTION__,__LINE__);
+			cgdev->cdev[0]->dev.bus_id,__func__,__LINE__);
 		CLAW_DBF_TEXT_(2,setup,"probex%d",rc);
 		return rc;
 	}
@@ -351,7 +351,7 @@
 	cgdev->dev.driver_data = privptr;
 #ifdef FUNCTRACE
         printk(KERN_INFO "claw:%s exit on line %d, "
-		"rc = 0\n",__FUNCTION__,__LINE__);
+		"rc = 0\n",__func__,__LINE__);
 #endif
 	CLAW_DBF_TEXT(2,setup,"prbext 0");
 
@@ -371,7 +371,7 @@
         struct chbk *p_ch;
 
 #ifdef FUNCTRACE
-        printk(KERN_INFO "%s:%s enter\n",dev->name,__FUNCTION__);
+	printk(KERN_INFO "%s:%s enter\n",dev->name,__func__);
 #endif
 	CLAW_DBF_TEXT(4,trace,"claw_tx");
         p_ch=&privptr->channel[WRITE];
@@ -381,7 +381,7 @@
                 privptr->stats.tx_dropped++;
 #ifdef FUNCTRACE
                 printk(KERN_INFO "%s: %s() exit on line %d, rc = EIO\n",
-			dev->name,__FUNCTION__, __LINE__);
+			dev->name,__func__, __LINE__);
 #endif
 		CLAW_DBF_TEXT_(2,trace,"clawtx%d",-EIO);
                 return -EIO;
@@ -398,7 +398,7 @@
         spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
 #ifdef FUNCTRACE
         printk(KERN_INFO "%s:%s exit on line %d, rc = %d\n",
-		dev->name, __FUNCTION__, __LINE__, rc);
+		dev->name, __func__, __LINE__, rc);
 #endif
 	CLAW_DBF_TEXT_(4,trace,"clawtx%d",rc);
         return rc;
@@ -460,7 +460,7 @@
 #ifdef IOTRACE
 		printk(KERN_INFO "%s: %s() Packed %d len %d\n",
 			p_env->ndev->name,
-			__FUNCTION__,pkt_cnt,new_skb->len);
+			__func__,pkt_cnt,new_skb->len);
 #endif
 	}
 	CLAW_DBF_TEXT(4,trace,"PackSKBx");
@@ -478,7 +478,7 @@
 	struct claw_privbk  *privptr=dev->priv;
 	int buff_size;
 #ifdef FUNCTRACE
-        printk(KERN_INFO "%s:%s Enter  \n",dev->name,__FUNCTION__);
+	printk(KERN_INFO "%s:%s Enter  \n",dev->name,__func__);
 #endif
 #ifdef DEBUGMSG
         printk(KERN_INFO "variable dev =\n");
@@ -491,14 +491,14 @@
 #ifdef FUNCTRACE
                 printk(KERN_INFO "%s:%s Exit on line %d, rc=EINVAL\n",
 		dev->name,
-		__FUNCTION__, __LINE__);
+		__func__, __LINE__);
 #endif
                 return -EINVAL;
         }
         dev->mtu = new_mtu;
 #ifdef FUNCTRACE
         printk(KERN_INFO "%s:%s Exit on line %d\n",dev->name,
-	__FUNCTION__, __LINE__);
+	__func__, __LINE__);
 #endif
         return 0;
 }  /*   end of claw_change_mtu */
@@ -522,7 +522,7 @@
         struct ccwbk *p_buf;
 
 #ifdef FUNCTRACE
-        printk(KERN_INFO "%s:%s Enter  \n",dev->name,__FUNCTION__);
+	printk(KERN_INFO "%s:%s Enter  \n",dev->name,__func__);
 #endif
 	CLAW_DBF_TEXT(4,trace,"open");
 	if (!dev || (dev->name[0] == 0x00)) {
@@ -537,7 +537,7 @@
         	if (rc) {
                 	printk(KERN_INFO "%s:%s Exit on line %d, rc=ENOMEM\n",
 			dev->name,
-			__FUNCTION__, __LINE__);
+			__func__, __LINE__);
 			CLAW_DBF_TEXT(2,trace,"openmem");
                 	return -ENOMEM;
         	}
@@ -661,7 +661,7 @@
                 claw_clear_busy(dev);
 #ifdef FUNCTRACE
                 printk(KERN_INFO "%s:%s Exit on line %d, rc=EIO\n",
-		dev->name,__FUNCTION__,__LINE__);
+		dev->name,__func__,__LINE__);
 #endif
 		CLAW_DBF_TEXT(2,trace,"open EIO");
                 return -EIO;
@@ -673,7 +673,7 @@
 
 #ifdef FUNCTRACE
         printk(KERN_INFO "%s:%s Exit on line %d, rc=0\n",
-		dev->name,__FUNCTION__,__LINE__);
+		dev->name,__func__,__LINE__);
 #endif
 	CLAW_DBF_TEXT(4,trace,"openok");
         return 0;
@@ -696,7 +696,7 @@
 
 
 #ifdef FUNCTRACE
-        printk(KERN_INFO "%s enter  \n",__FUNCTION__);
+	printk(KERN_INFO "%s enter  \n",__func__);
 #endif
 	CLAW_DBF_TEXT(4,trace,"clawirq");
         /* Bypass all 'unsolicited interrupts' */
@@ -706,7 +706,7 @@
                         cdev->dev.bus_id,irb->scsw.cstat, irb->scsw.dstat);
 #ifdef FUNCTRACE
                 printk(KERN_INFO "claw: %s() "
-			"exit on line %d\n",__FUNCTION__,__LINE__);
+			"exit on line %d\n",__func__,__LINE__);
 #endif
 		CLAW_DBF_TEXT(2,trace,"badirq");
                 return;
@@ -752,7 +752,7 @@
 #endif
 #ifdef FUNCTRACE
 		printk(KERN_INFO "%s:%s Exit on line %d\n",
-		dev->name,__FUNCTION__,__LINE__);
+		dev->name,__func__,__LINE__);
 #endif
 		CLAW_DBF_TEXT(2,trace,"chanchk");
                 /* return; */
@@ -777,7 +777,7 @@
 	     		(SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
 #ifdef FUNCTRACE
                                 printk(KERN_INFO "%s:%s Exit on line %d\n",
-					dev->name,__FUNCTION__,__LINE__);
+					dev->name,__func__,__LINE__);
 #endif
                                 return;
                         }
@@ -788,7 +788,7 @@
 #endif
 #ifdef FUNCTRACE
                         printk(KERN_INFO "%s:%s Exit on line %d\n",
-				dev->name,__FUNCTION__,__LINE__);
+				dev->name,__func__,__LINE__);
 #endif
 			CLAW_DBF_TEXT(4,trace,"stop");
                         return;
@@ -804,7 +804,7 @@
 	     		(SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
 #ifdef FUNCTRACE
 				printk(KERN_INFO "%s:%s Exit on line %d\n",
-					dev->name,__FUNCTION__,__LINE__);
+					dev->name,__func__,__LINE__);
 #endif
 				CLAW_DBF_TEXT(4,trace,"haltio");
                                 return;
@@ -838,7 +838,7 @@
 #endif
 #ifdef FUNCTRACE
                         printk(KERN_INFO "%s:%s Exit on line %d\n",
-				dev->name,__FUNCTION__,__LINE__);
+				dev->name,__func__,__LINE__);
 #endif
 			CLAW_DBF_TEXT(4,trace,"haltio");
                         return;
@@ -858,7 +858,7 @@
                                 }
 #ifdef FUNCTRACE
 				printk(KERN_INFO "%s:%s Exit on line %d\n",
-					dev->name,__FUNCTION__,__LINE__);
+					dev->name,__func__,__LINE__);
 #endif
 					CLAW_DBF_TEXT(4,trace,"notrdy");
                                         return;
@@ -874,7 +874,7 @@
 				}
 #ifdef FUNCTRACE
 				printk(KERN_INFO "%s:%s Exit on line %d\n",
-					dev->name,__FUNCTION__,__LINE__);
+					dev->name,__func__,__LINE__);
 #endif
 				CLAW_DBF_TEXT(4,trace,"PCI_read");
                                 return;
@@ -885,7 +885,7 @@
 	     		 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
 #ifdef FUNCTRACE
 				printk(KERN_INFO "%s:%s Exit on line %d\n",
-					dev->name,__FUNCTION__,__LINE__);
+					dev->name,__func__,__LINE__);
 #endif
 				CLAW_DBF_TEXT(4,trace,"SPend_rd");
                                 return;
@@ -906,7 +906,7 @@
 #endif
 #ifdef FUNCTRACE
 			printk(KERN_INFO "%s:%s Exit on line %d\n",
-				dev->name,__FUNCTION__,__LINE__);
+				dev->name,__func__,__LINE__);
 #endif
 			CLAW_DBF_TEXT(4,trace,"RdIRQXit");
                         return;
@@ -929,7 +929,7 @@
                                 }
 #ifdef FUNCTRACE
 				printk(KERN_INFO "%s:%s Exit on line %d\n",
-					dev->name,__FUNCTION__,__LINE__);
+					dev->name,__func__,__LINE__);
 #endif
 				CLAW_DBF_TEXT(4,trace,"rstrtwrt");
                                 return;
@@ -946,7 +946,7 @@
 	     		(SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
 #ifdef FUNCTRACE
 				printk(KERN_INFO "%s:%s Exit on line %d\n",
-					dev->name,__FUNCTION__,__LINE__);
+					dev->name,__func__,__LINE__);
 #endif
 				CLAW_DBF_TEXT(4,trace,"writeUE");
                                 return;
@@ -969,7 +969,7 @@
 #endif
 #ifdef FUNCTRACE
 			printk(KERN_INFO "%s:%s Exit on line %d\n",
-				dev->name,__FUNCTION__,__LINE__);
+				dev->name,__func__,__LINE__);
 #endif
 			CLAW_DBF_TEXT(4,trace,"StWtExit");
                         return;
@@ -978,7 +978,7 @@
 				"state=%d\n",dev->name,p_ch->claw_state);
 #ifdef FUNCTRACE
 			printk(KERN_INFO "%s:%s Exit on line %d\n",
-				dev->name,__FUNCTION__,__LINE__);
+				dev->name,__func__,__LINE__);
 #endif
 			CLAW_DBF_TEXT(2,trace,"badIRQ");
                         return;
@@ -1001,7 +1001,7 @@
 	p_ch = (struct chbk *) data;
         dev = (struct net_device *)p_ch->ndev;
 #ifdef FUNCTRACE
-        printk(KERN_INFO "%s:%s Enter  \n",dev->name,__FUNCTION__);
+	printk(KERN_INFO "%s:%s Enter  \n",dev->name,__func__);
 #endif
 #ifdef DEBUGMSG
         printk(KERN_INFO "%s: variable p_ch =\n",dev->name);
@@ -1021,7 +1021,7 @@
 	CLAW_DBF_TEXT(4,trace,"TskletXt");
 #ifdef FUNCTRACE
 	printk(KERN_INFO "%s:%s Exit on line %d\n",
-		dev->name,__FUNCTION__,__LINE__);
+		dev->name,__func__,__LINE__);
 #endif
         return;
 }       /*    end of claw_irq_bh    */
@@ -1048,7 +1048,7 @@
         if (!privptr)
                 return 0;
 #ifdef FUNCTRACE
-        printk(KERN_INFO "%s:%s Enter  \n",dev->name,__FUNCTION__);
+	printk(KERN_INFO "%s:%s Enter  \n",dev->name,__func__);
 #endif
 	CLAW_DBF_TEXT(4,trace,"release");
 #ifdef DEBUGMSG
@@ -1090,7 +1090,7 @@
 	if(privptr->buffs_alloc != 1) {
 #ifdef FUNCTRACE
 	printk(KERN_INFO "%s:%s Exit on line %d\n",
-		dev->name,__FUNCTION__,__LINE__);
+		dev->name,__func__,__LINE__);
 #endif
 		CLAW_DBF_TEXT(4,trace,"none2fre");
 		return 0;
@@ -1171,7 +1171,7 @@
         }
 #ifdef FUNCTRACE
 	printk(KERN_INFO "%s:%s Exit on line %d\n",
-		dev->name,__FUNCTION__,__LINE__);
+		dev->name,__func__,__LINE__);
 #endif
 	CLAW_DBF_TEXT(4,trace,"rlsexit");
         return 0;
@@ -1192,7 +1192,7 @@
 
 
 #ifdef FUNCTRACE
-        printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__);
+	printk(KERN_INFO "%s:%s Enter\n",dev->name,__func__);
         printk(KERN_INFO "claw: variable p_ch =\n");
         dumpit((char *) p_ch, sizeof(struct chbk));
 #endif
@@ -1200,20 +1200,20 @@
         if (p_ch->claw_state == CLAW_STOP) {
 #ifdef FUNCTRACE
 		printk(KERN_INFO "%s:%s Exit on line %d\n",
-			dev->name,__FUNCTION__,__LINE__);
+			dev->name,__func__,__LINE__);
 #endif
         	return;
         }
 #ifdef DEBUGMSG
         printk( KERN_INFO "%s:%s  state-%02x\n" ,
 		dev->name,
-		__FUNCTION__,
+		__func__,
 		p_ch->claw_state);
 #endif
 	claw_strt_out_IO( dev );
 #ifdef FUNCTRACE
 	printk(KERN_INFO "%s:%s Exit on line %d\n",
-		dev->name,__FUNCTION__,__LINE__);
+		dev->name,__func__,__LINE__);
 #endif
 	CLAW_DBF_TEXT(4,trace,"rtry_xit");
         return;
@@ -1235,7 +1235,7 @@
 	int	rc;
 
 #ifdef FUNCTRACE
-        printk(KERN_INFO "%s:%s Enter  \n",p_ch->ndev->name,__FUNCTION__);
+	printk(KERN_INFO "%s:%s Enter  \n",p_ch->ndev->name,__func__);
         printk(KERN_INFO "%s: variable p_ch =\n",p_ch->ndev->name);
         dumpit((char *) p_ch, sizeof(struct chbk));
 #endif
@@ -1262,7 +1262,7 @@
 
 #ifdef FUNCTRACE
 	printk(KERN_INFO "%s:%s Exit on line %d\n",
-		dev->name,__FUNCTION__,__LINE__);
+		dev->name,__func__,__LINE__);
 #endif
         return;
 }      /* end of claw_write_next      */
@@ -1276,7 +1276,7 @@
 claw_timer ( struct chbk * p_ch )
 {
 #ifdef FUNCTRACE
-        printk(KERN_INFO "%s:%s Entry\n",p_ch->ndev->name,__FUNCTION__);
+	printk(KERN_INFO "%s:%s Entry\n",p_ch->ndev->name,__func__);
         printk(KERN_INFO "%s: variable p_ch =\n",p_ch->ndev->name);
         dumpit((char *) p_ch, sizeof(struct chbk));
 #endif
@@ -1285,7 +1285,7 @@
         wake_up(&p_ch->wait);
 #ifdef FUNCTRACE
 	printk(KERN_INFO "%s:%s Exit on line %d\n",
-		p_ch->ndev->name,__FUNCTION__,__LINE__);
+		p_ch->ndev->name,__func__,__LINE__);
 #endif
         return;
 }      /* end of claw_timer  */
@@ -1312,7 +1312,7 @@
 	int	order_of_mag=1;		/* assume 2 pages */
 	int	nump=2;
 #ifdef FUNCTRACE
-        printk(KERN_INFO "%s Enter pages = %d \n",__FUNCTION__,num_of_pages);
+	printk(KERN_INFO "%s Enter pages = %d \n",__func__,num_of_pages);
 #endif
 	CLAW_DBF_TEXT_(5,trace,"pages%d",num_of_pages);
 	if (num_of_pages == 1)   {return 0; }  /* magnitude of 0 = 1 page */
@@ -1327,7 +1327,7 @@
 	if (order_of_mag > 9) { order_of_mag = 9; }  /* I know it's paranoid */
 #ifdef FUNCTRACE
         printk(KERN_INFO "%s Exit on line %d, order = %d\n",
-	__FUNCTION__,__LINE__, order_of_mag);
+	__func__,__LINE__, order_of_mag);
 #endif
 	CLAW_DBF_TEXT_(5,trace,"mag%d",order_of_mag);
 	return order_of_mag;
@@ -1349,7 +1349,7 @@
         struct ccwbk*  p_buf;
 #endif
 #ifdef FUNCTRACE
-        printk(KERN_INFO "%s:%s Enter  \n",dev->name,__FUNCTION__);
+	printk(KERN_INFO "%s:%s Enter  \n",dev->name,__func__);
 #endif
 #ifdef DEBUGMSG
         printk(KERN_INFO "dev\n");
@@ -1369,7 +1369,7 @@
         if ( p_first==NULL) {
 #ifdef FUNCTRACE
 		printk(KERN_INFO "%s:%s Exit on line %d\n",
-			dev->name,__FUNCTION__,__LINE__);
+			dev->name,__func__,__LINE__);
 #endif
 		CLAW_DBF_TEXT(4,trace,"addexit");
                 return 0;
@@ -1400,9 +1400,9 @@
         if ( privptr-> p_read_active_first ==NULL ) {
 #ifdef DEBUGMSG
                 printk(KERN_INFO "%s:%s p_read_active_first == NULL \n",
-			dev->name,__FUNCTION__);
+			dev->name,__func__);
                 printk(KERN_INFO "%s:%s Read active first/last changed \n",
-			dev->name,__FUNCTION__);
+			dev->name,__func__);
 #endif
                 privptr-> p_read_active_first= p_first;  /*    set new first */
                 privptr-> p_read_active_last = p_last;   /*    set new last  */
@@ -1411,7 +1411,7 @@
 
 #ifdef DEBUGMSG
                 printk(KERN_INFO "%s:%s Read in progress \n",
-		dev->name,__FUNCTION__);
+		dev->name,__func__);
 #endif
                 /* set up TIC ccw  */
                 temp_ccw.cda= (__u32)__pa(&p_first->read);
@@ -1450,15 +1450,15 @@
                 privptr->p_read_active_last=p_last;
         } /* end of if ( privptr-> p_read_active_first ==NULL)  */
 #ifdef IOTRACE
-        printk(KERN_INFO "%s:%s  dump p_last CCW BK \n",dev->name,__FUNCTION__);
+	printk(KERN_INFO "%s:%s  dump p_last CCW BK \n",dev->name,__func__);
         dumpit((char *)p_last, sizeof(struct ccwbk));
-        printk(KERN_INFO "%s:%s  dump p_end CCW BK \n",dev->name,__FUNCTION__);
+	printk(KERN_INFO "%s:%s  dump p_end CCW BK \n",dev->name,__func__);
         dumpit((char *)p_end, sizeof(struct endccw));
 
-        printk(KERN_INFO "%s:%s dump p_first CCW BK \n",dev->name,__FUNCTION__);
+	printk(KERN_INFO "%s:%s dump p_first CCW BK \n",dev->name,__func__);
         dumpit((char *)p_first, sizeof(struct ccwbk));
         printk(KERN_INFO "%s:%s Dump Active CCW chain \n",
-		dev->name,__FUNCTION__);
+		dev->name,__func__);
         p_buf=privptr->p_read_active_first;
         while (p_buf!=NULL) {
                 dumpit((char *)p_buf, sizeof(struct ccwbk));
@@ -1467,7 +1467,7 @@
 #endif
 #ifdef FUNCTRACE
 	printk(KERN_INFO "%s:%s Exit on line %d\n",
-		dev->name,__FUNCTION__,__LINE__);
+		dev->name,__func__,__LINE__);
 #endif
 	CLAW_DBF_TEXT(4,trace,"addexit");
         return 0;
@@ -1483,7 +1483,7 @@
 {
 #ifdef FUNCTRACE
         printk(KERN_INFO "%s: %s() > enter  \n",
-		cdev->dev.bus_id,__FUNCTION__);
+		cdev->dev.bus_id,__func__);
 #endif
 	CLAW_DBF_TEXT(4,trace,"ccwret");
 #ifdef DEBUGMSG
@@ -1516,7 +1516,7 @@
         }
 #ifdef FUNCTRACE
         printk(KERN_INFO "%s: %s() > exit on line %d\n",
-		cdev->dev.bus_id,__FUNCTION__,__LINE__);
+		cdev->dev.bus_id,__func__,__LINE__);
 #endif
 	CLAW_DBF_TEXT(4,trace,"ccwret");
 }    /*    end of ccw_check_return_code   */
@@ -1531,7 +1531,7 @@
 	struct net_device *dev = p_ch->ndev;
 
 #ifdef FUNCTRACE
-        printk(KERN_INFO "%s: %s() > enter\n",dev->name,__FUNCTION__);
+	printk(KERN_INFO "%s: %s() > enter\n",dev->name,__func__);
 #endif
 #ifdef DEBUGMSG
         printk(KERN_INFO "%s: variable dev =\n",dev->name);
@@ -1578,7 +1578,7 @@
 
 #ifdef FUNCTRACE
         printk(KERN_INFO "%s: %s() exit on line %d\n",
-		dev->name,__FUNCTION__,__LINE__);
+		dev->name,__func__,__LINE__);
 #endif
 }   /*    end of ccw_check_unit_check    */
 
@@ -1706,7 +1706,7 @@
 	int    rc=0;
 
 #ifdef FUNCTRACE
-        printk(KERN_INFO "%s:%s > enter  \n",dev->name,__FUNCTION__);
+	printk(KERN_INFO "%s:%s > enter  \n",dev->name,__func__);
 #endif
 	CLAW_DBF_TEXT(2,setup,"findlink");
 #ifdef DEBUGMSG
@@ -1739,7 +1739,7 @@
 
 #ifdef FUNCTRACE
         printk(KERN_INFO "%s:%s Exit on line %d\n",
-		dev->name,__FUNCTION__,__LINE__);
+		dev->name,__func__,__LINE__);
 #endif
         return 0;
 }    /*    end of find_link    */
@@ -1773,7 +1773,7 @@
         struct ccwbk                   *p_buf;
 #endif
 #ifdef FUNCTRACE
-        printk(KERN_INFO "%s: %s() > enter\n",dev->name,__FUNCTION__);
+	printk(KERN_INFO "%s: %s() > enter\n",dev->name,__func__);
 #endif
 	CLAW_DBF_TEXT(4,trace,"hw_tx");
 #ifdef DEBUGMSG
@@ -1787,7 +1787,7 @@
         p_ch=(struct chbk *)&privptr->channel[WRITE];
 	p_env =privptr->p_env;
 #ifdef IOTRACE
-        printk(KERN_INFO "%s: %s() dump sk_buff  \n",dev->name,__FUNCTION__);
+	printk(KERN_INFO "%s: %s() dump sk_buff  \n",dev->name,__func__);
         dumpit((char *)skb ,sizeof(struct sk_buff));
 #endif
 	claw_free_wrt_buf(dev);	/* Clean up free chain if posible */
@@ -1877,7 +1877,7 @@
         while (len_of_data > 0) {
 #ifdef DEBUGMSG
                 printk(KERN_INFO "%s: %s() length-of-data is %ld \n",
-			dev->name ,__FUNCTION__,len_of_data);
+			dev->name ,__func__,len_of_data);
                 dumpit((char *)pDataAddress ,64);
 #endif
                 p_this_ccw=privptr->p_write_free_chain;  /* get a block */
@@ -1913,7 +1913,7 @@
                 p_last_ccw=p_this_ccw;      /* save new last block */
 #ifdef IOTRACE
 		printk(KERN_INFO "%s: %s() > CCW and Buffer %ld bytes long \n",
-			dev->name,__FUNCTION__,bytesInThisBuffer);
+			dev->name,__func__,bytesInThisBuffer);
                 dumpit((char *)p_this_ccw, sizeof(struct ccwbk));
                 dumpit((char *)p_this_ccw->p_buffer, 64);
 #endif
@@ -1998,7 +1998,7 @@
 
 #ifdef IOTRACE
         printk(KERN_INFO "%s: %s() >  Dump Active CCW chain \n",
-		dev->name,__FUNCTION__);
+		dev->name,__func__);
         p_buf=privptr->p_write_active_first;
         while (p_buf!=NULL) {
                 dumpit((char *)p_buf, sizeof(struct ccwbk));
@@ -2018,7 +2018,7 @@
         /*      if write free count is zero , set NOBUFFER       */
 #ifdef DEBUGMSG
         printk(KERN_INFO "%s: %s() > free_count is %d\n",
-		dev->name,__FUNCTION__,
+		dev->name,__func__,
 		(int) privptr->write_free_count );
 #endif
 	if (privptr->write_free_count==0) {
@@ -2029,7 +2029,7 @@
 Done:
 #ifdef FUNCTRACE
         printk(KERN_INFO "%s: %s() > exit on line %d, rc = %d \n",
-		dev->name,__FUNCTION__,__LINE__, rc);
+		dev->name,__func__,__LINE__, rc);
 #endif
 	return(rc);
 }    /*    end of claw_hw_tx    */
@@ -2063,7 +2063,7 @@
         addr_t   real_TIC_address;
         int i,j;
 #ifdef FUNCTRACE
-        printk(KERN_INFO "%s: %s() enter  \n",dev->name,__FUNCTION__);
+	printk(KERN_INFO "%s: %s() enter  \n",dev->name,__func__);
 #endif
 	CLAW_DBF_TEXT(4,trace,"init_ccw");
 #ifdef DEBUGMSG
@@ -2097,15 +2097,15 @@
 #ifdef DEBUGMSG
         printk(KERN_INFO "%s: %s() "
 		"ccw_blocks_required=%d\n",
-		dev->name,__FUNCTION__,
+		dev->name,__func__,
 		ccw_blocks_required);
         printk(KERN_INFO "%s: %s() "
 		"PAGE_SIZE=0x%x\n",
-		dev->name,__FUNCTION__,
+		dev->name,__func__,
 		(unsigned int)PAGE_SIZE);
         printk(KERN_INFO "%s: %s() > "
 		"PAGE_MASK=0x%x\n",
-		dev->name,__FUNCTION__,
+		dev->name,__func__,
 		(unsigned int)PAGE_MASK);
 #endif
         /*
@@ -2117,10 +2117,10 @@
 
 #ifdef DEBUGMSG
         printk(KERN_INFO "%s: %s() > ccw_blocks_perpage=%d\n",
-		dev->name,__FUNCTION__,
+		dev->name,__func__,
 		ccw_blocks_perpage);
         printk(KERN_INFO "%s: %s() > ccw_pages_required=%d\n",
-		dev->name,__FUNCTION__,
+		dev->name,__func__,
 		ccw_pages_required);
 #endif
         /*
@@ -2156,29 +2156,29 @@
 #ifdef DEBUGMSG
         if (privptr->p_env->read_size < PAGE_SIZE) {
             printk(KERN_INFO "%s: %s() reads_perpage=%d\n",
-	    	dev->name,__FUNCTION__,
+		dev->name,__func__,
 		claw_reads_perpage);
         }
         else {
             printk(KERN_INFO "%s: %s() pages_perread=%d\n",
-	    	dev->name,__FUNCTION__,
+		dev->name,__func__,
 		privptr->p_buff_pages_perread);
         }
         printk(KERN_INFO "%s: %s() read_pages=%d\n",
-		dev->name,__FUNCTION__,
+		dev->name,__func__,
 		claw_read_pages);
         if (privptr->p_env->write_size < PAGE_SIZE) {
             printk(KERN_INFO "%s: %s() writes_perpage=%d\n",
-	    	dev->name,__FUNCTION__,
+		dev->name,__func__,
 		claw_writes_perpage);
         }
         else {
             printk(KERN_INFO "%s: %s() pages_perwrite=%d\n",
-	    	dev->name,__FUNCTION__,
+		dev->name,__func__,
 		privptr->p_buff_pages_perwrite);
         }
         printk(KERN_INFO "%s: %s() write_pages=%d\n",
-		dev->name,__FUNCTION__,
+		dev->name,__func__,
 		claw_write_pages);
 #endif
 
@@ -2194,12 +2194,12 @@
                         printk(KERN_INFO "%s: %s()  "
 				"__get_free_pages for CCWs failed : "
 				"pages is %d\n",
-                                dev->name,__FUNCTION__,
+				dev->name,__func__,
 				ccw_pages_required );
 #ifdef FUNCTRACE
                         printk(KERN_INFO "%s: %s() > "
 				"exit on line %d, rc = ENOMEM\n",
-				dev->name,__FUNCTION__,
+				dev->name,__func__,
 				 __LINE__);
 #endif
                         return -ENOMEM;
@@ -2218,7 +2218,7 @@
         /*                              Initialize ending CCW block       */
 #ifdef DEBUGMSG
         printk(KERN_INFO "%s: %s() begin initialize ending CCW blocks\n",
-		dev->name,__FUNCTION__);
+		dev->name,__func__);
 #endif
 
         p_endccw=privptr->p_end_ccw;
@@ -2276,7 +2276,7 @@
 
 #ifdef IOTRACE
         printk(KERN_INFO "%s: %s() dump claw ending CCW BK \n",
-		dev->name,__FUNCTION__);
+		dev->name,__func__);
         dumpit((char *)p_endccw, sizeof(struct endccw));
 #endif
 
@@ -2287,7 +2287,7 @@
 
 #ifdef DEBUGMSG
         printk(KERN_INFO "%s: %s()  Begin build a chain of CCW buffer \n",
-		dev->name,__FUNCTION__);
+		dev->name,__func__);
 #endif
         p_buff=privptr->p_buff_ccw;
 
@@ -2306,7 +2306,7 @@
 #ifdef DEBUGMSG
         printk(KERN_INFO "%s: %s() "
 		"End build a chain of CCW buffer \n",
-			dev->name,__FUNCTION__);
+			dev->name,__func__);
         p_buf=p_free_chain;
         while (p_buf!=NULL) {
                 dumpit((char *)p_buf, sizeof(struct ccwbk));
@@ -2321,7 +2321,7 @@
 #ifdef DEBUGMSG
         printk(KERN_INFO "%s: %s() "
 		"Begin initialize ClawSignalBlock \n",
-		dev->name,__FUNCTION__);
+		dev->name,__func__);
 #endif
         if (privptr->p_claw_signal_blk==NULL) {
                 privptr->p_claw_signal_blk=p_free_chain;
@@ -2334,7 +2334,7 @@
 #ifdef DEBUGMSG
         printk(KERN_INFO "%s: %s() >  End initialize "
 	 	"ClawSignalBlock\n",
-		dev->name,__FUNCTION__);
+		dev->name,__func__);
         dumpit((char *)privptr->p_claw_signal_blk, sizeof(struct ccwbk));
 #endif
 
@@ -2349,14 +2349,14 @@
                 if (privptr->p_buff_write==NULL) {
                         printk(KERN_INFO "%s: %s() __get_free_pages for write"
 				" bufs failed : get is for %d pages\n",
-                                dev->name,__FUNCTION__,claw_write_pages );
+				dev->name,__func__,claw_write_pages );
                         free_pages((unsigned long)privptr->p_buff_ccw,
 			   (int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
                         privptr->p_buff_ccw=NULL;
 #ifdef FUNCTRACE
                         printk(KERN_INFO "%s: %s() > exit on line %d,"
 			 	"rc = ENOMEM\n",
-				dev->name,__FUNCTION__,__LINE__);
+				dev->name,__func__,__LINE__);
 #endif
                         return -ENOMEM;
                 }
@@ -2369,7 +2369,7 @@
 			ccw_pages_required * PAGE_SIZE);
 #ifdef DEBUGMSG
                 printk(KERN_INFO "%s: %s() Begin build claw write free "
-			"chain \n",dev->name,__FUNCTION__);
+			"chain \n",dev->name,__func__);
 #endif
                 privptr->p_write_free_chain=NULL;
 
@@ -2409,14 +2409,14 @@
 #ifdef IOTRACE
                    printk(KERN_INFO "%s:%s __get_free_pages "
 		    "for writes buf: get for %d pages\n",
-		    dev->name,__FUNCTION__,
+		    dev->name,__func__,
 		    privptr->p_buff_pages_perwrite);
 #endif
                    if (p_buff==NULL) {
 			printk(KERN_INFO "%s:%s __get_free_pages "
 			 	"for writes buf failed : get is for %d pages\n",
 				dev->name,
-				__FUNCTION__,
+				__func__,
 				privptr->p_buff_pages_perwrite );
                         free_pages((unsigned long)privptr->p_buff_ccw,
 			      (int)pages_to_order_of_mag(
@@ -2433,7 +2433,7 @@
 #ifdef FUNCTRACE
                         printk(KERN_INFO "%s: %s exit on line %d, rc = ENOMEM\n",
 			dev->name,
-			__FUNCTION__,
+			__func__,
 			__LINE__);
 #endif
                         return -ENOMEM;
@@ -2466,7 +2466,7 @@
 
 #ifdef DEBUGMSG
         printk(KERN_INFO "%s:%s  End build claw write free chain \n",
-	dev->name,__FUNCTION__);
+	dev->name,__func__);
         p_buf=privptr->p_write_free_chain;
         while (p_buf!=NULL) {
                 dumpit((char *)p_buf, sizeof(struct ccwbk));
@@ -2485,7 +2485,7 @@
                         printk(KERN_INFO "%s: %s() "
 			 	"__get_free_pages for read buf failed : "
 			 	"get is for %d pages\n",
-                                dev->name,__FUNCTION__,claw_read_pages );
+				dev->name,__func__,claw_read_pages );
                         free_pages((unsigned long)privptr->p_buff_ccw,
 				(int)pages_to_order_of_mag(
 					privptr->p_buff_ccw_num));
@@ -2497,7 +2497,7 @@
                         privptr->p_buff_write=NULL;
 #ifdef FUNCTRACE
                         printk(KERN_INFO "%s: %s() > exit on line %d, rc ="
-				" ENOMEM\n",dev->name,__FUNCTION__,__LINE__);
+				" ENOMEM\n",dev->name,__func__,__LINE__);
 #endif
                         return -ENOMEM;
                 }
@@ -2509,7 +2509,7 @@
                 */
 #ifdef DEBUGMSG
                 printk(KERN_INFO "%s: %s() Begin build claw read free chain \n",
-			dev->name,__FUNCTION__);
+			dev->name,__func__);
 #endif
                 p_buff=privptr->p_buff_read;
                 for (i=0 ; i< privptr->p_env->read_buffers ; i++) {
@@ -2590,7 +2590,7 @@
 
 #ifdef DEBUGMSG
         printk(KERN_INFO "%s: %s() Begin build claw read free chain \n",
-		dev->name,__FUNCTION__);
+		dev->name,__func__);
 #endif
                 for (i=0 ; i< privptr->p_env->read_buffers ; i++) {
                         p_buff = (void *)__get_free_pages(__GFP_DMA,
@@ -2598,7 +2598,7 @@
                         if (p_buff==NULL) {
                                 printk(KERN_INFO "%s: %s() __get_free_pages for read "
 					"buf failed : get is for %d pages\n",
-					dev->name,__FUNCTION__,
+					dev->name,__func__,
                                         privptr->p_buff_pages_perread );
                                 free_pages((unsigned long)privptr->p_buff_ccw,
 					(int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
@@ -2622,7 +2622,7 @@
                                 privptr->p_buff_write=NULL;
 #ifdef FUNCTRACE
                                 printk(KERN_INFO "%s: %s() exit on line %d, rc = ENOMEM\n",
-					dev->name,__FUNCTION__,
+					dev->name,__func__,
 					__LINE__);
 #endif
                                 return -ENOMEM;
@@ -2695,7 +2695,7 @@
         }       /*  pBuffread = NULL */
 #ifdef DEBUGMSG
         printk(KERN_INFO "%s: %s() >  End build claw read free chain \n",
-		dev->name,__FUNCTION__);
+		dev->name,__func__);
         p_buf=p_first_CCWB;
         while (p_buf!=NULL) {
                 dumpit((char *)p_buf, sizeof(struct ccwbk));
@@ -2707,7 +2707,7 @@
 	privptr->buffs_alloc = 1;
 #ifdef FUNCTRACE
         printk(KERN_INFO "%s: %s() exit on line %d\n",
-		dev->name,__FUNCTION__,__LINE__);
+		dev->name,__func__,__LINE__);
 #endif
         return 0;
 }    /*    end of init_ccw_bk */
@@ -2723,11 +2723,11 @@
 {
   struct claw_privbk *privptr;
 #ifdef FUNCTRACE
-        printk(KERN_INFO "%s enter  \n",__FUNCTION__);
+	printk(KERN_INFO "%s enter  \n",__func__);
 #endif
 	CLAW_DBF_TEXT(4,trace,"proberr");
 #ifdef DEBUGMSG
-        printk(KERN_INFO "%s variable cgdev =\n",__FUNCTION__);
+	printk(KERN_INFO "%s variable cgdev =\n",__func__);
         dumpit((char *) cgdev, sizeof(struct ccwgroup_device));
 #endif
         privptr=(struct claw_privbk *)cgdev->dev.driver_data;
@@ -2741,7 +2741,7 @@
         }
 #ifdef FUNCTRACE
         printk(KERN_INFO "%s > exit on line %d\n",
-		 __FUNCTION__,__LINE__);
+		 __func__,__LINE__);
 #endif
 
         return;
@@ -2772,7 +2772,7 @@
         struct chbk *p_ch = NULL;
 #ifdef FUNCTRACE
         printk(KERN_INFO "%s: %s() > enter  \n",
-		dev->name,__FUNCTION__);
+		dev->name,__func__);
 #endif
 	CLAW_DBF_TEXT(2,setup,"clw_cntl");
 #ifdef DEBUGMSG
@@ -2794,7 +2794,7 @@
 #ifdef FUNCTRACE
                 printk(KERN_INFO "%s: %s() > "
 			"exit on line %d, rc=0\n",
-			dev->name,__FUNCTION__,__LINE__);
+			dev->name,__func__,__LINE__);
 #endif
                 return 0;
         }
@@ -3057,7 +3057,7 @@
 
 #ifdef FUNCTRACE
         printk(KERN_INFO "%s: %s() exit on line %d, rc = 0\n",
-		dev->name,__FUNCTION__,__LINE__);
+		dev->name,__func__,__LINE__);
 #endif
 
         return 0;
@@ -3080,7 +3080,7 @@
         struct sk_buff 			*skb;
 
 #ifdef FUNCTRACE
-        printk(KERN_INFO "%s:%s > enter  \n",dev->name,__FUNCTION__);
+	printk(KERN_INFO "%s:%s > enter  \n",dev->name,__func__);
 #endif
 	CLAW_DBF_TEXT(2,setup,"sndcntl");
 #ifdef DEBUGMSG
@@ -3143,10 +3143,10 @@
         skb = dev_alloc_skb(sizeof(struct clawctl));
         if (!skb) {
                 printk(  "%s:%s low on mem, returning...\n",
-			dev->name,__FUNCTION__);
+			dev->name,__func__);
 #ifdef DEBUG
                 printk(KERN_INFO "%s:%s Exit, rc = ENOMEM\n",
-			dev->name,__FUNCTION__);
+			dev->name,__func__);
 #endif
                 return -ENOMEM;
         }
@@ -3162,7 +3162,7 @@
         	claw_hw_tx(skb, dev, 0);
 #ifdef FUNCTRACE
         printk(KERN_INFO "%s:%s Exit on line %d\n",
-		dev->name,__FUNCTION__,__LINE__);
+		dev->name,__func__,__LINE__);
 #endif
 
         return 0;
@@ -3180,7 +3180,7 @@
         struct clawctl 	   *p_ctl;
 
 #ifdef FUNCTRACE
-        printk(KERN_INFO "%s:%s Enter  \n",dev->name,__FUNCTION__);
+	printk(KERN_INFO "%s:%s Enter  \n",dev->name,__func__);
 #endif
 	CLAW_DBF_TEXT(2,setup,"snd_conn");
 #ifdef  DEBUGMSG
@@ -3193,7 +3193,7 @@
         if ( privptr->system_validate_comp==0x00 ) {
 #ifdef FUNCTRACE
                 printk(KERN_INFO "%s:%s Exit on line %d, rc = 1\n",
-			dev->name,__FUNCTION__,__LINE__);
+			dev->name,__func__,__LINE__);
 #endif
                 return rc;
         }
@@ -3209,7 +3209,7 @@
        			HOST_APPL_NAME, privptr->p_env->api_type);
 #ifdef FUNCTRACE
         printk(KERN_INFO "%s:%s Exit on line %d, rc = %d\n",
-		dev->name,__FUNCTION__,__LINE__, rc);
+		dev->name,__func__,__LINE__, rc);
 #endif
         return rc;
 
@@ -3228,7 +3228,7 @@
         struct conncmd *  p_connect;
 
 #ifdef FUNCTRACE
-        printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__);
+	printk(KERN_INFO "%s:%s Enter\n",dev->name,__func__);
 #endif
 	CLAW_DBF_TEXT(2,setup,"snd_dsc");
 #ifdef  DEBUGMSG
@@ -3244,7 +3244,7 @@
                 p_connect->host_name, p_connect->WS_name);
 #ifdef FUNCTRACE
         printk(KERN_INFO "%s:%s Exit on line %d, rc = %d\n",
-		dev->name,__FUNCTION__, __LINE__, rc);
+		dev->name,__func__, __LINE__, rc);
 #endif
         return rc;
 }     /*   end of claw_snd_disc    */
@@ -3265,7 +3265,7 @@
 
 #ifdef FUNCTRACE
         printk(KERN_INFO "%s:%s Enter\n",
-		dev->name,__FUNCTION__);
+		dev->name,__func__);
 #endif
 	CLAW_DBF_TEXT(2,setup,"chkresp");
 #ifdef DEBUGMSG
@@ -3285,7 +3285,7 @@
 		p_env->adapter_name  );
 #ifdef FUNCTRACE
         printk(KERN_INFO "%s:%s Exit on line %d, rc = %d\n",
-		dev->name,__FUNCTION__,__LINE__, rc);
+		dev->name,__func__,__LINE__, rc);
 #endif
         return rc;
 }     /*    end of claw_snd_sys_validate_rsp    */
@@ -3301,7 +3301,7 @@
         int rc;
 
 #ifdef FUNCTRACE
-        printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__);
+	printk(KERN_INFO "%s:%s Enter\n",dev->name,__func__);
 #endif
 	CLAW_DBF_TEXT(2,setup,"conn_req");
 #ifdef DEBUGMSG
@@ -3311,7 +3311,7 @@
         rc=claw_snd_conn_req(dev, 1);
 #ifdef FUNCTRACE
         printk(KERN_INFO "%s:%s Exit on line %d, rc = %d\n",
-		dev->name,__FUNCTION__,__LINE__, rc);
+		dev->name,__func__,__LINE__, rc);
 #endif
         return rc;
 }    /*   end of claw_strt_conn_req   */
@@ -3327,13 +3327,13 @@
 {
         struct claw_privbk *privptr;
 #ifdef FUNCTRACE
-        printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__);
+	printk(KERN_INFO "%s:%s Enter\n",dev->name,__func__);
 #endif
 	CLAW_DBF_TEXT(4,trace,"stats");
         privptr = dev->priv;
 #ifdef FUNCTRACE
         printk(KERN_INFO "%s:%s Exit on line %d\n",
-		dev->name,__FUNCTION__,__LINE__);
+		dev->name,__func__,__LINE__);
 #endif
         return &privptr->stats;
 }     /*   end of claw_stats   */
@@ -3366,7 +3366,7 @@
 	int     p=0;
 
 #ifdef FUNCTRACE
-        printk(KERN_INFO "%s:%s enter  \n",dev->name,__FUNCTION__);
+	printk(KERN_INFO "%s:%s enter  \n",dev->name,__func__);
 #endif
 	CLAW_DBF_TEXT(4,trace,"unpkread");
         p_first_ccw=NULL;
@@ -3408,7 +3408,7 @@
                 if ((p_this_ccw->header.opcode & MORE_to_COME_FLAG)!=0) {
 #ifdef DEBUGMSG
                         printk(KERN_INFO "%s: %s > More_to_come is ON\n",
-			dev->name,__FUNCTION__);
+			dev->name,__func__);
 #endif
                         mtc_this_frm=1;
                         if (p_this_ccw->header.length!=
@@ -3435,7 +3435,7 @@
 #ifdef DEBUGMSG
                         printk(KERN_INFO "%s:%s goto next "
 				"frame from MoretoComeSkip \n",
-				dev->name,__FUNCTION__);
+				dev->name,__func__);
 #endif
                         goto NextFrame;
                 }
@@ -3445,7 +3445,7 @@
 #ifdef DEBUGMSG
                         printk(KERN_INFO "%s:%s goto next "
 				"frame from claw_process_control \n",
-				dev->name,__FUNCTION__);
+				dev->name,__func__);
 #endif
 			CLAW_DBF_TEXT(4,trace,"UnpkCntl");
                         goto NextFrame;
@@ -3468,7 +3468,7 @@
                 if (privptr->mtc_logical_link<0) {
 #ifdef DEBUGMSG
                 printk(KERN_INFO "%s: %s mtc_logical_link < 0  \n",
-			dev->name,__FUNCTION__);
+			dev->name,__func__);
 #endif
 
                 /*
@@ -3487,7 +3487,7 @@
                         printk(KERN_INFO "%s: %s > goto next "
 				"frame from MoretoComeSkip \n",
 				dev->name,
-				__FUNCTION__);
+				__func__);
                         printk(KERN_INFO "      bytes_to_mov %d > (MAX_ENVELOPE_"
 				"SIZE-privptr->mtc_offset %d)\n",
 				bytes_to_mov,(MAX_ENVELOPE_SIZE- privptr->mtc_offset));
@@ -3505,13 +3505,13 @@
 		}
 #ifdef DEBUGMSG
                 printk(KERN_INFO "%s: %s() received data \n",
-			dev->name,__FUNCTION__);
+			dev->name,__func__);
 		if (p_env->packing == DO_PACKED)
 			dumpit((char *)p_packd+sizeof(struct clawph),32);
 		else
 	                dumpit((char *)p_this_ccw->p_buffer, 32);
 		printk(KERN_INFO "%s: %s() bytelength %d \n",
-			dev->name,__FUNCTION__,bytes_to_mov);
+			dev->name,__func__,bytes_to_mov);
 #endif
                 if (mtc_this_frm==0) {
                         len_of_data=privptr->mtc_offset+bytes_to_mov;
@@ -3530,13 +3530,13 @@
 #ifdef DEBUGMSG
                                 printk(KERN_INFO "%s: %s() netif_"
 					"rx(skb) completed \n",
-					dev->name,__FUNCTION__);
+					dev->name,__func__);
 #endif
                         }
                         else {
                                 privptr->stats.rx_dropped++;
                                 printk(KERN_WARNING "%s: %s() low on memory\n",
-				dev->name,__FUNCTION__);
+				dev->name,__func__);
                         }
                         privptr->mtc_offset=0;
                         privptr->mtc_logical_link=-1;
@@ -3575,10 +3575,10 @@
 
 #ifdef IOTRACE
         printk(KERN_INFO "%s:%s processed frame is %d \n",
-		dev->name,__FUNCTION__,i);
+		dev->name,__func__,i);
         printk(KERN_INFO "%s:%s  F:%lx L:%lx\n",
 		dev->name,
-		__FUNCTION__,
+		__func__,
 		(unsigned long)p_first_ccw,
 		(unsigned long)p_last_ccw);
 #endif
@@ -3588,7 +3588,7 @@
         claw_strt_read(dev, LOCK_YES);
 #ifdef FUNCTRACE
         printk(KERN_INFO "%s: %s exit on line %d\n",
-		dev->name, __FUNCTION__, __LINE__);
+		dev->name, __func__, __LINE__);
 #endif
         return;
 }     /*  end of unpack_read   */
@@ -3610,7 +3610,7 @@
         p_ch=&privptr->channel[READ];
 
 #ifdef FUNCTRACE
-        printk(KERN_INFO "%s:%s Enter  \n",dev->name,__FUNCTION__);
+	printk(KERN_INFO "%s:%s Enter  \n",dev->name,__func__);
         printk(KERN_INFO "%s: variable lock = %d, dev =\n",dev->name, lock);
         dumpit((char *) dev, sizeof(struct net_device));
 #endif
@@ -3626,7 +3626,7 @@
         }
 #ifdef DEBUGMSG
         printk(KERN_INFO "%s:%s state-%02x\n" ,
-		dev->name,__FUNCTION__, p_ch->claw_state);
+		dev->name,__func__, p_ch->claw_state);
 #endif
         if (lock==LOCK_YES) {
                 spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
@@ -3634,7 +3634,7 @@
         if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
 #ifdef DEBUGMSG
                 printk(KERN_INFO "%s: HOT READ started in %s\n" ,
-			dev->name,__FUNCTION__);
+			dev->name,__func__);
                 p_clawh=(struct clawh *)privptr->p_claw_signal_blk;
                 dumpit((char *)&p_clawh->flag , 1);
 #endif
@@ -3650,7 +3650,7 @@
 	else {
 #ifdef DEBUGMSG
 		printk(KERN_INFO "%s: No READ started by %s() In progress\n" ,
-			dev->name,__FUNCTION__);
+			dev->name,__func__);
 #endif
 		CLAW_DBF_TEXT(2,trace,"ReadAct");
 	}
@@ -3660,7 +3660,7 @@
         }
 #ifdef FUNCTRACE
         printk(KERN_INFO "%s:%s Exit on line %d\n",
-		dev->name,__FUNCTION__,__LINE__);
+		dev->name,__func__,__LINE__);
 #endif
 	CLAW_DBF_TEXT(4,trace,"StRdExit");
         return;
@@ -3681,7 +3681,7 @@
         struct ccwbk   	*p_first_ccw;
 
 #ifdef FUNCTRACE
-        printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__);
+	printk(KERN_INFO "%s:%s Enter\n",dev->name,__func__);
 #endif
 	if (!dev) {
 		return;
@@ -3691,7 +3691,7 @@
 
 #ifdef DEBUGMSG
         printk(KERN_INFO "%s:%s state-%02x\n" ,
-		dev->name,__FUNCTION__,p_ch->claw_state);
+		dev->name,__func__,p_ch->claw_state);
 #endif
         CLAW_DBF_TEXT(4,trace,"strt_io");
         p_first_ccw=privptr->p_write_active_first;
@@ -3701,14 +3701,14 @@
         if (p_first_ccw == NULL) {
 #ifdef FUNCTRACE
                 printk(KERN_INFO "%s:%s Exit on line %d\n",
-			dev->name,__FUNCTION__,__LINE__);
+			dev->name,__func__,__LINE__);
 #endif
                 return;
         }
         if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
                 parm = (unsigned long) p_ch;
 #ifdef DEBUGMSG
-                printk(KERN_INFO "%s:%s do_io \n" ,dev->name,__FUNCTION__);
+		printk(KERN_INFO "%s:%s do_io \n" ,dev->name,__func__);
                 dumpit((char *)p_first_ccw, sizeof(struct ccwbk));
 #endif
 		CLAW_DBF_TEXT(2,trace,"StWrtIO");
@@ -3721,7 +3721,7 @@
         dev->trans_start = jiffies;
 #ifdef FUNCTRACE
 	printk(KERN_INFO "%s:%s Exit on line %d\n",
-		dev->name,__FUNCTION__,__LINE__);
+		dev->name,__func__,__LINE__);
 #endif
 
         return;
@@ -3745,7 +3745,7 @@
         struct ccwbk*p_buf;
 #endif
 #ifdef FUNCTRACE
-        printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__);
+	printk(KERN_INFO "%s:%s Enter\n",dev->name,__func__);
         printk(KERN_INFO "%s: free count = %d  variable dev =\n",
 		dev->name,privptr->write_free_count);
 #endif
@@ -3798,7 +3798,7 @@
                 privptr->p_write_active_last=NULL;
 #ifdef DEBUGMSG
                 printk(KERN_INFO "%s:%s p_write_"
-			"active_first==NULL\n",dev->name,__FUNCTION__);
+			"active_first==NULL\n",dev->name,__func__);
 #endif
         }
 #ifdef IOTRACE
@@ -3819,7 +3819,7 @@
 	CLAW_DBF_TEXT_(4,trace,"FWC=%d",privptr->write_free_count);
 #ifdef FUNCTRACE
         printk(KERN_INFO "%s:%s Exit on line %d free_count =%d\n",
-		dev->name,__FUNCTION__, __LINE__,privptr->write_free_count);
+		dev->name,__func__, __LINE__,privptr->write_free_count);
 #endif
         return;
 }
@@ -3833,7 +3833,7 @@
 {
 	struct claw_privbk *privptr;
 #ifdef FUNCTRACE
-        printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__);
+	printk(KERN_INFO "%s:%s Enter\n",dev->name,__func__);
 #endif
 	CLAW_DBF_TEXT(2,setup,"free_dev");
 
@@ -3854,7 +3854,7 @@
 #endif
 	CLAW_DBF_TEXT(2,setup,"feee_ok");
 #ifdef FUNCTRACE
-        printk(KERN_INFO "%s:%s Exit\n",dev->name,__FUNCTION__);
+	printk(KERN_INFO "%s:%s Exit\n",dev->name,__func__);
 #endif
 }
 
@@ -3867,13 +3867,13 @@
 claw_init_netdevice(struct net_device * dev)
 {
 #ifdef FUNCTRACE
-        printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__);
+	printk(KERN_INFO "%s:%s Enter\n",dev->name,__func__);
 #endif
 	CLAW_DBF_TEXT(2,setup,"init_dev");
 	CLAW_DBF_TEXT_(2,setup,"%s",dev->name);
 	if (!dev) {
         printk(KERN_WARNING "claw:%s BAD Device exit line %d\n",
-		__FUNCTION__,__LINE__);
+		__func__,__LINE__);
 		CLAW_DBF_TEXT(2,setup,"baddev");
 		return;
 	}
@@ -3889,7 +3889,7 @@
 	dev->tx_queue_len = 1300;
 	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
 #ifdef FUNCTRACE
-        printk(KERN_INFO "%s:%s Exit\n",dev->name,__FUNCTION__);
+	printk(KERN_INFO "%s:%s Exit\n",dev->name,__func__);
 #endif
 	CLAW_DBF_TEXT(2,setup,"initok");
 	return;
@@ -3909,7 +3909,7 @@
 	struct ccw_dev_id dev_id;
 
 #ifdef FUNCTRACE
-        printk(KERN_INFO "%s:%s Enter\n",cdev->dev.bus_id,__FUNCTION__);
+	printk(KERN_INFO "%s:%s Enter\n",cdev->dev.bus_id,__func__);
 #endif
 	CLAW_DBF_TEXT_(2,setup,"%s",cdev->dev.bus_id);
 	privptr->channel[i].flag  = i+1;   /* Read is 1 Write is 2 */
@@ -3920,16 +3920,16 @@
 	p_ch->devno = dev_id.devno;
 	if ((p_ch->irb = kzalloc(sizeof (struct irb),GFP_KERNEL)) == NULL) {
 		printk(KERN_WARNING "%s Out of memory in %s for irb\n",
-			p_ch->id,__FUNCTION__);
+			p_ch->id,__func__);
 #ifdef FUNCTRACE
         	printk(KERN_INFO "%s:%s Exit on line %d\n",
-			p_ch->id,__FUNCTION__,__LINE__);
+			p_ch->id,__func__,__LINE__);
 #endif
 		return -ENOMEM;
 	}
 #ifdef FUNCTRACE
         	printk(KERN_INFO "%s:%s Exit on line %d\n",
-			cdev->dev.bus_id,__FUNCTION__,__LINE__);
+			cdev->dev.bus_id,__func__,__LINE__);
 #endif
 	return 0;
 }
@@ -3952,7 +3952,7 @@
 	int ret;
 	struct ccw_dev_id dev_id;
 
-	pr_debug("%s() called\n", __FUNCTION__);
+	pr_debug("%s() called\n", __func__);
 	printk(KERN_INFO "claw: add for %s\n",cgdev->cdev[READ]->dev.bus_id);
 	CLAW_DBF_TEXT(2,setup,"new_dev");
 	privptr = cgdev->dev.driver_data;
@@ -3990,7 +3990,7 @@
 	}
 	dev = alloc_netdev(0,"claw%d",claw_init_netdevice);
 	if (!dev) {
-		printk(KERN_WARNING "%s:alloc_netdev failed\n",__FUNCTION__);
+		printk(KERN_WARNING "%s:alloc_netdev failed\n",__func__);
 		goto out;
 	}
 	dev->priv = privptr;
@@ -4065,7 +4065,7 @@
 	struct net_device *ndev;
 	int	ret;
 
-	pr_debug("%s() called\n", __FUNCTION__);
+	pr_debug("%s() called\n", __func__);
 	CLAW_DBF_TEXT_(2,setup,"%s",cgdev->dev.bus_id);
 	priv = cgdev->dev.driver_data;
 	if (!priv)
@@ -4095,15 +4095,15 @@
 {
 	struct claw_privbk *priv;
 
-	pr_debug("%s() called\n", __FUNCTION__);
+	pr_debug("%s() called\n", __func__);
 	CLAW_DBF_TEXT_(2,setup,"%s",cgdev->dev.bus_id);
 	priv = cgdev->dev.driver_data;
 	if (!priv) {
-		printk(KERN_WARNING "claw: %s() no Priv exiting\n",__FUNCTION__);
+		printk(KERN_WARNING "claw: %s() no Priv exiting\n",__func__);
 		return;
 	}
 	printk(KERN_INFO "claw: %s() called %s will be removed.\n",
-			__FUNCTION__,cgdev->cdev[0]->dev.bus_id);
+			__func__,cgdev->cdev[0]->dev.bus_id);
 	if (cgdev->state == CCWGROUP_ONLINE)
 		claw_shutdown_device(cgdev);
 	claw_remove_files(&cgdev->dev);
@@ -4346,7 +4346,7 @@
 static int
 claw_add_files(struct device *dev)
 {
-	pr_debug("%s() called\n", __FUNCTION__);
+	pr_debug("%s() called\n", __func__);
 	CLAW_DBF_TEXT(2,setup,"add_file");
 	return sysfs_create_group(&dev->kobj, &claw_attr_group);
 }
@@ -4354,7 +4354,7 @@
 static void
 claw_remove_files(struct device *dev)
 {
-	pr_debug("%s() called\n", __FUNCTION__);
+	pr_debug("%s() called\n", __func__);
 	CLAW_DBF_TEXT(2,setup,"rem_file");
 	sysfs_remove_group(&dev->kobj, &claw_attr_group);
 }
@@ -4385,12 +4385,12 @@
 	printk(KERN_INFO "claw: starting driver\n");
 
 #ifdef FUNCTRACE
-        printk(KERN_INFO "claw: %s() enter \n",__FUNCTION__);
+	printk(KERN_INFO "claw: %s() enter \n",__func__);
 #endif
 	ret = claw_register_debug_facility();
 	if (ret) {
 		printk(KERN_WARNING "claw: %s() debug_register failed %d\n",
-			__FUNCTION__,ret);
+			__func__,ret);
 		return ret;
 	}
 	CLAW_DBF_TEXT(2,setup,"init_mod");
@@ -4398,10 +4398,10 @@
 	if (ret) {
 		claw_unregister_debug_facility();
 		printk(KERN_WARNING "claw; %s() cu3088 register failed %d\n",
-			__FUNCTION__,ret);
+			__func__,ret);
 	}
 #ifdef FUNCTRACE
-        printk(KERN_INFO "claw: %s() exit \n",__FUNCTION__);
+	printk(KERN_INFO "claw: %s() exit \n",__func__);
 #endif
 	return ret;
 }
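
The claw.c hunks above are a single mechanical substitution: every use of
GCC's __FUNCTION__ extension in the driver's FUNCTRACE/DEBUGMSG/IOTRACE
printk() calls becomes __func__, the predefined identifier standardized by
C99.  Both expand to the name of the enclosing function, so the generated
messages are unchanged.  A minimal userspace sketch of the entry/exit
tracing idiom (the function name and messages are illustrative, not taken
from the driver):

	#include <stdio.h>

	static int traced_demo(void)
	{
		/* __func__ is the C99 predefined identifier for the
		 * enclosing function's name; __FUNCTION__ is the older
		 * GCC-specific spelling that this patch retires. */
		printf("%s: Enter\n", __func__);
		printf("%s: Exit on line %d\n", __func__, __LINE__);
		return 0;
	}

	int main(void)
	{
		return traced_demo();
	}
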
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 874a199..8f876f6 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -670,7 +670,7 @@
 	struct netiucv_priv *privptr = netdev_priv(conn->netdev);
 	int rc;
 
-	IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 4, __func__);
 
 	if (!conn->netdev) {
 		iucv_message_reject(conn->path, msg);
@@ -718,7 +718,7 @@
 	struct ll_header header;
 	int rc;
 
-	IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 4, __func__);
 
 	if (conn && conn->netdev)
 		privptr = netdev_priv(conn->netdev);
@@ -799,7 +799,7 @@
 	struct netiucv_priv *privptr = netdev_priv(netdev);
 	int rc;
 
-	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 3, __func__);
 
 	conn->path = path;
 	path->msglim = NETIUCV_QUEUELEN_DEFAULT;
@@ -821,7 +821,7 @@
 	struct iucv_event *ev = arg;
 	struct iucv_path *path = ev->data;
 
-	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 3, __func__);
 	iucv_path_sever(path, NULL);
 }
 
@@ -831,7 +831,7 @@
 	struct net_device *netdev = conn->netdev;
 	struct netiucv_priv *privptr = netdev_priv(netdev);
 
-	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 3, __func__);
 	fsm_deltimer(&conn->timer);
 	fsm_newstate(fi, CONN_STATE_IDLE);
 	netdev->tx_queue_len = conn->path->msglim;
@@ -842,7 +842,7 @@
 {
 	struct iucv_connection *conn = arg;
 
-	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 3, __func__);
 	fsm_deltimer(&conn->timer);
 	iucv_path_sever(conn->path, NULL);
 	fsm_newstate(fi, CONN_STATE_STARTWAIT);
@@ -854,7 +854,7 @@
 	struct net_device *netdev = conn->netdev;
 	struct netiucv_priv *privptr = netdev_priv(netdev);
 
-	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 3, __func__);
 
 	fsm_deltimer(&conn->timer);
 	iucv_path_sever(conn->path, NULL);
@@ -870,7 +870,7 @@
 	struct iucv_connection *conn = arg;
 	int rc;
 
-	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 3, __func__);
 
 	fsm_newstate(fi, CONN_STATE_STARTWAIT);
 	PRINT_DEBUG("%s('%s'): connecting ...\n",
@@ -948,7 +948,7 @@
 	struct net_device *netdev = conn->netdev;
 	struct netiucv_priv *privptr = netdev_priv(netdev);
 
-	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 3, __func__);
 
 	fsm_deltimer(&conn->timer);
 	fsm_newstate(fi, CONN_STATE_STOPPED);
@@ -1024,7 +1024,7 @@
 	struct net_device   *dev = arg;
 	struct netiucv_priv *privptr = netdev_priv(dev);
 
-	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 3, __func__);
 
 	fsm_newstate(fi, DEV_STATE_STARTWAIT);
 	fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
@@ -1044,7 +1044,7 @@
 	struct netiucv_priv *privptr = netdev_priv(dev);
 	struct iucv_event   ev;
 
-	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 3, __func__);
 
 	ev.conn = privptr->conn;
 
@@ -1066,7 +1066,7 @@
 	struct net_device   *dev = arg;
 	struct netiucv_priv *privptr = netdev_priv(dev);
 
-	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 3, __func__);
 
 	switch (fsm_getstate(fi)) {
 		case DEV_STATE_STARTWAIT:
@@ -1097,7 +1097,7 @@
 static void
 dev_action_conndown(fsm_instance *fi, int event, void *arg)
 {
-	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 3, __func__);
 
 	switch (fsm_getstate(fi)) {
 		case DEV_STATE_RUNNING:
@@ -1288,7 +1288,7 @@
 	struct netiucv_priv *privptr = netdev_priv(dev);
 	int rc;
 
-	IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 4, __func__);
 	/**
 	 * Some sanity checks ...
 	 */
@@ -1344,7 +1344,7 @@
 {
 	struct netiucv_priv *priv = netdev_priv(dev);
 
-	IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 5, __func__);
 	return &priv->stats;
 }
 
@@ -1360,7 +1360,7 @@
  */
 static int netiucv_change_mtu(struct net_device * dev, int new_mtu)
 {
-	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 3, __func__);
 	if (new_mtu < 576 || new_mtu > NETIUCV_MTU_MAX) {
 		IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
 		return -EINVAL;
@@ -1378,7 +1378,7 @@
 {
 	struct netiucv_priv *priv = dev->driver_data;
 
-	IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 5, __func__);
 	return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
 }
 
@@ -1393,7 +1393,7 @@
 	int 	i;
 	struct iucv_connection *cp;
 
-	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 3, __func__);
 	if (count > 9) {
 		PRINT_WARN("netiucv: username too long (%d)!\n", (int) count);
 		IUCV_DBF_TEXT_(setup, 2,
@@ -1449,7 +1449,7 @@
 			    char *buf)
 {	struct netiucv_priv *priv = dev->driver_data;
 
-	IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 5, __func__);
 	return sprintf(buf, "%d\n", priv->conn->max_buffsize);
 }
 
@@ -1461,7 +1461,7 @@
 	char         *e;
 	int          bs1;
 
-	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 3, __func__);
 	if (count >= 39)
 		return -EINVAL;
 
@@ -1513,7 +1513,7 @@
 {
 	struct netiucv_priv *priv = dev->driver_data;
 
-	IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 5, __func__);
 	return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
 }
 
@@ -1524,7 +1524,7 @@
 {
 	struct netiucv_priv *priv = dev->driver_data;
 
-	IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 5, __func__);
 	return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
 }
 
@@ -1535,7 +1535,7 @@
 {
 	struct netiucv_priv *priv = dev->driver_data;
 
-	IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 5, __func__);
 	return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
 }
 
@@ -1545,7 +1545,7 @@
 {
 	struct netiucv_priv *priv = dev->driver_data;
 
-	IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 4, __func__);
 	priv->conn->prof.maxmulti = 0;
 	return count;
 }
@@ -1557,7 +1557,7 @@
 {
 	struct netiucv_priv *priv = dev->driver_data;
 
-	IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 5, __func__);
 	return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
 }
 
@@ -1566,7 +1566,7 @@
 {
 	struct netiucv_priv *priv = dev->driver_data;
 
-	IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 4, __func__);
 	priv->conn->prof.maxcqueue = 0;
 	return count;
 }
@@ -1578,7 +1578,7 @@
 {
 	struct netiucv_priv *priv = dev->driver_data;
 
-	IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 5, __func__);
 	return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
 }
 
@@ -1587,7 +1587,7 @@
 {
 	struct netiucv_priv *priv = dev->driver_data;
 
-	IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 4, __func__);
 	priv->conn->prof.doios_single = 0;
 	return count;
 }
@@ -1599,7 +1599,7 @@
 {
 	struct netiucv_priv *priv = dev->driver_data;
 
-	IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 5, __func__);
 	return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
 }
 
@@ -1608,7 +1608,7 @@
 {
 	struct netiucv_priv *priv = dev->driver_data;
 
-	IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 5, __func__);
 	priv->conn->prof.doios_multi = 0;
 	return count;
 }
@@ -1620,7 +1620,7 @@
 {
 	struct netiucv_priv *priv = dev->driver_data;
 
-	IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 5, __func__);
 	return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
 }
 
@@ -1629,7 +1629,7 @@
 {
 	struct netiucv_priv *priv = dev->driver_data;
 
-	IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 4, __func__);
 	priv->conn->prof.txlen = 0;
 	return count;
 }
@@ -1641,7 +1641,7 @@
 {
 	struct netiucv_priv *priv = dev->driver_data;
 
-	IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 5, __func__);
 	return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
 }
 
@@ -1650,7 +1650,7 @@
 {
 	struct netiucv_priv *priv = dev->driver_data;
 
-	IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 4, __func__);
 	priv->conn->prof.tx_time = 0;
 	return count;
 }
@@ -1662,7 +1662,7 @@
 {
 	struct netiucv_priv *priv = dev->driver_data;
 
-	IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 5, __func__);
 	return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
 }
 
@@ -1671,7 +1671,7 @@
 {
 	struct netiucv_priv *priv = dev->driver_data;
 
-	IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 4, __func__);
 	priv->conn->prof.tx_pending = 0;
 	return count;
 }
@@ -1683,7 +1683,7 @@
 {
 	struct netiucv_priv *priv = dev->driver_data;
 
-	IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 5, __func__);
 	return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
 }
 
@@ -1692,7 +1692,7 @@
 {
 	struct netiucv_priv *priv = dev->driver_data;
 
-	IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 4, __func__);
 	priv->conn->prof.tx_max_pending = 0;
 	return count;
 }
@@ -1732,7 +1732,7 @@
 {
 	int ret;
 
-	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 3, __func__);
 	ret = sysfs_create_group(&dev->kobj, &netiucv_attr_group);
 	if (ret)
 		return ret;
@@ -1744,7 +1744,7 @@
 
 static void netiucv_remove_files(struct device *dev)
 {
-	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 3, __func__);
 	sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group);
 	sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
 }
@@ -1756,7 +1756,7 @@
 	int ret;
 
 
-	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 3, __func__);
 
 	if (dev) {
 		snprintf(dev->bus_id, BUS_ID_SIZE, "net%s", ndev->name);
@@ -1792,7 +1792,7 @@
 
 static void netiucv_unregister_device(struct device *dev)
 {
-	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 3, __func__);
 	netiucv_remove_files(dev);
 	device_unregister(dev);
 }
@@ -1857,7 +1857,7 @@
  */
 static void netiucv_remove_connection(struct iucv_connection *conn)
 {
-	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 3, __func__);
 	write_lock_bh(&iucv_connection_rwlock);
 	list_del_init(&conn->list);
 	write_unlock_bh(&iucv_connection_rwlock);
@@ -1881,7 +1881,7 @@
 {
 	struct netiucv_priv *privptr = netdev_priv(dev);
 
-	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 3, __func__);
 
 	if (!dev)
 		return;
@@ -1963,7 +1963,7 @@
 	struct netiucv_priv *priv;
 	struct iucv_connection *cp;
 
-	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 3, __func__);
 	if (count>9) {
 		PRINT_WARN("netiucv: username too long (%d)!\n", (int)count);
 		IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
@@ -2048,7 +2048,7 @@
 	const char *p;
         int i;
 
-        IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 3, __func__);
 
         if (count >= IFNAMSIZ)
                 count = IFNAMSIZ - 1;;
@@ -2116,7 +2116,7 @@
 	struct netiucv_priv *priv;
 	struct device *dev;
 
-	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 3, __func__);
 	while (!list_empty(&iucv_connection_list)) {
 		cp = list_entry(iucv_connection_list.next,
 				struct iucv_connection, list);
@@ -2146,8 +2146,7 @@
 	rc = iucv_register(&netiucv_handler, 1);
 	if (rc)
 		goto out_dbf;
-	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
-	netiucv_driver.groups = netiucv_drv_attr_groups;
+	IUCV_DBF_TEXT(trace, 3, __func__);
 	rc = driver_register(&netiucv_driver);
 	if (rc) {
 		PRINT_ERR("NETIUCV: failed to register driver.\n");
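
In netiucv.c the same __FUNCTION__ -> __func__ conversion runs through the
IUCV_DBF_TEXT() trace macro, and one duplicated assignment of
netiucv_driver.groups is dropped from the init path.  Because __func__ is
expanded at the macro's call site, each invocation still records the name
of the calling function.  A compilable stand-in (DBF_TEXT and its fprintf
backend are inventions for illustration, not the s390 debug-feature API):

	#include <stdio.h>

	/* Stand-in for IUCV_DBF_TEXT(): the real macro writes the string
	 * into an s390 debug-feature buffer rather than to stderr. */
	#define DBF_TEXT(level, text) \
		fprintf(stderr, "dbf[%d]: %s\n", (level), (text))

	static void conn_action_start(void)
	{
		/* Records "conn_action_start": __func__ names the caller,
		 * not the macro. */
		DBF_TEXT(3, __func__);
	}

	int main(void)
	{
		conn_action_start();
		return 0;
	}
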
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c
index 644a06e..4d4b542 100644
--- a/drivers/s390/s390mach.c
+++ b/drivers/s390/s390mach.c
@@ -59,15 +59,15 @@
 
 			printk(KERN_WARNING"%s: Code does not support more "
 			       "than two chained crws; please report to "
-			       "linux390@de.ibm.com!\n", __FUNCTION__);
+			       "linux390@de.ibm.com!\n", __func__);
 			ccode = stcrw(&tmp_crw);
 			printk(KERN_WARNING"%s: crw reports slct=%d, oflw=%d, "
 			       "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
-			       __FUNCTION__, tmp_crw.slct, tmp_crw.oflw,
+			       __func__, tmp_crw.slct, tmp_crw.oflw,
 			       tmp_crw.chn, tmp_crw.rsc, tmp_crw.anc,
 			       tmp_crw.erc, tmp_crw.rsid);
 			printk(KERN_WARNING"%s: This was crw number %x in the "
-			       "chain\n", __FUNCTION__, chain);
+			       "chain\n", __func__, chain);
 			if (ccode != 0)
 				break;
 			chain = tmp_crw.chn ? chain + 1 : 0;
@@ -83,7 +83,7 @@
 		       crw[chain].rsid);
 		/* Check for overflows. */
 		if (crw[chain].oflw) {
-			pr_debug("%s: crw overflow detected!\n", __FUNCTION__);
+			pr_debug("%s: crw overflow detected!\n", __func__);
 			css_schedule_eval_all();
 			chain = 0;
 			continue;
diff --git a/drivers/s390/s390mach.h b/drivers/s390/s390mach.h
index d3ca428..ca681f9 100644
--- a/drivers/s390/s390mach.h
+++ b/drivers/s390/s390mach.h
@@ -105,4 +105,8 @@
 #define ED_ETR_SYNC	12	/* External damage ETR sync check */
 #define ED_ETR_SWITCH	13	/* External damage ETR switch to local */
 
+struct pt_regs;
+
+void s390_handle_mcck(void);
+void s390_do_machine_check(struct pt_regs *regs);
 #endif /* __s390mach */
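
The s390mach.h hunk adds a forward declaration of struct pt_regs together
with prototypes for s390_handle_mcck() and s390_do_machine_check(), so the
header can be included without pulling in the full register definition.  A
generic sketch of the pattern with invented names (a prototype only needs
an incomplete type; the full definition is required only where members are
accessed):

	#include <stdio.h>

	struct machine_state;			/* incomplete (opaque) type */
	void handle_check(struct machine_state *st);

	/* The definition would normally live in a .c file or a heavier
	 * header; only code that touches the members needs it. */
	struct machine_state {
		int severity;
	};

	void handle_check(struct machine_state *st)
	{
		st->severity = 0;
		printf("check handled\n");
	}

	int main(void)
	{
		struct machine_state st = { 1 };

		handle_check(&st);
		return 0;
	}
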
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 874b55e..8c7e2b7 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -1030,10 +1030,10 @@
 
 	/* initialize debug locks */
 
-	spin_lock_init(&adapter->erp_dbf_lock);
 	spin_lock_init(&adapter->hba_dbf_lock);
 	spin_lock_init(&adapter->san_dbf_lock);
 	spin_lock_init(&adapter->scsi_dbf_lock);
+	spin_lock_init(&adapter->rec_dbf_lock);
 
 	retval = zfcp_adapter_debug_register(adapter);
 	if (retval)
@@ -1325,10 +1325,10 @@
 
 #define ZFCP_LOG_AREA                   ZFCP_LOG_AREA_FC
 
-static void
-zfcp_fsf_incoming_els_rscn(struct zfcp_adapter *adapter,
-			   struct fsf_status_read_buffer *status_buffer)
+static void zfcp_fsf_incoming_els_rscn(struct zfcp_fsf_req *fsf_req)
 {
+	struct fsf_status_read_buffer *status_buffer = (void*)fsf_req->data;
+	struct zfcp_adapter *adapter = fsf_req->adapter;
 	struct fcp_rscn_head *fcp_rscn_head;
 	struct fcp_rscn_element *fcp_rscn_element;
 	struct zfcp_port *port;
@@ -1375,7 +1375,8 @@
 				ZFCP_LOG_INFO("incoming RSCN, trying to open "
 					      "port 0x%016Lx\n", port->wwpn);
 				zfcp_erp_port_reopen(port,
-						     ZFCP_STATUS_COMMON_ERP_FAILED);
+						     ZFCP_STATUS_COMMON_ERP_FAILED,
+						     82, fsf_req);
 				continue;
 			}
 
@@ -1406,10 +1407,10 @@
 	}
 }
 
-static void
-zfcp_fsf_incoming_els_plogi(struct zfcp_adapter *adapter,
-			    struct fsf_status_read_buffer *status_buffer)
+static void zfcp_fsf_incoming_els_plogi(struct zfcp_fsf_req *fsf_req)
 {
+	struct fsf_status_read_buffer *status_buffer = (void*)fsf_req->data;
+	struct zfcp_adapter *adapter = fsf_req->adapter;
 	struct fsf_plogi *els_plogi;
 	struct zfcp_port *port;
 	unsigned long flags;
@@ -1428,14 +1429,14 @@
 			       status_buffer->d_id,
 			       zfcp_get_busid_by_adapter(adapter));
 	} else {
-		zfcp_erp_port_forced_reopen(port, 0);
+		zfcp_erp_port_forced_reopen(port, 0, 83, fsf_req);
 	}
 }
 
-static void
-zfcp_fsf_incoming_els_logo(struct zfcp_adapter *adapter,
-			   struct fsf_status_read_buffer *status_buffer)
+static void zfcp_fsf_incoming_els_logo(struct zfcp_fsf_req *fsf_req)
 {
+	struct fsf_status_read_buffer *status_buffer = (void*)fsf_req->data;
+	struct zfcp_adapter *adapter = fsf_req->adapter;
 	struct fcp_logo *els_logo = (struct fcp_logo *) status_buffer->payload;
 	struct zfcp_port *port;
 	unsigned long flags;
@@ -1453,7 +1454,7 @@
 			       status_buffer->d_id,
 			       zfcp_get_busid_by_adapter(adapter));
 	} else {
-		zfcp_erp_port_forced_reopen(port, 0);
+		zfcp_erp_port_forced_reopen(port, 0, 84, fsf_req);
 	}
 }
 
@@ -1480,12 +1481,12 @@
 
 	zfcp_san_dbf_event_incoming_els(fsf_req);
 	if (els_type == LS_PLOGI)
-		zfcp_fsf_incoming_els_plogi(adapter, status_buffer);
+		zfcp_fsf_incoming_els_plogi(fsf_req);
 	else if (els_type == LS_LOGO)
-		zfcp_fsf_incoming_els_logo(adapter, status_buffer);
+		zfcp_fsf_incoming_els_logo(fsf_req);
 	else if ((els_type & 0xffff0000) == LS_RSCN)
 		/* we are only concerned with the command, not the length */
-		zfcp_fsf_incoming_els_rscn(adapter, status_buffer);
+		zfcp_fsf_incoming_els_rscn(fsf_req);
 	else
 		zfcp_fsf_incoming_els_unknown(adapter, status_buffer);
 }
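
The zfcp_aux.c hunks change the incoming-ELS handlers from taking an
(adapter, status_buffer) pair to taking the zfcp_fsf_req itself: both
pointers are recoverable from the request, and the request can then be
forwarded to error recovery as the trigger reference alongside the new
numeric trace ids (82, 83, 84).  A compilable sketch of that refactoring,
using invented stand-in types:

	#include <stdio.h>

	struct adapter { const char *bus_id; };
	struct status_buffer { unsigned int d_id; };

	struct request {			/* stand-in for zfcp_fsf_req */
		struct adapter *adapter;
		void *data;			/* points at a status_buffer */
	};

	/* New style: one parameter carries everything the handler needs,
	 * and req itself can be handed on to recovery/tracing. */
	static void handle_els(struct request *req)
	{
		struct adapter *a = req->adapter;
		struct status_buffer *sb = req->data;

		printf("adapter %s, d_id 0x%06x\n", a->bus_id, sb->d_id);
	}

	int main(void)
	{
		struct adapter a = { "0.0.181d" };
		struct status_buffer sb = { 0x123456 };
		struct request req = { &a, &sb };

		handle_els(&req);
		return 0;
	}
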
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index edc5015..66d3b88 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -170,9 +170,10 @@
 	BUG_ON(!zfcp_reqlist_isempty(adapter));
 	adapter->req_no = 0;
 
-	zfcp_erp_modify_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING,
-				       ZFCP_SET);
-	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
+	zfcp_erp_modify_adapter_status(adapter, 10, NULL,
+				       ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
+	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 85,
+				NULL);
 	zfcp_erp_wait(adapter);
 	goto out;
 
@@ -197,7 +198,7 @@
 
 	down(&zfcp_data.config_sema);
 	adapter = dev_get_drvdata(&ccw_device->dev);
-	zfcp_erp_adapter_shutdown(adapter, 0);
+	zfcp_erp_adapter_shutdown(adapter, 0, 86, NULL);
 	zfcp_erp_wait(adapter);
 	zfcp_erp_thread_kill(adapter);
 	up(&zfcp_data.config_sema);
@@ -223,24 +224,21 @@
 	case CIO_GONE:
 		ZFCP_LOG_NORMAL("adapter %s: device gone\n",
 				zfcp_get_busid_by_adapter(adapter));
-		debug_text_event(adapter->erp_dbf,1,"dev_gone");
-		zfcp_erp_adapter_shutdown(adapter, 0);
+		zfcp_erp_adapter_shutdown(adapter, 0, 87, NULL);
 		break;
 	case CIO_NO_PATH:
 		ZFCP_LOG_NORMAL("adapter %s: no path\n",
 				zfcp_get_busid_by_adapter(adapter));
-		debug_text_event(adapter->erp_dbf,1,"no_path");
-		zfcp_erp_adapter_shutdown(adapter, 0);
+		zfcp_erp_adapter_shutdown(adapter, 0, 88, NULL);
 		break;
 	case CIO_OPER:
 		ZFCP_LOG_NORMAL("adapter %s: operational again\n",
 				zfcp_get_busid_by_adapter(adapter));
-		debug_text_event(adapter->erp_dbf,1,"dev_oper");
-		zfcp_erp_modify_adapter_status(adapter,
+		zfcp_erp_modify_adapter_status(adapter, 11, NULL,
 					       ZFCP_STATUS_COMMON_RUNNING,
 					       ZFCP_SET);
-		zfcp_erp_adapter_reopen(adapter,
-					ZFCP_STATUS_COMMON_ERP_FAILED);
+		zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
+					89, NULL);
 		break;
 	}
 	zfcp_erp_wait(adapter);
@@ -272,7 +270,7 @@
 
 	down(&zfcp_data.config_sema);
 	adapter = dev_get_drvdata(&cdev->dev);
-	zfcp_erp_adapter_shutdown(adapter, 0);
+	zfcp_erp_adapter_shutdown(adapter, 0, 90, NULL);
 	zfcp_erp_wait(adapter);
 	up(&zfcp_data.config_sema);
 }
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 701046c..37b85c6 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -31,123 +31,128 @@
 
 #define ZFCP_LOG_AREA			ZFCP_LOG_AREA_OTHER
 
-static int
-zfcp_dbf_stck(char *out_buf, const char *label, unsigned long long stck)
+static void zfcp_dbf_hexdump(debug_info_t *dbf, void *to, int to_len,
+			     int level, char *from, int from_len)
+{
+	int offset;
+	struct zfcp_dbf_dump *dump = to;
+	int room = to_len - sizeof(*dump);
+
+	for (offset = 0; offset < from_len; offset += dump->size) {
+		memset(to, 0, to_len);
+		strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE);
+		dump->total_size = from_len;
+		dump->offset = offset;
+		dump->size = min(from_len - offset, room);
+		memcpy(dump->data, from + offset, dump->size);
+		debug_event(dbf, level, dump, dump->size);
+	}
+}
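
The chunking above is worth seeing in isolation: zfcp_dbf_hexdump() slices an
arbitrarily long payload into records that each carry the total length, their
offset and their size, so a trace viewer can reassemble the buffer. A minimal
standalone sketch of that loop, with an illustrative chunk size rather than
the driver's record layout:

	#include <stdio.h>
	#include <string.h>

	#define CHUNK_ROOM 16	/* hypothetical room per trace record */

	static void emit_chunks(const char *from, int from_len)
	{
		int offset, size;

		for (offset = 0; offset < from_len; offset += size) {
			size = from_len - offset;
			if (size > CHUNK_ROOM)
				size = CHUNK_ROOM;
			/* total/offset/size let a viewer stitch chunks back */
			printf("chunk: total=%d offset=%d size=%d\n",
			       from_len, offset, size);
		}
	}

	int main(void)
	{
		char payload[40];

		memset(payload, 0xab, sizeof(payload));
		emit_chunks(payload, sizeof(payload));	/* 3 chunks: 16+16+8 */
		return 0;
	}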
+
+/* FIXME: this duplicates code in the s390 debug feature */
+static void zfcp_dbf_timestamp(unsigned long long stck, struct timespec *time)
 {
 	unsigned long long sec;
-	struct timespec dbftime;
-	int len = 0;
 
 	stck -= 0x8126d60e46000000LL - (0x3c26700LL * 1000000 * 4096);
 	sec = stck >> 12;
 	do_div(sec, 1000000);
-	dbftime.tv_sec = sec;
+	time->tv_sec = sec;
 	stck -= (sec * 1000000) << 12;
-	dbftime.tv_nsec = ((stck * 1000) >> 12);
-	len += sprintf(out_buf + len, "%-24s%011lu:%06lu\n",
-		       label, dbftime.tv_sec, dbftime.tv_nsec);
-
-	return len;
+	time->tv_nsec = ((stck * 1000) >> 12);
 }
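
The conversion deserves a word: an s390 STCK value counts from the TOD epoch
(1900-01-01 UTC) with bit 51 equal to one microsecond, so the constant shifts
the value to the Unix epoch and the shifts by 12 move between TOD units and
microseconds. A standalone sketch of the same arithmetic, using plain
userspace division instead of do_div() and a simplified timespec:

	#include <stdio.h>

	struct ts { unsigned long sec; unsigned long nsec; };

	static void stck_to_ts(unsigned long long stck, struct ts *t)
	{
		unsigned long long sec;

		stck -= 0x8126d60e46000000ULL - (0x3c26700ULL * 1000000 * 4096);
		sec = (stck >> 12) / 1000000;	/* TOD bit 51 = 1 microsecond */
		t->sec = sec;
		stck -= (sec * 1000000) << 12;	/* sub-second remainder */
		t->nsec = (stck * 1000) >> 12;
	}

	int main(void)
	{
		struct ts t;

		stck_to_ts(0x8126d60e46000000ULL, &t);
		printf("%011lu:%06lu\n", t.sec, t.nsec);	/* driver's format */
		return 0;
	}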
 
-static int zfcp_dbf_tag(char *out_buf, const char *label, const char *tag)
+static void zfcp_dbf_tag(char **p, const char *label, const char *tag)
 {
-	int len = 0, i;
+	int i;
 
-	len += sprintf(out_buf + len, "%-24s", label);
+	*p += sprintf(*p, "%-24s", label);
 	for (i = 0; i < ZFCP_DBF_TAG_SIZE; i++)
-		len += sprintf(out_buf + len, "%c", tag[i]);
-	len += sprintf(out_buf + len, "\n");
-
-	return len;
+		*p += sprintf(*p, "%c", tag[i]);
+	*p += sprintf(*p, "\n");
 }
 
-static int
-zfcp_dbf_view(char *out_buf, const char *label, const char *format, ...)
+static void zfcp_dbf_outs(char **buf, const char *s1, const char *s2)
+{
+	*buf += sprintf(*buf, "%-24s%s\n", s1, s2);
+}
+
+static void zfcp_dbf_out(char **buf, const char *s, const char *format, ...)
 {
 	va_list arg;
-	int len = 0;
 
-	len += sprintf(out_buf + len, "%-24s", label);
+	*buf += sprintf(*buf, "%-24s", s);
 	va_start(arg, format);
-	len += vsprintf(out_buf + len, format, arg);
+	*buf += vsprintf(*buf, format, arg);
 	va_end(arg);
-	len += sprintf(out_buf + len, "\n");
-
-	return len;
+	*buf += sprintf(*buf, "\n");
 }
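
The two helpers above replace the old return-a-length idiom with a cursor that
the callee advances, which is what lets every view routine below drop its
running "len". A standalone sketch of the pattern, with hypothetical labels
and a fixed buffer:

	#include <stdarg.h>
	#include <stdio.h>

	static void out(char **p, const char *label, const char *fmt, ...)
	{
		va_list args;

		*p += sprintf(*p, "%-24s", label);	/* padded label column */
		va_start(args, fmt);
		*p += vsprintf(*p, fmt, args);		/* formatted value */
		va_end(args);
		*p += sprintf(*p, "\n");
	}

	int main(void)
	{
		char buf[128];
		char *p = buf;

		out(&p, "fsf_seqno", "0x%08x", 42u);
		out(&p, "pool", "0x%02x", 1u);
		printf("%s(%d bytes written)\n", buf, (int)(p - buf));
		return 0;
	}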
 
-static int
-zfcp_dbf_view_dump(char *out_buf, const char *label,
-		   char *buffer, int buflen, int offset, int total_size)
+static void zfcp_dbf_outd(char **p, const char *label, char *buffer,
+			  int buflen, int offset, int total_size)
 {
-	int len = 0;
-
-	if (offset == 0)
-		len += sprintf(out_buf + len, "%-24s  ", label);
-
+	if (!offset)
+		*p += sprintf(*p, "%-24s  ", label);
 	while (buflen--) {
 		if (offset > 0) {
 			if ((offset % 32) == 0)
-				len += sprintf(out_buf + len, "\n%-24c  ", ' ');
+				*p += sprintf(*p, "\n%-24c  ", ' ');
 			else if ((offset % 4) == 0)
-				len += sprintf(out_buf + len, " ");
+				*p += sprintf(*p, " ");
 		}
-		len += sprintf(out_buf + len, "%02x", *buffer++);
+		*p += sprintf(*p, "%02x", *buffer++);
 		if (++offset == total_size) {
-			len += sprintf(out_buf + len, "\n");
+			*p += sprintf(*p, "\n");
 			break;
 		}
 	}
-
-	if (total_size == 0)
-		len += sprintf(out_buf + len, "\n");
-
-	return len;
+	if (!total_size)
+		*p += sprintf(*p, "\n");
 }
 
-static int
-zfcp_dbf_view_header(debug_info_t * id, struct debug_view *view, int area,
-		     debug_entry_t * entry, char *out_buf)
+static int zfcp_dbf_view_header(debug_info_t *id, struct debug_view *view,
+				int area, debug_entry_t *entry, char *out_buf)
 {
 	struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)DEBUG_DATA(entry);
-	int len = 0;
+	struct timespec t;
+	char *p = out_buf;
 
 	if (strncmp(dump->tag, "dump", ZFCP_DBF_TAG_SIZE) != 0) {
-		len += zfcp_dbf_stck(out_buf + len, "timestamp",
-				     entry->id.stck);
-		len += zfcp_dbf_view(out_buf + len, "cpu", "%02i",
-				     entry->id.fields.cpuid);
-	} else {
-		len += zfcp_dbf_view_dump(out_buf + len, NULL,
-					  dump->data,
-					  dump->size,
-					  dump->offset, dump->total_size);
+		zfcp_dbf_timestamp(entry->id.stck, &t);
+		zfcp_dbf_out(&p, "timestamp", "%011lu:%06lu",
+			     t.tv_sec, t.tv_nsec);
+		zfcp_dbf_out(&p, "cpu", "%02i", entry->id.fields.cpuid);
+	} else {
+		zfcp_dbf_outd(&p, NULL, dump->data, dump->size, dump->offset,
+			      dump->total_size);
 		if ((dump->offset + dump->size) == dump->total_size)
-			len += sprintf(out_buf + len, "\n");
+			p += sprintf(p, "\n");
 	}
-
-	return len;
+	return p - out_buf;
 }
 
+/**
+ * zfcp_hba_dbf_event_fsf_response - trace event for request completion
+ * @fsf_req: request that has been completed
+ */
 void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req)
 {
 	struct zfcp_adapter *adapter = fsf_req->adapter;
 	struct fsf_qtcb *qtcb = fsf_req->qtcb;
 	union fsf_prot_status_qual *prot_status_qual =
-	    &qtcb->prefix.prot_status_qual;
+					&qtcb->prefix.prot_status_qual;
 	union fsf_status_qual *fsf_status_qual = &qtcb->header.fsf_status_qual;
 	struct scsi_cmnd *scsi_cmnd;
 	struct zfcp_port *port;
 	struct zfcp_unit *unit;
 	struct zfcp_send_els *send_els;
 	struct zfcp_hba_dbf_record *rec = &adapter->hba_dbf_buf;
-	struct zfcp_hba_dbf_record_response *response = &rec->type.response;
+	struct zfcp_hba_dbf_record_response *response = &rec->u.response;
 	int level;
 	unsigned long flags;
 
 	spin_lock_irqsave(&adapter->hba_dbf_lock, flags);
-	memset(rec, 0, sizeof(struct zfcp_hba_dbf_record));
+	memset(rec, 0, sizeof(*rec));
 	strncpy(rec->tag, "resp", ZFCP_DBF_TAG_SIZE);
 
 	if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
@@ -161,6 +166,9 @@
 		   (fsf_req->fsf_command == FSF_QTCB_OPEN_LUN)) {
 		strncpy(rec->tag2, "open", ZFCP_DBF_TAG_SIZE);
 		level = 4;
+	} else if (qtcb->header.log_length) {
+		strncpy(rec->tag2, "qtcb", ZFCP_DBF_TAG_SIZE);
+		level = 5;
 	} else {
 		strncpy(rec->tag2, "norm", ZFCP_DBF_TAG_SIZE);
 		level = 6;
@@ -188,11 +196,9 @@
 		if (fsf_req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)
 			break;
 		scsi_cmnd = (struct scsi_cmnd *)fsf_req->data;
-		if (scsi_cmnd != NULL) {
-			response->data.send_fcp.scsi_cmnd
-			    = (unsigned long)scsi_cmnd;
-			response->data.send_fcp.scsi_serial
-			    = scsi_cmnd->serial_number;
+		if (scsi_cmnd) {
+			response->u.fcp.cmnd = (unsigned long)scsi_cmnd;
+			response->u.fcp.serial = scsi_cmnd->serial_number;
 		}
 		break;
 
@@ -200,25 +206,25 @@
 	case FSF_QTCB_CLOSE_PORT:
 	case FSF_QTCB_CLOSE_PHYSICAL_PORT:
 		port = (struct zfcp_port *)fsf_req->data;
-		response->data.port.wwpn = port->wwpn;
-		response->data.port.d_id = port->d_id;
-		response->data.port.port_handle = qtcb->header.port_handle;
+		response->u.port.wwpn = port->wwpn;
+		response->u.port.d_id = port->d_id;
+		response->u.port.port_handle = qtcb->header.port_handle;
 		break;
 
 	case FSF_QTCB_OPEN_LUN:
 	case FSF_QTCB_CLOSE_LUN:
 		unit = (struct zfcp_unit *)fsf_req->data;
 		port = unit->port;
-		response->data.unit.wwpn = port->wwpn;
-		response->data.unit.fcp_lun = unit->fcp_lun;
-		response->data.unit.port_handle = qtcb->header.port_handle;
-		response->data.unit.lun_handle = qtcb->header.lun_handle;
+		response->u.unit.wwpn = port->wwpn;
+		response->u.unit.fcp_lun = unit->fcp_lun;
+		response->u.unit.port_handle = qtcb->header.port_handle;
+		response->u.unit.lun_handle = qtcb->header.lun_handle;
 		break;
 
 	case FSF_QTCB_SEND_ELS:
 		send_els = (struct zfcp_send_els *)fsf_req->data;
-		response->data.send_els.d_id = qtcb->bottom.support.d_id;
-		response->data.send_els.ls_code = send_els->ls_code >> 24;
+		response->u.els.d_id = qtcb->bottom.support.d_id;
+		response->u.els.ls_code = send_els->ls_code >> 24;
 		break;
 
 	case FSF_QTCB_ABORT_FCP_CMND:
@@ -230,39 +236,54 @@
 		break;
 	}
 
-	debug_event(adapter->hba_dbf, level,
-		    rec, sizeof(struct zfcp_hba_dbf_record));
+	debug_event(adapter->hba_dbf, level, rec, sizeof(*rec));
+
+	/* have the FCP channel microcode fixed to log as little as possible */
+	if (fsf_req->fsf_command != FSF_QTCB_FCP_CMND) {
+		/* adjust length skipping trailing zeros */
+		char *buf = (char *)qtcb + qtcb->header.log_start;
+		int len = qtcb->header.log_length;
+		for (; len && !buf[len - 1]; len--);
+		zfcp_dbf_hexdump(adapter->hba_dbf, rec, sizeof(*rec), level,
+				 buf, len);
+	}
+
 	spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
 }
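
The trim before the hexdump above is a one-liner that is easy to misread: it
walks len backwards past trailing zero bytes so only the used part of the
QTCB log area is traced. A standalone sketch:

	#include <stdio.h>

	int main(void)
	{
		char buf[8] = { 'l', 'o', 'g', 0, 0, 0, 0, 0 };
		int len = sizeof(buf);

		for (; len && !buf[len - 1]; len--)
			;	/* drop trailing zeros */
		printf("trimmed length: %d\n", len);	/* prints 3 */
		return 0;
	}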
 
-void
-zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter,
-			     struct fsf_status_read_buffer *status_buffer)
+/**
+ * zfcp_hba_dbf_event_fsf_unsol - trace event for an unsolicited status buffer
+ * @tag: tag indicating which kind of unsolicited status has been received
+ * @adapter: adapter that has issued the unsolicited status buffer
+ * @status_buffer: buffer containing payload of unsolicited status
+ */
+void zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter,
+				  struct fsf_status_read_buffer *status_buffer)
 {
 	struct zfcp_hba_dbf_record *rec = &adapter->hba_dbf_buf;
 	unsigned long flags;
 
 	spin_lock_irqsave(&adapter->hba_dbf_lock, flags);
-	memset(rec, 0, sizeof(struct zfcp_hba_dbf_record));
+	memset(rec, 0, sizeof(*rec));
 	strncpy(rec->tag, "stat", ZFCP_DBF_TAG_SIZE);
 	strncpy(rec->tag2, tag, ZFCP_DBF_TAG_SIZE);
 
-	rec->type.status.failed = adapter->status_read_failed;
+	rec->u.status.failed = adapter->status_read_failed;
 	if (status_buffer != NULL) {
-		rec->type.status.status_type = status_buffer->status_type;
-		rec->type.status.status_subtype = status_buffer->status_subtype;
-		memcpy(&rec->type.status.queue_designator,
+		rec->u.status.status_type = status_buffer->status_type;
+		rec->u.status.status_subtype = status_buffer->status_subtype;
+		memcpy(&rec->u.status.queue_designator,
 		       &status_buffer->queue_designator,
 		       sizeof(struct fsf_queue_designator));
 
 		switch (status_buffer->status_type) {
 		case FSF_STATUS_READ_SENSE_DATA_AVAIL:
-			rec->type.status.payload_size =
+			rec->u.status.payload_size =
 			    ZFCP_DBF_UNSOL_PAYLOAD_SENSE_DATA_AVAIL;
 			break;
 
 		case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
-			rec->type.status.payload_size =
+			rec->u.status.payload_size =
 			    ZFCP_DBF_UNSOL_PAYLOAD_BIT_ERROR_THRESHOLD;
 			break;
 
@@ -270,119 +291,101 @@
 			switch (status_buffer->status_subtype) {
 			case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
 			case FSF_STATUS_READ_SUB_FDISC_FAILED:
-				rec->type.status.payload_size =
+				rec->u.status.payload_size =
 					sizeof(struct fsf_link_down_info);
 			}
 			break;
 
 		case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
-			rec->type.status.payload_size =
+			rec->u.status.payload_size =
 			    ZFCP_DBF_UNSOL_PAYLOAD_FEATURE_UPDATE_ALERT;
 			break;
 		}
-		memcpy(&rec->type.status.payload,
-		       &status_buffer->payload, rec->type.status.payload_size);
+		memcpy(&rec->u.status.payload,
+		       &status_buffer->payload, rec->u.status.payload_size);
 	}
 
-	debug_event(adapter->hba_dbf, 2,
-		    rec, sizeof(struct zfcp_hba_dbf_record));
+	debug_event(adapter->hba_dbf, 2, rec, sizeof(*rec));
 	spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
 }
 
-void
-zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status,
-			unsigned int qdio_error, unsigned int siga_error,
-			int sbal_index, int sbal_count)
+/**
+ * zfcp_hba_dbf_event_qdio - trace event for QDIO related failure
+ * @adapter: adapter affected by this QDIO related event
+ * @status: as passed by qdio module
+ * @qdio_error: as passed by qdio module
+ * @siga_error: as passed by qdio module
+ * @sbal_index: first buffer with error condition, as passed by qdio module
+ * @sbal_count: number of buffers affected, as passed by qdio module
+ */
+void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status,
+			     unsigned int qdio_error, unsigned int siga_error,
+			     int sbal_index, int sbal_count)
 {
-	struct zfcp_hba_dbf_record *rec = &adapter->hba_dbf_buf;
+	struct zfcp_hba_dbf_record *r = &adapter->hba_dbf_buf;
 	unsigned long flags;
 
 	spin_lock_irqsave(&adapter->hba_dbf_lock, flags);
-	memset(rec, 0, sizeof(struct zfcp_hba_dbf_record));
-	strncpy(rec->tag, "qdio", ZFCP_DBF_TAG_SIZE);
-	rec->type.qdio.status = status;
-	rec->type.qdio.qdio_error = qdio_error;
-	rec->type.qdio.siga_error = siga_error;
-	rec->type.qdio.sbal_index = sbal_index;
-	rec->type.qdio.sbal_count = sbal_count;
-	debug_event(adapter->hba_dbf, 0,
-		    rec, sizeof(struct zfcp_hba_dbf_record));
+	memset(r, 0, sizeof(*r));
+	strncpy(r->tag, "qdio", ZFCP_DBF_TAG_SIZE);
+	r->u.qdio.status = status;
+	r->u.qdio.qdio_error = qdio_error;
+	r->u.qdio.siga_error = siga_error;
+	r->u.qdio.sbal_index = sbal_index;
+	r->u.qdio.sbal_count = sbal_count;
+	debug_event(adapter->hba_dbf, 0, r, sizeof(*r));
 	spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
 }
 
-static int
-zfcp_hba_dbf_view_response(char *out_buf,
-			   struct zfcp_hba_dbf_record_response *rec)
+static void zfcp_hba_dbf_view_response(char **p,
+				       struct zfcp_hba_dbf_record_response *r)
 {
-	int len = 0;
+	struct timespec t;
 
-	len += zfcp_dbf_view(out_buf + len, "fsf_command", "0x%08x",
-			     rec->fsf_command);
-	len += zfcp_dbf_view(out_buf + len, "fsf_reqid", "0x%0Lx",
-			     rec->fsf_reqid);
-	len += zfcp_dbf_view(out_buf + len, "fsf_seqno", "0x%08x",
-			     rec->fsf_seqno);
-	len += zfcp_dbf_stck(out_buf + len, "fsf_issued", rec->fsf_issued);
-	len += zfcp_dbf_view(out_buf + len, "fsf_prot_status", "0x%08x",
-			     rec->fsf_prot_status);
-	len += zfcp_dbf_view(out_buf + len, "fsf_status", "0x%08x",
-			     rec->fsf_status);
-	len += zfcp_dbf_view_dump(out_buf + len, "fsf_prot_status_qual",
-				  rec->fsf_prot_status_qual,
-				  FSF_PROT_STATUS_QUAL_SIZE,
-				  0, FSF_PROT_STATUS_QUAL_SIZE);
-	len += zfcp_dbf_view_dump(out_buf + len, "fsf_status_qual",
-				  rec->fsf_status_qual,
-				  FSF_STATUS_QUALIFIER_SIZE,
-				  0, FSF_STATUS_QUALIFIER_SIZE);
-	len += zfcp_dbf_view(out_buf + len, "fsf_req_status", "0x%08x",
-			     rec->fsf_req_status);
-	len += zfcp_dbf_view(out_buf + len, "sbal_first", "0x%02x",
-			     rec->sbal_first);
-	len += zfcp_dbf_view(out_buf + len, "sbal_curr", "0x%02x",
-			     rec->sbal_curr);
-	len += zfcp_dbf_view(out_buf + len, "sbal_last", "0x%02x",
-			     rec->sbal_last);
-	len += zfcp_dbf_view(out_buf + len, "pool", "0x%02x", rec->pool);
+	zfcp_dbf_out(p, "fsf_command", "0x%08x", r->fsf_command);
+	zfcp_dbf_out(p, "fsf_reqid", "0x%0Lx", r->fsf_reqid);
+	zfcp_dbf_out(p, "fsf_seqno", "0x%08x", r->fsf_seqno);
+	zfcp_dbf_timestamp(r->fsf_issued, &t);
+	zfcp_dbf_out(p, "fsf_issued", "%011lu:%06lu", t.tv_sec, t.tv_nsec);
+	zfcp_dbf_out(p, "fsf_prot_status", "0x%08x", r->fsf_prot_status);
+	zfcp_dbf_out(p, "fsf_status", "0x%08x", r->fsf_status);
+	zfcp_dbf_outd(p, "fsf_prot_status_qual", r->fsf_prot_status_qual,
+		      FSF_PROT_STATUS_QUAL_SIZE, 0, FSF_PROT_STATUS_QUAL_SIZE);
+	zfcp_dbf_outd(p, "fsf_status_qual", r->fsf_status_qual,
+		      FSF_STATUS_QUALIFIER_SIZE, 0, FSF_STATUS_QUALIFIER_SIZE);
+	zfcp_dbf_out(p, "fsf_req_status", "0x%08x", r->fsf_req_status);
+	zfcp_dbf_out(p, "sbal_first", "0x%02x", r->sbal_first);
+	zfcp_dbf_out(p, "sbal_curr", "0x%02x", r->sbal_curr);
+	zfcp_dbf_out(p, "sbal_last", "0x%02x", r->sbal_last);
+	zfcp_dbf_out(p, "pool", "0x%02x", r->pool);
 
-	switch (rec->fsf_command) {
+	switch (r->fsf_command) {
 	case FSF_QTCB_FCP_CMND:
-		if (rec->fsf_req_status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)
+		if (r->fsf_req_status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)
 			break;
-		len += zfcp_dbf_view(out_buf + len, "scsi_cmnd", "0x%0Lx",
-				     rec->data.send_fcp.scsi_cmnd);
-		len += zfcp_dbf_view(out_buf + len, "scsi_serial", "0x%016Lx",
-				     rec->data.send_fcp.scsi_serial);
+		zfcp_dbf_out(p, "scsi_cmnd", "0x%0Lx", r->u.fcp.cmnd);
+		zfcp_dbf_out(p, "scsi_serial", "0x%016Lx", r->u.fcp.serial);
 		break;
 
 	case FSF_QTCB_OPEN_PORT_WITH_DID:
 	case FSF_QTCB_CLOSE_PORT:
 	case FSF_QTCB_CLOSE_PHYSICAL_PORT:
-		len += zfcp_dbf_view(out_buf + len, "wwpn", "0x%016Lx",
-				     rec->data.port.wwpn);
-		len += zfcp_dbf_view(out_buf + len, "d_id", "0x%06x",
-				     rec->data.port.d_id);
-		len += zfcp_dbf_view(out_buf + len, "port_handle", "0x%08x",
-				     rec->data.port.port_handle);
+		zfcp_dbf_out(p, "wwpn", "0x%016Lx", r->u.port.wwpn);
+		zfcp_dbf_out(p, "d_id", "0x%06x", r->u.port.d_id);
+		zfcp_dbf_out(p, "port_handle", "0x%08x", r->u.port.port_handle);
 		break;
 
 	case FSF_QTCB_OPEN_LUN:
 	case FSF_QTCB_CLOSE_LUN:
-		len += zfcp_dbf_view(out_buf + len, "wwpn", "0x%016Lx",
-				     rec->data.unit.wwpn);
-		len += zfcp_dbf_view(out_buf + len, "fcp_lun", "0x%016Lx",
-				     rec->data.unit.fcp_lun);
-		len += zfcp_dbf_view(out_buf + len, "port_handle", "0x%08x",
-				     rec->data.unit.port_handle);
-		len += zfcp_dbf_view(out_buf + len, "lun_handle", "0x%08x",
-				     rec->data.unit.lun_handle);
+		zfcp_dbf_out(p, "wwpn", "0x%016Lx", r->u.unit.wwpn);
+		zfcp_dbf_out(p, "fcp_lun", "0x%016Lx", r->u.unit.fcp_lun);
+		zfcp_dbf_out(p, "port_handle", "0x%08x", r->u.unit.port_handle);
+		zfcp_dbf_out(p, "lun_handle", "0x%08x", r->u.unit.lun_handle);
 		break;
 
 	case FSF_QTCB_SEND_ELS:
-		len += zfcp_dbf_view(out_buf + len, "d_id", "0x%06x",
-				     rec->data.send_els.d_id);
-		len += zfcp_dbf_view(out_buf + len, "ls_code", "0x%02x",
-				     rec->data.send_els.ls_code);
+		zfcp_dbf_out(p, "d_id", "0x%06x", r->u.els.d_id);
+		zfcp_dbf_out(p, "ls_code", "0x%02x", r->u.els.ls_code);
 		break;
 
 	case FSF_QTCB_ABORT_FCP_CMND:
@@ -393,74 +396,52 @@
 	case FSF_QTCB_UPLOAD_CONTROL_FILE:
 		break;
 	}
-
-	return len;
 }
 
-static int
-zfcp_hba_dbf_view_status(char *out_buf, struct zfcp_hba_dbf_record_status *rec)
+static void zfcp_hba_dbf_view_status(char **p,
+				     struct zfcp_hba_dbf_record_status *r)
 {
-	int len = 0;
-
-	len += zfcp_dbf_view(out_buf + len, "failed", "0x%02x", rec->failed);
-	len += zfcp_dbf_view(out_buf + len, "status_type", "0x%08x",
-			     rec->status_type);
-	len += zfcp_dbf_view(out_buf + len, "status_subtype", "0x%08x",
-			     rec->status_subtype);
-	len += zfcp_dbf_view_dump(out_buf + len, "queue_designator",
-				  (char *)&rec->queue_designator,
-				  sizeof(struct fsf_queue_designator),
-				  0, sizeof(struct fsf_queue_designator));
-	len += zfcp_dbf_view_dump(out_buf + len, "payload",
-				  (char *)&rec->payload,
-				  rec->payload_size, 0, rec->payload_size);
-
-	return len;
+	zfcp_dbf_out(p, "failed", "0x%02x", r->failed);
+	zfcp_dbf_out(p, "status_type", "0x%08x", r->status_type);
+	zfcp_dbf_out(p, "status_subtype", "0x%08x", r->status_subtype);
+	zfcp_dbf_outd(p, "queue_designator", (char *)&r->queue_designator,
+		      sizeof(struct fsf_queue_designator), 0,
+		      sizeof(struct fsf_queue_designator));
+	zfcp_dbf_outd(p, "payload", (char *)&r->payload, r->payload_size, 0,
+		      r->payload_size);
 }
 
-static int
-zfcp_hba_dbf_view_qdio(char *out_buf, struct zfcp_hba_dbf_record_qdio *rec)
+static void zfcp_hba_dbf_view_qdio(char **p, struct zfcp_hba_dbf_record_qdio *r)
 {
-	int len = 0;
-
-	len += zfcp_dbf_view(out_buf + len, "status", "0x%08x", rec->status);
-	len += zfcp_dbf_view(out_buf + len, "qdio_error", "0x%08x",
-			     rec->qdio_error);
-	len += zfcp_dbf_view(out_buf + len, "siga_error", "0x%08x",
-			     rec->siga_error);
-	len += zfcp_dbf_view(out_buf + len, "sbal_index", "0x%02x",
-			     rec->sbal_index);
-	len += zfcp_dbf_view(out_buf + len, "sbal_count", "0x%02x",
-			     rec->sbal_count);
-
-	return len;
+	zfcp_dbf_out(p, "status", "0x%08x", r->status);
+	zfcp_dbf_out(p, "qdio_error", "0x%08x", r->qdio_error);
+	zfcp_dbf_out(p, "siga_error", "0x%08x", r->siga_error);
+	zfcp_dbf_out(p, "sbal_index", "0x%02x", r->sbal_index);
+	zfcp_dbf_out(p, "sbal_count", "0x%02x", r->sbal_count);
 }
 
-static int
-zfcp_hba_dbf_view_format(debug_info_t * id, struct debug_view *view,
-			 char *out_buf, const char *in_buf)
+static int zfcp_hba_dbf_view_format(debug_info_t *id, struct debug_view *view,
+				    char *out_buf, const char *in_buf)
 {
-	struct zfcp_hba_dbf_record *rec = (struct zfcp_hba_dbf_record *)in_buf;
-	int len = 0;
+	struct zfcp_hba_dbf_record *r = (struct zfcp_hba_dbf_record *)in_buf;
+	char *p = out_buf;
 
-	if (strncmp(rec->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0)
+	if (strncmp(r->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0)
 		return 0;
 
-	len += zfcp_dbf_tag(out_buf + len, "tag", rec->tag);
-	if (isalpha(rec->tag2[0]))
-		len += zfcp_dbf_tag(out_buf + len, "tag2", rec->tag2);
-	if (strncmp(rec->tag, "resp", ZFCP_DBF_TAG_SIZE) == 0)
-		len += zfcp_hba_dbf_view_response(out_buf + len,
-						  &rec->type.response);
-	else if (strncmp(rec->tag, "stat", ZFCP_DBF_TAG_SIZE) == 0)
-		len += zfcp_hba_dbf_view_status(out_buf + len,
-						&rec->type.status);
-	else if (strncmp(rec->tag, "qdio", ZFCP_DBF_TAG_SIZE) == 0)
-		len += zfcp_hba_dbf_view_qdio(out_buf + len, &rec->type.qdio);
+	zfcp_dbf_tag(&p, "tag", r->tag);
+	if (isalpha(r->tag2[0]))
+		zfcp_dbf_tag(&p, "tag2", r->tag2);
 
-	len += sprintf(out_buf + len, "\n");
+	if (strncmp(r->tag, "resp", ZFCP_DBF_TAG_SIZE) == 0)
+		zfcp_hba_dbf_view_response(&p, &r->u.response);
+	else if (strncmp(r->tag, "stat", ZFCP_DBF_TAG_SIZE) == 0)
+		zfcp_hba_dbf_view_status(&p, &r->u.status);
+	else if (strncmp(r->tag, "qdio", ZFCP_DBF_TAG_SIZE) == 0)
+		zfcp_hba_dbf_view_qdio(&p, &r->u.qdio);
 
-	return len;
+	p += sprintf(p, "\n");
+	return p - out_buf;
 }
 
 static struct debug_view zfcp_hba_dbf_view = {
@@ -472,219 +453,570 @@
 	NULL
 };
 
-static void
-_zfcp_san_dbf_event_common_ct(const char *tag, struct zfcp_fsf_req *fsf_req,
-			      u32 s_id, u32 d_id, void *buffer, int buflen)
-{
-	struct zfcp_send_ct *send_ct = (struct zfcp_send_ct *)fsf_req->data;
-	struct zfcp_port *port = send_ct->port;
-	struct zfcp_adapter *adapter = port->adapter;
-	struct ct_hdr *header = (struct ct_hdr *)buffer;
-	struct zfcp_san_dbf_record *rec = &adapter->san_dbf_buf;
-	struct zfcp_san_dbf_record_ct *ct = &rec->type.ct;
-	unsigned long flags;
+static const char *zfcp_rec_dbf_tags[] = {
+	[ZFCP_REC_DBF_ID_THREAD] = "thread",
+	[ZFCP_REC_DBF_ID_TARGET] = "target",
+	[ZFCP_REC_DBF_ID_TRIGGER] = "trigger",
+	[ZFCP_REC_DBF_ID_ACTION] = "action",
+};
 
-	spin_lock_irqsave(&adapter->san_dbf_lock, flags);
-	memset(rec, 0, sizeof(struct zfcp_san_dbf_record));
-	strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
-	rec->fsf_reqid = (unsigned long)fsf_req;
-	rec->fsf_seqno = fsf_req->seq_no;
-	rec->s_id = s_id;
-	rec->d_id = d_id;
-	if (strncmp(tag, "octc", ZFCP_DBF_TAG_SIZE) == 0) {
-		ct->type.request.cmd_req_code = header->cmd_rsp_code;
-		ct->type.request.revision = header->revision;
-		ct->type.request.gs_type = header->gs_type;
-		ct->type.request.gs_subtype = header->gs_subtype;
-		ct->type.request.options = header->options;
-		ct->type.request.max_res_size = header->max_res_size;
-	} else if (strncmp(tag, "rctc", ZFCP_DBF_TAG_SIZE) == 0) {
-		ct->type.response.cmd_rsp_code = header->cmd_rsp_code;
-		ct->type.response.revision = header->revision;
-		ct->type.response.reason_code = header->reason_code;
-		ct->type.response.reason_code_expl = header->reason_code_expl;
-		ct->type.response.vendor_unique = header->vendor_unique;
+static const char *zfcp_rec_dbf_ids[] = {
+	[1]	= "new",
+	[2]	= "ready",
+	[3]	= "kill",
+	[4]	= "down sleep",
+	[5]	= "down wakeup",
+	[6]	= "down sleep ecd",
+	[7]	= "down wakeup ecd",
+	[8]	= "down sleep epd",
+	[9]	= "down wakeup epd",
+	[10]	= "online",
+	[11]	= "operational",
+	[12]	= "scsi slave destroy",
+	[13]	= "propagate failed adapter",
+	[14]	= "propagate failed port",
+	[15]	= "block adapter",
+	[16]	= "unblock adapter",
+	[17]	= "block port",
+	[18]	= "unblock port",
+	[19]	= "block unit",
+	[20]	= "unblock unit",
+	[21]	= "unit recovery failed",
+	[22]	= "port recovery failed",
+	[23]	= "adapter recovery failed",
+	[24]	= "qdio queues down",
+	[25]	= "p2p failed",
+	[26]	= "nameserver lookup failed",
+	[27]	= "nameserver port failed",
+	[28]	= "link up",
+	[29]	= "link down",
+	[30]	= "link up status read",
+	[31]	= "open port failed",
+	[32]	= "open port failed",
+	[33]	= "close port",
+	[34]	= "open unit failed",
+	[35]	= "exclusive open unit failed",
+	[36]	= "shared open unit failed",
+	[37]	= "link down",
+	[38]	= "link down status read no link",
+	[39]	= "link down status read fdisc login",
+	[40]	= "link down status read firmware update",
+	[41]	= "link down status read unknown reason",
+	[42]	= "link down ecd incomplete",
+	[43]	= "link down epd incomplete",
+	[44]	= "sysfs adapter recovery",
+	[45]	= "sysfs port recovery",
+	[46]	= "sysfs unit recovery",
+	[47]	= "port boxed abort",
+	[48]	= "unit boxed abort",
+	[49]	= "port boxed ct",
+	[50]	= "port boxed close physical",
+	[51]	= "port boxed open unit",
+	[52]	= "port boxed close unit",
+	[53]	= "port boxed fcp",
+	[54]	= "unit boxed fcp",
+	[55]	= "port access denied ct",
+	[56]	= "port access denied els",
+	[57]	= "port access denied open port",
+	[58]	= "port access denied close physical",
+	[59]	= "unit access denied open unit",
+	[60]	= "shared unit access denied open unit",
+	[61]	= "unit access denied fcp",
+	[62]	= "request timeout",
+	[63]	= "adisc link test reject or timeout",
+	[64]	= "adisc link test d_id changed",
+	[65]	= "adisc link test failed",
+	[66]	= "recovery out of memory",
+	[67]	= "adapter recovery repeated after state change",
+	[68]	= "port recovery repeated after state change",
+	[69]	= "unit recovery repeated after state change",
+	[70]	= "port recovery follow-up after successful adapter recovery",
+	[71]	= "adapter recovery escalation after failed adapter recovery",
+	[72]	= "port recovery follow-up after successful physical port "
+		  "recovery",
+	[73]	= "adapter recovery escalation after failed physical port "
+		  "recovery",
+	[74]	= "unit recovery follow-up after successful port recovery",
+	[75]	= "physical port recovery escalation after failed port "
+		  "recovery",
+	[76]	= "port recovery escalation after failed unit recovery",
+	[77]	= "recovery opening nameserver port",
+	[78]	= "duplicate request id",
+	[79]	= "link down",
+	[80]	= "exclusive read-only unit access unsupported",
+	[81]	= "shared read-write unit access unsupported",
+	[82]	= "incoming rscn",
+	[83]	= "incoming plogi",
+	[84]	= "incoming logo",
+	[85]	= "online",
+	[86]	= "offline",
+	[87]	= "ccw device gone",
+	[88]	= "ccw device no path",
+	[89]	= "ccw device operational",
+	[90]	= "ccw device shutdown",
+	[91]	= "sysfs port addition",
+	[92]	= "sysfs port removal",
+	[93]	= "sysfs adapter recovery",
+	[94]	= "sysfs unit addition",
+	[95]	= "sysfs unit removal",
+	[96]	= "sysfs port recovery",
+	[97]	= "sysfs unit recovery",
+	[98]	= "sequence number mismatch",
+	[99]	= "link up",
+	[100]	= "error state",
+	[101]	= "status read physical port closed",
+	[102]	= "link up status read",
+	[103]	= "too many failed status read buffers",
+	[104]	= "port handle not valid abort",
+	[105]	= "lun handle not valid abort",
+	[106]	= "port handle not valid ct",
+	[107]	= "port handle not valid close port",
+	[108]	= "port handle not valid close physical port",
+	[109]	= "port handle not valid open unit",
+	[110]	= "port handle not valid close unit",
+	[111]	= "lun handle not valid close unit",
+	[112]	= "port handle not valid fcp",
+	[113]	= "lun handle not valid fcp",
+	[114]	= "handle mismatch fcp",
+	[115]	= "lun not valid fcp",
+	[116]	= "qdio send failed",
+	[117]	= "version mismatch",
+	[118]	= "incompatible qtcb type",
+	[119]	= "unknown protocol status",
+	[120]	= "unknown fsf command",
+	[121]	= "no recommendation for status qualifier",
+	[122]	= "status read physical port closed in error",
+	[123]	= "fc service class not supported ct",
+	[124]	= "fc service class not supported els",
+	[125]	= "need newer zfcp",
+	[126]	= "need newer microcode",
+	[127]	= "arbitrated loop not supported",
+	[128]	= "unknown topology",
+	[129]	= "qtcb size mismatch",
+	[130]	= "unknown fsf status ecd",
+	[131]	= "fcp request too big",
+	[132]	= "fc service class not supported fcp",
+	[133]	= "data direction not valid fcp",
+	[134]	= "command length not valid fcp",
+	[135]	= "status read act update",
+	[136]	= "status read cfdc update",
+	[137]	= "hbaapi port open",
+	[138]	= "hbaapi unit open",
+	[139]	= "hbaapi unit shutdown",
+	[140]	= "qdio error",
+	[141]	= "scsi host reset",
+	[142]	= "dismissing fsf request for recovery action",
+	[143]	= "recovery action timed out",
+	[144]	= "recovery action gone",
+	[145]	= "recovery action being processed",
+	[146]	= "recovery action ready for next step",
+};
+
+static int zfcp_rec_dbf_view_format(debug_info_t *id, struct debug_view *view,
+				    char *buf, const char *_rec)
+{
+	struct zfcp_rec_dbf_record *r = (struct zfcp_rec_dbf_record *)_rec;
+	char *p = buf;
+
+	zfcp_dbf_outs(&p, "tag", zfcp_rec_dbf_tags[r->id]);
+	zfcp_dbf_outs(&p, "hint", zfcp_rec_dbf_ids[r->id2]);
+	zfcp_dbf_out(&p, "id", "%d", r->id2);
+	switch (r->id) {
+	case ZFCP_REC_DBF_ID_THREAD:
+		zfcp_dbf_out(&p, "total", "%d", r->u.thread.total);
+		zfcp_dbf_out(&p, "ready", "%d", r->u.thread.ready);
+		zfcp_dbf_out(&p, "running", "%d", r->u.thread.running);
+		break;
+	case ZFCP_REC_DBF_ID_TARGET:
+		zfcp_dbf_out(&p, "reference", "0x%016Lx", r->u.target.ref);
+		zfcp_dbf_out(&p, "status", "0x%08x", r->u.target.status);
+		zfcp_dbf_out(&p, "erp_count", "%d", r->u.target.erp_count);
+		zfcp_dbf_out(&p, "d_id", "0x%06x", r->u.target.d_id);
+		zfcp_dbf_out(&p, "wwpn", "0x%016Lx", r->u.target.wwpn);
+		zfcp_dbf_out(&p, "fcp_lun", "0x%016Lx", r->u.target.fcp_lun);
+		break;
+	case ZFCP_REC_DBF_ID_TRIGGER:
+		zfcp_dbf_out(&p, "reference", "0x%016Lx", r->u.trigger.ref);
+		zfcp_dbf_out(&p, "erp_action", "0x%016Lx", r->u.trigger.action);
+		zfcp_dbf_out(&p, "requested", "%d", r->u.trigger.want);
+		zfcp_dbf_out(&p, "executed", "%d", r->u.trigger.need);
+		zfcp_dbf_out(&p, "wwpn", "0x%016Lx", r->u.trigger.wwpn);
+		zfcp_dbf_out(&p, "fcp_lun", "0x%016Lx", r->u.trigger.fcp_lun);
+		zfcp_dbf_out(&p, "adapter_status", "0x%08x", r->u.trigger.as);
+		zfcp_dbf_out(&p, "port_status", "0x%08x", r->u.trigger.ps);
+		zfcp_dbf_out(&p, "unit_status", "0x%08x", r->u.trigger.us);
+		break;
+	case ZFCP_REC_DBF_ID_ACTION:
+		zfcp_dbf_out(&p, "erp_action", "0x%016Lx", r->u.action.action);
+		zfcp_dbf_out(&p, "fsf_req", "0x%016Lx", r->u.action.fsf_req);
+		zfcp_dbf_out(&p, "status", "0x%08Lx", r->u.action.status);
+		zfcp_dbf_out(&p, "step", "0x%08Lx", r->u.action.step);
+		break;
 	}
-	ct->payload_size =
-	    min(buflen - (int)sizeof(struct ct_hdr), ZFCP_DBF_CT_PAYLOAD);
-	memcpy(ct->payload, buffer + sizeof(struct ct_hdr), ct->payload_size);
-	debug_event(adapter->san_dbf, 3,
-		    rec, sizeof(struct zfcp_san_dbf_record));
-	spin_unlock_irqrestore(&adapter->san_dbf_lock, flags);
+	p += sprintf(p, "\n");
+	return p - buf;
 }
 
+static struct debug_view zfcp_rec_dbf_view = {
+	"structured",
+	NULL,
+	&zfcp_dbf_view_header,
+	&zfcp_rec_dbf_view_format,
+	NULL,
+	NULL
+};
+
+/**
+ * zfcp_rec_dbf_event_thread - trace event related to recovery thread operation
+ * @id2: identifier for event
+ * @adapter: adapter
+ * @lock: non-zero value indicates that erp_lock has not yet been acquired
+ */
+void zfcp_rec_dbf_event_thread(u8 id2, struct zfcp_adapter *adapter, int lock)
+{
+	struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf;
+	unsigned long flags = 0;
+	struct list_head *entry;
+	unsigned ready = 0, running = 0, total;
+
+	if (lock)
+		read_lock_irqsave(&adapter->erp_lock, flags);
+	list_for_each(entry, &adapter->erp_ready_head)
+		ready++;
+	list_for_each(entry, &adapter->erp_running_head)
+		running++;
+	total = adapter->erp_total_count;
+	if (lock)
+		read_unlock_irqrestore(&adapter->erp_lock, flags);
+
+	spin_lock_irqsave(&adapter->rec_dbf_lock, flags);
+	memset(r, 0, sizeof(*r));
+	r->id = ZFCP_REC_DBF_ID_THREAD;
+	r->id2 = id2;
+	r->u.thread.total = total;
+	r->u.thread.ready = ready;
+	r->u.thread.running = running;
+	debug_event(adapter->rec_dbf, 5, r, sizeof(*r));
+	spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags);
+}
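
The thread event sizes both ERP queues by walking them, taking erp_lock only
when the caller does not already hold it (the lock argument). The counting
itself is plain list traversal; a standalone sketch with simplified nodes in
place of the driver's list_head:

	#include <stdio.h>

	struct node { struct node *next; };

	static unsigned int count(struct node *head)
	{
		unsigned int n = 0;
		struct node *cur;

		for (cur = head; cur; cur = cur->next)
			n++;
		return n;
	}

	int main(void)
	{
		struct node c = { NULL }, b = { &c }, a = { &b };

		printf("ready: %u\n", count(&a));	/* prints 3 */
		return 0;
	}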
+
+static void zfcp_rec_dbf_event_target(u8 id2, void *ref,
+				      struct zfcp_adapter *adapter,
+				      atomic_t *status, atomic_t *erp_count,
+				      u64 wwpn, u32 d_id, u64 fcp_lun)
+{
+	struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf;
+	unsigned long flags;
+
+	spin_lock_irqsave(&adapter->rec_dbf_lock, flags);
+	memset(r, 0, sizeof(*r));
+	r->id = ZFCP_REC_DBF_ID_TARGET;
+	r->id2 = id2;
+	r->u.target.ref = (unsigned long)ref;
+	r->u.target.status = atomic_read(status);
+	r->u.target.wwpn = wwpn;
+	r->u.target.d_id = d_id;
+	r->u.target.fcp_lun = fcp_lun;
+	r->u.target.erp_count = atomic_read(erp_count);
+	debug_event(adapter->rec_dbf, 3, r, sizeof(*r));
+	spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags);
+}
+
+/**
+ * zfcp_rec_dbf_event_adapter - trace event for adapter state change
+ * @id: identifier for trigger of state change
+ * @ref: additional reference (e.g. request)
+ * @adapter: adapter
+ */
+void zfcp_rec_dbf_event_adapter(u8 id, void *ref, struct zfcp_adapter *adapter)
+{
+	zfcp_rec_dbf_event_target(id, ref, adapter, &adapter->status,
+				  &adapter->erp_counter, 0, 0, 0);
+}
+
+/**
+ * zfcp_rec_dbf_event_port - trace event for port state change
+ * @id: identifier for trigger of state change
+ * @ref: additional reference (e.g. request)
+ * @port: port
+ */
+void zfcp_rec_dbf_event_port(u8 id, void *ref, struct zfcp_port *port)
+{
+	struct zfcp_adapter *adapter = port->adapter;
+
+	zfcp_rec_dbf_event_target(id, ref, adapter, &port->status,
+				  &port->erp_counter, port->wwpn, port->d_id,
+				  0);
+}
+
+/**
+ * zfcp_rec_dbf_event_unit - trace event for unit state change
+ * @id: identifier for trigger of state change
+ * @ref: additional reference (e.g. request)
+ * @unit: unit
+ */
+void zfcp_rec_dbf_event_unit(u8 id, void *ref, struct zfcp_unit *unit)
+{
+	struct zfcp_port *port = unit->port;
+	struct zfcp_adapter *adapter = port->adapter;
+
+	zfcp_rec_dbf_event_target(id, ref, adapter, &unit->status,
+				  &unit->erp_counter, port->wwpn, port->d_id,
+				  unit->fcp_lun);
+}
+
+/**
+ * zfcp_rec_dbf_event_trigger - trace event for triggered error recovery
+ * @id2: identifier for error recovery trigger
+ * @ref: additional reference (e.g. request)
+ * @want: originally requested error recovery action
+ * @need: error recovery action actually initiated
+ * @action: address of error recovery action struct
+ * @adapter: adapter
+ * @port: port
+ * @unit: unit
+ */
+void zfcp_rec_dbf_event_trigger(u8 id2, void *ref, u8 want, u8 need,
+				void *action, struct zfcp_adapter *adapter,
+				struct zfcp_port *port, struct zfcp_unit *unit)
+{
+	struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf;
+	unsigned long flags;
+
+	spin_lock_irqsave(&adapter->rec_dbf_lock, flags);
+	memset(r, 0, sizeof(*r));
+	r->id = ZFCP_REC_DBF_ID_TRIGGER;
+	r->id2 = id2;
+	r->u.trigger.ref = (unsigned long)ref;
+	r->u.trigger.want = want;
+	r->u.trigger.need = need;
+	r->u.trigger.action = (unsigned long)action;
+	r->u.trigger.as = atomic_read(&adapter->status);
+	if (port) {
+		r->u.trigger.ps = atomic_read(&port->status);
+		r->u.trigger.wwpn = port->wwpn;
+	}
+	if (unit) {
+		r->u.trigger.us = atomic_read(&unit->status);
+		r->u.trigger.fcp_lun = unit->fcp_lun;
+	}
+	debug_event(adapter->rec_dbf, action ? 1 : 4, r, sizeof(*r));
+	spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags);
+}
+
+/**
+ * zfcp_rec_dbf_event_action - trace event showing progress of recovery action
+ * @id2: identifier
+ * @erp_action: error recovery action struct pointer
+ */
+void zfcp_rec_dbf_event_action(u8 id2, struct zfcp_erp_action *erp_action)
+{
+	struct zfcp_adapter *adapter = erp_action->adapter;
+	struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf;
+	unsigned long flags;
+
+	spin_lock_irqsave(&adapter->rec_dbf_lock, flags);
+	memset(r, 0, sizeof(*r));
+	r->id = ZFCP_REC_DBF_ID_ACTION;
+	r->id2 = id2;
+	r->u.action.action = (unsigned long)erp_action;
+	r->u.action.status = erp_action->status;
+	r->u.action.step = erp_action->step;
+	r->u.action.fsf_req = (unsigned long)erp_action->fsf_req;
+	debug_event(adapter->rec_dbf, 4, r, sizeof(*r));
+	spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags);
+}
+
+/**
+ * zfcp_san_dbf_event_ct_request - trace event for issued CT request
+ * @fsf_req: request containing issued CT data
+ */
 void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req)
 {
 	struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data;
 	struct zfcp_port *port = ct->port;
 	struct zfcp_adapter *adapter = port->adapter;
+	struct ct_hdr *hdr = zfcp_sg_to_address(ct->req);
+	struct zfcp_san_dbf_record *r = &adapter->san_dbf_buf;
+	struct zfcp_san_dbf_record_ct_request *oct = &r->u.ct_req;
+	unsigned long flags;
 
-	_zfcp_san_dbf_event_common_ct("octc", fsf_req,
-				      fc_host_port_id(adapter->scsi_host),
-				      port->d_id, zfcp_sg_to_address(ct->req),
-				      ct->req->length);
+	spin_lock_irqsave(&adapter->san_dbf_lock, flags);
+	memset(r, 0, sizeof(*r));
+	strncpy(r->tag, "octc", ZFCP_DBF_TAG_SIZE);
+	r->fsf_reqid = (unsigned long)fsf_req;
+	r->fsf_seqno = fsf_req->seq_no;
+	r->s_id = fc_host_port_id(adapter->scsi_host);
+	r->d_id = port->d_id;
+	oct->cmd_req_code = hdr->cmd_rsp_code;
+	oct->revision = hdr->revision;
+	oct->gs_type = hdr->gs_type;
+	oct->gs_subtype = hdr->gs_subtype;
+	oct->options = hdr->options;
+	oct->max_res_size = hdr->max_res_size;
+	oct->len = min((int)ct->req->length - (int)sizeof(struct ct_hdr),
+		       ZFCP_DBF_CT_PAYLOAD);
+	memcpy(oct->payload, (void *)hdr + sizeof(struct ct_hdr), oct->len);
+	debug_event(adapter->san_dbf, 3, r, sizeof(*r));
+	spin_unlock_irqrestore(&adapter->san_dbf_lock, flags);
 }
 
+/**
+ * zfcp_san_dbf_event_ct_response - trace event for completion of CT request
+ * @fsf_req: request containing CT response
+ */
 void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req)
 {
 	struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data;
 	struct zfcp_port *port = ct->port;
 	struct zfcp_adapter *adapter = port->adapter;
-
-	_zfcp_san_dbf_event_common_ct("rctc", fsf_req, port->d_id,
-				      fc_host_port_id(adapter->scsi_host),
-				      zfcp_sg_to_address(ct->resp),
-				      ct->resp->length);
-}
-
-static void
-_zfcp_san_dbf_event_common_els(const char *tag, int level,
-			       struct zfcp_fsf_req *fsf_req, u32 s_id,
-			       u32 d_id, u8 ls_code, void *buffer, int buflen)
-{
-	struct zfcp_adapter *adapter = fsf_req->adapter;
-	struct zfcp_san_dbf_record *rec = &adapter->san_dbf_buf;
-	struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec;
+	struct ct_hdr *hdr = zfcp_sg_to_address(ct->resp);
+	struct zfcp_san_dbf_record *r = &adapter->san_dbf_buf;
+	struct zfcp_san_dbf_record_ct_response *rct = &r->u.ct_resp;
 	unsigned long flags;
-	int offset = 0;
 
 	spin_lock_irqsave(&adapter->san_dbf_lock, flags);
-	do {
-		memset(rec, 0, sizeof(struct zfcp_san_dbf_record));
-		if (offset == 0) {
-			strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
-			rec->fsf_reqid = (unsigned long)fsf_req;
-			rec->fsf_seqno = fsf_req->seq_no;
-			rec->s_id = s_id;
-			rec->d_id = d_id;
-			rec->type.els.ls_code = ls_code;
-			buflen = min(buflen, ZFCP_DBF_ELS_MAX_PAYLOAD);
-			rec->type.els.payload_size = buflen;
-			memcpy(rec->type.els.payload,
-			       buffer, min(buflen, ZFCP_DBF_ELS_PAYLOAD));
-			offset += min(buflen, ZFCP_DBF_ELS_PAYLOAD);
-		} else {
-			strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE);
-			dump->total_size = buflen;
-			dump->offset = offset;
-			dump->size = min(buflen - offset,
-					 (int)sizeof(struct zfcp_san_dbf_record)
-					 - (int)sizeof(struct zfcp_dbf_dump));
-			memcpy(dump->data, buffer + offset, dump->size);
-			offset += dump->size;
-		}
-		debug_event(adapter->san_dbf, level,
-			    rec, sizeof(struct zfcp_san_dbf_record));
-	} while (offset < buflen);
+	memset(r, 0, sizeof(*r));
+	strncpy(r->tag, "rctc", ZFCP_DBF_TAG_SIZE);
+	r->fsf_reqid = (unsigned long)fsf_req;
+	r->fsf_seqno = fsf_req->seq_no;
+	r->s_id = port->d_id;
+	r->d_id = fc_host_port_id(adapter->scsi_host);
+	rct->cmd_rsp_code = hdr->cmd_rsp_code;
+	rct->revision = hdr->revision;
+	rct->reason_code = hdr->reason_code;
+	rct->expl = hdr->reason_code_expl;
+	rct->vendor_unique = hdr->vendor_unique;
+	rct->len = min((int)ct->resp->length - (int)sizeof(struct ct_hdr),
+		       ZFCP_DBF_CT_PAYLOAD);
+	memcpy(rct->payload, (void *)hdr + sizeof(struct ct_hdr), rct->len);
+	debug_event(adapter->san_dbf, 3, r, sizeof(*r));
 	spin_unlock_irqrestore(&adapter->san_dbf_lock, flags);
 }
 
+static void zfcp_san_dbf_event_els(const char *tag, int level,
+				   struct zfcp_fsf_req *fsf_req, u32 s_id,
+				   u32 d_id, u8 ls_code, void *buffer,
+				   int buflen)
+{
+	struct zfcp_adapter *adapter = fsf_req->adapter;
+	struct zfcp_san_dbf_record *rec = &adapter->san_dbf_buf;
+	unsigned long flags;
+
+	spin_lock_irqsave(&adapter->san_dbf_lock, flags);
+	memset(rec, 0, sizeof(*rec));
+	strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
+	rec->fsf_reqid = (unsigned long)fsf_req;
+	rec->fsf_seqno = fsf_req->seq_no;
+	rec->s_id = s_id;
+	rec->d_id = d_id;
+	rec->u.els.ls_code = ls_code;
+	debug_event(adapter->san_dbf, level, rec, sizeof(*rec));
+	zfcp_dbf_hexdump(adapter->san_dbf, rec, sizeof(*rec), level,
+			 buffer, min(buflen, ZFCP_DBF_ELS_MAX_PAYLOAD));
+	spin_unlock_irqrestore(&adapter->san_dbf_lock, flags);
+}
+
+/**
+ * zfcp_san_dbf_event_els_request - trace event for issued ELS
+ * @fsf_req: request containing issued ELS
+ */
 void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req)
 {
 	struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data;
 
-	_zfcp_san_dbf_event_common_els("oels", 2, fsf_req,
-				       fc_host_port_id(els->adapter->scsi_host),
-				       els->d_id,
-				       *(u8 *) zfcp_sg_to_address(els->req),
-				       zfcp_sg_to_address(els->req),
-				       els->req->length);
+	zfcp_san_dbf_event_els("oels", 2, fsf_req,
+			       fc_host_port_id(els->adapter->scsi_host),
+			       els->d_id, *(u8 *) zfcp_sg_to_address(els->req),
+			       zfcp_sg_to_address(els->req), els->req->length);
 }
 
+/**
+ * zfcp_san_dbf_event_els_response - trace event for completed ELS
+ * @fsf_req: request containing ELS response
+ */
 void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req)
 {
 	struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data;
 
-	_zfcp_san_dbf_event_common_els("rels", 2, fsf_req, els->d_id,
-				       fc_host_port_id(els->adapter->scsi_host),
-				       *(u8 *) zfcp_sg_to_address(els->req),
-				       zfcp_sg_to_address(els->resp),
-				       els->resp->length);
+	zfcp_san_dbf_event_els("rels", 2, fsf_req, els->d_id,
+			       fc_host_port_id(els->adapter->scsi_host),
+			       *(u8 *)zfcp_sg_to_address(els->req),
+			       zfcp_sg_to_address(els->resp),
+			       els->resp->length);
 }
 
+/**
+ * zfcp_san_dbf_event_incoming_els - trace event for incoming ELS
+ * @fsf_req: request containing unsolicited status buffer with incoming ELS
+ */
 void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *fsf_req)
 {
 	struct zfcp_adapter *adapter = fsf_req->adapter;
-	struct fsf_status_read_buffer *status_buffer =
-	    (struct fsf_status_read_buffer *)fsf_req->data;
-	int length = (int)status_buffer->length -
-	    (int)((void *)&status_buffer->payload - (void *)status_buffer);
+	struct fsf_status_read_buffer *buf =
+			(struct fsf_status_read_buffer *)fsf_req->data;
+	int length = (int)buf->length -
+		     (int)((void *)&buf->payload - (void *)buf);
 
-	_zfcp_san_dbf_event_common_els("iels", 1, fsf_req, status_buffer->d_id,
-				       fc_host_port_id(adapter->scsi_host),
-				       *(u8 *) status_buffer->payload,
-				       (void *)status_buffer->payload, length);
+	zfcp_san_dbf_event_els("iels", 1, fsf_req, buf->d_id,
+			       fc_host_port_id(adapter->scsi_host),
+			       *(u8 *)buf->payload, (void *)buf->payload,
+			       length);
 }
 
-static int
-zfcp_san_dbf_view_format(debug_info_t * id, struct debug_view *view,
-			 char *out_buf, const char *in_buf)
+static int zfcp_san_dbf_view_format(debug_info_t *id, struct debug_view *view,
+				    char *out_buf, const char *in_buf)
 {
-	struct zfcp_san_dbf_record *rec = (struct zfcp_san_dbf_record *)in_buf;
+	struct zfcp_san_dbf_record *r = (struct zfcp_san_dbf_record *)in_buf;
 	char *buffer = NULL;
 	int buflen = 0, total = 0;
-	int len = 0;
+	char *p = out_buf;
 
-	if (strncmp(rec->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0)
+	if (strncmp(r->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0)
 		return 0;
 
-	len += zfcp_dbf_tag(out_buf + len, "tag", rec->tag);
-	len += zfcp_dbf_view(out_buf + len, "fsf_reqid", "0x%0Lx",
-			     rec->fsf_reqid);
-	len += zfcp_dbf_view(out_buf + len, "fsf_seqno", "0x%08x",
-			     rec->fsf_seqno);
-	len += zfcp_dbf_view(out_buf + len, "s_id", "0x%06x", rec->s_id);
-	len += zfcp_dbf_view(out_buf + len, "d_id", "0x%06x", rec->d_id);
+	zfcp_dbf_tag(&p, "tag", r->tag);
+	zfcp_dbf_out(&p, "fsf_reqid", "0x%0Lx", r->fsf_reqid);
+	zfcp_dbf_out(&p, "fsf_seqno", "0x%08x", r->fsf_seqno);
+	zfcp_dbf_out(&p, "s_id", "0x%06x", r->s_id);
+	zfcp_dbf_out(&p, "d_id", "0x%06x", r->d_id);
 
-	if (strncmp(rec->tag, "octc", ZFCP_DBF_TAG_SIZE) == 0) {
-		len += zfcp_dbf_view(out_buf + len, "cmd_req_code", "0x%04x",
-				     rec->type.ct.type.request.cmd_req_code);
-		len += zfcp_dbf_view(out_buf + len, "revision", "0x%02x",
-				     rec->type.ct.type.request.revision);
-		len += zfcp_dbf_view(out_buf + len, "gs_type", "0x%02x",
-				     rec->type.ct.type.request.gs_type);
-		len += zfcp_dbf_view(out_buf + len, "gs_subtype", "0x%02x",
-				     rec->type.ct.type.request.gs_subtype);
-		len += zfcp_dbf_view(out_buf + len, "options", "0x%02x",
-				     rec->type.ct.type.request.options);
-		len += zfcp_dbf_view(out_buf + len, "max_res_size", "0x%04x",
-				     rec->type.ct.type.request.max_res_size);
-		total = rec->type.ct.payload_size;
-		buffer = rec->type.ct.payload;
+	if (strncmp(r->tag, "octc", ZFCP_DBF_TAG_SIZE) == 0) {
+		struct zfcp_san_dbf_record_ct_request *ct = &r->u.ct_req;
+		zfcp_dbf_out(&p, "cmd_req_code", "0x%04x", ct->cmd_req_code);
+		zfcp_dbf_out(&p, "revision", "0x%02x", ct->revision);
+		zfcp_dbf_out(&p, "gs_type", "0x%02x", ct->gs_type);
+		zfcp_dbf_out(&p, "gs_subtype", "0x%02x", ct->gs_subtype);
+		zfcp_dbf_out(&p, "options", "0x%02x", ct->options);
+		zfcp_dbf_out(&p, "max_res_size", "0x%04x", ct->max_res_size);
+		total = ct->len;
+		buffer = ct->payload;
 		buflen = min(total, ZFCP_DBF_CT_PAYLOAD);
-	} else if (strncmp(rec->tag, "rctc", ZFCP_DBF_TAG_SIZE) == 0) {
-		len += zfcp_dbf_view(out_buf + len, "cmd_rsp_code", "0x%04x",
-				     rec->type.ct.type.response.cmd_rsp_code);
-		len += zfcp_dbf_view(out_buf + len, "revision", "0x%02x",
-				     rec->type.ct.type.response.revision);
-		len += zfcp_dbf_view(out_buf + len, "reason_code", "0x%02x",
-				     rec->type.ct.type.response.reason_code);
-		len +=
-		    zfcp_dbf_view(out_buf + len, "reason_code_expl", "0x%02x",
-				  rec->type.ct.type.response.reason_code_expl);
-		len +=
-		    zfcp_dbf_view(out_buf + len, "vendor_unique", "0x%02x",
-				  rec->type.ct.type.response.vendor_unique);
-		total = rec->type.ct.payload_size;
-		buffer = rec->type.ct.payload;
+	} else if (strncmp(r->tag, "rctc", ZFCP_DBF_TAG_SIZE) == 0) {
+		struct zfcp_san_dbf_record_ct_response *ct = &r->u.ct_resp;
+		zfcp_dbf_out(&p, "cmd_rsp_code", "0x%04x", ct->cmd_rsp_code);
+		zfcp_dbf_out(&p, "revision", "0x%02x", ct->revision);
+		zfcp_dbf_out(&p, "reason_code", "0x%02x", ct->reason_code);
+		zfcp_dbf_out(&p, "reason_code_expl", "0x%02x", ct->expl);
+		zfcp_dbf_out(&p, "vendor_unique", "0x%02x", ct->vendor_unique);
+		total = ct->len;
+		buffer = ct->payload;
 		buflen = min(total, ZFCP_DBF_CT_PAYLOAD);
-	} else if (strncmp(rec->tag, "oels", ZFCP_DBF_TAG_SIZE) == 0 ||
-		   strncmp(rec->tag, "rels", ZFCP_DBF_TAG_SIZE) == 0 ||
-		   strncmp(rec->tag, "iels", ZFCP_DBF_TAG_SIZE) == 0) {
-		len += zfcp_dbf_view(out_buf + len, "ls_code", "0x%02x",
-				     rec->type.els.ls_code);
-		total = rec->type.els.payload_size;
-		buffer = rec->type.els.payload;
+	} else if (strncmp(r->tag, "oels", ZFCP_DBF_TAG_SIZE) == 0 ||
+		   strncmp(r->tag, "rels", ZFCP_DBF_TAG_SIZE) == 0 ||
+		   strncmp(r->tag, "iels", ZFCP_DBF_TAG_SIZE) == 0) {
+		struct zfcp_san_dbf_record_els *els = &r->u.els;
+		zfcp_dbf_out(&p, "ls_code", "0x%02x", els->ls_code);
+		total = els->len;
+		buffer = els->payload;
 		buflen = min(total, ZFCP_DBF_ELS_PAYLOAD);
 	}
 
-	len += zfcp_dbf_view_dump(out_buf + len, "payload",
-				  buffer, buflen, 0, total);
-
+	zfcp_dbf_outd(&p, "payload", buffer, buflen, 0, total);
 	if (buflen == total)
-		len += sprintf(out_buf + len, "\n");
+		p += sprintf(p, "\n");
 
-	return len;
+	return p - out_buf;
 }
 
 static struct debug_view zfcp_san_dbf_view = {
@@ -696,12 +1028,11 @@
 	NULL
 };
 
-static void
-_zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level,
-			    struct zfcp_adapter *adapter,
-			    struct scsi_cmnd *scsi_cmnd,
-			    struct zfcp_fsf_req *fsf_req,
-			    unsigned long old_req_id)
+static void zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level,
+				struct zfcp_adapter *adapter,
+				struct scsi_cmnd *scsi_cmnd,
+				struct zfcp_fsf_req *fsf_req,
+				unsigned long old_req_id)
 {
 	struct zfcp_scsi_dbf_record *rec = &adapter->scsi_dbf_buf;
 	struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec;
@@ -712,7 +1043,7 @@
 
 	spin_lock_irqsave(&adapter->scsi_dbf_lock, flags);
 	do {
-		memset(rec, 0, sizeof(struct zfcp_scsi_dbf_record));
+		memset(rec, 0, sizeof(*rec));
 		if (offset == 0) {
 			strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
 			strncpy(rec->tag2, tag2, ZFCP_DBF_TAG_SIZE);
@@ -738,20 +1069,16 @@
 				fcp_sns_info =
 				    zfcp_get_fcp_sns_info_ptr(fcp_rsp);
 
-				rec->type.fcp.rsp_validity =
-				    fcp_rsp->validity.value;
-				rec->type.fcp.rsp_scsi_status =
-				    fcp_rsp->scsi_status;
-				rec->type.fcp.rsp_resid = fcp_rsp->fcp_resid;
+				rec->rsp_validity = fcp_rsp->validity.value;
+				rec->rsp_scsi_status = fcp_rsp->scsi_status;
+				rec->rsp_resid = fcp_rsp->fcp_resid;
 				if (fcp_rsp->validity.bits.fcp_rsp_len_valid)
-					rec->type.fcp.rsp_code =
-					    *(fcp_rsp_info + 3);
+					rec->rsp_code = *(fcp_rsp_info + 3);
 				if (fcp_rsp->validity.bits.fcp_sns_len_valid) {
 					buflen = min((int)fcp_rsp->fcp_sns_len,
 						     ZFCP_DBF_SCSI_MAX_FCP_SNS_INFO);
-					rec->type.fcp.sns_info_len = buflen;
-					memcpy(rec->type.fcp.sns_info,
-					       fcp_sns_info,
+					rec->sns_info_len = buflen;
+					memcpy(rec->sns_info, fcp_sns_info,
 					       min(buflen,
 						   ZFCP_DBF_SCSI_FCP_SNS_INFO));
 					offset += min(buflen,
@@ -762,7 +1089,7 @@
 				rec->fsf_seqno = fsf_req->seq_no;
 				rec->fsf_issued = fsf_req->issued;
 			}
-			rec->type.old_fsf_reqid = old_req_id;
+			rec->old_fsf_reqid = old_req_id;
 		} else {
 			strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE);
 			dump->total_size = buflen;
@@ -774,108 +1101,101 @@
 			memcpy(dump->data, fcp_sns_info + offset, dump->size);
 			offset += dump->size;
 		}
-		debug_event(adapter->scsi_dbf, level,
-			    rec, sizeof(struct zfcp_scsi_dbf_record));
+		debug_event(adapter->scsi_dbf, level, rec, sizeof(*rec));
 	} while (offset < buflen);
 	spin_unlock_irqrestore(&adapter->scsi_dbf_lock, flags);
 }
 
-void
-zfcp_scsi_dbf_event_result(const char *tag, int level,
-			   struct zfcp_adapter *adapter,
-			   struct scsi_cmnd *scsi_cmnd,
-			   struct zfcp_fsf_req *fsf_req)
+/**
+ * zfcp_scsi_dbf_event_result - trace event for SCSI command completion
+ * @tag: tag indicating success or failure of SCSI command
+ * @level: trace level applicable for this event
+ * @adapter: adapter that has been used to issue the SCSI command
+ * @scsi_cmnd: SCSI command pointer
+ * @fsf_req: request used to issue SCSI command (might be NULL)
+ */
+void zfcp_scsi_dbf_event_result(const char *tag, int level,
+				struct zfcp_adapter *adapter,
+				struct scsi_cmnd *scsi_cmnd,
+				struct zfcp_fsf_req *fsf_req)
 {
-	_zfcp_scsi_dbf_event_common("rslt", tag, level,
-			adapter, scsi_cmnd, fsf_req, 0);
+	zfcp_scsi_dbf_event("rslt", tag, level, adapter, scsi_cmnd, fsf_req, 0);
 }
 
-void
-zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter,
-			  struct scsi_cmnd *scsi_cmnd,
-			  struct zfcp_fsf_req *new_fsf_req,
-			  unsigned long old_req_id)
+/**
+ * zfcp_scsi_dbf_event_abort - trace event for SCSI command abort
+ * @tag: tag indicating success or failure of abort operation
+ * @adapter: adapter that has been used to issue the SCSI command to be aborted
+ * @scsi_cmnd: SCSI command to be aborted
+ * @new_fsf_req: request containing abort (might be NULL)
+ * @old_req_id: identifier of request containing the SCSI command to be aborted
+ */
+void zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter,
+			       struct scsi_cmnd *scsi_cmnd,
+			       struct zfcp_fsf_req *new_fsf_req,
+			       unsigned long old_req_id)
 {
-	_zfcp_scsi_dbf_event_common("abrt", tag, 1,
-			adapter, scsi_cmnd, new_fsf_req, old_req_id);
+	zfcp_scsi_dbf_event("abrt", tag, 1, adapter, scsi_cmnd, new_fsf_req,
+			    old_req_id);
 }
 
-void
-zfcp_scsi_dbf_event_devreset(const char *tag, u8 flag, struct zfcp_unit *unit,
-			     struct scsi_cmnd *scsi_cmnd)
+/**
+ * zfcp_scsi_dbf_event_devreset - trace event for Logical Unit or Target Reset
+ * @tag: tag indicating success or failure of reset operation
+ * @flag: indicates type of reset (Target Reset, Logical Unit Reset)
+ * @unit: unit that needs reset
+ * @scsi_cmnd: SCSI command which caused this error recovery
+ */
+void zfcp_scsi_dbf_event_devreset(const char *tag, u8 flag,
+				  struct zfcp_unit *unit,
+				  struct scsi_cmnd *scsi_cmnd)
 {
-	struct zfcp_adapter *adapter = unit->port->adapter;
-
-	_zfcp_scsi_dbf_event_common(flag == FCP_TARGET_RESET ? "trst" : "lrst",
-			tag, 1, adapter, scsi_cmnd, NULL, 0);
+	zfcp_scsi_dbf_event(flag == FCP_TARGET_RESET ? "trst" : "lrst", tag, 1,
+			    unit->port->adapter, scsi_cmnd, NULL, 0);
 }
 
-static int
-zfcp_scsi_dbf_view_format(debug_info_t * id, struct debug_view *view,
-			  char *out_buf, const char *in_buf)
+static int zfcp_scsi_dbf_view_format(debug_info_t *id, struct debug_view *view,
+				     char *out_buf, const char *in_buf)
 {
-	struct zfcp_scsi_dbf_record *rec =
-	    (struct zfcp_scsi_dbf_record *)in_buf;
-	int len = 0;
+	struct zfcp_scsi_dbf_record *r = (struct zfcp_scsi_dbf_record *)in_buf;
+	struct timespec t;
+	char *p = out_buf;
 
-	if (strncmp(rec->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0)
+	if (strncmp(r->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0)
 		return 0;
 
-	len += zfcp_dbf_tag(out_buf + len, "tag", rec->tag);
-	len += zfcp_dbf_tag(out_buf + len, "tag2", rec->tag2);
-	len += zfcp_dbf_view(out_buf + len, "scsi_id", "0x%08x", rec->scsi_id);
-	len += zfcp_dbf_view(out_buf + len, "scsi_lun", "0x%08x",
-			     rec->scsi_lun);
-	len += zfcp_dbf_view(out_buf + len, "scsi_result", "0x%08x",
-			     rec->scsi_result);
-	len += zfcp_dbf_view(out_buf + len, "scsi_cmnd", "0x%0Lx",
-			     rec->scsi_cmnd);
-	len += zfcp_dbf_view(out_buf + len, "scsi_serial", "0x%016Lx",
-			     rec->scsi_serial);
-	len += zfcp_dbf_view_dump(out_buf + len, "scsi_opcode",
-				  rec->scsi_opcode,
-				  ZFCP_DBF_SCSI_OPCODE,
-				  0, ZFCP_DBF_SCSI_OPCODE);
-	len += zfcp_dbf_view(out_buf + len, "scsi_retries", "0x%02x",
-			     rec->scsi_retries);
-	len += zfcp_dbf_view(out_buf + len, "scsi_allowed", "0x%02x",
-			     rec->scsi_allowed);
-	if (strncmp(rec->tag, "abrt", ZFCP_DBF_TAG_SIZE) == 0) {
-		len += zfcp_dbf_view(out_buf + len, "old_fsf_reqid", "0x%0Lx",
-				     rec->type.old_fsf_reqid);
-	}
-	len += zfcp_dbf_view(out_buf + len, "fsf_reqid", "0x%0Lx",
-			     rec->fsf_reqid);
-	len += zfcp_dbf_view(out_buf + len, "fsf_seqno", "0x%08x",
-			     rec->fsf_seqno);
-	len += zfcp_dbf_stck(out_buf + len, "fsf_issued", rec->fsf_issued);
-	if (strncmp(rec->tag, "rslt", ZFCP_DBF_TAG_SIZE) == 0) {
-		len +=
-		    zfcp_dbf_view(out_buf + len, "fcp_rsp_validity", "0x%02x",
-				  rec->type.fcp.rsp_validity);
-		len +=
-		    zfcp_dbf_view(out_buf + len, "fcp_rsp_scsi_status",
-				  "0x%02x", rec->type.fcp.rsp_scsi_status);
-		len +=
-		    zfcp_dbf_view(out_buf + len, "fcp_rsp_resid", "0x%08x",
-				  rec->type.fcp.rsp_resid);
-		len +=
-		    zfcp_dbf_view(out_buf + len, "fcp_rsp_code", "0x%08x",
-				  rec->type.fcp.rsp_code);
-		len +=
-		    zfcp_dbf_view(out_buf + len, "fcp_sns_info_len", "0x%08x",
-				  rec->type.fcp.sns_info_len);
-		len +=
-		    zfcp_dbf_view_dump(out_buf + len, "fcp_sns_info",
-				       rec->type.fcp.sns_info,
-				       min((int)rec->type.fcp.sns_info_len,
-					   ZFCP_DBF_SCSI_FCP_SNS_INFO), 0,
-				       rec->type.fcp.sns_info_len);
-	}
+	zfcp_dbf_tag(&p, "tag", r->tag);
+	zfcp_dbf_tag(&p, "tag2", r->tag2);
+	zfcp_dbf_out(&p, "scsi_id", "0x%08x", r->scsi_id);
+	zfcp_dbf_out(&p, "scsi_lun", "0x%08x", r->scsi_lun);
+	zfcp_dbf_out(&p, "scsi_result", "0x%08x", r->scsi_result);
+	zfcp_dbf_out(&p, "scsi_cmnd", "0x%0Lx", r->scsi_cmnd);
+	zfcp_dbf_out(&p, "scsi_serial", "0x%016Lx", r->scsi_serial);
+	zfcp_dbf_outd(&p, "scsi_opcode", r->scsi_opcode, ZFCP_DBF_SCSI_OPCODE,
+		      0, ZFCP_DBF_SCSI_OPCODE);
+	zfcp_dbf_out(&p, "scsi_retries", "0x%02x", r->scsi_retries);
+	zfcp_dbf_out(&p, "scsi_allowed", "0x%02x", r->scsi_allowed);
+	if (strncmp(r->tag, "abrt", ZFCP_DBF_TAG_SIZE) == 0)
+		zfcp_dbf_out(&p, "old_fsf_reqid", "0x%0Lx", r->old_fsf_reqid);
+	zfcp_dbf_out(&p, "fsf_reqid", "0x%0Lx", r->fsf_reqid);
+	zfcp_dbf_out(&p, "fsf_seqno", "0x%08x", r->fsf_seqno);
+	zfcp_dbf_timestamp(r->fsf_issued, &t);
+	zfcp_dbf_out(&p, "fsf_issued", "%011lu:%06lu", t.tv_sec, t.tv_nsec);
 
-	len += sprintf(out_buf + len, "\n");
-
-	return len;
+	if (strncmp(r->tag, "rslt", ZFCP_DBF_TAG_SIZE) == 0) {
+		zfcp_dbf_out(&p, "fcp_rsp_validity", "0x%02x", r->rsp_validity);
+		zfcp_dbf_out(&p, "fcp_rsp_scsi_status", "0x%02x",
+			     r->rsp_scsi_status);
+		zfcp_dbf_out(&p, "fcp_rsp_resid", "0x%08x", r->rsp_resid);
+		zfcp_dbf_out(&p, "fcp_rsp_code", "0x%08x", r->rsp_code);
+		zfcp_dbf_out(&p, "fcp_sns_info_len", "0x%08x", r->sns_info_len);
+		zfcp_dbf_outd(&p, "fcp_sns_info", r->sns_info,
+			      min((int)r->sns_info_len,
+			      ZFCP_DBF_SCSI_FCP_SNS_INFO), 0,
+			      r->sns_info_len);
+	}
+	p += sprintf(p, "\n");
+	return p - out_buf;
 }
 
 static struct debug_view zfcp_scsi_dbf_view = {
@@ -897,13 +1217,14 @@
 	char dbf_name[DEBUG_MAX_NAME_LEN];
 
 	/* debug feature area which records recovery activity */
-	sprintf(dbf_name, "zfcp_%s_erp", zfcp_get_busid_by_adapter(adapter));
-	adapter->erp_dbf = debug_register(dbf_name, dbfsize, 2,
-					  sizeof(struct zfcp_erp_dbf_record));
-	if (!adapter->erp_dbf)
+	sprintf(dbf_name, "zfcp_%s_rec", zfcp_get_busid_by_adapter(adapter));
+	adapter->rec_dbf = debug_register(dbf_name, dbfsize, 1,
+					  sizeof(struct zfcp_rec_dbf_record));
+	if (!adapter->rec_dbf)
 		goto failed;
-	debug_register_view(adapter->erp_dbf, &debug_hex_ascii_view);
-	debug_set_level(adapter->erp_dbf, 3);
+	debug_register_view(adapter->rec_dbf, &debug_hex_ascii_view);
+	debug_register_view(adapter->rec_dbf, &zfcp_rec_dbf_view);
+	debug_set_level(adapter->rec_dbf, 3);
 
 	/* debug feature area which records HBA (FSF and QDIO) conditions */
 	sprintf(dbf_name, "zfcp_%s_hba", zfcp_get_busid_by_adapter(adapter));
@@ -952,11 +1273,11 @@
 	debug_unregister(adapter->scsi_dbf);
 	debug_unregister(adapter->san_dbf);
 	debug_unregister(adapter->hba_dbf);
-	debug_unregister(adapter->erp_dbf);
+	debug_unregister(adapter->rec_dbf);
 	adapter->scsi_dbf = NULL;
 	adapter->san_dbf = NULL;
 	adapter->hba_dbf = NULL;
-	adapter->erp_dbf = NULL;
+	adapter->rec_dbf = NULL;
 }
 
 #undef ZFCP_LOG_AREA
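Note the pattern change throughout zfcp_dbf.c: the old formatters
accumulated a length (len += zfcp_dbf_view(...)), while the new
zfcp_dbf_out()-style helpers advance a write pointer, so the record
length simply falls out as p - out_buf. A minimal userspace analogue of
that convention (a sketch; the helper name and the 24-column label width
are assumptions, not the driver's actual layout):

#include <stdarg.h>
#include <stdio.h>

static void dbf_out(char **p, const char *label, const char *fmt, ...)
{
	va_list ap;

	*p += sprintf(*p, "%-24s", label);	/* pad the label column */
	va_start(ap, fmt);
	*p += vsprintf(*p, fmt, ap);		/* append formatted value */
	va_end(ap);
	*p += sprintf(*p, "\n");
}

int main(void)
{
	char buf[256], *p = buf;

	dbf_out(&p, "scsi_id", "0x%08x", 0x12u);
	dbf_out(&p, "scsi_result", "0x%08x", 0u);
	printf("%s=> %td bytes\n", buf, p - buf);	/* length = p - buf */
	return 0;
}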
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
new file mode 100644
index 0000000..54c34e4
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -0,0 +1,228 @@
+/*
+ * This file is part of the zfcp device driver for
+ * FCP adapters for IBM System z9 and zSeries.
+ *
+ * Copyright IBM Corp. 2008, 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef ZFCP_DBF_H
+#define ZFCP_DBF_H
+
+#include "zfcp_fsf.h"
+
+#define ZFCP_DBF_TAG_SIZE      4
+
+struct zfcp_dbf_dump {
+	u8 tag[ZFCP_DBF_TAG_SIZE];
+	u32 total_size;		/* size of total dump data */
+	u32 offset;		/* how much data has already been dumped */
+	u32 size;		/* how much data comes with this record */
+	u8 data[];		/* dump data */
+} __attribute__ ((packed));
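The total_size/offset/size triple encodes the chunking convention already
visible in the do/while dump loop at the top of this patch: a payload
larger than one trace record is spread over consecutive "dump" records
until offset reaches total_size. A userspace sketch of that scheme (field
names borrowed from the struct above, everything else assumed):

#include <stdio.h>

static void dump_in_chunks(unsigned int total_size, unsigned int room)
{
	unsigned int offset = 0;

	do {
		/* each iteration stands for one zfcp_dbf_dump record */
		unsigned int size = total_size - offset < room ?
				    total_size - offset : room;

		printf("record: offset=%u size=%u total_size=%u\n",
		       offset, size, total_size);
		offset += size;
	} while (offset < total_size);
}

int main(void)
{
	dump_in_chunks(100, 32);	/* e.g. 100 bytes, 32 bytes room each */
	return 0;
}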
+
+struct zfcp_rec_dbf_record_thread {
+	u32 total;
+	u32 ready;
+	u32 running;
+} __attribute__ ((packed));
+
+struct zfcp_rec_dbf_record_target {
+	u64 ref;
+	u32 status;
+	u32 d_id;
+	u64 wwpn;
+	u64 fcp_lun;
+	u32 erp_count;
+} __attribute__ ((packed));
+
+struct zfcp_rec_dbf_record_trigger {
+	u8 want;
+	u8 need;
+	u32 as;
+	u32 ps;
+	u32 us;
+	u64 ref;
+	u64 action;
+	u64 wwpn;
+	u64 fcp_lun;
+} __attribute__ ((packed));
+
+struct zfcp_rec_dbf_record_action {
+	u32 status;
+	u32 step;
+	u64 action;
+	u64 fsf_req;
+} __attribute__ ((packed));
+
+struct zfcp_rec_dbf_record {
+	u8 id;
+	u8 id2;
+	union {
+		struct zfcp_rec_dbf_record_action action;
+		struct zfcp_rec_dbf_record_thread thread;
+		struct zfcp_rec_dbf_record_target target;
+		struct zfcp_rec_dbf_record_trigger trigger;
+	} u;
+} __attribute__ ((packed));
+
+enum {
+	ZFCP_REC_DBF_ID_ACTION,
+	ZFCP_REC_DBF_ID_THREAD,
+	ZFCP_REC_DBF_ID_TARGET,
+	ZFCP_REC_DBF_ID_TRIGGER,
+};
+
+struct zfcp_hba_dbf_record_response {
+	u32 fsf_command;
+	u64 fsf_reqid;
+	u32 fsf_seqno;
+	u64 fsf_issued;
+	u32 fsf_prot_status;
+	u32 fsf_status;
+	u8 fsf_prot_status_qual[FSF_PROT_STATUS_QUAL_SIZE];
+	u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
+	u32 fsf_req_status;
+	u8 sbal_first;
+	u8 sbal_curr;
+	u8 sbal_last;
+	u8 pool;
+	u64 erp_action;
+	union {
+		struct {
+			u64 cmnd;
+			u64 serial;
+		} fcp;
+		struct {
+			u64 wwpn;
+			u32 d_id;
+			u32 port_handle;
+		} port;
+		struct {
+			u64 wwpn;
+			u64 fcp_lun;
+			u32 port_handle;
+			u32 lun_handle;
+		} unit;
+		struct {
+			u32 d_id;
+			u8 ls_code;
+		} els;
+	} u;
+} __attribute__ ((packed));
+
+struct zfcp_hba_dbf_record_status {
+	u8 failed;
+	u32 status_type;
+	u32 status_subtype;
+	struct fsf_queue_designator queue_designator;
+	u32 payload_size;
+#define ZFCP_DBF_UNSOL_PAYLOAD				80
+#define ZFCP_DBF_UNSOL_PAYLOAD_SENSE_DATA_AVAIL		32
+#define ZFCP_DBF_UNSOL_PAYLOAD_BIT_ERROR_THRESHOLD	56
+#define ZFCP_DBF_UNSOL_PAYLOAD_FEATURE_UPDATE_ALERT	(2 * sizeof(u32))
+	u8 payload[ZFCP_DBF_UNSOL_PAYLOAD];
+} __attribute__ ((packed));
+
+struct zfcp_hba_dbf_record_qdio {
+	u32 status;
+	u32 qdio_error;
+	u32 siga_error;
+	u8 sbal_index;
+	u8 sbal_count;
+} __attribute__ ((packed));
+
+struct zfcp_hba_dbf_record {
+	u8 tag[ZFCP_DBF_TAG_SIZE];
+	u8 tag2[ZFCP_DBF_TAG_SIZE];
+	union {
+		struct zfcp_hba_dbf_record_response response;
+		struct zfcp_hba_dbf_record_status status;
+		struct zfcp_hba_dbf_record_qdio qdio;
+	} u;
+} __attribute__ ((packed));
+
+struct zfcp_san_dbf_record_ct_request {
+	u16 cmd_req_code;
+	u8 revision;
+	u8 gs_type;
+	u8 gs_subtype;
+	u8 options;
+	u16 max_res_size;
+	u32 len;
+#define ZFCP_DBF_CT_PAYLOAD	24
+	u8 payload[ZFCP_DBF_CT_PAYLOAD];
+} __attribute__ ((packed));
+
+struct zfcp_san_dbf_record_ct_response {
+	u16 cmd_rsp_code;
+	u8 revision;
+	u8 reason_code;
+	u8 expl;
+	u8 vendor_unique;
+	u32 len;
+	u8 payload[ZFCP_DBF_CT_PAYLOAD];
+} __attribute__ ((packed));
+
+struct zfcp_san_dbf_record_els {
+	u8 ls_code;
+	u32 len;
+#define ZFCP_DBF_ELS_PAYLOAD	32
+#define ZFCP_DBF_ELS_MAX_PAYLOAD 1024
+	u8 payload[ZFCP_DBF_ELS_PAYLOAD];
+} __attribute__ ((packed));
+
+struct zfcp_san_dbf_record {
+	u8 tag[ZFCP_DBF_TAG_SIZE];
+	u64 fsf_reqid;
+	u32 fsf_seqno;
+	u32 s_id;
+	u32 d_id;
+	union {
+		struct zfcp_san_dbf_record_ct_request ct_req;
+		struct zfcp_san_dbf_record_ct_response ct_resp;
+		struct zfcp_san_dbf_record_els els;
+	} u;
+} __attribute__ ((packed));
+
+struct zfcp_scsi_dbf_record {
+	u8 tag[ZFCP_DBF_TAG_SIZE];
+	u8 tag2[ZFCP_DBF_TAG_SIZE];
+	u32 scsi_id;
+	u32 scsi_lun;
+	u32 scsi_result;
+	u64 scsi_cmnd;
+	u64 scsi_serial;
+#define ZFCP_DBF_SCSI_OPCODE	16
+	u8 scsi_opcode[ZFCP_DBF_SCSI_OPCODE];
+	u8 scsi_retries;
+	u8 scsi_allowed;
+	u64 fsf_reqid;
+	u32 fsf_seqno;
+	u64 fsf_issued;
+	u64 old_fsf_reqid;
+	u8 rsp_validity;
+	u8 rsp_scsi_status;
+	u32 rsp_resid;
+	u8 rsp_code;
+#define ZFCP_DBF_SCSI_FCP_SNS_INFO	16
+#define ZFCP_DBF_SCSI_MAX_FCP_SNS_INFO	256
+	u32 sns_info_len;
+	u8 sns_info[ZFCP_DBF_SCSI_FCP_SNS_INFO];
+} __attribute__ ((packed));
+
+#endif /* ZFCP_DBF_H */
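Every record type in this header is a packed, tagged union: a small id
(or the tag string) selects the valid member, and __attribute__((packed))
keeps each dbf entry at a fixed layout independent of compiler padding.
As an assumed usage example (a sketch, not code from this patch), filling
a recovery-trigger record might look like:

/* kernel-context sketch; memset comes from <linux/string.h> */
static void example_fill_trigger(struct zfcp_rec_dbf_record *r,
				 u8 want, u8 need, u64 wwpn)
{
	memset(r, 0, sizeof(*r));
	r->id = ZFCP_REC_DBF_ID_TRIGGER;	/* marks u.trigger as valid */
	r->u.trigger.want = want;		/* recovery action requested */
	r->u.trigger.need = need;		/* recovery action required */
	r->u.trigger.wwpn = wwpn;
}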
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 9e9f6c1..bda8c77 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -47,6 +47,7 @@
 #include <asm/qdio.h>
 #include <asm/debug.h>
 #include <asm/ebcdic.h>
+#include "zfcp_dbf.h"
 #include "zfcp_fsf.h"
 
 
@@ -262,167 +263,6 @@
 } __attribute__((packed));
 
 /*
- * DBF stuff
- */
-#define ZFCP_DBF_TAG_SIZE      4
-
-struct zfcp_dbf_dump {
-	u8 tag[ZFCP_DBF_TAG_SIZE];
-	u32 total_size;		/* size of total dump data */
-	u32 offset;		/* how much data has being already dumped */
-	u32 size;		/* how much data comes with this record */
-	u8 data[];		/* dump data */
-} __attribute__ ((packed));
-
-/* FIXME: to be inflated when reworking the erp dbf */
-struct zfcp_erp_dbf_record {
-	u8 dummy[16];
-} __attribute__ ((packed));
-
-struct zfcp_hba_dbf_record_response {
-	u32 fsf_command;
-	u64 fsf_reqid;
-	u32 fsf_seqno;
-	u64 fsf_issued;
-	u32 fsf_prot_status;
-	u32 fsf_status;
-	u8 fsf_prot_status_qual[FSF_PROT_STATUS_QUAL_SIZE];
-	u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
-	u32 fsf_req_status;
-	u8 sbal_first;
-	u8 sbal_curr;
-	u8 sbal_last;
-	u8 pool;
-	u64 erp_action;
-	union {
-		struct {
-			u64 scsi_cmnd;
-			u64 scsi_serial;
-		} send_fcp;
-		struct {
-			u64 wwpn;
-			u32 d_id;
-			u32 port_handle;
-		} port;
-		struct {
-			u64 wwpn;
-			u64 fcp_lun;
-			u32 port_handle;
-			u32 lun_handle;
-		} unit;
-		struct {
-			u32 d_id;
-			u8 ls_code;
-		} send_els;
-	} data;
-} __attribute__ ((packed));
-
-struct zfcp_hba_dbf_record_status {
-	u8 failed;
-	u32 status_type;
-	u32 status_subtype;
-	struct fsf_queue_designator
-	 queue_designator;
-	u32 payload_size;
-#define ZFCP_DBF_UNSOL_PAYLOAD				80
-#define ZFCP_DBF_UNSOL_PAYLOAD_SENSE_DATA_AVAIL		32
-#define ZFCP_DBF_UNSOL_PAYLOAD_BIT_ERROR_THRESHOLD	56
-#define ZFCP_DBF_UNSOL_PAYLOAD_FEATURE_UPDATE_ALERT	2 * sizeof(u32)
-	u8 payload[ZFCP_DBF_UNSOL_PAYLOAD];
-} __attribute__ ((packed));
-
-struct zfcp_hba_dbf_record_qdio {
-	u32 status;
-	u32 qdio_error;
-	u32 siga_error;
-	u8 sbal_index;
-	u8 sbal_count;
-} __attribute__ ((packed));
-
-struct zfcp_hba_dbf_record {
-	u8 tag[ZFCP_DBF_TAG_SIZE];
-	u8 tag2[ZFCP_DBF_TAG_SIZE];
-	union {
-		struct zfcp_hba_dbf_record_response response;
-		struct zfcp_hba_dbf_record_status status;
-		struct zfcp_hba_dbf_record_qdio qdio;
-	} type;
-} __attribute__ ((packed));
-
-struct zfcp_san_dbf_record_ct {
-	union {
-		struct {
-			u16 cmd_req_code;
-			u8 revision;
-			u8 gs_type;
-			u8 gs_subtype;
-			u8 options;
-			u16 max_res_size;
-		} request;
-		struct {
-			u16 cmd_rsp_code;
-			u8 revision;
-			u8 reason_code;
-			u8 reason_code_expl;
-			u8 vendor_unique;
-		} response;
-	} type;
-	u32 payload_size;
-#define ZFCP_DBF_CT_PAYLOAD	24
-	u8 payload[ZFCP_DBF_CT_PAYLOAD];
-} __attribute__ ((packed));
-
-struct zfcp_san_dbf_record_els {
-	u8 ls_code;
-	u32 payload_size;
-#define ZFCP_DBF_ELS_PAYLOAD	32
-#define ZFCP_DBF_ELS_MAX_PAYLOAD 1024
-	u8 payload[ZFCP_DBF_ELS_PAYLOAD];
-} __attribute__ ((packed));
-
-struct zfcp_san_dbf_record {
-	u8 tag[ZFCP_DBF_TAG_SIZE];
-	u64 fsf_reqid;
-	u32 fsf_seqno;
-	u32 s_id;
-	u32 d_id;
-	union {
-		struct zfcp_san_dbf_record_ct ct;
-		struct zfcp_san_dbf_record_els els;
-	} type;
-} __attribute__ ((packed));
-
-struct zfcp_scsi_dbf_record {
-	u8 tag[ZFCP_DBF_TAG_SIZE];
-	u8 tag2[ZFCP_DBF_TAG_SIZE];
-	u32 scsi_id;
-	u32 scsi_lun;
-	u32 scsi_result;
-	u64 scsi_cmnd;
-	u64 scsi_serial;
-#define ZFCP_DBF_SCSI_OPCODE	16
-	u8 scsi_opcode[ZFCP_DBF_SCSI_OPCODE];
-	u8 scsi_retries;
-	u8 scsi_allowed;
-	u64 fsf_reqid;
-	u32 fsf_seqno;
-	u64 fsf_issued;
-	union {
-		u64 old_fsf_reqid;
-		struct {
-			u8 rsp_validity;
-			u8 rsp_scsi_status;
-			u32 rsp_resid;
-			u8 rsp_code;
-#define ZFCP_DBF_SCSI_FCP_SNS_INFO	16
-#define ZFCP_DBF_SCSI_MAX_FCP_SNS_INFO	256
-			u32 sns_info_len;
-			u8 sns_info[ZFCP_DBF_SCSI_FCP_SNS_INFO];
-		} fcp;
-	} type;
-} __attribute__ ((packed));
-
-/*
  * FC-FS stuff
  */
 #define R_A_TOV				10 /* seconds */
@@ -539,7 +379,7 @@
 
 /* logging routine for zfcp */
 #define _ZFCP_LOG(fmt, args...) \
-	printk(KERN_ERR ZFCP_NAME": %s(%d): " fmt, __FUNCTION__, \
+	printk(KERN_ERR ZFCP_NAME": %s(%d): " fmt, __func__, \
 	       __LINE__ , ##args)
 
 #define ZFCP_LOG(level, fmt, args...) \
@@ -634,7 +474,6 @@
 		 ZFCP_STATUS_PORT_NO_SCSI_ID)
 
 /* logical unit status */
-#define ZFCP_STATUS_UNIT_NOTSUPPUNITRESET	0x00000001
 #define ZFCP_STATUS_UNIT_TEMPORARY		0x00000002
 #define ZFCP_STATUS_UNIT_SHARED			0x00000004
 #define ZFCP_STATUS_UNIT_READONLY		0x00000008
@@ -917,15 +756,15 @@
 	u32			erp_low_mem_count; /* nr of erp actions waiting
 						      for memory */
 	struct zfcp_port	*nameserver_port;  /* adapter's nameserver */
-	debug_info_t		*erp_dbf;
+	debug_info_t		*rec_dbf;
 	debug_info_t		*hba_dbf;
 	debug_info_t		*san_dbf;          /* debug feature areas */
 	debug_info_t		*scsi_dbf;
-	spinlock_t		erp_dbf_lock;
+	spinlock_t		rec_dbf_lock;
 	spinlock_t		hba_dbf_lock;
 	spinlock_t		san_dbf_lock;
 	spinlock_t		scsi_dbf_lock;
-	struct zfcp_erp_dbf_record	erp_dbf_buf;
+	struct zfcp_rec_dbf_record	rec_dbf_buf;
 	struct zfcp_hba_dbf_record	hba_dbf_buf;
 	struct zfcp_san_dbf_record	san_dbf_buf;
 	struct zfcp_scsi_dbf_record	scsi_dbf_buf;
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 2dc8110..8054846 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -26,13 +26,17 @@
 static int zfcp_erp_adisc(struct zfcp_port *);
 static void zfcp_erp_adisc_handler(unsigned long);
 
-static int zfcp_erp_adapter_reopen_internal(struct zfcp_adapter *, int);
-static int zfcp_erp_port_forced_reopen_internal(struct zfcp_port *, int);
-static int zfcp_erp_port_reopen_internal(struct zfcp_port *, int);
-static int zfcp_erp_unit_reopen_internal(struct zfcp_unit *, int);
+static int zfcp_erp_adapter_reopen_internal(struct zfcp_adapter *, int, u8,
+					    void *);
+static int zfcp_erp_port_forced_reopen_internal(struct zfcp_port *, int, u8,
+						void *);
+static int zfcp_erp_port_reopen_internal(struct zfcp_port *, int, u8, void *);
+static int zfcp_erp_unit_reopen_internal(struct zfcp_unit *, int, u8, void *);
 
-static int zfcp_erp_port_reopen_all_internal(struct zfcp_adapter *, int);
-static int zfcp_erp_unit_reopen_all_internal(struct zfcp_port *, int);
+static int zfcp_erp_port_reopen_all_internal(struct zfcp_adapter *, int, u8,
+					     void *);
+static int zfcp_erp_unit_reopen_all_internal(struct zfcp_port *, int, u8,
+					     void *);
 
 static void zfcp_erp_adapter_block(struct zfcp_adapter *, int);
 static void zfcp_erp_adapter_unblock(struct zfcp_adapter *);
@@ -97,7 +101,8 @@
 static void zfcp_erp_action_dismiss(struct zfcp_erp_action *);
 
 static int zfcp_erp_action_enqueue(int, struct zfcp_adapter *,
-				   struct zfcp_port *, struct zfcp_unit *);
+				   struct zfcp_port *, struct zfcp_unit *,
+				   u8 id, void *ref);
 static int zfcp_erp_action_dequeue(struct zfcp_erp_action *);
 static void zfcp_erp_action_cleanup(int, struct zfcp_adapter *,
 				    struct zfcp_port *, struct zfcp_unit *,
@@ -128,11 +133,9 @@
 	atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
 	write_unlock_irq(&req_queue->queue_lock);
 
-	debug_text_event(adapter->erp_dbf, 3, "qdio_down2a");
 	while (qdio_shutdown(adapter->ccw_device,
 			     QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS)
 		ssleep(1);
-	debug_text_event(adapter->erp_dbf, 3, "qdio_down2b");
 
 	/* cleanup used outbound sbals */
 	count = atomic_read(&req_queue->free_count);
@@ -163,7 +166,7 @@
 	/* reset FSF request sequence number */
 	adapter->fsf_req_seq_no = 0;
 	/* all ports and units are closed */
-	zfcp_erp_modify_adapter_status(adapter,
+	zfcp_erp_modify_adapter_status(adapter, 24, NULL,
 				       ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR);
 }
 
@@ -179,7 +182,8 @@
 static void zfcp_fsf_request_timeout_handler(unsigned long data)
 {
 	struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
-	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
+	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 62,
+				NULL);
 }
 
 void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req, unsigned long timeout)
@@ -200,12 +204,11 @@
  * returns:	0	- initiated action successfully
  *		<0	- failed to initiate action
  */
-static int
-zfcp_erp_adapter_reopen_internal(struct zfcp_adapter *adapter, int clear_mask)
+static int zfcp_erp_adapter_reopen_internal(struct zfcp_adapter *adapter,
+					    int clear_mask, u8 id, void *ref)
 {
 	int retval;
 
-	debug_text_event(adapter->erp_dbf, 5, "a_ro");
 	ZFCP_LOG_DEBUG("reopen adapter %s\n",
 		       zfcp_get_busid_by_adapter(adapter));
 
@@ -214,14 +217,13 @@
 	if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &adapter->status)) {
 		ZFCP_LOG_DEBUG("skipped reopen of failed adapter %s\n",
 			       zfcp_get_busid_by_adapter(adapter));
-		debug_text_event(adapter->erp_dbf, 5, "a_ro_f");
 		/* ensure propagation of failed status to new devices */
-		zfcp_erp_adapter_failed(adapter);
+		zfcp_erp_adapter_failed(adapter, 13, NULL);
 		retval = -EIO;
 		goto out;
 	}
 	retval = zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER,
-					 adapter, NULL, NULL);
+					 adapter, NULL, NULL, id, ref);
 
  out:
 	return retval;
@@ -236,56 +238,56 @@
  * returns:	0	- initiated action successfully
  *		<0	- failed to initiate action
  */
-int
-zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear_mask)
+int zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear_mask,
+			    u8 id, void *ref)
 {
 	int retval;
 	unsigned long flags;
 
 	read_lock_irqsave(&zfcp_data.config_lock, flags);
 	write_lock(&adapter->erp_lock);
-	retval = zfcp_erp_adapter_reopen_internal(adapter, clear_mask);
+	retval = zfcp_erp_adapter_reopen_internal(adapter, clear_mask, id, ref);
 	write_unlock(&adapter->erp_lock);
 	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
 
 	return retval;
 }
 
-int
-zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear_mask)
+int zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear_mask,
+			      u8 id, void *ref)
 {
 	int retval;
 
 	retval = zfcp_erp_adapter_reopen(adapter,
 					 ZFCP_STATUS_COMMON_RUNNING |
 					 ZFCP_STATUS_COMMON_ERP_FAILED |
-					 clear_mask);
+					 clear_mask, id, ref);
 
 	return retval;
 }
 
-int
-zfcp_erp_port_shutdown(struct zfcp_port *port, int clear_mask)
+int zfcp_erp_port_shutdown(struct zfcp_port *port, int clear_mask, u8 id,
+			   void *ref)
 {
 	int retval;
 
 	retval = zfcp_erp_port_reopen(port,
 				      ZFCP_STATUS_COMMON_RUNNING |
 				      ZFCP_STATUS_COMMON_ERP_FAILED |
-				      clear_mask);
+				      clear_mask, id, ref);
 
 	return retval;
 }
 
-int
-zfcp_erp_unit_shutdown(struct zfcp_unit *unit, int clear_mask)
+int zfcp_erp_unit_shutdown(struct zfcp_unit *unit, int clear_mask, u8 id,
+			   void *ref)
 {
 	int retval;
 
 	retval = zfcp_erp_unit_reopen(unit,
 				      ZFCP_STATUS_COMMON_RUNNING |
 				      ZFCP_STATUS_COMMON_ERP_FAILED |
-				      clear_mask);
+				      clear_mask, id, ref);
 
 	return retval;
 }
@@ -399,8 +401,7 @@
 				"force physical port reopen "
 				"(adapter %s, port d_id=0x%06x)\n",
 				zfcp_get_busid_by_adapter(adapter), d_id);
-		debug_text_event(adapter->erp_dbf, 3, "forcreop");
-		if (zfcp_erp_port_forced_reopen(port, 0))
+		if (zfcp_erp_port_forced_reopen(port, 0, 63, NULL))
 			ZFCP_LOG_NORMAL("failed reopen of port "
 					"(adapter %s, wwpn=0x%016Lx)\n",
 					zfcp_get_busid_by_port(port),
@@ -427,7 +428,7 @@
 				"adisc_resp_wwpn=0x%016Lx)\n",
 				zfcp_get_busid_by_port(port),
 				port->wwpn, (wwn_t) adisc->wwpn);
-		if (zfcp_erp_port_reopen(port, 0))
+		if (zfcp_erp_port_reopen(port, 0, 64, NULL))
 			ZFCP_LOG_NORMAL("failed reopen of port "
 					"(adapter %s, wwpn=0x%016Lx)\n",
 					zfcp_get_busid_by_port(port),
@@ -461,7 +462,7 @@
 		ZFCP_LOG_NORMAL("reopen needed for port 0x%016Lx "
 				"on adapter %s\n ", port->wwpn,
 				zfcp_get_busid_by_port(port));
-		retval = zfcp_erp_port_forced_reopen(port, 0);
+		retval = zfcp_erp_port_forced_reopen(port, 0, 65, NULL);
 		if (retval != 0) {
 			ZFCP_LOG_NORMAL("reopen of remote port 0x%016Lx "
 					"on adapter %s failed\n", port->wwpn,
@@ -484,14 +485,11 @@
  * returns:	0	- initiated action successfully
  *		<0	- failed to initiate action
  */
-static int
-zfcp_erp_port_forced_reopen_internal(struct zfcp_port *port, int clear_mask)
+static int zfcp_erp_port_forced_reopen_internal(struct zfcp_port *port,
+						int clear_mask, u8 id,
+						void *ref)
 {
 	int retval;
-	struct zfcp_adapter *adapter = port->adapter;
-
-	debug_text_event(adapter->erp_dbf, 5, "pf_ro");
-	debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
 
 	ZFCP_LOG_DEBUG("forced reopen of port 0x%016Lx on adapter %s\n",
 		       port->wwpn, zfcp_get_busid_by_port(port));
@@ -502,14 +500,12 @@
 		ZFCP_LOG_DEBUG("skipped forced reopen of failed port 0x%016Lx "
 			       "on adapter %s\n", port->wwpn,
 			       zfcp_get_busid_by_port(port));
-		debug_text_event(adapter->erp_dbf, 5, "pf_ro_f");
-		debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
 		retval = -EIO;
 		goto out;
 	}
 
 	retval = zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
-					 port->adapter, port, NULL);
+					 port->adapter, port, NULL, id, ref);
 
  out:
 	return retval;
@@ -524,8 +520,8 @@
  * returns:	0	- initiated action successfully
  *		<0	- failed to initiate action
  */
-int
-zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear_mask)
+int zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear_mask, u8 id,
+				void *ref)
 {
 	int retval;
 	unsigned long flags;
@@ -534,7 +530,8 @@
 	adapter = port->adapter;
 	read_lock_irqsave(&zfcp_data.config_lock, flags);
 	write_lock(&adapter->erp_lock);
-	retval = zfcp_erp_port_forced_reopen_internal(port, clear_mask);
+	retval = zfcp_erp_port_forced_reopen_internal(port, clear_mask, id,
+						      ref);
 	write_unlock(&adapter->erp_lock);
 	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
 
@@ -551,14 +548,10 @@
  * returns:	0	- initiated action successfully
  *		<0	- failed to initiate action
  */
-static int
-zfcp_erp_port_reopen_internal(struct zfcp_port *port, int clear_mask)
+static int zfcp_erp_port_reopen_internal(struct zfcp_port *port, int clear_mask,
+					 u8 id, void *ref)
 {
 	int retval;
-	struct zfcp_adapter *adapter = port->adapter;
-
-	debug_text_event(adapter->erp_dbf, 5, "p_ro");
-	debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
 
 	ZFCP_LOG_DEBUG("reopen of port 0x%016Lx on adapter %s\n",
 		       port->wwpn, zfcp_get_busid_by_port(port));
@@ -569,16 +562,14 @@
 		ZFCP_LOG_DEBUG("skipped reopen of failed port 0x%016Lx "
 			       "on adapter %s\n", port->wwpn,
 			       zfcp_get_busid_by_port(port));
-		debug_text_event(adapter->erp_dbf, 5, "p_ro_f");
-		debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
 		/* ensure propagation of failed status to new devices */
-		zfcp_erp_port_failed(port);
+		zfcp_erp_port_failed(port, 14, NULL);
 		retval = -EIO;
 		goto out;
 	}
 
 	retval = zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT,
-					 port->adapter, port, NULL);
+					 port->adapter, port, NULL, id, ref);
 
  out:
 	return retval;
@@ -594,8 +585,8 @@
  * correct locking. An error recovery task is initiated to do the reopen.
  * To wait for the completion of the reopen zfcp_erp_wait should be used.
  */
-int
-zfcp_erp_port_reopen(struct zfcp_port *port, int clear_mask)
+int zfcp_erp_port_reopen(struct zfcp_port *port, int clear_mask, u8 id,
+			 void *ref)
 {
 	int retval;
 	unsigned long flags;
@@ -603,7 +594,7 @@
 
 	read_lock_irqsave(&zfcp_data.config_lock, flags);
 	write_lock(&adapter->erp_lock);
-	retval = zfcp_erp_port_reopen_internal(port, clear_mask);
+	retval = zfcp_erp_port_reopen_internal(port, clear_mask, id, ref);
 	write_unlock(&adapter->erp_lock);
 	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
 
@@ -620,14 +611,12 @@
  * returns:	0	- initiated action successfully
  *		<0	- failed to initiate action
  */
-static int
-zfcp_erp_unit_reopen_internal(struct zfcp_unit *unit, int clear_mask)
+static int zfcp_erp_unit_reopen_internal(struct zfcp_unit *unit, int clear_mask,
+					 u8 id, void *ref)
 {
 	int retval;
 	struct zfcp_adapter *adapter = unit->port->adapter;
 
-	debug_text_event(adapter->erp_dbf, 5, "u_ro");
-	debug_event(adapter->erp_dbf, 5, &unit->fcp_lun, sizeof (fcp_lun_t));
 	ZFCP_LOG_DEBUG("reopen of unit 0x%016Lx on port 0x%016Lx "
 		       "on adapter %s\n", unit->fcp_lun,
 		       unit->port->wwpn, zfcp_get_busid_by_unit(unit));
@@ -639,15 +628,12 @@
 			       "on port 0x%016Lx on adapter %s\n",
 			       unit->fcp_lun, unit->port->wwpn,
 			       zfcp_get_busid_by_unit(unit));
-		debug_text_event(adapter->erp_dbf, 5, "u_ro_f");
-		debug_event(adapter->erp_dbf, 5, &unit->fcp_lun,
-			    sizeof (fcp_lun_t));
 		retval = -EIO;
 		goto out;
 	}
 
 	retval = zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_UNIT,
-					 unit->port->adapter, unit->port, unit);
+					 adapter, unit->port, unit, id, ref);
  out:
 	return retval;
 }
@@ -662,8 +648,8 @@
  * locking. An error recovery task is initiated to do the reopen.
  * To wait for the completion of the reopen zfcp_erp_wait should be used.
  */
-int
-zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear_mask)
+int zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear_mask, u8 id,
+			 void *ref)
 {
 	int retval;
 	unsigned long flags;
@@ -675,7 +661,7 @@
 
 	read_lock_irqsave(&zfcp_data.config_lock, flags);
 	write_lock(&adapter->erp_lock);
-	retval = zfcp_erp_unit_reopen_internal(unit, clear_mask);
+	retval = zfcp_erp_unit_reopen_internal(unit, clear_mask, id, ref);
 	write_unlock(&adapter->erp_lock);
 	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
 
@@ -687,19 +673,43 @@
  */
 static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask)
 {
-	debug_text_event(adapter->erp_dbf, 6, "a_bl");
-	zfcp_erp_modify_adapter_status(adapter,
+	zfcp_erp_modify_adapter_status(adapter, 15, NULL,
 				       ZFCP_STATUS_COMMON_UNBLOCKED |
 				       clear_mask, ZFCP_CLEAR);
 }
 
+/* FIXME: isn't really atomic */
+/*
+ * returns the bits of mask that were not yet set in v, i.e.
+ * 0 if no bit has been changed, !0 if some bit has been changed
+ */
+static int atomic_test_and_set_mask(unsigned long mask, atomic_t *v)
+{
+	int changed_bits = (atomic_read(v) ^ mask) & mask;
+	atomic_set_mask(mask, v);
+	return changed_bits;
+}
+
+/* FIXME: isn't really atomic */
+/*
+ * returns the bits of mask that were still set in v, i.e.
+ * 0 if no bit has been changed, !0 if some bit has been changed
+ */
+static int atomic_test_and_clear_mask(unsigned long mask, atomic_t *v)
+{
+	int changed_bits = atomic_read(v) & mask;
+	atomic_clear_mask(mask, v);
+	return changed_bits;
+}
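These helpers return the bits that actually changed so the unblock paths
below can emit a trace record only on a real state transition rather than
on every redundant call. A userspace demonstration of the semantics (a
plain unsigned long instead of atomic_t, hence no more atomic than the
originals, per the FIXME):

#include <assert.h>

static unsigned long test_and_set_mask(unsigned long mask, unsigned long *v)
{
	unsigned long changed = (*v ^ mask) & mask;	/* bits not yet set */

	*v |= mask;
	return changed;
}

int main(void)
{
	unsigned long status = 0x1;

	assert(test_and_set_mask(0x3, &status) == 0x2);	/* bit 0x2 is new */
	assert(test_and_set_mask(0x3, &status) == 0x0);	/* no change now */
	return 0;
}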
+
 /**
  * zfcp_erp_adapter_unblock - mark adapter as unblocked, allow scsi requests
  */
 static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
 {
-	debug_text_event(adapter->erp_dbf, 6, "a_ubl");
-	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
+	if (atomic_test_and_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
+				     &adapter->status))
+		zfcp_rec_dbf_event_adapter(16, NULL, adapter);
 }
 
 /*
@@ -714,11 +724,7 @@
 static void
 zfcp_erp_port_block(struct zfcp_port *port, int clear_mask)
 {
-	struct zfcp_adapter *adapter = port->adapter;
-
-	debug_text_event(adapter->erp_dbf, 6, "p_bl");
-	debug_event(adapter->erp_dbf, 6, &port->wwpn, sizeof (wwn_t));
-	zfcp_erp_modify_port_status(port,
+	zfcp_erp_modify_port_status(port, 17, NULL,
 				    ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask,
 				    ZFCP_CLEAR);
 }
@@ -733,11 +739,9 @@
 static void
 zfcp_erp_port_unblock(struct zfcp_port *port)
 {
-	struct zfcp_adapter *adapter = port->adapter;
-
-	debug_text_event(adapter->erp_dbf, 6, "p_ubl");
-	debug_event(adapter->erp_dbf, 6, &port->wwpn, sizeof (wwn_t));
-	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status);
+	if (atomic_test_and_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
+				     &port->status))
+		zfcp_rec_dbf_event_port(18, NULL, port);
 }
 
 /*
@@ -752,11 +756,7 @@
 static void
 zfcp_erp_unit_block(struct zfcp_unit *unit, int clear_mask)
 {
-	struct zfcp_adapter *adapter = unit->port->adapter;
-
-	debug_text_event(adapter->erp_dbf, 6, "u_bl");
-	debug_event(adapter->erp_dbf, 6, &unit->fcp_lun, sizeof (fcp_lun_t));
-	zfcp_erp_modify_unit_status(unit,
+	zfcp_erp_modify_unit_status(unit, 19, NULL,
 				    ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask,
 				    ZFCP_CLEAR);
 }
@@ -771,11 +771,9 @@
 static void
 zfcp_erp_unit_unblock(struct zfcp_unit *unit)
 {
-	struct zfcp_adapter *adapter = unit->port->adapter;
-
-	debug_text_event(adapter->erp_dbf, 6, "u_ubl");
-	debug_event(adapter->erp_dbf, 6, &unit->fcp_lun, sizeof (fcp_lun_t));
-	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status);
+	if (atomic_test_and_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
+				     &unit->status))
+		zfcp_rec_dbf_event_unit(20, NULL, unit);
 }
 
 static void
@@ -783,11 +781,9 @@
 {
 	struct zfcp_adapter *adapter = erp_action->adapter;
 
-	debug_text_event(adapter->erp_dbf, 4, "a_ar");
-	debug_event(adapter->erp_dbf, 4, &erp_action->action, sizeof (int));
-
 	zfcp_erp_action_to_ready(erp_action);
 	up(&adapter->erp_ready_sem);
+	zfcp_rec_dbf_event_thread(2, adapter, 0);
 }
 
 /*
@@ -849,18 +845,15 @@
 		if (zfcp_reqlist_find_safe(adapter, erp_action->fsf_req) &&
 		    erp_action->fsf_req->erp_action == erp_action) {
 			/* fsf_req still exists */
-			debug_text_event(adapter->erp_dbf, 3, "a_ca_req");
-			debug_event(adapter->erp_dbf, 3, &erp_action->fsf_req,
-				    sizeof (unsigned long));
 			/* dismiss fsf_req of timed out/dismissed erp_action */
 			if (erp_action->status & (ZFCP_STATUS_ERP_DISMISSED |
 						  ZFCP_STATUS_ERP_TIMEDOUT)) {
-				debug_text_event(adapter->erp_dbf, 3,
-						 "a_ca_disreq");
 				erp_action->fsf_req->status |=
 					ZFCP_STATUS_FSFREQ_DISMISSED;
+				zfcp_rec_dbf_event_action(142, erp_action);
 			}
 			if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
+				zfcp_rec_dbf_event_action(143, erp_action);
 				ZFCP_LOG_NORMAL("error: erp step timed out "
 						"(action=%d, fsf_req=%p)\n ",
 						erp_action->action,
@@ -879,7 +872,6 @@
 				erp_action->fsf_req = NULL;
 			}
 		} else {
-			debug_text_event(adapter->erp_dbf, 3, "a_ca_gonereq");
 			/*
 			 * even if this fsf_req has gone, forget about
 			 * association between erp_action and fsf_req
@@ -887,8 +879,7 @@
 			erp_action->fsf_req = NULL;
 		}
 		spin_unlock(&adapter->req_list_lock);
-	} else
-		debug_text_event(adapter->erp_dbf, 3, "a_ca_noreq");
+	}
 }
 
 /**
@@ -900,19 +891,11 @@
 static void zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action,
 					  unsigned long set_mask)
 {
-	struct zfcp_adapter *adapter = erp_action->adapter;
-
 	if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING) {
-		debug_text_event(adapter->erp_dbf, 2, "a_asyh_ex");
-		debug_event(adapter->erp_dbf, 2, &erp_action->action,
-			    sizeof (int));
 		erp_action->status |= set_mask;
 		zfcp_erp_action_ready(erp_action);
 	} else {
 		/* action is ready or gone - nothing to do */
-		debug_text_event(adapter->erp_dbf, 3, "a_asyh_gone");
-		debug_event(adapter->erp_dbf, 3, &erp_action->action,
-			    sizeof (int));
 	}
 }
 
@@ -939,10 +922,6 @@
 zfcp_erp_memwait_handler(unsigned long data)
 {
 	struct zfcp_erp_action *erp_action = (struct zfcp_erp_action *) data;
-	struct zfcp_adapter *adapter = erp_action->adapter;
-
-	debug_text_event(adapter->erp_dbf, 2, "a_mwh");
-	debug_event(adapter->erp_dbf, 2, &erp_action->action, sizeof (int));
 
 	zfcp_erp_async_handler(erp_action, 0);
 }
@@ -955,10 +934,6 @@
 static void zfcp_erp_timeout_handler(unsigned long data)
 {
 	struct zfcp_erp_action *erp_action = (struct zfcp_erp_action *) data;
-	struct zfcp_adapter *adapter = erp_action->adapter;
-
-	debug_text_event(adapter->erp_dbf, 2, "a_th");
-	debug_event(adapter->erp_dbf, 2, &erp_action->action, sizeof (int));
 
 	zfcp_erp_async_handler(erp_action, ZFCP_STATUS_ERP_TIMEDOUT);
 }
@@ -973,11 +948,6 @@
  */
 static void zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action)
 {
-	struct zfcp_adapter *adapter = erp_action->adapter;
-
-	debug_text_event(adapter->erp_dbf, 2, "a_adis");
-	debug_event(adapter->erp_dbf, 2, &erp_action->action, sizeof (int));
-
 	erp_action->status |= ZFCP_STATUS_ERP_DISMISSED;
 	if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING)
 		zfcp_erp_action_ready(erp_action);
@@ -995,12 +965,10 @@
 		ZFCP_LOG_NORMAL("error: creation of erp thread failed for "
 				"adapter %s\n",
 				zfcp_get_busid_by_adapter(adapter));
-		debug_text_event(adapter->erp_dbf, 5, "a_thset_fail");
 	} else {
 		wait_event(adapter->erp_thread_wqh,
 			   atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP,
 					    &adapter->status));
-		debug_text_event(adapter->erp_dbf, 5, "a_thset_ok");
 	}
 
 	return (retval < 0);
@@ -1027,6 +995,7 @@
 
 	atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, &adapter->status);
 	up(&adapter->erp_ready_sem);
+	zfcp_rec_dbf_event_thread(2, adapter, 1);
 
 	wait_event(adapter->erp_thread_wqh,
 		   !atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP,
@@ -1035,8 +1004,6 @@
 	atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL,
 			  &adapter->status);
 
-	debug_text_event(adapter->erp_dbf, 5, "a_thki_ok");
-
 	return retval;
 }
 
@@ -1059,7 +1026,6 @@
 	/* Block all signals */
 	siginitsetinv(&current->blocked, 0);
 	atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
-	debug_text_event(adapter->erp_dbf, 5, "a_th_run");
 	wake_up(&adapter->erp_thread_wqh);
 
 	while (!atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL,
@@ -1084,12 +1050,12 @@
 		 * no action in 'ready' queue to be processed and
 		 * thread is not to be killed
 		 */
+		zfcp_rec_dbf_event_thread(4, adapter, 1);
 		down_interruptible(&adapter->erp_ready_sem);
-		debug_text_event(adapter->erp_dbf, 5, "a_th_woken");
+		zfcp_rec_dbf_event_thread(5, adapter, 1);
 	}
 
 	atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
-	debug_text_event(adapter->erp_dbf, 5, "a_th_stop");
 	wake_up(&adapter->erp_thread_wqh);
 
 	return 0;
@@ -1125,7 +1091,6 @@
 	/* dequeue dismissed action and leave, if required */
 	retval = zfcp_erp_strategy_check_action(erp_action, retval);
 	if (retval == ZFCP_ERP_DISMISSED) {
-		debug_text_event(adapter->erp_dbf, 4, "a_st_dis1");
 		goto unlock;
 	}
 
@@ -1176,20 +1141,17 @@
 		   element was timed out.
 		 */
 		if (adapter->erp_total_count == adapter->erp_low_mem_count) {
-			debug_text_event(adapter->erp_dbf, 3, "a_st_lowmem");
 			ZFCP_LOG_NORMAL("error: no mempool elements available, "
 					"restarting I/O on adapter %s "
 					"to free mempool\n",
 					zfcp_get_busid_by_adapter(adapter));
-			zfcp_erp_adapter_reopen_internal(adapter, 0);
+			zfcp_erp_adapter_reopen_internal(adapter, 0, 66, NULL);
 		} else {
-		debug_text_event(adapter->erp_dbf, 2, "a_st_memw");
 		retval = zfcp_erp_strategy_memwait(erp_action);
 		}
 		goto unlock;
 	case ZFCP_ERP_CONTINUES:
 		/* leave since this action runs asynchronously */
-		debug_text_event(adapter->erp_dbf, 6, "a_st_cont");
 		if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) {
 			--adapter->erp_low_mem_count;
 			erp_action->status &= ~ZFCP_STATUS_ERP_LOWMEM;
@@ -1218,7 +1180,6 @@
 	 * action is repeated in order to process state change
 	 */
 	if (retval == ZFCP_ERP_EXIT) {
-		debug_text_event(adapter->erp_dbf, 2, "a_st_exit");
 		goto unlock;
 	}
 
@@ -1244,8 +1205,6 @@
 	if (retval != ZFCP_ERP_DISMISSED)
 		zfcp_erp_strategy_check_queues(adapter);
 
-	debug_text_event(adapter->erp_dbf, 6, "a_st_done");
-
 	return retval;
 }
 
@@ -1260,17 +1219,12 @@
 static int
 zfcp_erp_strategy_check_action(struct zfcp_erp_action *erp_action, int retval)
 {
-	struct zfcp_adapter *adapter = erp_action->adapter;
-
 	zfcp_erp_strategy_check_fsfreq(erp_action);
 
-	debug_event(adapter->erp_dbf, 5, &erp_action->action, sizeof (int));
 	if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED) {
-		debug_text_event(adapter->erp_dbf, 3, "a_stcd_dis");
 		zfcp_erp_action_dequeue(erp_action);
 		retval = ZFCP_ERP_DISMISSED;
-	} else
-		debug_text_event(adapter->erp_dbf, 5, "a_stcd_nodis");
+	}
 
 	return retval;
 }
@@ -1279,7 +1233,6 @@
 zfcp_erp_strategy_do_action(struct zfcp_erp_action *erp_action)
 {
 	int retval = ZFCP_ERP_FAILED;
-	struct zfcp_adapter *adapter = erp_action->adapter;
 
 	/*
 	 * try to execute/continue action as far as possible,
@@ -1309,9 +1262,6 @@
 		break;
 
 	default:
-		debug_text_exception(adapter->erp_dbf, 1, "a_stda_bug");
-		debug_event(adapter->erp_dbf, 1, &erp_action->action,
-			    sizeof (int));
 		ZFCP_LOG_NORMAL("bug: unknown erp action requested on "
 				"adapter %s (action=%d)\n",
 				zfcp_get_busid_by_adapter(erp_action->adapter),
@@ -1333,10 +1283,7 @@
 zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
 {
 	int retval = ZFCP_ERP_CONTINUES;
-	struct zfcp_adapter *adapter = erp_action->adapter;
 
-	debug_text_event(adapter->erp_dbf, 6, "a_mwinit");
-	debug_event(adapter->erp_dbf, 6, &erp_action->action, sizeof (int));
 	init_timer(&erp_action->timer);
 	erp_action->timer.function = zfcp_erp_memwait_handler;
 	erp_action->timer.data = (unsigned long) erp_action;
@@ -1353,13 +1300,12 @@
  *
  */
 void
-zfcp_erp_adapter_failed(struct zfcp_adapter *adapter)
+zfcp_erp_adapter_failed(struct zfcp_adapter *adapter, u8 id, void *ref)
 {
-	zfcp_erp_modify_adapter_status(adapter,
+	zfcp_erp_modify_adapter_status(adapter, id, ref,
 				       ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
 	ZFCP_LOG_NORMAL("adapter erp failed on adapter %s\n",
 			zfcp_get_busid_by_adapter(adapter));
-	debug_text_event(adapter->erp_dbf, 2, "a_afail");
 }
 
 /*
@@ -1369,9 +1315,9 @@
  *
  */
 void
-zfcp_erp_port_failed(struct zfcp_port *port)
+zfcp_erp_port_failed(struct zfcp_port *port, u8 id, void *ref)
 {
-	zfcp_erp_modify_port_status(port,
+	zfcp_erp_modify_port_status(port, id, ref,
 				    ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
 
 	if (atomic_test_mask(ZFCP_STATUS_PORT_WKA, &port->status))
@@ -1381,9 +1327,6 @@
 	else
 		ZFCP_LOG_NORMAL("port erp failed (adapter %s, wwpn=0x%016Lx)\n",
 				zfcp_get_busid_by_port(port), port->wwpn);
-
-	debug_text_event(port->adapter->erp_dbf, 2, "p_pfail");
-	debug_event(port->adapter->erp_dbf, 2, &port->wwpn, sizeof (wwn_t));
 }
 
 /*
@@ -1393,17 +1336,14 @@
  *
  */
 void
-zfcp_erp_unit_failed(struct zfcp_unit *unit)
+zfcp_erp_unit_failed(struct zfcp_unit *unit, u8 id, void *ref)
 {
-	zfcp_erp_modify_unit_status(unit,
+	zfcp_erp_modify_unit_status(unit, id, ref,
 				    ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
 
 	ZFCP_LOG_NORMAL("unit erp failed on unit 0x%016Lx on port 0x%016Lx "
 			" on adapter %s\n", unit->fcp_lun,
 			unit->port->wwpn, zfcp_get_busid_by_unit(unit));
-	debug_text_event(unit->port->adapter->erp_dbf, 2, "u_ufail");
-	debug_event(unit->port->adapter->erp_dbf, 2,
-		    &unit->fcp_lun, sizeof (fcp_lun_t));
 }
 
 /*
@@ -1427,10 +1367,6 @@
 	struct zfcp_port *port = erp_action->port;
 	struct zfcp_unit *unit = erp_action->unit;
 
-	debug_text_event(adapter->erp_dbf, 5, "a_stct_norm");
-	debug_event(adapter->erp_dbf, 5, &erp_action->action, sizeof (int));
-	debug_event(adapter->erp_dbf, 5, &result, sizeof (int));
-
 	switch (erp_action->action) {
 
 	case ZFCP_ERP_ACTION_REOPEN_UNIT:
@@ -1457,15 +1393,14 @@
 			      struct zfcp_port *port,
 			      struct zfcp_unit *unit, int retval)
 {
-	debug_text_event(adapter->erp_dbf, 3, "a_stsc");
-	debug_event(adapter->erp_dbf, 3, &action, sizeof (int));
-
 	switch (action) {
 
 	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
 		if (zfcp_erp_strategy_statechange_detected(&adapter->status,
 							   status)) {
-			zfcp_erp_adapter_reopen_internal(adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
+			zfcp_erp_adapter_reopen_internal(adapter,
+						ZFCP_STATUS_COMMON_ERP_FAILED,
+						67, NULL);
 			retval = ZFCP_ERP_EXIT;
 		}
 		break;
@@ -1474,7 +1409,9 @@
 	case ZFCP_ERP_ACTION_REOPEN_PORT:
 		if (zfcp_erp_strategy_statechange_detected(&port->status,
 							   status)) {
-			zfcp_erp_port_reopen_internal(port, ZFCP_STATUS_COMMON_ERP_FAILED);
+			zfcp_erp_port_reopen_internal(port,
+						ZFCP_STATUS_COMMON_ERP_FAILED,
+						68, NULL);
 			retval = ZFCP_ERP_EXIT;
 		}
 		break;
@@ -1482,7 +1419,9 @@
 	case ZFCP_ERP_ACTION_REOPEN_UNIT:
 		if (zfcp_erp_strategy_statechange_detected(&unit->status,
 							   status)) {
-			zfcp_erp_unit_reopen_internal(unit, ZFCP_STATUS_COMMON_ERP_FAILED);
+			zfcp_erp_unit_reopen_internal(unit,
+						ZFCP_STATUS_COMMON_ERP_FAILED,
+						69, NULL);
 			retval = ZFCP_ERP_EXIT;
 		}
 		break;
@@ -1506,10 +1445,6 @@
 static int
 zfcp_erp_strategy_check_unit(struct zfcp_unit *unit, int result)
 {
-	debug_text_event(unit->port->adapter->erp_dbf, 5, "u_stct");
-	debug_event(unit->port->adapter->erp_dbf, 5, &unit->fcp_lun,
-		    sizeof (fcp_lun_t));
-
 	switch (result) {
 	case ZFCP_ERP_SUCCEEDED :
 		atomic_set(&unit->erp_counter, 0);
@@ -1518,7 +1453,7 @@
 	case ZFCP_ERP_FAILED :
 		atomic_inc(&unit->erp_counter);
 		if (atomic_read(&unit->erp_counter) > ZFCP_MAX_ERPS)
-			zfcp_erp_unit_failed(unit);
+			zfcp_erp_unit_failed(unit, 21, NULL);
 		break;
 	case ZFCP_ERP_EXIT :
 		/* nothing */
@@ -1536,9 +1471,6 @@
 static int
 zfcp_erp_strategy_check_port(struct zfcp_port *port, int result)
 {
-	debug_text_event(port->adapter->erp_dbf, 5, "p_stct");
-	debug_event(port->adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
-
 	switch (result) {
 	case ZFCP_ERP_SUCCEEDED :
 		atomic_set(&port->erp_counter, 0);
@@ -1547,7 +1479,7 @@
 	case ZFCP_ERP_FAILED :
 		atomic_inc(&port->erp_counter);
 		if (atomic_read(&port->erp_counter) > ZFCP_MAX_ERPS)
-			zfcp_erp_port_failed(port);
+			zfcp_erp_port_failed(port, 22, NULL);
 		break;
 	case ZFCP_ERP_EXIT :
 		/* nothing */
@@ -1565,8 +1497,6 @@
 static int
 zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter, int result)
 {
-	debug_text_event(adapter->erp_dbf, 5, "a_stct");
-
 	switch (result) {
 	case ZFCP_ERP_SUCCEEDED :
 		atomic_set(&adapter->erp_counter, 0);
@@ -1575,7 +1505,7 @@
 	case ZFCP_ERP_FAILED :
 		atomic_inc(&adapter->erp_counter);
 		if (atomic_read(&adapter->erp_counter) > ZFCP_MAX_ERPS)
-			zfcp_erp_adapter_failed(adapter);
+			zfcp_erp_adapter_failed(adapter, 23, NULL);
 		break;
 	case ZFCP_ERP_EXIT :
 		/* nothing */
@@ -1658,37 +1588,34 @@
 				   struct zfcp_port *port,
 				   struct zfcp_unit *unit, int status)
 {
-	debug_text_event(adapter->erp_dbf, 5, "a_stfol");
-	debug_event(adapter->erp_dbf, 5, &action, sizeof (int));
-
 	/* initiate follow-up actions depending on success of finished action */
 	switch (action) {
 
 	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
 		if (status == ZFCP_ERP_SUCCEEDED)
-			zfcp_erp_port_reopen_all_internal(adapter, 0);
+			zfcp_erp_port_reopen_all_internal(adapter, 0, 70, NULL);
 		else
-			zfcp_erp_adapter_reopen_internal(adapter, 0);
+			zfcp_erp_adapter_reopen_internal(adapter, 0, 71, NULL);
 		break;
 
 	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
 		if (status == ZFCP_ERP_SUCCEEDED)
-			zfcp_erp_port_reopen_internal(port, 0);
+			zfcp_erp_port_reopen_internal(port, 0, 72, NULL);
 		else
-			zfcp_erp_adapter_reopen_internal(adapter, 0);
+			zfcp_erp_adapter_reopen_internal(adapter, 0, 73, NULL);
 		break;
 
 	case ZFCP_ERP_ACTION_REOPEN_PORT:
 		if (status == ZFCP_ERP_SUCCEEDED)
-			zfcp_erp_unit_reopen_all_internal(port, 0);
+			zfcp_erp_unit_reopen_all_internal(port, 0, 74, NULL);
 		else
-			zfcp_erp_port_forced_reopen_internal(port, 0);
+			zfcp_erp_port_forced_reopen_internal(port, 0, 75, NULL);
 		break;
 
 	case ZFCP_ERP_ACTION_REOPEN_UNIT:
 		/* Nothing to do if status == ZFCP_ERP_SUCCEEDED */
 		if (status != ZFCP_ERP_SUCCEEDED)
-			zfcp_erp_port_reopen_internal(unit->port, 0);
+			zfcp_erp_port_reopen_internal(unit->port, 0, 76, NULL);
 		break;
 	}
 
@@ -1704,12 +1631,10 @@
 	read_lock(&adapter->erp_lock);
 	if (list_empty(&adapter->erp_ready_head) &&
 	    list_empty(&adapter->erp_running_head)) {
-			debug_text_event(adapter->erp_dbf, 4, "a_cq_wake");
 			atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING,
 					  &adapter->status);
 			wake_up(&adapter->erp_done_wqh);
-	} else
-		debug_text_event(adapter->erp_dbf, 5, "a_cq_notempty");
+	}
 	read_unlock(&adapter->erp_lock);
 	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
 
@@ -1733,29 +1658,27 @@
 	return retval;
 }
 
-void
-zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter,
-			       u32 mask, int set_or_clear)
+void zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, u8 id,
+				    void *ref, u32 mask, int set_or_clear)
 {
 	struct zfcp_port *port;
-	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
+	u32 changed, common_mask = mask & ZFCP_COMMON_FLAGS;
 
 	if (set_or_clear == ZFCP_SET) {
-		atomic_set_mask(mask, &adapter->status);
-		debug_text_event(adapter->erp_dbf, 3, "a_mod_as_s");
+		changed = atomic_test_and_set_mask(mask, &adapter->status);
 	} else {
-		atomic_clear_mask(mask, &adapter->status);
+		changed = atomic_test_and_clear_mask(mask, &adapter->status);
 		if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
 			atomic_set(&adapter->erp_counter, 0);
-		debug_text_event(adapter->erp_dbf, 3, "a_mod_as_c");
 	}
-	debug_event(adapter->erp_dbf, 3, &mask, sizeof (u32));
+	if (changed)
+		zfcp_rec_dbf_event_adapter(id, ref, adapter);
 
 	/* Deal with all underlying devices, only pass common_mask */
 	if (common_mask)
 		list_for_each_entry(port, &adapter->port_list_head, list)
-		    zfcp_erp_modify_port_status(port, common_mask,
-						set_or_clear);
+			zfcp_erp_modify_port_status(port, id, ref, common_mask,
+						    set_or_clear);
 }
 
 /*
@@ -1764,29 +1687,27 @@
  * purpose:	sets the port and all underlying devices to ERP_FAILED
  *
  */
-void
-zfcp_erp_modify_port_status(struct zfcp_port *port, u32 mask, int set_or_clear)
+void zfcp_erp_modify_port_status(struct zfcp_port *port, u8 id, void *ref,
+				 u32 mask, int set_or_clear)
 {
 	struct zfcp_unit *unit;
-	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
+	u32 changed, common_mask = mask & ZFCP_COMMON_FLAGS;
 
 	if (set_or_clear == ZFCP_SET) {
-		atomic_set_mask(mask, &port->status);
-		debug_text_event(port->adapter->erp_dbf, 3, "p_mod_ps_s");
+		changed = atomic_test_and_set_mask(mask, &port->status);
 	} else {
-		atomic_clear_mask(mask, &port->status);
+		changed = atomic_test_and_clear_mask(mask, &port->status);
 		if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
 			atomic_set(&port->erp_counter, 0);
-		debug_text_event(port->adapter->erp_dbf, 3, "p_mod_ps_c");
 	}
-	debug_event(port->adapter->erp_dbf, 3, &port->wwpn, sizeof (wwn_t));
-	debug_event(port->adapter->erp_dbf, 3, &mask, sizeof (u32));
+	if (changed)
+		zfcp_rec_dbf_event_port(id, ref, port);
 
 	/* Modify status of all underlying devices, only pass common mask */
 	if (common_mask)
 		list_for_each_entry(unit, &port->unit_list_head, list)
-		    zfcp_erp_modify_unit_status(unit, common_mask,
-						set_or_clear);
+			zfcp_erp_modify_unit_status(unit, id, ref, common_mask,
+						    set_or_clear);
 }
 
 /*
@@ -1795,22 +1716,21 @@
  * purpose:	sets the unit to ERP_FAILED
  *
  */
-void
-zfcp_erp_modify_unit_status(struct zfcp_unit *unit, u32 mask, int set_or_clear)
+void zfcp_erp_modify_unit_status(struct zfcp_unit *unit, u8 id, void *ref,
+				 u32 mask, int set_or_clear)
 {
+	u32 changed;
+
 	if (set_or_clear == ZFCP_SET) {
-		atomic_set_mask(mask, &unit->status);
-		debug_text_event(unit->port->adapter->erp_dbf, 3, "u_mod_us_s");
+		changed = atomic_test_and_set_mask(mask, &unit->status);
 	} else {
-		atomic_clear_mask(mask, &unit->status);
+		changed = atomic_test_and_clear_mask(mask, &unit->status);
 		if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) {
 			atomic_set(&unit->erp_counter, 0);
 		}
-		debug_text_event(unit->port->adapter->erp_dbf, 3, "u_mod_us_c");
 	}
-	debug_event(unit->port->adapter->erp_dbf, 3, &unit->fcp_lun,
-		    sizeof (fcp_lun_t));
-	debug_event(unit->port->adapter->erp_dbf, 3, &mask, sizeof (u32));
+	if (changed)
+		zfcp_rec_dbf_event_unit(id, ref, unit);
 }
 
 /*
@@ -1822,30 +1742,32 @@
  * returns:	0	- initiated action successfully
  *		<0	- failed to initiate action
  */
-int
-zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter, int clear_mask)
+int zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter, int clear_mask,
+			     u8 id, void *ref)
 {
 	int retval;
 	unsigned long flags;
 
 	read_lock_irqsave(&zfcp_data.config_lock, flags);
 	write_lock(&adapter->erp_lock);
-	retval = zfcp_erp_port_reopen_all_internal(adapter, clear_mask);
+	retval = zfcp_erp_port_reopen_all_internal(adapter, clear_mask, id,
+						   ref);
 	write_unlock(&adapter->erp_lock);
 	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
 
 	return retval;
 }
 
-static int
-zfcp_erp_port_reopen_all_internal(struct zfcp_adapter *adapter, int clear_mask)
+static int zfcp_erp_port_reopen_all_internal(struct zfcp_adapter *adapter,
+					     int clear_mask, u8 id, void *ref)
 {
 	int retval = 0;
 	struct zfcp_port *port;
 
 	list_for_each_entry(port, &adapter->port_list_head, list)
 		if (!atomic_test_mask(ZFCP_STATUS_PORT_WKA, &port->status))
-			zfcp_erp_port_reopen_internal(port, clear_mask);
+			zfcp_erp_port_reopen_internal(port, clear_mask, id,
+						      ref);
 
 	return retval;
 }
@@ -1857,14 +1779,14 @@
  *
  * returns:	FIXME
  */
-static int
-zfcp_erp_unit_reopen_all_internal(struct zfcp_port *port, int clear_mask)
+static int zfcp_erp_unit_reopen_all_internal(struct zfcp_port *port,
+					     int clear_mask, u8 id, void *ref)
 {
 	int retval = 0;
 	struct zfcp_unit *unit;
 
 	list_for_each_entry(unit, &port->unit_list_head, list)
-	    zfcp_erp_unit_reopen_internal(unit, clear_mask);
+		zfcp_erp_unit_reopen_internal(unit, clear_mask, id, ref);
 
 	return retval;
 }
@@ -1892,10 +1814,6 @@
 	else
 		retval = zfcp_erp_adapter_strategy_open(erp_action);
 
-	debug_text_event(adapter->erp_dbf, 3, "a_ast/ret");
-	debug_event(adapter->erp_dbf, 3, &erp_action->action, sizeof (int));
-	debug_event(adapter->erp_dbf, 3, &retval, sizeof (int));
-
 	if (retval == ZFCP_ERP_FAILED) {
 		ZFCP_LOG_INFO("Waiting to allow the adapter %s "
 			      "to recover itself\n",
@@ -2021,7 +1939,6 @@
 			      zfcp_get_busid_by_adapter(adapter));
 		goto failed_qdio_establish;
 	}
-	debug_text_event(adapter->erp_dbf, 3, "qdio_est");
 
 	if (qdio_activate(adapter->ccw_device, 0) != 0) {
 		ZFCP_LOG_INFO("error: activation of QDIO queues failed "
@@ -2029,7 +1946,6 @@
 			      zfcp_get_busid_by_adapter(adapter));
 		goto failed_qdio_activate;
 	}
-	debug_text_event(adapter->erp_dbf, 3, "qdio_act");
 
 	/*
 	 * put buffers into response queue,
@@ -2077,11 +1993,9 @@
 	/* NOP */
 
  failed_qdio_activate:
-	debug_text_event(adapter->erp_dbf, 3, "qdio_down1a");
 	while (qdio_shutdown(adapter->ccw_device,
 			     QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS)
 		ssleep(1);
-	debug_text_event(adapter->erp_dbf, 3, "qdio_down1b");
 
  failed_qdio_establish:
  failed_sanity:
@@ -2127,14 +2041,12 @@
 		write_unlock_irq(&adapter->erp_lock);
 		if (zfcp_fsf_exchange_config_data(erp_action)) {
 			retval = ZFCP_ERP_FAILED;
-			debug_text_event(adapter->erp_dbf, 5, "a_fstx_xf");
 			ZFCP_LOG_INFO("error:  initiation of exchange of "
 				      "configuration data failed for "
 				      "adapter %s\n",
 				      zfcp_get_busid_by_adapter(adapter));
 			break;
 		}
-		debug_text_event(adapter->erp_dbf, 6, "a_fstx_xok");
 		ZFCP_LOG_DEBUG("Xchange underway\n");
 
 		/*
@@ -2150,7 +2062,9 @@
 		 * _must_ be the one belonging to the 'exchange config
 		 * data' request.
 		 */
+		zfcp_rec_dbf_event_thread(6, adapter, 1);
 		down(&adapter->erp_ready_sem);
+		zfcp_rec_dbf_event_thread(7, adapter, 1);
 		if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
 			ZFCP_LOG_INFO("error: exchange of configuration data "
 				      "for adapter %s timed out\n",
@@ -2198,16 +2112,15 @@
 
 	ret = zfcp_fsf_exchange_port_data(erp_action);
 	if (ret == -EOPNOTSUPP) {
-		debug_text_event(adapter->erp_dbf, 3, "a_xport_notsupp");
 		return ZFCP_ERP_SUCCEEDED;
 	} else if (ret) {
-		debug_text_event(adapter->erp_dbf, 3, "a_xport_failed");
 		return ZFCP_ERP_FAILED;
 	}
-	debug_text_event(adapter->erp_dbf, 6, "a_xport_ok");
 
 	ret = ZFCP_ERP_SUCCEEDED;
+	zfcp_rec_dbf_event_thread(8, adapter, 1);
 	down(&adapter->erp_ready_sem);
+	zfcp_rec_dbf_event_thread(9, adapter, 1);
 	if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
 		ZFCP_LOG_INFO("error: exchange port data timed out (adapter "
 			      "%s)\n", zfcp_get_busid_by_adapter(adapter));
@@ -2261,7 +2174,6 @@
 {
 	int retval = ZFCP_ERP_FAILED;
 	struct zfcp_port *port = erp_action->port;
-	struct zfcp_adapter *adapter = erp_action->adapter;
 
 	switch (erp_action->step) {
 
@@ -2298,11 +2210,6 @@
 		break;
 	}
 
-	debug_text_event(adapter->erp_dbf, 3, "p_pfst/ret");
-	debug_event(adapter->erp_dbf, 3, &port->wwpn, sizeof (wwn_t));
-	debug_event(adapter->erp_dbf, 3, &erp_action->action, sizeof (int));
-	debug_event(adapter->erp_dbf, 3, &retval, sizeof (int));
-
 	return retval;
 }
 
@@ -2320,7 +2227,6 @@
 {
 	int retval = ZFCP_ERP_FAILED;
 	struct zfcp_port *port = erp_action->port;
-	struct zfcp_adapter *adapter = erp_action->adapter;
 
 	switch (erp_action->step) {
 
@@ -2353,11 +2259,6 @@
 		retval = zfcp_erp_port_strategy_open(erp_action);
 
  out:
-	debug_text_event(adapter->erp_dbf, 3, "p_pst/ret");
-	debug_event(adapter->erp_dbf, 3, &port->wwpn, sizeof (wwn_t));
-	debug_event(adapter->erp_dbf, 3, &erp_action->action, sizeof (int));
-	debug_event(adapter->erp_dbf, 3, &retval, sizeof (int));
-
 	return retval;
 }
 
@@ -2395,7 +2296,7 @@
 						port->wwpn,
 						zfcp_get_busid_by_adapter(adapter),
 						adapter->peer_wwpn);
-				zfcp_erp_port_failed(port);
+				zfcp_erp_port_failed(port, 25, NULL);
 				retval = ZFCP_ERP_FAILED;
 				break;
 			}
@@ -2421,8 +2322,8 @@
 			/* nameserver port may live again */
 			atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING,
 					&adapter->nameserver_port->status);
-			if (zfcp_erp_port_reopen(adapter->nameserver_port, 0)
-			    >= 0) {
+			if (zfcp_erp_port_reopen(adapter->nameserver_port, 0,
+						 77, erp_action) >= 0) {
 				erp_action->step =
 					ZFCP_ERP_STEP_NAMESERVER_OPEN;
 				retval = ZFCP_ERP_CONTINUES;
@@ -2453,7 +2354,7 @@
 					       "for port 0x%016Lx "
 					       "(misconfigured WWPN?)\n",
 					       port->wwpn);
-				zfcp_erp_port_failed(port);
+				zfcp_erp_port_failed(port, 26, NULL);
 				retval = ZFCP_ERP_EXIT;
 			} else {
 				ZFCP_LOG_DEBUG("nameserver look-up failed for "
@@ -2549,17 +2450,12 @@
 	read_lock_irqsave(&adapter->erp_lock, flags);
 	list_for_each_entry_safe(erp_action, tmp, &adapter->erp_running_head,
 				 list) {
-		debug_text_event(adapter->erp_dbf, 4, "p_pstnsw_n");
-		debug_event(adapter->erp_dbf, 4, &erp_action->port->wwpn,
-			    sizeof (wwn_t));
 		if (erp_action->step == ZFCP_ERP_STEP_NAMESERVER_OPEN) {
-			debug_text_event(adapter->erp_dbf, 3, "p_pstnsw_w");
-			debug_event(adapter->erp_dbf, 3,
-				    &erp_action->port->wwpn, sizeof (wwn_t));
 			if (atomic_test_mask(
 				    ZFCP_STATUS_COMMON_ERP_FAILED,
 				    &adapter->nameserver_port->status))
-				zfcp_erp_port_failed(erp_action->port);
+				zfcp_erp_port_failed(erp_action->port, 27,
+						     NULL);
 			zfcp_erp_action_ready(erp_action);
 		}
 	}
@@ -2580,26 +2476,18 @@
 zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *erp_action)
 {
 	int retval;
-	struct zfcp_adapter *adapter = erp_action->adapter;
-	struct zfcp_port *port = erp_action->port;
 
 	retval = zfcp_fsf_close_physical_port(erp_action);
 	if (retval == -ENOMEM) {
-		debug_text_event(adapter->erp_dbf, 5, "o_pfstc_nomem");
-		debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
 		retval = ZFCP_ERP_NOMEM;
 		goto out;
 	}
 	erp_action->step = ZFCP_ERP_STEP_PHYS_PORT_CLOSING;
 	if (retval != 0) {
-		debug_text_event(adapter->erp_dbf, 5, "o_pfstc_cpf");
-		debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
 		/* could not send 'open', fail */
 		retval = ZFCP_ERP_FAILED;
 		goto out;
 	}
-	debug_text_event(adapter->erp_dbf, 6, "o_pfstc_cpok");
-	debug_event(adapter->erp_dbf, 6, &port->wwpn, sizeof (wwn_t));
 	retval = ZFCP_ERP_CONTINUES;
  out:
 	return retval;
@@ -2609,10 +2497,6 @@
 zfcp_erp_port_strategy_clearstati(struct zfcp_port *port)
 {
 	int retval = 0;
-	struct zfcp_adapter *adapter = port->adapter;
-
-	debug_text_event(adapter->erp_dbf, 5, "p_pstclst");
-	debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
 
 	atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING |
 			  ZFCP_STATUS_COMMON_CLOSING |
@@ -2636,26 +2520,18 @@
 zfcp_erp_port_strategy_close(struct zfcp_erp_action *erp_action)
 {
 	int retval;
-	struct zfcp_adapter *adapter = erp_action->adapter;
-	struct zfcp_port *port = erp_action->port;
 
 	retval = zfcp_fsf_close_port(erp_action);
 	if (retval == -ENOMEM) {
-		debug_text_event(adapter->erp_dbf, 5, "p_pstc_nomem");
-		debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
 		retval = ZFCP_ERP_NOMEM;
 		goto out;
 	}
 	erp_action->step = ZFCP_ERP_STEP_PORT_CLOSING;
 	if (retval != 0) {
-		debug_text_event(adapter->erp_dbf, 5, "p_pstc_cpf");
-		debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
 		/* could not send 'close', fail */
 		retval = ZFCP_ERP_FAILED;
 		goto out;
 	}
-	debug_text_event(adapter->erp_dbf, 6, "p_pstc_cpok");
-	debug_event(adapter->erp_dbf, 6, &port->wwpn, sizeof (wwn_t));
 	retval = ZFCP_ERP_CONTINUES;
  out:
 	return retval;
@@ -2673,26 +2549,18 @@
 zfcp_erp_port_strategy_open_port(struct zfcp_erp_action *erp_action)
 {
 	int retval;
-	struct zfcp_adapter *adapter = erp_action->adapter;
-	struct zfcp_port *port = erp_action->port;
 
 	retval = zfcp_fsf_open_port(erp_action);
 	if (retval == -ENOMEM) {
-		debug_text_event(adapter->erp_dbf, 5, "p_psto_nomem");
-		debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
 		retval = ZFCP_ERP_NOMEM;
 		goto out;
 	}
 	erp_action->step = ZFCP_ERP_STEP_PORT_OPENING;
 	if (retval != 0) {
-		debug_text_event(adapter->erp_dbf, 5, "p_psto_opf");
-		debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
 		/* could not send 'open', fail */
 		retval = ZFCP_ERP_FAILED;
 		goto out;
 	}
-	debug_text_event(adapter->erp_dbf, 6, "p_psto_opok");
-	debug_event(adapter->erp_dbf, 6, &port->wwpn, sizeof (wwn_t));
 	retval = ZFCP_ERP_CONTINUES;
  out:
 	return retval;
@@ -2710,26 +2578,18 @@
 zfcp_erp_port_strategy_open_common_lookup(struct zfcp_erp_action *erp_action)
 {
 	int retval;
-	struct zfcp_adapter *adapter = erp_action->adapter;
-	struct zfcp_port *port = erp_action->port;
 
 	retval = zfcp_ns_gid_pn_request(erp_action);
 	if (retval == -ENOMEM) {
-		debug_text_event(adapter->erp_dbf, 5, "p_pstn_nomem");
-		debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
 		retval = ZFCP_ERP_NOMEM;
 		goto out;
 	}
 	erp_action->step = ZFCP_ERP_STEP_NAMESERVER_LOOKUP;
 	if (retval != 0) {
-		debug_text_event(adapter->erp_dbf, 5, "p_pstn_ref");
-		debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
 		/* could not send nameserver request, fail */
 		retval = ZFCP_ERP_FAILED;
 		goto out;
 	}
-	debug_text_event(adapter->erp_dbf, 6, "p_pstn_reok");
-	debug_event(adapter->erp_dbf, 6, &port->wwpn, sizeof (wwn_t));
 	retval = ZFCP_ERP_CONTINUES;
  out:
 	return retval;
@@ -2750,7 +2610,6 @@
 {
 	int retval = ZFCP_ERP_FAILED;
 	struct zfcp_unit *unit = erp_action->unit;
-	struct zfcp_adapter *adapter = erp_action->adapter;
 
 	switch (erp_action->step) {
 
@@ -2797,10 +2656,6 @@
 		break;
 	}
 
-	debug_text_event(adapter->erp_dbf, 3, "u_ust/ret");
-	debug_event(adapter->erp_dbf, 3, &unit->fcp_lun, sizeof (fcp_lun_t));
-	debug_event(adapter->erp_dbf, 3, &erp_action->action, sizeof (int));
-	debug_event(adapter->erp_dbf, 3, &retval, sizeof (int));
 	return retval;
 }
 
@@ -2808,10 +2663,6 @@
 zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *unit)
 {
 	int retval = 0;
-	struct zfcp_adapter *adapter = unit->port->adapter;
-
-	debug_text_event(adapter->erp_dbf, 5, "u_ustclst");
-	debug_event(adapter->erp_dbf, 5, &unit->fcp_lun, sizeof (fcp_lun_t));
 
 	atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING |
 			  ZFCP_STATUS_COMMON_CLOSING |
@@ -2835,28 +2686,18 @@
 zfcp_erp_unit_strategy_close(struct zfcp_erp_action *erp_action)
 {
 	int retval;
-	struct zfcp_adapter *adapter = erp_action->adapter;
-	struct zfcp_unit *unit = erp_action->unit;
 
 	retval = zfcp_fsf_close_unit(erp_action);
 	if (retval == -ENOMEM) {
-		debug_text_event(adapter->erp_dbf, 5, "u_ustc_nomem");
-		debug_event(adapter->erp_dbf, 5, &unit->fcp_lun,
-			    sizeof (fcp_lun_t));
 		retval = ZFCP_ERP_NOMEM;
 		goto out;
 	}
 	erp_action->step = ZFCP_ERP_STEP_UNIT_CLOSING;
 	if (retval != 0) {
-		debug_text_event(adapter->erp_dbf, 5, "u_ustc_cuf");
-		debug_event(adapter->erp_dbf, 5, &unit->fcp_lun,
-			    sizeof (fcp_lun_t));
 		/* could not send 'close', fail */
 		retval = ZFCP_ERP_FAILED;
 		goto out;
 	}
-	debug_text_event(adapter->erp_dbf, 6, "u_ustc_cuok");
-	debug_event(adapter->erp_dbf, 6, &unit->fcp_lun, sizeof (fcp_lun_t));
 	retval = ZFCP_ERP_CONTINUES;
 
  out:
@@ -2875,28 +2716,18 @@
 zfcp_erp_unit_strategy_open(struct zfcp_erp_action *erp_action)
 {
 	int retval;
-	struct zfcp_adapter *adapter = erp_action->adapter;
-	struct zfcp_unit *unit = erp_action->unit;
 
 	retval = zfcp_fsf_open_unit(erp_action);
 	if (retval == -ENOMEM) {
-		debug_text_event(adapter->erp_dbf, 5, "u_usto_nomem");
-		debug_event(adapter->erp_dbf, 5, &unit->fcp_lun,
-			    sizeof (fcp_lun_t));
 		retval = ZFCP_ERP_NOMEM;
 		goto out;
 	}
 	erp_action->step = ZFCP_ERP_STEP_UNIT_OPENING;
 	if (retval != 0) {
-		debug_text_event(adapter->erp_dbf, 5, "u_usto_ouf");
-		debug_event(adapter->erp_dbf, 5, &unit->fcp_lun,
-			    sizeof (fcp_lun_t));
 		/* could not send 'open', fail */
 		retval = ZFCP_ERP_FAILED;
 		goto out;
 	}
-	debug_text_event(adapter->erp_dbf, 6, "u_usto_ouok");
-	debug_event(adapter->erp_dbf, 6, &unit->fcp_lun, sizeof (fcp_lun_t));
 	retval = ZFCP_ERP_CONTINUES;
  out:
 	return retval;
@@ -2918,14 +2749,12 @@
  *
  * returns:
  */
-static int
-zfcp_erp_action_enqueue(int action,
-			struct zfcp_adapter *adapter,
-			struct zfcp_port *port, struct zfcp_unit *unit)
+static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
+				   struct zfcp_port *port,
+				   struct zfcp_unit *unit, u8 id, void *ref)
 {
-	int retval = 1;
+	int retval = 1, need = want;
 	struct zfcp_erp_action *erp_action = NULL;
-	int stronger_action = 0;
 	u32 status = 0;
 
 	/*
@@ -2944,17 +2773,11 @@
 			      &adapter->status))
 		return -EIO;
 
-	debug_event(adapter->erp_dbf, 4, &action, sizeof (int));
 	/* check whether we really need this */
-	switch (action) {
+	switch (want) {
 	case ZFCP_ERP_ACTION_REOPEN_UNIT:
 		if (atomic_test_mask
 		    (ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status)) {
-			debug_text_event(adapter->erp_dbf, 4, "u_actenq_drp");
-			debug_event(adapter->erp_dbf, 4, &port->wwpn,
-				    sizeof (wwn_t));
-			debug_event(adapter->erp_dbf, 4, &unit->fcp_lun,
-				    sizeof (fcp_lun_t));
 			goto out;
 		}
 		if (!atomic_test_mask
@@ -2964,18 +2787,13 @@
 			goto out;
 		}
 		if (!atomic_test_mask
-		    (ZFCP_STATUS_COMMON_UNBLOCKED, &port->status)) {
-			stronger_action = ZFCP_ERP_ACTION_REOPEN_PORT;
-			unit = NULL;
-		}
+		    (ZFCP_STATUS_COMMON_UNBLOCKED, &port->status))
+			need = ZFCP_ERP_ACTION_REOPEN_PORT;
 		/* fall through !!! */
 
 	case ZFCP_ERP_ACTION_REOPEN_PORT:
 		if (atomic_test_mask
 		    (ZFCP_STATUS_COMMON_ERP_INUSE, &port->status)) {
-			debug_text_event(adapter->erp_dbf, 4, "p_actenq_drp");
-			debug_event(adapter->erp_dbf, 4, &port->wwpn,
-				    sizeof (wwn_t));
 			goto out;
 		}
 		/* fall through !!! */
@@ -2987,15 +2805,9 @@
 			    ZFCP_ERP_ACTION_REOPEN_PORT_FORCED) {
 				ZFCP_LOG_INFO("dropped erp action %i (port "
 					      "0x%016Lx, action in use: %i)\n",
-					      action, port->wwpn,
+					      want, port->wwpn,
 					      port->erp_action.action);
-				debug_text_event(adapter->erp_dbf, 4,
-						 "pf_actenq_drp");
-			} else
-				debug_text_event(adapter->erp_dbf, 4,
-						 "pf_actenq_drpcp");
-			debug_event(adapter->erp_dbf, 4, &port->wwpn,
-				    sizeof (wwn_t));
+			}
 			goto out;
 		}
 		if (!atomic_test_mask
@@ -3005,46 +2817,36 @@
 			goto out;
 		}
 		if (!atomic_test_mask
-		    (ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status)) {
-			stronger_action = ZFCP_ERP_ACTION_REOPEN_ADAPTER;
-			port = NULL;
-		}
+		    (ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status))
+			need = ZFCP_ERP_ACTION_REOPEN_ADAPTER;
 		/* fall through !!! */
 
 	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
 		if (atomic_test_mask
 		    (ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status)) {
-			debug_text_event(adapter->erp_dbf, 4, "a_actenq_drp");
 			goto out;
 		}
 		break;
 
 	default:
-		debug_text_exception(adapter->erp_dbf, 1, "a_actenq_bug");
-		debug_event(adapter->erp_dbf, 1, &action, sizeof (int));
 		ZFCP_LOG_NORMAL("bug: unknown erp action requested "
 				"on adapter %s (action=%d)\n",
-				zfcp_get_busid_by_adapter(adapter), action);
+				zfcp_get_busid_by_adapter(adapter), want);
 		goto out;
 	}
 
 	/* check whether we need something stronger first */
-	if (stronger_action) {
-		debug_text_event(adapter->erp_dbf, 4, "a_actenq_str");
-		debug_event(adapter->erp_dbf, 4, &stronger_action,
-			    sizeof (int));
+	if (need) {
 		ZFCP_LOG_DEBUG("stronger erp action %d needed before "
 			       "erp action %d on adapter %s\n",
-			       stronger_action, action,
-			       zfcp_get_busid_by_adapter(adapter));
-		action = stronger_action;
+			       need, want, zfcp_get_busid_by_adapter(adapter));
 	}
 
 	/* mark adapter to have some error recovery pending */
 	atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
 
 	/* setup error recovery action */
-	switch (action) {
+	switch (need) {
 
 	case ZFCP_ERP_ACTION_REOPEN_UNIT:
 		zfcp_unit_get(unit);
@@ -3077,13 +2879,11 @@
 		break;
 	}
 
-	debug_text_event(adapter->erp_dbf, 4, "a_actenq");
-
 	memset(erp_action, 0, sizeof (struct zfcp_erp_action));
 	erp_action->adapter = adapter;
 	erp_action->port = port;
 	erp_action->unit = unit;
-	erp_action->action = action;
+	erp_action->action = need;
 	erp_action->status = status;
 
 	++adapter->erp_total_count;
@@ -3091,8 +2891,11 @@
 	/* finally put it into 'ready' queue and kick erp thread */
 	list_add_tail(&erp_action->list, &adapter->erp_ready_head);
 	up(&adapter->erp_ready_sem);
+	zfcp_rec_dbf_event_thread(1, adapter, 0);
 	retval = 0;
  out:
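+	/* trace both the requested (want) and the enqueued (need) action */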
+	zfcp_rec_dbf_event_trigger(id, ref, want, need, erp_action,
+				   adapter, port, unit);
 	return retval;
 }
 
@@ -3108,9 +2911,9 @@
 		erp_action->status &= ~ZFCP_STATUS_ERP_LOWMEM;
 	}
 
-	debug_text_event(adapter->erp_dbf, 4, "a_actdeq");
-	debug_event(adapter->erp_dbf, 4, &erp_action->action, sizeof (int));
 	list_del(&erp_action->list);
+	zfcp_rec_dbf_event_action(144, erp_action);
+
 	switch (erp_action->action) {
 	case ZFCP_ERP_ACTION_REOPEN_UNIT:
 		atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
@@ -3215,7 +3018,6 @@
 {
 	struct zfcp_port *port;
 
-	debug_text_event(adapter->erp_dbf, 5, "a_actab");
 	if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status))
 		zfcp_erp_action_dismiss(&adapter->erp_action);
 	else
@@ -3226,10 +3028,7 @@
 static void zfcp_erp_action_dismiss_port(struct zfcp_port *port)
 {
 	struct zfcp_unit *unit;
-	struct zfcp_adapter *adapter = port->adapter;
 
-	debug_text_event(adapter->erp_dbf, 5, "p_actab");
-	debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
 	if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status))
 		zfcp_erp_action_dismiss(&port->erp_action);
 	else
@@ -3239,92 +3038,60 @@
 
 static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit)
 {
-	struct zfcp_adapter *adapter = unit->port->adapter;
-
-	debug_text_event(adapter->erp_dbf, 5, "u_actab");
-	debug_event(adapter->erp_dbf, 5, &unit->fcp_lun, sizeof (fcp_lun_t));
 	if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status))
 		zfcp_erp_action_dismiss(&unit->erp_action);
 }
 
 static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
 {
-	struct zfcp_adapter *adapter = erp_action->adapter;
-
-	debug_text_event(adapter->erp_dbf, 6, "a_toru");
-	debug_event(adapter->erp_dbf, 6, &erp_action->action, sizeof (int));
 	list_move(&erp_action->list, &erp_action->adapter->erp_running_head);
+	zfcp_rec_dbf_event_action(145, erp_action);
 }
 
 static void zfcp_erp_action_to_ready(struct zfcp_erp_action *erp_action)
 {
-	struct zfcp_adapter *adapter = erp_action->adapter;
-
-	debug_text_event(adapter->erp_dbf, 6, "a_tore");
-	debug_event(adapter->erp_dbf, 6, &erp_action->action, sizeof (int));
 	list_move(&erp_action->list, &erp_action->adapter->erp_ready_head);
+	zfcp_rec_dbf_event_action(146, erp_action);
 }
 
-void
-zfcp_erp_port_boxed(struct zfcp_port *port)
+void zfcp_erp_port_boxed(struct zfcp_port *port, u8 id, void *ref)
 {
-	struct zfcp_adapter *adapter = port->adapter;
 	unsigned long flags;
 
-	debug_text_event(adapter->erp_dbf, 3, "p_access_boxed");
-	debug_event(adapter->erp_dbf, 3, &port->wwpn, sizeof(wwn_t));
 	read_lock_irqsave(&zfcp_data.config_lock, flags);
-	zfcp_erp_modify_port_status(port,
-			ZFCP_STATUS_COMMON_ACCESS_BOXED,
-			ZFCP_SET);
+	zfcp_erp_modify_port_status(port, id, ref,
+				    ZFCP_STATUS_COMMON_ACCESS_BOXED, ZFCP_SET);
 	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
-	zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED);
+	zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref);
 }
 
-void
-zfcp_erp_unit_boxed(struct zfcp_unit *unit)
+void zfcp_erp_unit_boxed(struct zfcp_unit *unit, u8 id, void *ref)
 {
-	struct zfcp_adapter *adapter = unit->port->adapter;
-
-	debug_text_event(adapter->erp_dbf, 3, "u_access_boxed");
-	debug_event(adapter->erp_dbf, 3, &unit->fcp_lun, sizeof(fcp_lun_t));
-	zfcp_erp_modify_unit_status(unit,
-			ZFCP_STATUS_COMMON_ACCESS_BOXED,
-			ZFCP_SET);
-	zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED);
+	zfcp_erp_modify_unit_status(unit, id, ref,
+				    ZFCP_STATUS_COMMON_ACCESS_BOXED, ZFCP_SET);
+	zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref);
 }
 
-void
-zfcp_erp_port_access_denied(struct zfcp_port *port)
+void zfcp_erp_port_access_denied(struct zfcp_port *port, u8 id, void *ref)
 {
-	struct zfcp_adapter *adapter = port->adapter;
 	unsigned long flags;
 
-	debug_text_event(adapter->erp_dbf, 3, "p_access_denied");
-	debug_event(adapter->erp_dbf, 3, &port->wwpn, sizeof(wwn_t));
 	read_lock_irqsave(&zfcp_data.config_lock, flags);
-	zfcp_erp_modify_port_status(port,
-			ZFCP_STATUS_COMMON_ERP_FAILED |
-			ZFCP_STATUS_COMMON_ACCESS_DENIED,
-			ZFCP_SET);
+	zfcp_erp_modify_port_status(port, id, ref,
+				    ZFCP_STATUS_COMMON_ERP_FAILED |
+				    ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET);
 	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
 }
 
-void
-zfcp_erp_unit_access_denied(struct zfcp_unit *unit)
+void zfcp_erp_unit_access_denied(struct zfcp_unit *unit, u8 id, void *ref)
 {
-	struct zfcp_adapter *adapter = unit->port->adapter;
-
-	debug_text_event(adapter->erp_dbf, 3, "u_access_denied");
-	debug_event(adapter->erp_dbf, 3, &unit->fcp_lun, sizeof(fcp_lun_t));
-	zfcp_erp_modify_unit_status(unit,
-			ZFCP_STATUS_COMMON_ERP_FAILED |
-			ZFCP_STATUS_COMMON_ACCESS_DENIED,
-			ZFCP_SET);
+	zfcp_erp_modify_unit_status(unit, id, ref,
+				    ZFCP_STATUS_COMMON_ERP_FAILED |
+				    ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET);
 }
 
-void
-zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter)
+void zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter, u8 id,
+				     void *ref)
 {
 	struct zfcp_port *port;
 	unsigned long flags;
@@ -3332,54 +3099,43 @@
 	if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
 		return;
 
-	debug_text_event(adapter->erp_dbf, 3, "a_access_recover");
-	debug_event(adapter->erp_dbf, 3, zfcp_get_busid_by_adapter(adapter), 8);
-
 	read_lock_irqsave(&zfcp_data.config_lock, flags);
 	if (adapter->nameserver_port)
-		zfcp_erp_port_access_changed(adapter->nameserver_port);
+		zfcp_erp_port_access_changed(adapter->nameserver_port, id, ref);
 	list_for_each_entry(port, &adapter->port_list_head, list)
 		if (port != adapter->nameserver_port)
-			zfcp_erp_port_access_changed(port);
+			zfcp_erp_port_access_changed(port, id, ref);
 	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
 }
 
-void
-zfcp_erp_port_access_changed(struct zfcp_port *port)
+void zfcp_erp_port_access_changed(struct zfcp_port *port, u8 id, void *ref)
 {
 	struct zfcp_adapter *adapter = port->adapter;
 	struct zfcp_unit *unit;
 
-	debug_text_event(adapter->erp_dbf, 3, "p_access_recover");
-	debug_event(adapter->erp_dbf, 3, &port->wwpn, sizeof(wwn_t));
-
 	if (!atomic_test_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED,
 			      &port->status) &&
 	    !atomic_test_mask(ZFCP_STATUS_COMMON_ACCESS_BOXED,
 			      &port->status)) {
 		if (!atomic_test_mask(ZFCP_STATUS_PORT_WKA, &port->status))
 			list_for_each_entry(unit, &port->unit_list_head, list)
-				zfcp_erp_unit_access_changed(unit);
+				zfcp_erp_unit_access_changed(unit, id, ref);
 		return;
 	}
 
 	ZFCP_LOG_NORMAL("reopen of port 0x%016Lx on adapter %s "
 			"(due to ACT update)\n",
 			port->wwpn, zfcp_get_busid_by_adapter(adapter));
-	if (zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED) != 0)
+	if (zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref))
 		ZFCP_LOG_NORMAL("failed reopen of port"
 				"(adapter %s, wwpn=0x%016Lx)\n",
 				zfcp_get_busid_by_adapter(adapter), port->wwpn);
 }
 
-void
-zfcp_erp_unit_access_changed(struct zfcp_unit *unit)
+void zfcp_erp_unit_access_changed(struct zfcp_unit *unit, u8 id, void *ref)
 {
 	struct zfcp_adapter *adapter = unit->port->adapter;
 
-	debug_text_event(adapter->erp_dbf, 3, "u_access_recover");
-	debug_event(adapter->erp_dbf, 3, &unit->fcp_lun, sizeof(fcp_lun_t));
-
 	if (!atomic_test_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED,
 			      &unit->status) &&
 	    !atomic_test_mask(ZFCP_STATUS_COMMON_ACCESS_BOXED,
@@ -3390,7 +3146,7 @@
 			" on adapter %s (due to ACT update)\n",
 			unit->fcp_lun, unit->port->wwpn,
 			zfcp_get_busid_by_adapter(adapter));
-	if (zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED) != 0)
+	if (zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref))
 		ZFCP_LOG_NORMAL("failed reopen of unit (adapter %s, "
 				"wwpn=0x%016Lx, fcp_lun=0x%016Lx)\n",
 				zfcp_get_busid_by_adapter(adapter),
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 06b1079..6abf178 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -131,22 +131,25 @@
 extern struct fc_function_template zfcp_transport_functions;
 
 /******************************** ERP ****************************************/
-extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, u32, int);
-extern int  zfcp_erp_adapter_reopen(struct zfcp_adapter *, int);
-extern int  zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int);
-extern void zfcp_erp_adapter_failed(struct zfcp_adapter *);
+extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, u8, void *,
+					   u32, int);
+extern int  zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, u8, void *);
+extern int  zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, u8, void *);
+extern void zfcp_erp_adapter_failed(struct zfcp_adapter *, u8, void *);
 
-extern void zfcp_erp_modify_port_status(struct zfcp_port *, u32, int);
-extern int  zfcp_erp_port_reopen(struct zfcp_port *, int);
-extern int  zfcp_erp_port_shutdown(struct zfcp_port *, int);
-extern int  zfcp_erp_port_forced_reopen(struct zfcp_port *, int);
-extern void zfcp_erp_port_failed(struct zfcp_port *);
-extern int  zfcp_erp_port_reopen_all(struct zfcp_adapter *, int);
+extern void zfcp_erp_modify_port_status(struct zfcp_port *, u8, void *, u32,
+					int);
+extern int  zfcp_erp_port_reopen(struct zfcp_port *, int, u8, void *);
+extern int  zfcp_erp_port_shutdown(struct zfcp_port *, int, u8, void *);
+extern int  zfcp_erp_port_forced_reopen(struct zfcp_port *, int, u8, void *);
+extern void zfcp_erp_port_failed(struct zfcp_port *, u8, void *);
+extern int  zfcp_erp_port_reopen_all(struct zfcp_adapter *, int, u8, void *);
 
-extern void zfcp_erp_modify_unit_status(struct zfcp_unit *, u32, int);
-extern int  zfcp_erp_unit_reopen(struct zfcp_unit *, int);
-extern int  zfcp_erp_unit_shutdown(struct zfcp_unit *, int);
-extern void zfcp_erp_unit_failed(struct zfcp_unit *);
+extern void zfcp_erp_modify_unit_status(struct zfcp_unit *, u8, void *, u32,
+					int);
+extern int  zfcp_erp_unit_reopen(struct zfcp_unit *, int, u8, void *);
+extern int  zfcp_erp_unit_shutdown(struct zfcp_unit *, int, u8, void *);
+extern void zfcp_erp_unit_failed(struct zfcp_unit *, u8, void *);
 
 extern int  zfcp_erp_thread_setup(struct zfcp_adapter *);
 extern int  zfcp_erp_thread_kill(struct zfcp_adapter *);
@@ -155,15 +158,25 @@
 
 extern int  zfcp_test_link(struct zfcp_port *);
 
-extern void zfcp_erp_port_boxed(struct zfcp_port *);
-extern void zfcp_erp_unit_boxed(struct zfcp_unit *);
-extern void zfcp_erp_port_access_denied(struct zfcp_port *);
-extern void zfcp_erp_unit_access_denied(struct zfcp_unit *);
-extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *);
-extern void zfcp_erp_port_access_changed(struct zfcp_port *);
-extern void zfcp_erp_unit_access_changed(struct zfcp_unit *);
+extern void zfcp_erp_port_boxed(struct zfcp_port *, u8 id, void *ref);
+extern void zfcp_erp_unit_boxed(struct zfcp_unit *, u8 id, void *ref);
+extern void zfcp_erp_port_access_denied(struct zfcp_port *, u8 id, void *ref);
+extern void zfcp_erp_unit_access_denied(struct zfcp_unit *, u8 id, void *ref);
+extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, u8, void *);
+extern void zfcp_erp_port_access_changed(struct zfcp_port *, u8, void *);
+extern void zfcp_erp_unit_access_changed(struct zfcp_unit *, u8, void *);
 
 /******************************** AUX ****************************************/
+extern void zfcp_rec_dbf_event_thread(u8 id, struct zfcp_adapter *adapter,
+				      int lock);
+extern void zfcp_rec_dbf_event_adapter(u8 id, void *ref, struct zfcp_adapter *);
+extern void zfcp_rec_dbf_event_port(u8 id, void *ref, struct zfcp_port *port);
+extern void zfcp_rec_dbf_event_unit(u8 id, void *ref, struct zfcp_unit *unit);
+extern void zfcp_rec_dbf_event_trigger(u8 id, void *ref, u8 want, u8 need,
+				       void *action, struct zfcp_adapter *,
+				       struct zfcp_port *, struct zfcp_unit *);
+extern void zfcp_rec_dbf_event_action(u8 id, struct zfcp_erp_action *);
+
 extern void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *);
 extern void zfcp_hba_dbf_event_fsf_unsol(const char *, struct zfcp_adapter *,
 					 struct fsf_status_read_buffer *);
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 0dff058..7c3f028 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -46,7 +46,7 @@
 static int zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *);
 static int zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *);
 static int zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *);
-static void zfcp_fsf_link_down_info_eval(struct zfcp_adapter *,
+static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *, u8,
 	struct fsf_link_down_info *);
 static int zfcp_fsf_req_dispatch(struct zfcp_fsf_req *);
 
@@ -284,37 +284,6 @@
 		goto skip_protstatus;
 	}
 
-	/* log additional information provided by FSF (if any) */
-	if (likely(qtcb->header.log_length)) {
-		/* do not trust them ;-) */
-		if (unlikely(qtcb->header.log_start >
-			     sizeof(struct fsf_qtcb))) {
-			ZFCP_LOG_NORMAL
-			    ("bug: ULP (FSF logging) log data starts "
-			     "beyond end of packet header. Ignored. "
-			     "(start=%i, size=%li)\n",
-			     qtcb->header.log_start,
-			     sizeof(struct fsf_qtcb));
-			goto forget_log;
-		}
-		if (unlikely((size_t) (qtcb->header.log_start +
-				       qtcb->header.log_length) >
-			     sizeof(struct fsf_qtcb))) {
-			ZFCP_LOG_NORMAL("bug: ULP (FSF logging) log data ends "
-					"beyond end of packet header. Ignored. "
-					"(start=%i, length=%i, size=%li)\n",
-					qtcb->header.log_start,
-					qtcb->header.log_length,
-					sizeof(struct fsf_qtcb));
-			goto forget_log;
-		}
-		ZFCP_LOG_TRACE("ULP log data: \n");
-		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE,
-			      (char *) qtcb + qtcb->header.log_start,
-			      qtcb->header.log_length);
-	}
- forget_log:
-
 	/* evaluate FSF Protocol Status */
 	switch (qtcb->prefix.prot_status) {
 
@@ -329,7 +298,7 @@
 				zfcp_get_busid_by_adapter(adapter),
 				prot_status_qual->version_error.fsf_version,
 				ZFCP_QTCB_VERSION);
-		zfcp_erp_adapter_shutdown(adapter, 0);
+		zfcp_erp_adapter_shutdown(adapter, 0, 117, fsf_req);
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 
@@ -340,7 +309,7 @@
 				qtcb->prefix.req_seq_no,
 				zfcp_get_busid_by_adapter(adapter),
 				prot_status_qual->sequence_error.exp_req_seq_no);
-		zfcp_erp_adapter_reopen(adapter, 0);
+		zfcp_erp_adapter_reopen(adapter, 0, 98, fsf_req);
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_RETRY;
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
@@ -351,7 +320,7 @@
 				"that used on adapter %s. "
 				"Stopping all operations on this adapter.\n",
 				zfcp_get_busid_by_adapter(adapter));
-		zfcp_erp_adapter_shutdown(adapter, 0);
+		zfcp_erp_adapter_shutdown(adapter, 0, 118, fsf_req);
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 
@@ -368,14 +337,15 @@
 				*(unsigned long long*)
 				(&qtcb->bottom.support.req_handle),
 					zfcp_get_busid_by_adapter(adapter));
-		zfcp_erp_adapter_shutdown(adapter, 0);
+		zfcp_erp_adapter_shutdown(adapter, 0, 78, fsf_req);
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 
 	case FSF_PROT_LINK_DOWN:
-		zfcp_fsf_link_down_info_eval(adapter,
+		zfcp_fsf_link_down_info_eval(fsf_req, 37,
 					     &prot_status_qual->link_down_info);
-		zfcp_erp_adapter_reopen(adapter, 0);
+		/* FIXME: reopening adapter now? better wait for link up */
+		zfcp_erp_adapter_reopen(adapter, 0, 79, fsf_req);
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 
@@ -385,12 +355,13 @@
 			      "Re-starting operations on this adapter.\n",
 			      zfcp_get_busid_by_adapter(adapter));
 		/* All ports should be marked as ready to run again */
-		zfcp_erp_modify_adapter_status(adapter,
+		zfcp_erp_modify_adapter_status(adapter, 28, NULL,
 					       ZFCP_STATUS_COMMON_RUNNING,
 					       ZFCP_SET);
 		zfcp_erp_adapter_reopen(adapter,
 					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED
-					| ZFCP_STATUS_COMMON_ERP_FAILED);
+					| ZFCP_STATUS_COMMON_ERP_FAILED,
+					99, fsf_req);
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 
@@ -400,7 +371,7 @@
 				"Restarting all operations on this "
 				"adapter.\n",
 				zfcp_get_busid_by_adapter(adapter));
-		zfcp_erp_adapter_reopen(adapter, 0);
+		zfcp_erp_adapter_reopen(adapter, 0, 100, fsf_req);
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_RETRY;
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
@@ -413,7 +384,7 @@
 				"(debug info 0x%x).\n",
 				zfcp_get_busid_by_adapter(adapter),
 				qtcb->prefix.prot_status);
-		zfcp_erp_adapter_shutdown(adapter, 0);
+		zfcp_erp_adapter_shutdown(adapter, 0, 119, fsf_req);
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 	}
 
@@ -452,7 +423,7 @@
 				"(debug info 0x%x).\n",
 				zfcp_get_busid_by_adapter(fsf_req->adapter),
 				fsf_req->qtcb->header.fsf_command);
-		zfcp_erp_adapter_shutdown(fsf_req->adapter, 0);
+		zfcp_erp_adapter_shutdown(fsf_req->adapter, 0, 120, fsf_req);
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 
@@ -506,7 +477,7 @@
 				"problem on the adapter %s "
 				"Stopping all operations on this adapter. ",
 				zfcp_get_busid_by_adapter(fsf_req->adapter));
-		zfcp_erp_adapter_shutdown(fsf_req->adapter, 0);
+		zfcp_erp_adapter_shutdown(fsf_req->adapter, 0, 121, fsf_req);
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_SQ_ULP_PROGRAMMING_ERROR:
@@ -537,9 +508,11 @@
  * zfcp_fsf_link_down_info_eval - evaluate link down information block
  */
 static void
-zfcp_fsf_link_down_info_eval(struct zfcp_adapter *adapter,
+zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *fsf_req, u8 id,
 			     struct fsf_link_down_info *link_down)
 {
+	struct zfcp_adapter *adapter = fsf_req->adapter;
+
 	if (atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
 	                     &adapter->status))
 		return;
@@ -630,7 +603,7 @@
 				link_down->vendor_specific_code);
 
  out:
-	zfcp_erp_adapter_failed(adapter);
+	zfcp_erp_adapter_failed(adapter, id, fsf_req);
 }
 
 /*
@@ -824,19 +797,14 @@
 	switch (status_buffer->status_subtype) {
 
 	case FSF_STATUS_READ_SUB_CLOSE_PHYS_PORT:
-		debug_text_event(adapter->erp_dbf, 3, "unsol_pc_phys:");
-		zfcp_erp_port_reopen(port, 0);
+		zfcp_erp_port_reopen(port, 0, 101, fsf_req);
 		break;
 
 	case FSF_STATUS_READ_SUB_ERROR_PORT:
-		debug_text_event(adapter->erp_dbf, 1, "unsol_pc_err:");
-		zfcp_erp_port_shutdown(port, 0);
+		zfcp_erp_port_shutdown(port, 0, 122, fsf_req);
 		break;
 
 	default:
-		debug_text_event(adapter->erp_dbf, 0, "unsol_unk_sub:");
-		debug_exception(adapter->erp_dbf, 0,
-				&status_buffer->status_subtype, sizeof (u32));
 		ZFCP_LOG_NORMAL("bug: Undefined status subtype received "
 				"for a reopen indication on port with "
 				"d_id 0x%06x on the adapter %s. "
@@ -928,7 +896,7 @@
 		case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
 			ZFCP_LOG_INFO("Physical link to adapter %s is down\n",
 				      zfcp_get_busid_by_adapter(adapter));
-			zfcp_fsf_link_down_info_eval(adapter,
+			zfcp_fsf_link_down_info_eval(fsf_req, 38,
 				(struct fsf_link_down_info *)
 				&status_buffer->payload);
 			break;
@@ -936,7 +904,7 @@
 			ZFCP_LOG_INFO("Local link to adapter %s is down "
 				      "due to failed FDISC login\n",
 				      zfcp_get_busid_by_adapter(adapter));
-			zfcp_fsf_link_down_info_eval(adapter,
+			zfcp_fsf_link_down_info_eval(fsf_req, 39,
 				(struct fsf_link_down_info *)
 				&status_buffer->payload);
 			break;
@@ -944,13 +912,13 @@
 			ZFCP_LOG_INFO("Local link to adapter %s is down "
 				      "due to firmware update on adapter\n",
 				      zfcp_get_busid_by_adapter(adapter));
-			zfcp_fsf_link_down_info_eval(adapter, NULL);
+			zfcp_fsf_link_down_info_eval(fsf_req, 40, NULL);
 			break;
 		default:
 			ZFCP_LOG_INFO("Local link to adapter %s is down "
 				      "due to unknown reason\n",
 				      zfcp_get_busid_by_adapter(adapter));
-			zfcp_fsf_link_down_info_eval(adapter, NULL);
+			zfcp_fsf_link_down_info_eval(fsf_req, 41, NULL);
 		};
 		break;
 
@@ -959,12 +927,13 @@
 				"Restarting operations on this adapter\n",
 				zfcp_get_busid_by_adapter(adapter));
 		/* All ports should be marked as ready to run again */
-		zfcp_erp_modify_adapter_status(adapter,
+		zfcp_erp_modify_adapter_status(adapter, 30, NULL,
 					       ZFCP_STATUS_COMMON_RUNNING,
 					       ZFCP_SET);
 		zfcp_erp_adapter_reopen(adapter,
 					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED
-					| ZFCP_STATUS_COMMON_ERP_FAILED);
+					| ZFCP_STATUS_COMMON_ERP_FAILED,
+					102, fsf_req);
 		break;
 
 	case FSF_STATUS_READ_NOTIFICATION_LOST:
@@ -998,13 +967,13 @@
 
 		if (status_buffer->status_subtype &
 		    FSF_STATUS_READ_SUB_ACT_UPDATED)
-			zfcp_erp_adapter_access_changed(adapter);
+			zfcp_erp_adapter_access_changed(adapter, 135, fsf_req);
 		break;
 
 	case FSF_STATUS_READ_CFDC_UPDATED:
 		ZFCP_LOG_NORMAL("CFDC has been updated on the adapter %s\n",
 			      zfcp_get_busid_by_adapter(adapter));
-		zfcp_erp_adapter_access_changed(adapter);
+		zfcp_erp_adapter_access_changed(adapter, 136, fsf_req);
 		break;
 
 	case FSF_STATUS_READ_CFDC_HARDENED:
@@ -1025,7 +994,6 @@
 		break;
 
 	case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
-		debug_text_event(adapter->erp_dbf, 2, "unsol_features:");
 		ZFCP_LOG_INFO("List of supported features on adapter %s has "
 			      "been changed from 0x%08X to 0x%08X\n",
 			      zfcp_get_busid_by_adapter(adapter),
@@ -1073,7 +1041,7 @@
 			ZFCP_LOG_INFO("restart adapter %s due to status read "
 				      "buffer shortage\n",
 				      zfcp_get_busid_by_adapter(adapter));
-			zfcp_erp_adapter_reopen(adapter, 0);
+			zfcp_erp_adapter_reopen(adapter, 0, 103, fsf_req);
 		}
 	}
  out:
@@ -1174,8 +1142,6 @@
 
 	case FSF_PORT_HANDLE_NOT_VALID:
 		if (fsf_stat_qual->word[0] != fsf_stat_qual->word[1]) {
-			debug_text_event(new_fsf_req->adapter->erp_dbf, 3,
-					 "fsf_s_phand_nv0");
 			/*
 			 * In this case a command that was sent prior to a port
 			 * reopen was aborted (handles are different). This is
@@ -1194,17 +1160,14 @@
 				      fsf_status_qual,
 				      sizeof (union fsf_status_qual));
 			/* Let's hope this sorts out the mess */
-			debug_text_event(new_fsf_req->adapter->erp_dbf, 1,
-					 "fsf_s_phand_nv1");
-			zfcp_erp_adapter_reopen(unit->port->adapter, 0);
+			zfcp_erp_adapter_reopen(unit->port->adapter, 0, 104,
+						new_fsf_req);
 			new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		}
 		break;
 
 	case FSF_LUN_HANDLE_NOT_VALID:
 		if (fsf_stat_qual->word[0] != fsf_stat_qual->word[1]) {
-			debug_text_event(new_fsf_req->adapter->erp_dbf, 3,
-					 "fsf_s_lhand_nv0");
 			/*
 			 * In this case a command that was sent prior to a unit
 			 * reopen was aborted (handles are different).
@@ -1226,17 +1189,13 @@
 				      fsf_status_qual,
 				      sizeof (union fsf_status_qual));
 			/* Let's hope this sorts out the mess */
-			debug_text_event(new_fsf_req->adapter->erp_dbf, 1,
-					 "fsf_s_lhand_nv1");
-			zfcp_erp_port_reopen(unit->port, 0);
+			zfcp_erp_port_reopen(unit->port, 0, 105, new_fsf_req);
 			new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		}
 		break;
 
 	case FSF_FCP_COMMAND_DOES_NOT_EXIST:
 		retval = 0;
-		debug_text_event(new_fsf_req->adapter->erp_dbf, 3,
-				 "fsf_s_no_exist");
 		new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
 		break;
 
@@ -1244,9 +1203,7 @@
 		ZFCP_LOG_INFO("Remote port 0x%016Lx on adapter %s needs to "
 			      "be reopened\n", unit->port->wwpn,
 			      zfcp_get_busid_by_unit(unit));
-		debug_text_event(new_fsf_req->adapter->erp_dbf, 2,
-				 "fsf_s_pboxed");
-		zfcp_erp_port_boxed(unit->port);
+		zfcp_erp_port_boxed(unit->port, 47, new_fsf_req);
 		new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR
 		    | ZFCP_STATUS_FSFREQ_RETRY;
 		break;
@@ -1257,8 +1214,7 @@
                         "to be reopened\n",
                         unit->fcp_lun, unit->port->wwpn,
                         zfcp_get_busid_by_unit(unit));
-                debug_text_event(new_fsf_req->adapter->erp_dbf, 1, "fsf_s_lboxed");
-		zfcp_erp_unit_boxed(unit);
+		zfcp_erp_unit_boxed(unit, 48, new_fsf_req);
                 new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR
                         | ZFCP_STATUS_FSFREQ_RETRY;
                 break;
@@ -1266,26 +1222,17 @@
 	case FSF_ADAPTER_STATUS_AVAILABLE:
 		switch (new_fsf_req->qtcb->header.fsf_status_qual.word[0]) {
 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
-			debug_text_event(new_fsf_req->adapter->erp_dbf, 1,
-					 "fsf_sq_ltest");
 			zfcp_test_link(unit->port);
 			new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 			break;
 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
 			/* SCSI stack will escalate */
-			debug_text_event(new_fsf_req->adapter->erp_dbf, 1,
-					 "fsf_sq_ulp");
 			new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 			break;
 		default:
 			ZFCP_LOG_NORMAL
 			    ("bug: Wrong status qualifier 0x%x arrived.\n",
 			     new_fsf_req->qtcb->header.fsf_status_qual.word[0]);
-			debug_text_event(new_fsf_req->adapter->erp_dbf, 0,
-					 "fsf_sq_inval:");
-			debug_exception(new_fsf_req->adapter->erp_dbf, 0,
-					&new_fsf_req->qtcb->header.
-					fsf_status_qual.word[0], sizeof (u32));
 			break;
 		}
 		break;
@@ -1299,11 +1246,6 @@
 		ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
 				"(debug info 0x%x)\n",
 				new_fsf_req->qtcb->header.fsf_status);
-		debug_text_event(new_fsf_req->adapter->erp_dbf, 0,
-				 "fsf_s_inval:");
-		debug_exception(new_fsf_req->adapter->erp_dbf, 0,
-				&new_fsf_req->qtcb->header.fsf_status,
-				sizeof (u32));
 		break;
 	}
  skip_fsfstatus:
@@ -1506,8 +1448,7 @@
 			      zfcp_get_busid_by_port(port),
 			      ZFCP_FC_SERVICE_CLASS_DEFAULT);
 		/* stop operation for this adapter */
-		debug_text_exception(adapter->erp_dbf, 0, "fsf_s_class_nsup");
-		zfcp_erp_adapter_shutdown(adapter, 0);
+		zfcp_erp_adapter_shutdown(adapter, 0, 123, fsf_req);
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 
@@ -1515,13 +1456,11 @@
                 switch (header->fsf_status_qual.word[0]){
                 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
 			/* reopening link to port */
-			debug_text_event(adapter->erp_dbf, 1, "fsf_sq_ltest");
 			zfcp_test_link(port);
 			fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 			break;
                 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
 			/* ERP strategy will escalate */
-			debug_text_event(adapter->erp_dbf, 1, "fsf_sq_ulp");
 			fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 			break;
                 default:
@@ -1549,8 +1488,7 @@
 				break;
 			}
 		}
-		debug_text_event(adapter->erp_dbf, 1, "fsf_s_access");
-		zfcp_erp_port_access_denied(port);
+		zfcp_erp_port_access_denied(port, 55, fsf_req);
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 
@@ -1562,7 +1500,6 @@
 		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_INFO,
 			      (char *) &header->fsf_status_qual,
 			      sizeof (union fsf_status_qual));
-		debug_text_event(adapter->erp_dbf, 1, "fsf_s_gcom_rej");
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 
@@ -1575,8 +1512,7 @@
 		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_INFO,
 			      (char *) &header->fsf_status_qual,
 			      sizeof (union fsf_status_qual));
-		debug_text_event(adapter->erp_dbf, 1, "fsf_s_phandle_nv");
-		zfcp_erp_adapter_reopen(adapter, 0);
+		zfcp_erp_adapter_reopen(adapter, 0, 106, fsf_req);
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 
@@ -1584,8 +1520,7 @@
 		ZFCP_LOG_INFO("port needs to be reopened "
 			      "(adapter %s, port d_id=0x%06x)\n",
 			      zfcp_get_busid_by_port(port), port->d_id);
-		debug_text_event(adapter->erp_dbf, 2, "fsf_s_pboxed");
-		zfcp_erp_port_boxed(port);
+		zfcp_erp_port_boxed(port, 49, fsf_req);
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR
 		    | ZFCP_STATUS_FSFREQ_RETRY;
 		break;
@@ -1624,9 +1559,6 @@
        default:
 		ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
 				"(debug info 0x%x)\n", header->fsf_status);
-		debug_text_event(adapter->erp_dbf, 0, "fsf_sq_inval:");
-		debug_exception(adapter->erp_dbf, 0,
-				&header->fsf_status_qual.word[0], sizeof (u32));
 		break;
 	}
 
@@ -1810,21 +1742,18 @@
 			      zfcp_get_busid_by_adapter(adapter),
 			      ZFCP_FC_SERVICE_CLASS_DEFAULT);
 		/* stop operation for this adapter */
-		debug_text_exception(adapter->erp_dbf, 0, "fsf_s_class_nsup");
-		zfcp_erp_adapter_shutdown(adapter, 0);
+		zfcp_erp_adapter_shutdown(adapter, 0, 124, fsf_req);
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 
 	case FSF_ADAPTER_STATUS_AVAILABLE:
 		switch (header->fsf_status_qual.word[0]){
 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
-			debug_text_event(adapter->erp_dbf, 1, "fsf_sq_ltest");
 			if (port && (send_els->ls_code != ZFCP_LS_ADISC))
 				zfcp_test_link(port);
 			fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 			break;
 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
-			debug_text_event(adapter->erp_dbf, 1, "fsf_sq_ulp");
 			fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 			retval =
 			  zfcp_handle_els_rjt(header->fsf_status_qual.word[1],
@@ -1832,7 +1761,6 @@
 					      &header->fsf_status_qual.word[2]);
 			break;
 		case FSF_SQ_RETRY_IF_POSSIBLE:
-			debug_text_event(adapter->erp_dbf, 1, "fsf_sq_retry");
 			fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 			break;
 		default:
@@ -1909,9 +1837,8 @@
 				break;
 			}
 		}
-		debug_text_event(adapter->erp_dbf, 1, "fsf_s_access");
 		if (port != NULL)
-			zfcp_erp_port_access_denied(port);
+			zfcp_erp_port_access_denied(port, 56, fsf_req);
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 
@@ -1921,9 +1848,6 @@
 			"(adapter: %s, fsf_status=0x%08x)\n",
 			zfcp_get_busid_by_adapter(adapter),
 			header->fsf_status);
-		debug_text_event(adapter->erp_dbf, 0, "fsf_sq_inval");
-		debug_exception(adapter->erp_dbf, 0,
-			&header->fsf_status_qual.word[0], sizeof(u32));
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	}
@@ -2132,8 +2056,7 @@
 				"versions in comparison to this device "
 				"driver (try updated device driver)\n",
 				zfcp_get_busid_by_adapter(adapter));
-		debug_text_event(adapter->erp_dbf, 0, "low_qtcb_ver");
-		zfcp_erp_adapter_shutdown(adapter, 0);
+		zfcp_erp_adapter_shutdown(adapter, 0, 125, fsf_req);
 		return -EIO;
 	}
 	if (ZFCP_QTCB_VERSION > bottom->high_qtcb_version) {
@@ -2142,8 +2065,7 @@
 				"versions than this device driver uses"
 				"(consider a microcode upgrade)\n",
 				zfcp_get_busid_by_adapter(adapter));
-		debug_text_event(adapter->erp_dbf, 0, "high_qtcb_ver");
-		zfcp_erp_adapter_shutdown(adapter, 0);
+		zfcp_erp_adapter_shutdown(adapter, 0, 126, fsf_req);
 		return -EIO;
 	}
 	return 0;
@@ -2183,17 +2105,13 @@
 					adapter->peer_wwnn,
 					adapter->peer_wwpn,
 					adapter->peer_d_id);
-			debug_text_event(fsf_req->adapter->erp_dbf, 0,
-					"top-p-to-p");
 			break;
 		case FC_PORTTYPE_NLPORT:
 			ZFCP_LOG_NORMAL("error: Arbitrated loop fibrechannel "
 					"topology detected at adapter %s "
 					"unsupported, shutting down adapter\n",
 					zfcp_get_busid_by_adapter(adapter));
-			debug_text_event(fsf_req->adapter->erp_dbf, 0,
-					 "top-al");
-			zfcp_erp_adapter_shutdown(adapter, 0);
+			zfcp_erp_adapter_shutdown(adapter, 0, 127, fsf_req);
 			return -EIO;
 		case FC_PORTTYPE_NPORT:
 			ZFCP_LOG_NORMAL("Switched fabric fibrechannel "
@@ -2208,9 +2126,7 @@
 					"of a type known to the zfcp "
 					"driver, shutting down adapter\n",
 					zfcp_get_busid_by_adapter(adapter));
-			debug_text_exception(fsf_req->adapter->erp_dbf, 0,
-					     "unknown-topo");
-			zfcp_erp_adapter_shutdown(adapter, 0);
+			zfcp_erp_adapter_shutdown(adapter, 0, 128, fsf_req);
 			return -EIO;
 		}
 		bottom = &qtcb->bottom.config;
@@ -2222,33 +2138,24 @@
 					bottom->max_qtcb_size,
 					zfcp_get_busid_by_adapter(adapter),
 					sizeof(struct fsf_qtcb));
-			debug_text_event(fsf_req->adapter->erp_dbf, 0,
-					 "qtcb-size");
-			debug_event(fsf_req->adapter->erp_dbf, 0,
-				    &bottom->max_qtcb_size, sizeof (u32));
-			zfcp_erp_adapter_shutdown(adapter, 0);
+			zfcp_erp_adapter_shutdown(adapter, 0, 129, fsf_req);
 			return -EIO;
 		}
 		atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
 				&adapter->status);
 		break;
 	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
-		debug_text_event(adapter->erp_dbf, 0, "xchg-inco");
-
 		if (zfcp_fsf_exchange_config_evaluate(fsf_req, 0))
 			return -EIO;
 
 		atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
 				&adapter->status);
 
-		zfcp_fsf_link_down_info_eval(adapter,
+		zfcp_fsf_link_down_info_eval(fsf_req, 42,
 			&qtcb->header.fsf_status_qual.link_down_info);
 		break;
 	default:
-		debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf-stat-ng");
-		debug_event(fsf_req->adapter->erp_dbf, 0,
-			    &fsf_req->qtcb->header.fsf_status, sizeof(u32));
-		zfcp_erp_adapter_shutdown(adapter, 0);
+		zfcp_erp_adapter_shutdown(adapter, 0, 130, fsf_req);
 		return -EIO;
 	}
 	return 0;
@@ -2424,13 +2331,9 @@
 	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
 		zfcp_fsf_exchange_port_evaluate(fsf_req, 0);
 		atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
-		zfcp_fsf_link_down_info_eval(adapter,
+		zfcp_fsf_link_down_info_eval(fsf_req, 43,
 			&qtcb->header.fsf_status_qual.link_down_info);
                 break;
-        default:
-		debug_text_event(adapter->erp_dbf, 0, "xchg-port-ng");
-		debug_event(adapter->erp_dbf, 0,
-			    &fsf_req->qtcb->header.fsf_status, sizeof(u32));
 	}
 }
 
@@ -2528,8 +2431,6 @@
 		ZFCP_LOG_NORMAL("bug: remote port 0x%016Lx on adapter %s "
 				"is already open.\n",
 				port->wwpn, zfcp_get_busid_by_port(port));
-		debug_text_exception(fsf_req->adapter->erp_dbf, 0,
-				     "fsf_s_popen");
 		/*
 		 * This is a bug, however operation should continue normally
 		 * if it is simply ignored
@@ -2553,8 +2454,7 @@
 				break;
 			}
 		}
-		debug_text_event(fsf_req->adapter->erp_dbf, 1, "fsf_s_access");
-		zfcp_erp_port_access_denied(port);
+		zfcp_erp_port_access_denied(port, 57, fsf_req);
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 
@@ -2563,24 +2463,18 @@
 			      "The remote port 0x%016Lx on adapter %s "
 			      "could not be opened. Disabling it.\n",
 			      port->wwpn, zfcp_get_busid_by_port(port));
-		debug_text_event(fsf_req->adapter->erp_dbf, 1,
-				 "fsf_s_max_ports");
-		zfcp_erp_port_failed(port);
+		zfcp_erp_port_failed(port, 31, fsf_req);
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 
 	case FSF_ADAPTER_STATUS_AVAILABLE:
 		switch (header->fsf_status_qual.word[0]) {
 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
-			debug_text_event(fsf_req->adapter->erp_dbf, 1,
-					 "fsf_sq_ltest");
 			/* ERP strategy will escalate */
 			fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 			break;
 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
 			/* ERP strategy will escalate */
-			debug_text_event(fsf_req->adapter->erp_dbf, 1,
-					 "fsf_sq_ulp");
 			fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 			break;
 		case FSF_SQ_NO_RETRY_POSSIBLE:
@@ -2589,21 +2483,13 @@
 					"Disabling it.\n",
 					port->wwpn,
 					zfcp_get_busid_by_port(port));
-			debug_text_exception(fsf_req->adapter->erp_dbf, 0,
-					     "fsf_sq_no_retry");
-			zfcp_erp_port_failed(port);
+			zfcp_erp_port_failed(port, 32, fsf_req);
 			fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 			break;
 		default:
 			ZFCP_LOG_NORMAL
 			    ("bug: Wrong status qualifier 0x%x arrived.\n",
 			     header->fsf_status_qual.word[0]);
-			debug_text_event(fsf_req->adapter->erp_dbf, 0,
-					 "fsf_sq_inval:");
-			debug_exception(
-				fsf_req->adapter->erp_dbf, 0,
-				&header->fsf_status_qual.word[0],
-				sizeof (u32));
 			break;
 		}
 		break;
@@ -2646,17 +2532,12 @@
 					"warning: insufficient length of "
 					"PLOGI payload (%i)\n",
 					fsf_req->qtcb->bottom.support.els1_length);
-				debug_text_event(fsf_req->adapter->erp_dbf, 0,
-						 "fsf_s_short_plogi:");
 				/* skip sanity check and assume wwpn is ok */
 			} else {
 				if (plogi->serv_param.wwpn != port->wwpn) {
 					ZFCP_LOG_INFO("warning: d_id of port "
 						      "0x%016Lx changed during "
 						      "open\n", port->wwpn);
-					debug_text_event(
-						fsf_req->adapter->erp_dbf, 0,
-						"fsf_s_did_change:");
 					atomic_clear_mask(
 						ZFCP_STATUS_PORT_DID_DID,
 						&port->status);
@@ -2681,9 +2562,6 @@
 		ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
 				"(debug info 0x%x)\n",
 				header->fsf_status);
-		debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf_s_inval:");
-		debug_exception(fsf_req->adapter->erp_dbf, 0,
-				&header->fsf_status, sizeof (u32));
 		break;
 	}
 
@@ -2787,9 +2665,7 @@
 		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
 			      (char *) &fsf_req->qtcb->header.fsf_status_qual,
 			      sizeof (union fsf_status_qual));
-		debug_text_event(fsf_req->adapter->erp_dbf, 1,
-				 "fsf_s_phand_nv");
-		zfcp_erp_adapter_reopen(port->adapter, 0);
+		zfcp_erp_adapter_reopen(port->adapter, 0, 107, fsf_req);
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 
@@ -2804,7 +2680,7 @@
 		ZFCP_LOG_TRACE("remote port 0x016%Lx on adapter %s closed, "
 			       "port handle 0x%x\n", port->wwpn,
 			       zfcp_get_busid_by_port(port), port->handle);
-		zfcp_erp_modify_port_status(port,
+		zfcp_erp_modify_port_status(port, 33, fsf_req,
 					    ZFCP_STATUS_COMMON_OPEN,
 					    ZFCP_CLEAR);
 		retval = 0;
@@ -2814,10 +2690,6 @@
 		ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
 				"(debug info 0x%x)\n",
 				fsf_req->qtcb->header.fsf_status);
-		debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf_s_inval:");
-		debug_exception(fsf_req->adapter->erp_dbf, 0,
-				&fsf_req->qtcb->header.fsf_status,
-				sizeof (u32));
 		break;
 	}
 
@@ -2930,9 +2802,7 @@
 		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
 			      (char *) &header->fsf_status_qual,
 			      sizeof (union fsf_status_qual));
-		debug_text_event(fsf_req->adapter->erp_dbf, 1,
-				 "fsf_s_phand_nv");
-		zfcp_erp_adapter_reopen(port->adapter, 0);
+		zfcp_erp_adapter_reopen(port->adapter, 0, 108, fsf_req);
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 
@@ -2953,8 +2823,7 @@
 				break;
 			}
 		}
-		debug_text_event(fsf_req->adapter->erp_dbf, 1, "fsf_s_access");
-		zfcp_erp_port_access_denied(port);
+		zfcp_erp_port_access_denied(port, 58, fsf_req);
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 
@@ -2964,35 +2833,32 @@
 			       "to close it physically.\n",
 			       port->wwpn,
 			       zfcp_get_busid_by_port(port));
-		debug_text_event(fsf_req->adapter->erp_dbf, 1, "fsf_s_pboxed");
-		zfcp_erp_port_boxed(port);
+		zfcp_erp_port_boxed(port, 50, fsf_req);
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR |
 			ZFCP_STATUS_FSFREQ_RETRY;
+
+		/* can't use generic zfcp_erp_modify_port_status because
+		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
+		atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
+		list_for_each_entry(unit, &port->unit_list_head, list)
+			atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
+					  &unit->status);
 		break;
 
 	case FSF_ADAPTER_STATUS_AVAILABLE:
 		switch (header->fsf_status_qual.word[0]) {
 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
-			debug_text_event(fsf_req->adapter->erp_dbf, 1,
-					 "fsf_sq_ltest");
 			/* This will now be escalated by ERP */
 			fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 			break;
 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
 			/* ERP strategy will escalate */
-			debug_text_event(fsf_req->adapter->erp_dbf, 1,
-					 "fsf_sq_ulp");
 			fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 			break;
 		default:
 			ZFCP_LOG_NORMAL
 			    ("bug: Wrong status qualifier 0x%x arrived.\n",
 			     header->fsf_status_qual.word[0]);
-			debug_text_event(fsf_req->adapter->erp_dbf, 0,
-					 "fsf_sq_inval:");
-			debug_exception(
-				fsf_req->adapter->erp_dbf, 0,
-				&header->fsf_status_qual.word[0], sizeof (u32));
 			break;
 		}
 		break;
@@ -3015,9 +2881,6 @@
 		ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
 				"(debug info 0x%x)\n",
 				header->fsf_status);
-		debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf_s_inval:");
-		debug_exception(fsf_req->adapter->erp_dbf, 0,
-				&header->fsf_status, sizeof (u32));
 		break;
 	}
 
@@ -3149,8 +3012,7 @@
 		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
 			      (char *) &header->fsf_status_qual,
 			      sizeof (union fsf_status_qual));
-		debug_text_event(adapter->erp_dbf, 1, "fsf_s_ph_nv");
-		zfcp_erp_adapter_reopen(unit->port->adapter, 0);
+		zfcp_erp_adapter_reopen(unit->port->adapter, 0, 109, fsf_req);
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 
@@ -3159,8 +3021,6 @@
 				"remote port 0x%016Lx on adapter %s twice.\n",
 				unit->fcp_lun,
 				unit->port->wwpn, zfcp_get_busid_by_unit(unit));
-		debug_text_exception(adapter->erp_dbf, 0,
-				     "fsf_s_uopen");
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 
@@ -3182,8 +3042,7 @@
 				break;
 			}
 		}
-		debug_text_event(adapter->erp_dbf, 1, "fsf_s_access");
-		zfcp_erp_unit_access_denied(unit);
+		zfcp_erp_unit_access_denied(unit, 59, fsf_req);
 		atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status);
                 atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status);
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -3193,8 +3052,7 @@
 		ZFCP_LOG_DEBUG("The remote port 0x%016Lx on adapter %s "
 			       "needs to be reopened\n",
 			       unit->port->wwpn, zfcp_get_busid_by_unit(unit));
-		debug_text_event(adapter->erp_dbf, 2, "fsf_s_pboxed");
-		zfcp_erp_port_boxed(unit->port);
+		zfcp_erp_port_boxed(unit->port, 51, fsf_req);
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR |
 			ZFCP_STATUS_FSFREQ_RETRY;
 		break;
@@ -3234,9 +3092,7 @@
 		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
 			      (char *) &header->fsf_status_qual,
 			      sizeof (union fsf_status_qual));
-		debug_text_event(adapter->erp_dbf, 2,
-				 "fsf_s_l_sh_vio");
-		zfcp_erp_unit_access_denied(unit);
+		zfcp_erp_unit_access_denied(unit, 60, fsf_req);
 		atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status);
 		atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status);
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -3250,9 +3106,7 @@
 			      unit->fcp_lun,
 			      unit->port->wwpn,
 			      zfcp_get_busid_by_unit(unit));
-		debug_text_event(adapter->erp_dbf, 1,
-				 "fsf_s_max_units");
-		zfcp_erp_unit_failed(unit);
+		zfcp_erp_unit_failed(unit, 34, fsf_req);
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 
@@ -3260,26 +3114,17 @@
 		switch (header->fsf_status_qual.word[0]) {
 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
 			/* Re-establish link to port */
-			debug_text_event(adapter->erp_dbf, 1,
-					 "fsf_sq_ltest");
 			zfcp_test_link(unit->port);
 			fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 			break;
 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
 			/* ERP strategy will escalate */
-			debug_text_event(adapter->erp_dbf, 1,
-					 "fsf_sq_ulp");
 			fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 			break;
 		default:
 			ZFCP_LOG_NORMAL
 			    ("bug: Wrong status qualifier 0x%x arrived.\n",
 			     header->fsf_status_qual.word[0]);
-			debug_text_event(adapter->erp_dbf, 0,
-					 "fsf_sq_inval:");
-			debug_exception(adapter->erp_dbf, 0,
-					&header->fsf_status_qual.word[0],
-				sizeof (u32));
 		}
 		break;
 
@@ -3331,15 +3176,15 @@
         		if (exclusive && !readwrite) {
                 		ZFCP_LOG_NORMAL("exclusive access of read-only "
 						"unit not supported\n");
-				zfcp_erp_unit_failed(unit);
+				zfcp_erp_unit_failed(unit, 35, fsf_req);
 				fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-				zfcp_erp_unit_shutdown(unit, 0);
+				zfcp_erp_unit_shutdown(unit, 0, 80, fsf_req);
         		} else if (!exclusive && readwrite) {
                 		ZFCP_LOG_NORMAL("shared access of read-write "
 						"unit not supported\n");
-                		zfcp_erp_unit_failed(unit);
+				zfcp_erp_unit_failed(unit, 36, fsf_req);
 				fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-				zfcp_erp_unit_shutdown(unit, 0);
+				zfcp_erp_unit_shutdown(unit, 0, 81, fsf_req);
         		}
 		}
 
@@ -3350,9 +3195,6 @@
 		ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
 				"(debug info 0x%x)\n",
 				header->fsf_status);
-		debug_text_event(adapter->erp_dbf, 0, "fsf_s_inval:");
-		debug_exception(adapter->erp_dbf, 0,
-				&header->fsf_status, sizeof (u32));
 		break;
 	}
 
@@ -3465,9 +3307,7 @@
 		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
 			      (char *) &fsf_req->qtcb->header.fsf_status_qual,
 			      sizeof (union fsf_status_qual));
-		debug_text_event(fsf_req->adapter->erp_dbf, 1,
-				 "fsf_s_phand_nv");
-		zfcp_erp_adapter_reopen(unit->port->adapter, 0);
+		zfcp_erp_adapter_reopen(unit->port->adapter, 0, 110, fsf_req);
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 
@@ -3483,9 +3323,7 @@
 		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
 			      (char *) &fsf_req->qtcb->header.fsf_status_qual,
 			      sizeof (union fsf_status_qual));
-		debug_text_event(fsf_req->adapter->erp_dbf, 1,
-				 "fsf_s_lhand_nv");
-		zfcp_erp_port_reopen(unit->port, 0);
+		zfcp_erp_port_reopen(unit->port, 0, 111, fsf_req);
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 
@@ -3494,8 +3332,7 @@
 			       "needs to be reopened\n",
 			       unit->port->wwpn,
 			       zfcp_get_busid_by_unit(unit));
-		debug_text_event(fsf_req->adapter->erp_dbf, 2, "fsf_s_pboxed");
-		zfcp_erp_port_boxed(unit->port);
+		zfcp_erp_port_boxed(unit->port, 52, fsf_req);
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR |
 			ZFCP_STATUS_FSFREQ_RETRY;
 		break;
@@ -3504,27 +3341,17 @@
 		switch (fsf_req->qtcb->header.fsf_status_qual.word[0]) {
 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
 			/* re-establish link to port */
-			debug_text_event(fsf_req->adapter->erp_dbf, 1,
-					 "fsf_sq_ltest");
 			zfcp_test_link(unit->port);
 			fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 			break;
 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
 			/* ERP strategy will escalate */
-			debug_text_event(fsf_req->adapter->erp_dbf, 1,
-					 "fsf_sq_ulp");
 			fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 			break;
 		default:
 			ZFCP_LOG_NORMAL
 			    ("bug: Wrong status qualifier 0x%x arrived.\n",
 			     fsf_req->qtcb->header.fsf_status_qual.word[0]);
-			debug_text_event(fsf_req->adapter->erp_dbf, 0,
-					 "fsf_sq_inval:");
-			debug_exception(
-				fsf_req->adapter->erp_dbf, 0,
-				&fsf_req->qtcb->header.fsf_status_qual.word[0],
-				sizeof (u32));
 			break;
 		}
 		break;
@@ -3545,10 +3372,6 @@
 		ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
 				"(debug info 0x%x)\n",
 				fsf_req->qtcb->header.fsf_status);
-		debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf_s_inval:");
-		debug_exception(fsf_req->adapter->erp_dbf, 0,
-				&fsf_req->qtcb->header.fsf_status,
-				sizeof (u32));
 		break;
 	}
 
@@ -3703,7 +3526,7 @@
 					zfcp_get_busid_by_unit(unit),
 					unit->port->wwpn,
 					unit->fcp_lun);
-			zfcp_erp_unit_shutdown(unit, 0);
+			zfcp_erp_unit_shutdown(unit, 0, 131, fsf_req);
 			retval = -EINVAL;
 		}
 		goto no_fit;
@@ -3739,8 +3562,8 @@
  send_failed:
  no_fit:
  failed_scsi_cmnd:
- unit_blocked:
 	zfcp_unit_put(unit);
+ unit_blocked:
 	zfcp_fsf_req_free(fsf_req);
 	fsf_req = NULL;
 	scsi_cmnd->host_scribble = NULL;
@@ -3861,9 +3684,7 @@
 		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
 			      (char *) &header->fsf_status_qual,
 			      sizeof (union fsf_status_qual));
-		debug_text_event(fsf_req->adapter->erp_dbf, 1,
-				 "fsf_s_phand_nv");
-		zfcp_erp_adapter_reopen(unit->port->adapter, 0);
+		zfcp_erp_adapter_reopen(unit->port->adapter, 0, 112, fsf_req);
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 
@@ -3879,9 +3700,7 @@
 		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
 			      (char *) &header->fsf_status_qual,
 			      sizeof (union fsf_status_qual));
-		debug_text_event(fsf_req->adapter->erp_dbf, 1,
-				 "fsf_s_uhand_nv");
-		zfcp_erp_port_reopen(unit->port, 0);
+		zfcp_erp_port_reopen(unit->port, 0, 113, fsf_req);
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 
@@ -3897,9 +3716,7 @@
 		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
 			      (char *) &header->fsf_status_qual,
 			      sizeof (union fsf_status_qual));
-		debug_text_event(fsf_req->adapter->erp_dbf, 1,
-				 "fsf_s_hand_mis");
-		zfcp_erp_adapter_reopen(unit->port->adapter, 0);
+		zfcp_erp_adapter_reopen(unit->port->adapter, 0, 114, fsf_req);
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 
@@ -3909,9 +3726,7 @@
 			      zfcp_get_busid_by_unit(unit),
 			      ZFCP_FC_SERVICE_CLASS_DEFAULT);
 		/* stop operation for this adapter */
-		debug_text_exception(fsf_req->adapter->erp_dbf, 0,
-				     "fsf_s_class_nsup");
-		zfcp_erp_adapter_shutdown(unit->port->adapter, 0);
+		zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 132, fsf_req);
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 
@@ -3927,9 +3742,7 @@
 		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
 			      (char *) &header->fsf_status_qual,
 			      sizeof (union fsf_status_qual));
-		debug_text_event(fsf_req->adapter->erp_dbf, 1,
-				 "fsf_s_fcp_lun_nv");
-		zfcp_erp_port_reopen(unit->port, 0);
+		zfcp_erp_port_reopen(unit->port, 0, 115, fsf_req);
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 
@@ -3951,8 +3764,7 @@
 				break;
 			}
 		}
-		debug_text_event(fsf_req->adapter->erp_dbf, 1, "fsf_s_access");
-		zfcp_erp_unit_access_denied(unit);
+		zfcp_erp_unit_access_denied(unit, 61, fsf_req);
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 
@@ -3965,9 +3777,7 @@
 			      zfcp_get_busid_by_unit(unit),
 			      fsf_req->qtcb->bottom.io.data_direction);
 		/* stop operation for this adapter */
-		debug_text_event(fsf_req->adapter->erp_dbf, 0,
-				 "fsf_s_dir_ind_nv");
-		zfcp_erp_adapter_shutdown(unit->port->adapter, 0);
+		zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 133, fsf_req);
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 
@@ -3980,9 +3790,7 @@
 		     zfcp_get_busid_by_unit(unit),
 		     fsf_req->qtcb->bottom.io.fcp_cmnd_length);
 		/* stop operation for this adapter */
-		debug_text_event(fsf_req->adapter->erp_dbf, 0,
-				 "fsf_s_cmd_len_nv");
-		zfcp_erp_adapter_shutdown(unit->port->adapter, 0);
+		zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 134, fsf_req);
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 
@@ -3990,8 +3798,7 @@
 		ZFCP_LOG_DEBUG("The remote port 0x%016Lx on adapter %s "
 			       "needs to be reopened\n",
 			       unit->port->wwpn, zfcp_get_busid_by_unit(unit));
-		debug_text_event(fsf_req->adapter->erp_dbf, 2, "fsf_s_pboxed");
-		zfcp_erp_port_boxed(unit->port);
+		zfcp_erp_port_boxed(unit->port, 53, fsf_req);
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR |
 			ZFCP_STATUS_FSFREQ_RETRY;
 		break;
@@ -4001,8 +3808,7 @@
 				"wwpn=0x%016Lx, fcp_lun=0x%016Lx)\n",
 				zfcp_get_busid_by_unit(unit),
 				unit->port->wwpn, unit->fcp_lun);
-		debug_text_event(fsf_req->adapter->erp_dbf, 1, "fsf_s_lboxed");
-		zfcp_erp_unit_boxed(unit);
+		zfcp_erp_unit_boxed(unit, 54, fsf_req);
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR
 			| ZFCP_STATUS_FSFREQ_RETRY;
 		break;
@@ -4011,25 +3817,16 @@
 		switch (header->fsf_status_qual.word[0]) {
 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
 			/* re-establish link to port */
-			debug_text_event(fsf_req->adapter->erp_dbf, 1,
-					 "fsf_sq_ltest");
  			zfcp_test_link(unit->port);
 			break;
 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
 			/* FIXME(hw) need proper specs for proper action */
 			/* let scsi stack deal with retries and escalation */
-			debug_text_event(fsf_req->adapter->erp_dbf, 1,
-					 "fsf_sq_ulp");
 			break;
 		default:
 			ZFCP_LOG_NORMAL
  			    ("Unknown status qualifier 0x%x arrived.\n",
 			     header->fsf_status_qual.word[0]);
-			debug_text_event(fsf_req->adapter->erp_dbf, 0,
-					 "fsf_sq_inval:");
-			debug_exception(fsf_req->adapter->erp_dbf, 0,
-					&header->fsf_status_qual.word[0],
-					sizeof(u32));
 			break;
 		}
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -4040,12 +3837,6 @@
 
 	case FSF_FCP_RSP_AVAILABLE:
 		break;
-
-	default:
-		debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf_s_inval:");
-		debug_exception(fsf_req->adapter->erp_dbf, 0,
-				&header->fsf_status, sizeof(u32));
-		break;
 	}
 
  skip_fsfstatus:
@@ -4625,9 +4416,6 @@
 			"was presented on the adapter %s\n",
 			header->fsf_status,
 			zfcp_get_busid_by_adapter(adapter));
-		debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf_sq_inval");
-		debug_exception(fsf_req->adapter->erp_dbf, 0,
-			&header->fsf_status_qual.word[0], sizeof(u32));
 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		retval = -EINVAL;
 		break;
@@ -4817,7 +4605,6 @@
 	volatile struct qdio_buffer_element *sbale;
 	int inc_seq_no;
 	int new_distance_from_int;
-	u64 dbg_tmp[2];
 	int retval = 0;
 
 	adapter = fsf_req->adapter;
@@ -4867,10 +4654,6 @@
 			 QDIO_FLAG_SYNC_OUTPUT,
 			 0, fsf_req->sbal_first, fsf_req->sbal_number, NULL);
 
-	dbg_tmp[0] = (unsigned long) sbale[0].addr;
-	dbg_tmp[1] = (u64) retval;
-	debug_event(adapter->erp_dbf, 4, (void *) dbg_tmp, 16);
-
 	if (unlikely(retval)) {
 		/* Queues are down..... */
 		retval = -EIO;
@@ -4885,7 +4668,7 @@
 		req_queue->free_index -= fsf_req->sbal_number;
 		req_queue->free_index += QDIO_MAX_BUFFERS_PER_Q;
 		req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; /* wrap */
-		zfcp_erp_adapter_reopen(adapter, 0);
+		zfcp_erp_adapter_reopen(adapter, 0, 116, fsf_req);
 	} else {
 		req_queue->distance_from_int = new_distance_from_int;
 		/*
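
The zfcp hunks above all apply one mechanical transformation: every call into the error-recovery entry points (zfcp_erp_unit_failed, zfcp_erp_port_boxed, zfcp_erp_adapter_reopen, and friends) now carries a unique numeric trigger id plus a pointer to the originating request, replacing the scattered free-form debug_text_event() strings. A minimal userspace sketch of the idea; erp_trigger and struct req are invented stand-ins, not the driver's real types:

	#include <stdio.h>

	struct req { unsigned long tag; };

	/* Each call site passes a distinct constant id, so a trace
	 * record alone identifies which branch requested recovery. */
	static void erp_trigger(int id, struct req *ref)
	{
		printf("erp: trigger id=%d tag=%lu\n",
		       id, ref ? ref->tag : 0UL);
		/* ... kick off recovery ... */
	}

	int main(void)
	{
		struct req r = { .tag = 42 };

		erp_trigger(59, &r);	/* e.g. an "access denied" path */
		erp_trigger(51, &r);	/* e.g. a "port boxed" path */
		return 0;
	}
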
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 22fdc17..8ca5f07 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -175,8 +175,9 @@
                 * which is set again in case we have missed by a mile.
                 */
 		zfcp_erp_adapter_reopen(adapter,
-				       ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
-				       ZFCP_STATUS_COMMON_ERP_FAILED);
+					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
+					ZFCP_STATUS_COMMON_ERP_FAILED, 140,
+					NULL);
 	}
 	return retval;
 }
@@ -239,8 +240,6 @@
 	struct zfcp_fsf_req *fsf_req;
 	unsigned long flags;
 
-	debug_long_event(adapter->erp_dbf, 4, req_id);
-
 	spin_lock_irqsave(&adapter->req_list_lock, flags);
 	fsf_req = zfcp_reqlist_find(adapter, req_id);
 
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index b9daf5c..f818506 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -31,6 +31,7 @@
 				  void (*done) (struct scsi_cmnd *));
 static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *);
 static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *);
+static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *);
 static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *);
 static int zfcp_task_management_function(struct zfcp_unit *, u8,
 					 struct scsi_cmnd *);
@@ -51,6 +52,7 @@
 		.queuecommand		= zfcp_scsi_queuecommand,
 		.eh_abort_handler	= zfcp_scsi_eh_abort_handler,
 		.eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
+		.eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler,
 		.eh_host_reset_handler	= zfcp_scsi_eh_host_reset_handler,
 		.can_queue		= 4096,
 		.this_id		= -1,
@@ -179,11 +181,10 @@
 	struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata;
 
 	if (unit) {
-		zfcp_erp_wait(unit->port->adapter);
 		atomic_clear_mask(ZFCP_STATUS_UNIT_REGISTERED, &unit->status);
 		sdpnt->hostdata = NULL;
 		unit->device = NULL;
-		zfcp_erp_unit_failed(unit);
+		zfcp_erp_unit_failed(unit, 12, NULL);
 		zfcp_unit_put(unit);
 	} else
 		ZFCP_LOG_NORMAL("bug: no unit associated with SCSI device at "
@@ -442,58 +443,32 @@
 	return retval;
 }
 
-static int
-zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
+static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
 {
 	int retval;
-	struct zfcp_unit *unit = (struct zfcp_unit *) scpnt->device->hostdata;
+	struct zfcp_unit *unit = scpnt->device->hostdata;
 
 	if (!unit) {
-		ZFCP_LOG_NORMAL("bug: Tried reset for nonexistent unit\n");
-		retval = SUCCESS;
-		goto out;
+		WARN_ON(1);
+		return SUCCESS;
 	}
-	ZFCP_LOG_NORMAL("resetting unit 0x%016Lx on port 0x%016Lx, adapter %s\n",
-			unit->fcp_lun, unit->port->wwpn,
-			zfcp_get_busid_by_adapter(unit->port->adapter));
+	retval = zfcp_task_management_function(unit,
+					       FCP_LOGICAL_UNIT_RESET,
+					       scpnt);
+	return retval ? FAILED : SUCCESS;
+}
 
-	/*
-	 * If we do not know whether the unit supports 'logical unit reset'
-	 * then try 'logical unit reset' and proceed with 'target reset'
-	 * if 'logical unit reset' fails.
-	 * If the unit is known not to support 'logical unit reset' then
-	 * skip 'logical unit reset' and try 'target reset' immediately.
-	 */
-	if (!atomic_test_mask(ZFCP_STATUS_UNIT_NOTSUPPUNITRESET,
-			      &unit->status)) {
-		retval = zfcp_task_management_function(unit,
-						       FCP_LOGICAL_UNIT_RESET,
-						       scpnt);
-		if (retval) {
-			ZFCP_LOG_DEBUG("unit reset failed (unit=%p)\n", unit);
-			if (retval == -ENOTSUPP)
-				atomic_set_mask
-				    (ZFCP_STATUS_UNIT_NOTSUPPUNITRESET,
-				     &unit->status);
-			/* fall through and try 'target reset' next */
-		} else {
-			ZFCP_LOG_DEBUG("unit reset succeeded (unit=%p)\n",
-				       unit);
-			/* avoid 'target reset' */
-			retval = SUCCESS;
-			goto out;
-		}
+static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt)
+{
+	int retval;
+	struct zfcp_unit *unit = scpnt->device->hostdata;
+
+	if (!unit) {
+		WARN_ON(1);
+		return SUCCESS;
 	}
 	retval = zfcp_task_management_function(unit, FCP_TARGET_RESET, scpnt);
-	if (retval) {
-		ZFCP_LOG_DEBUG("target reset failed (unit=%p)\n", unit);
-		retval = FAILED;
-	} else {
-		ZFCP_LOG_DEBUG("target reset succeeded (unit=%p)\n", unit);
-		retval = SUCCESS;
-	}
- out:
-	return retval;
+	return retval ? FAILED : SUCCESS;
 }
 
 static int
@@ -553,7 +528,7 @@
 		unit->fcp_lun, unit->port->wwpn,
 		zfcp_get_busid_by_adapter(unit->port->adapter));
 
-	zfcp_erp_adapter_reopen(adapter, 0);
+	zfcp_erp_adapter_reopen(adapter, 0, 141, scpnt);
 	zfcp_erp_wait(adapter);
 
 	return SUCCESS;
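
Note how zfcp_scsi_eh_device_reset_handler loses its internal fall-back logic: instead of trying a LUN reset and escalating to a target reset itself, the driver now exposes two separate callbacks and lets the SCSI error handler drive the escalation. A rough userspace model of that shape (the helpers below are stand-ins, not the real task-management path):

	#include <stdio.h>

	#define SUCCESS 0
	#define FAILED  1

	/* Stand-in for sending an FCP task management function. */
	static int send_tmf(int tmf) { (void)tmf; return 0; /* pretend ok */ }

	static int eh_device_reset(void) { return send_tmf(0x10) ? FAILED : SUCCESS; }
	static int eh_target_reset(void) { return send_tmf(0x20) ? FAILED : SUCCESS; }

	int main(void)
	{
		/* Midlayer-style escalation: try the bigger hammer only
		 * if the smaller one failed. */
		if (eh_device_reset() == FAILED && eh_target_reset() == FAILED)
			printf("escalate to host reset\n");
		else
			printf("recovered\n");
		return 0;
	}
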
diff --git a/drivers/s390/scsi/zfcp_sysfs_adapter.c b/drivers/s390/scsi/zfcp_sysfs_adapter.c
index 705c6d4..ccbba4d 100644
--- a/drivers/s390/scsi/zfcp_sysfs_adapter.c
+++ b/drivers/s390/scsi/zfcp_sysfs_adapter.c
@@ -89,7 +89,7 @@
 
 	retval = 0;
 
-	zfcp_erp_port_reopen(port, 0);
+	zfcp_erp_port_reopen(port, 0, 91, NULL);
 	zfcp_erp_wait(port->adapter);
 	zfcp_port_put(port);
  out:
@@ -147,7 +147,7 @@
 		goto out;
 	}
 
-	zfcp_erp_port_shutdown(port, 0);
+	zfcp_erp_port_shutdown(port, 0, 92, NULL);
 	zfcp_erp_wait(adapter);
 	zfcp_port_put(port);
 	zfcp_port_dequeue(port);
@@ -191,9 +191,10 @@
 		goto out;
 	}
 
-	zfcp_erp_modify_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING,
-				       ZFCP_SET);
-	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
+	zfcp_erp_modify_adapter_status(adapter, 44, NULL,
+				       ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
+	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 93,
+				NULL);
 	zfcp_erp_wait(adapter);
  out:
 	up(&zfcp_data.config_sema);
diff --git a/drivers/s390/scsi/zfcp_sysfs_port.c b/drivers/s390/scsi/zfcp_sysfs_port.c
index 1320c05..703c1b5 100644
--- a/drivers/s390/scsi/zfcp_sysfs_port.c
+++ b/drivers/s390/scsi/zfcp_sysfs_port.c
@@ -94,7 +94,7 @@
 
 	retval = 0;
 
-	zfcp_erp_unit_reopen(unit, 0);
+	zfcp_erp_unit_reopen(unit, 0, 94, NULL);
 	zfcp_erp_wait(unit->port->adapter);
 	zfcp_unit_put(unit);
  out:
@@ -150,7 +150,7 @@
 		goto out;
 	}
 
-	zfcp_erp_unit_shutdown(unit, 0);
+	zfcp_erp_unit_shutdown(unit, 0, 95, NULL);
 	zfcp_erp_wait(unit->port->adapter);
 	zfcp_unit_put(unit);
 	zfcp_unit_dequeue(unit);
@@ -193,8 +193,9 @@
 		goto out;
 	}
 
-	zfcp_erp_modify_port_status(port, ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
-	zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED);
+	zfcp_erp_modify_port_status(port, 45, NULL,
+				    ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
+	zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, 96, NULL);
 	zfcp_erp_wait(port->adapter);
  out:
 	up(&zfcp_data.config_sema);
diff --git a/drivers/s390/scsi/zfcp_sysfs_unit.c b/drivers/s390/scsi/zfcp_sysfs_unit.c
index 63f75ee..80fb2c2 100644
--- a/drivers/s390/scsi/zfcp_sysfs_unit.c
+++ b/drivers/s390/scsi/zfcp_sysfs_unit.c
@@ -94,8 +94,9 @@
 		goto out;
 	}
 
-	zfcp_erp_modify_unit_status(unit, ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
-	zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED);
+	zfcp_erp_modify_unit_status(unit, 46, NULL,
+				    ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
+	zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, 97, NULL);
 	zfcp_erp_wait(unit->port->adapter);
  out:
 	up(&zfcp_data.config_sema);
diff --git a/drivers/s390/sysinfo.c b/drivers/s390/sysinfo.c
index 291ff62..c3e4ab0 100644
--- a/drivers/s390/sysinfo.c
+++ b/drivers/s390/sysinfo.c
@@ -11,111 +11,13 @@
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <asm/ebcdic.h>
+#include <asm/sysinfo.h>
 
 /* Sigh, math-emu. Don't ask. */
 #include <asm/sfp-util.h>
 #include <math-emu/soft-fp.h>
 #include <math-emu/single.h>
 
-struct sysinfo_1_1_1 {
-	char reserved_0[32];
-	char manufacturer[16];
-	char type[4];
-	char reserved_1[12];
-	char model_capacity[16];
-	char sequence[16];
-	char plant[4];
-	char model[16];
-};
-
-struct sysinfo_1_2_1 {
-	char reserved_0[80];
-	char sequence[16];
-	char plant[4];
-	char reserved_1[2];
-	unsigned short cpu_address;
-};
-
-struct sysinfo_1_2_2 {
-	char format;
-	char reserved_0[1];
-	unsigned short acc_offset;
-	char reserved_1[24];
-	unsigned int secondary_capability;
-	unsigned int capability;
-	unsigned short cpus_total;
-	unsigned short cpus_configured;
-	unsigned short cpus_standby;
-	unsigned short cpus_reserved;
-	unsigned short adjustment[0];
-};
-
-struct sysinfo_1_2_2_extension {
-	unsigned int alt_capability;
-	unsigned short alt_adjustment[0];
-};
-
-struct sysinfo_2_2_1 {
-	char reserved_0[80];
-	char sequence[16];
-	char plant[4];
-	unsigned short cpu_id;
-	unsigned short cpu_address;
-};
-
-struct sysinfo_2_2_2 {
-	char reserved_0[32];
-	unsigned short lpar_number;
-	char reserved_1;
-	unsigned char characteristics;
-	unsigned short cpus_total;
-	unsigned short cpus_configured;
-	unsigned short cpus_standby;
-	unsigned short cpus_reserved;
-	char name[8];
-	unsigned int caf;
-	char reserved_2[16];
-	unsigned short cpus_dedicated;
-	unsigned short cpus_shared;
-};
-
-#define LPAR_CHAR_DEDICATED	(1 << 7)
-#define LPAR_CHAR_SHARED	(1 << 6)
-#define LPAR_CHAR_LIMITED	(1 << 5)
-
-struct sysinfo_3_2_2 {
-	char reserved_0[31];
-	unsigned char count;
-	struct {
-		char reserved_0[4];
-		unsigned short cpus_total;
-		unsigned short cpus_configured;
-		unsigned short cpus_standby;
-		unsigned short cpus_reserved;
-		char name[8];
-		unsigned int caf;
-		char cpi[16];
-		char reserved_1[24];
-
-	} vm[8];
-};
-
-static inline int stsi(void *sysinfo, int fc, int sel1, int sel2)
-{
-	register int r0 asm("0") = (fc << 28) | sel1;
-	register int r1 asm("1") = sel2;
-
-	asm volatile(
-		"   stsi 0(%2)\n"
-		"0: jz   2f\n"
-		"1: lhi  %0,%3\n"
-		"2:\n"
-		EX_TABLE(0b,1b)
-		: "+d" (r0) : "d" (r1), "a" (sysinfo), "K" (-ENOSYS)
-		: "cc", "memory" );
-	return r0;
-}
-
 static inline int stsi_0(void)
 {
 	int rc = stsi (NULL, 0, 0, 0);
@@ -133,6 +35,8 @@
 	EBCASC(info->sequence, sizeof(info->sequence));
 	EBCASC(info->plant, sizeof(info->plant));
 	EBCASC(info->model_capacity, sizeof(info->model_capacity));
+	EBCASC(info->model_perm_cap, sizeof(info->model_perm_cap));
+	EBCASC(info->model_temp_cap, sizeof(info->model_temp_cap));
 	len += sprintf(page + len, "Manufacturer:         %-16.16s\n",
 		       info->manufacturer);
 	len += sprintf(page + len, "Type:                 %-4.4s\n",
@@ -155,8 +59,18 @@
 		       info->sequence);
 	len += sprintf(page + len, "Plant:                %-4.4s\n",
 		       info->plant);
-	len += sprintf(page + len, "Model Capacity:       %-16.16s\n",
-		       info->model_capacity);
+	len += sprintf(page + len, "Model Capacity:       %-16.16s %08u\n",
+		       info->model_capacity, *(u32 *) info->model_cap_rating);
+	if (info->model_perm_cap[0] != '\0')
+		len += sprintf(page + len,
+			       "Model Perm. Capacity: %-16.16s %08u\n",
+			       info->model_perm_cap,
+			       *(u32 *) info->model_perm_cap_rating);
+	if (info->model_temp_cap[0] != '\0')
+		len += sprintf(page + len,
+			       "Model Temp. Capacity: %-16.16s %08u\n",
+			       info->model_temp_cap,
+			       *(u32 *) info->model_temp_cap_rating);
 	return len;
 }
 
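
The new capacity lines read a 32-bit rating straight out of a char array with *(u32 *) info->model_cap_rating, which is safe here given the structure layout on s390. As a hedged aside (not something this patch needs), the portable way to pull an integer out of an unaligned byte buffer is a memcpy:

	#include <stdint.h>
	#include <string.h>
	#include <stdio.h>

	static uint32_t read_u32(const unsigned char *p)
	{
		uint32_t v;

		memcpy(&v, p, sizeof(v));	/* no alignment assumption */
		return v;
	}

	int main(void)
	{
		unsigned char rating[4] = { 0x00, 0x00, 0x01, 0xf4 };

		/* byte order of the result is the host's */
		printf("%08u\n", (unsigned)read_u32(rating));
		return 0;
	}
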
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index b4912d1..51c3ebf 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -1838,12 +1838,11 @@
 		if (scsi_sg_count(srb)) {
 			if ((scsi_sg_count(srb) == 1) &&
 			    (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) {
-				if (srb->sc_data_direction == DMA_TO_DEVICE || srb->sc_data_direction == DMA_BIDIRECTIONAL) {
-					struct scatterlist *sg = scsi_sglist(srb);
-					char *buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
-					memcpy(tw_dev->generic_buffer_virt[request_id], buf, sg->length);
-					kunmap_atomic(buf - sg->offset, KM_IRQ0);
-				}
+				if (srb->sc_data_direction == DMA_TO_DEVICE ||
+				    srb->sc_data_direction == DMA_BIDIRECTIONAL)
+					scsi_sg_copy_to_buffer(srb,
+							       tw_dev->generic_buffer_virt[request_id],
+							       TW_SECTOR_SIZE);
 				command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
 				command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
 			} else {
@@ -1915,13 +1914,11 @@
 	    (cmd->sc_data_direction == DMA_FROM_DEVICE ||
 	     cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
 		if (scsi_sg_count(cmd) == 1) {
-			struct scatterlist *sg = scsi_sglist(tw_dev->srb[request_id]);
-			char *buf;
-			unsigned long flags = 0;
+			unsigned long flags;
+			void *buf = tw_dev->generic_buffer_virt[request_id];
+
 			local_irq_save(flags);
-			buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
-			memcpy(buf, tw_dev->generic_buffer_virt[request_id], sg->length);
-			kunmap_atomic(buf - sg->offset, KM_IRQ0);
+			scsi_sg_copy_from_buffer(cmd, buf, TW_SECTOR_SIZE);
 			local_irq_restore(flags);
 		}
 	}
@@ -2028,8 +2025,6 @@
 	}
 	tw_dev = (TW_Device_Extension *)host->hostdata;
 
-	memset(tw_dev, 0, sizeof(TW_Device_Extension));
-
 	/* Save values to device extension */
 	tw_dev->host = host;
 	tw_dev->tw_pci_dev = pdev;
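
The 3ware conversions above replace hand-rolled kmap_atomic()/memcpy() sequences with scsi_sg_copy_to_buffer()/scsi_sg_copy_from_buffer(), which walk the whole scatterlist and bound the copy by the buffer length. A rough userspace model of what such a helper does; struct seg is an invented stand-in for struct scatterlist:

	#include <stddef.h>
	#include <string.h>

	struct seg {
		void   *addr;
		size_t  len;
	};

	/* Copy up to buflen bytes from a flat buffer into a segment
	 * list, returning the number of bytes actually copied. */
	static size_t sg_copy_from_buffer(struct seg *sg, int nseg,
					  const void *buf, size_t buflen)
	{
		size_t done = 0;
		int i;

		for (i = 0; i < nseg && done < buflen; i++) {
			size_t n = sg[i].len;

			if (n > buflen - done)
				n = buflen - done;
			memcpy(sg[i].addr, (const char *)buf + done, n);
			done += n;
		}
		return done;
	}

	int main(void)
	{
		char a[4], b[4];
		struct seg sg[2] = { { a, sizeof(a) }, { b, sizeof(b) } };

		return sg_copy_from_buffer(sg, 2, "0123456789", 8) == 8 ? 0 : 1;
	}
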
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index d095321..adb98a2 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1463,18 +1463,10 @@
 				 void *data, unsigned int len)
 {
 	struct scsi_cmnd *cmd = tw_dev->srb[request_id];
-	void *buf;
-	unsigned int transfer_len;
-	unsigned long flags = 0;
-	struct scatterlist *sg = scsi_sglist(cmd);
+	unsigned long flags;
 
 	local_irq_save(flags);
-	buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
-	transfer_len = min(sg->length, len);
-
-	memcpy(buf, data, transfer_len);
-
-	kunmap_atomic(buf - sg->offset, KM_IRQ0);
+	scsi_sg_copy_from_buffer(cmd, data, len);
 	local_irq_restore(flags);
 }
 
@@ -2294,8 +2286,6 @@
 	}
 	tw_dev = (TW_Device_Extension *)host->hostdata;
 
-	memset(tw_dev, 0, sizeof(TW_Device_Extension));
-
 	/* Save values to device extension */
 	tw_dev->host = host;
 	tw_dev->tw_pci_dev = pdev;
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index 4d3ebb1..2d689af 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -896,7 +896,7 @@
 		IRQ_Channel = PCI_Device->irq;
 		IO_Address = BaseAddress0 = pci_resource_start(PCI_Device, 0);
 		PCI_Address = BaseAddress1 = pci_resource_start(PCI_Device, 1);
-#ifndef CONFIG_SCSI_OMIT_FLASHPOINT
+#ifdef CONFIG_SCSI_FLASHPOINT
 		if (pci_resource_flags(PCI_Device, 0) & IORESOURCE_MEM) {
 			BusLogic_Error("BusLogic: Base Address0 0x%X not I/O for " "FlashPoint Host Adapter\n", NULL, BaseAddress0);
 			BusLogic_Error("at PCI Bus %d Device %d I/O Address 0x%X\n", NULL, Bus, Device, IO_Address);
@@ -1006,6 +1006,9 @@
 }
 
 
+#else
+#define BusLogic_InitializeProbeInfoList(adapter) \
+		BusLogic_InitializeProbeInfoListISA(adapter)
 #endif				/* CONFIG_PCI */
 
 
diff --git a/drivers/scsi/BusLogic.h b/drivers/scsi/BusLogic.h
index bfbfb5c..73f237a 100644
--- a/drivers/scsi/BusLogic.h
+++ b/drivers/scsi/BusLogic.h
@@ -34,23 +34,6 @@
 #endif
 
 /*
-  FlashPoint support is only available for the Intel x86 Architecture with
-  CONFIG_PCI set.
-*/
-
-#ifndef __i386__
-#undef CONFIG_SCSI_OMIT_FLASHPOINT
-#define CONFIG_SCSI_OMIT_FLASHPOINT
-#endif
-
-#ifndef CONFIG_PCI
-#undef CONFIG_SCSI_OMIT_FLASHPOINT
-#define CONFIG_SCSI_OMIT_FLASHPOINT
-#define BusLogic_InitializeProbeInfoListISA BusLogic_InitializeProbeInfoList
-#endif
-
-
-/*
   Define the maximum number of BusLogic Host Adapters supported by this driver.
 */
 
@@ -178,7 +161,7 @@
   Define macros for testing the Host Adapter Type.
 */
 
-#ifndef CONFIG_SCSI_OMIT_FLASHPOINT
+#ifdef CONFIG_SCSI_FLASHPOINT
 
 #define BusLogic_MultiMasterHostAdapterP(HostAdapter) \
   (HostAdapter->HostAdapterType == BusLogic_MultiMaster)
@@ -871,7 +854,7 @@
 	void (*CallbackFunction) (struct BusLogic_CCB *);	/* Bytes 40-43 */
 	u32 BaseAddress;	/* Bytes 44-47 */
 	enum BusLogic_CompletionCode CompletionCode;	/* Byte 48 */
-#ifndef CONFIG_SCSI_OMIT_FLASHPOINT
+#ifdef CONFIG_SCSI_FLASHPOINT
 	unsigned char:8;	/* Byte 49 */
 	unsigned short OS_Flags;	/* Bytes 50-51 */
 	unsigned char Private[48];	/* Bytes 52-99 */
diff --git a/drivers/scsi/FlashPoint.c b/drivers/scsi/FlashPoint.c
index 1c90781..b374e45 100644
--- a/drivers/scsi/FlashPoint.c
+++ b/drivers/scsi/FlashPoint.c
@@ -16,7 +16,7 @@
 */
 
 
-#ifndef CONFIG_SCSI_OMIT_FLASHPOINT
+#ifdef CONFIG_SCSI_FLASHPOINT
 
 #define MAX_CARDS	8
 #undef BUSTYPE_PCI
@@ -7626,7 +7626,7 @@
 #define FlashPoint_InterruptPending	    FlashPoint__InterruptPending
 #define FlashPoint_HandleInterrupt	    FlashPoint__HandleInterrupt
 
-#else				/* CONFIG_SCSI_OMIT_FLASHPOINT */
+#else				/* !CONFIG_SCSI_FLASHPOINT */
 
 /*
   Define prototypes for the FlashPoint SCCB Manager Functions.
@@ -7641,4 +7641,4 @@
 extern int FlashPoint_HandleInterrupt(FlashPoint_CardHandle_T);
 extern void FlashPoint_ReleaseHostAdapter(FlashPoint_CardHandle_T);
 
-#endif				/* CONFIG_SCSI_OMIT_FLASHPOINT */
+#endif				/* CONFIG_SCSI_FLASHPOINT */
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index b9d3740..7f78e3e 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -588,18 +588,20 @@
 	  <http://www.tldp.org/docs.html#howto>, and the files
 	  <file:Documentation/scsi/BusLogic.txt> and
 	  <file:Documentation/scsi/FlashPoint.txt> for more information.
+	  Note that support for FlashPoint is only available for 32-bit
+	  x86 configurations.
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called BusLogic.
 
-config SCSI_OMIT_FLASHPOINT
-	bool "Omit FlashPoint support"
-	depends on SCSI_BUSLOGIC
+config SCSI_FLASHPOINT
+	bool "FlashPoint support"
+	depends on SCSI_BUSLOGIC && PCI && X86_32
 	help
-	  This option allows you to omit the FlashPoint support from the
+	  This option allows you to add FlashPoint support to the
 	  BusLogic SCSI driver. The FlashPoint SCCB Manager code is
-	  substantial, so users of MultiMaster Host Adapters may wish to omit
-	  it.
+	  substantial, so users of MultiMaster Host Adapters may not
+	  wish to include it.
 
 config SCSI_DMX3191D
 	tristate "DMX3191D SCSI support"
diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c
index 5ac3a3e..07d572f 100644
--- a/drivers/scsi/a2091.c
+++ b/drivers/scsi/a2091.c
@@ -179,6 +179,9 @@
 	DMA(instance)->DAWR = DAWR_A2091;
 	regs.SASR = &(DMA(instance)->SASR);
 	regs.SCMD = &(DMA(instance)->SCMD);
+	HDATA(instance)->no_sync = 0xff;
+	HDATA(instance)->fast = 0;
+	HDATA(instance)->dma_mode = CTRL_DMA;
 	wd33c93_init(instance, regs, dma_setup, dma_stop, WD33C93_FS_8_10);
 	request_irq(IRQ_AMIGA_PORTS, a2091_intr, IRQF_SHARED, "A2091 SCSI",
 		    instance);
diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c
index 3aeec96..8b449d8 100644
--- a/drivers/scsi/a3000.c
+++ b/drivers/scsi/a3000.c
@@ -178,6 +178,9 @@
     DMA(a3000_host)->DAWR = DAWR_A3000;
     regs.SASR = &(DMA(a3000_host)->SASR);
     regs.SCMD = &(DMA(a3000_host)->SCMD);
+    HDATA(a3000_host)->no_sync = 0xff;
+    HDATA(a3000_host)->fast = 0;
+    HDATA(a3000_host)->dma_mode = CTRL_DMA;
     wd33c93_init(a3000_host, regs, dma_setup, dma_stop, WD33C93_FS_12_15);
     if (request_irq(IRQ_AMIGA_PORTS, a3000_intr, IRQF_SHARED, "A3000 SCSI",
 		    a3000_intr))
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index c05092f..369fcf7 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -205,7 +205,7 @@
 
 int aac_check_reset = 1;
 module_param_named(check_reset, aac_check_reset, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(aac_check_reset, "If adapter fails health check, reset the"
+MODULE_PARM_DESC(check_reset, "If adapter fails health check, reset the"
 	" adapter. a value of -1 forces the reset to adapters programmed to"
 	" ignore it.");
 
@@ -379,24 +379,6 @@
 	return status;
 }
 
-static void aac_internal_transfer(struct scsi_cmnd *scsicmd, void *data, unsigned int offset, unsigned int len)
-{
-	void *buf;
-	int transfer_len;
-	struct scatterlist *sg = scsi_sglist(scsicmd);
-
-	buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
-	transfer_len = min(sg->length, len + offset);
-
-	transfer_len -= offset;
-	if (buf && transfer_len > 0)
-		memcpy(buf + offset, data, transfer_len);
-
-	flush_kernel_dcache_page(kmap_atomic_to_page(buf - sg->offset));
-	kunmap_atomic(buf - sg->offset, KM_IRQ0);
-
-}
-
 static void get_container_name_callback(void *context, struct fib * fibptr)
 {
 	struct aac_get_name_resp * get_name_reply;
@@ -419,14 +401,17 @@
 		while (*sp == ' ')
 			++sp;
 		if (*sp) {
+			struct inquiry_data inq;
 			char d[sizeof(((struct inquiry_data *)NULL)->inqd_pid)];
 			int count = sizeof(d);
 			char *dp = d;
 			do {
 				*dp++ = (*sp) ? *sp++ : ' ';
 			} while (--count > 0);
-			aac_internal_transfer(scsicmd, d,
-			  offsetof(struct inquiry_data, inqd_pid), sizeof(d));
+
+			scsi_sg_copy_to_buffer(scsicmd, &inq, sizeof(inq));
+			memcpy(inq.inqd_pid, d, sizeof(d));
+			scsi_sg_copy_from_buffer(scsicmd, &inq, sizeof(inq));
 		}
 	}
 
@@ -811,7 +796,7 @@
 		sp[2] = 0;
 		sp[3] = snprintf(sp+4, sizeof(sp)-4, "%08X",
 		  le32_to_cpu(get_serial_reply->uid));
-		aac_internal_transfer(scsicmd, sp, 0, sizeof(sp));
+		scsi_sg_copy_from_buffer(scsicmd, sp, sizeof(sp));
 	}
 
 	scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
@@ -1986,8 +1971,8 @@
 				arr[4] = 0x0;
 				arr[5] = 0x80;
 				arr[1] = scsicmd->cmnd[2];
-				aac_internal_transfer(scsicmd, &inq_data, 0,
-				  sizeof(inq_data));
+				scsi_sg_copy_from_buffer(scsicmd, &inq_data,
+							 sizeof(inq_data));
 				scsicmd->result = DID_OK << 16 |
 				  COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
 			} else if (scsicmd->cmnd[2] == 0x80) {
@@ -1995,8 +1980,8 @@
 				arr[3] = setinqserial(dev, &arr[4],
 				  scmd_id(scsicmd));
 				arr[1] = scsicmd->cmnd[2];
-				aac_internal_transfer(scsicmd, &inq_data, 0,
-				  sizeof(inq_data));
+				scsi_sg_copy_from_buffer(scsicmd, &inq_data,
+							 sizeof(inq_data));
 				return aac_get_container_serial(scsicmd);
 			} else {
 				/* vpd page not implemented */
@@ -2027,7 +2012,8 @@
 		if (cid == host->this_id) {
 			setinqstr(dev, (void *) (inq_data.inqd_vid), ARRAY_SIZE(container_types));
 			inq_data.inqd_pdt = INQD_PDT_PROC;	/* Processor device */
-			aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data));
+			scsi_sg_copy_from_buffer(scsicmd, &inq_data,
+						 sizeof(inq_data));
 			scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
 			scsicmd->scsi_done(scsicmd);
 			return 0;
@@ -2036,7 +2022,7 @@
 			return -1;
 		setinqstr(dev, (void *) (inq_data.inqd_vid), fsa_dev_ptr[cid].type);
 		inq_data.inqd_pdt = INQD_PDT_DA;	/* Direct/random access device */
-		aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data));
+		scsi_sg_copy_from_buffer(scsicmd, &inq_data, sizeof(inq_data));
 		return aac_get_container_name(scsicmd);
 	}
 	case SERVICE_ACTION_IN:
@@ -2047,6 +2033,7 @@
 	{
 		u64 capacity;
 		char cp[13];
+		unsigned int alloc_len;
 
 		dprintk((KERN_DEBUG "READ CAPACITY_16 command.\n"));
 		capacity = fsa_dev_ptr[cid].size - 1;
@@ -2063,18 +2050,16 @@
 		cp[10] = 2;
 		cp[11] = 0;
 		cp[12] = 0;
-		aac_internal_transfer(scsicmd, cp, 0,
-		  min_t(size_t, scsicmd->cmnd[13], sizeof(cp)));
-		if (sizeof(cp) < scsicmd->cmnd[13]) {
-			unsigned int len, offset = sizeof(cp);
 
-			memset(cp, 0, offset);
-			do {
-				len = min_t(size_t, scsicmd->cmnd[13] - offset,
-						sizeof(cp));
-				aac_internal_transfer(scsicmd, cp, offset, len);
-			} while ((offset += len) < scsicmd->cmnd[13]);
-		}
+		alloc_len = ((scsicmd->cmnd[10] << 24)
+			     + (scsicmd->cmnd[11] << 16)
+			     + (scsicmd->cmnd[12] << 8) + scsicmd->cmnd[13]);
+
+		alloc_len = min_t(size_t, alloc_len, sizeof(cp));
+		scsi_sg_copy_from_buffer(scsicmd, cp, alloc_len);
+		if (alloc_len < scsi_bufflen(scsicmd))
+			scsi_set_resid(scsicmd,
+				       scsi_bufflen(scsicmd) - alloc_len);
 
 		/* Do not cache partition table for arrays */
 		scsicmd->device->removable = 1;
@@ -2104,7 +2089,7 @@
 		cp[5] = 0;
 		cp[6] = 2;
 		cp[7] = 0;
-		aac_internal_transfer(scsicmd, cp, 0, sizeof(cp));
+		scsi_sg_copy_from_buffer(scsicmd, cp, sizeof(cp));
 		/* Do not cache partition table for arrays */
 		scsicmd->device->removable = 1;
 
@@ -2139,7 +2124,7 @@
 			if (mode_buf_length > scsicmd->cmnd[4])
 				mode_buf_length = scsicmd->cmnd[4];
 		}
-		aac_internal_transfer(scsicmd, mode_buf, 0, mode_buf_length);
+		scsi_sg_copy_from_buffer(scsicmd, mode_buf, mode_buf_length);
 		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
 		scsicmd->scsi_done(scsicmd);
 
@@ -2174,7 +2159,7 @@
 			if (mode_buf_length > scsicmd->cmnd[8])
 				mode_buf_length = scsicmd->cmnd[8];
 		}
-		aac_internal_transfer(scsicmd, mode_buf, 0, mode_buf_length);
+		scsi_sg_copy_from_buffer(scsicmd, mode_buf, mode_buf_length);
 
 		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
 		scsicmd->scsi_done(scsicmd);
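
The READ CAPACITY(16) rework above stops looping aac_internal_transfer() calls and instead computes the CDB's 32-bit allocation length (bytes 10-13, big-endian) once, clamps the copy to it, and reports any shortfall as residual. Parsing that field in isolation, as a sketch:

	#include <stdint.h>
	#include <stdio.h>

	/* SERVICE ACTION IN / READ CAPACITY(16): the allocation length
	 * is a big-endian u32 in CDB bytes 10..13. */
	static uint32_t rc16_alloc_len(const uint8_t *cdb)
	{
		return ((uint32_t)cdb[10] << 24) | ((uint32_t)cdb[11] << 16) |
		       ((uint32_t)cdb[12] << 8)  |  (uint32_t)cdb[13];
	}

	int main(void)
	{
		uint8_t cdb[16] = { [10] = 0, [11] = 0, [12] = 0, [13] = 32 };
		uint32_t alloc = rc16_alloc_len(cdb);
		uint32_t reply = 13;	/* bytes the driver can return */

		if (alloc > reply)
			printf("residual: %u\n", (unsigned)(alloc - reply));
		return 0;
	}
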
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 4743449..23a8e9f 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -515,10 +515,12 @@
 				}
 				udelay(5);
 			}
-		} else
-			(void)down_interruptible(&fibptr->event_wait);
+		} else if (down_interruptible(&fibptr->event_wait) == 0) {
+			fibptr->done = 2;
+			up(&fibptr->event_wait);
+		}
 		spin_lock_irqsave(&fibptr->event_lock, flags);
-		if (fibptr->done == 0) {
+		if ((fibptr->done == 0) || (fibptr->done == 2)) {
 			fibptr->done = 2; /* Tell interrupt we aborted */
 			spin_unlock_irqrestore(&fibptr->event_lock, flags);
 			return -EINTR;
@@ -594,7 +596,7 @@
 	if (le32_to_cpu(*q->headers.consumer) >= q->entries)
 		*q->headers.consumer = cpu_to_le32(1);
 	else
-		*q->headers.consumer = cpu_to_le32(le32_to_cpu(*q->headers.consumer)+1);
+		le32_add_cpu(q->headers.consumer, 1);
 
 	if (wasfull) {
 		switch (qid) {
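
The commsup.c hunks carry two independent fixes: an interrupted wait now marks the fib as aborted (done = 2) so the completion path knows not to touch it, and the open-coded consumer-index increment becomes le32_add_cpu(), which adds a host-order value to a little-endian location in place. An illustrative userspace equivalent of the latter (not the kernel's implementation):

	#include <stdint.h>

	static inline uint32_t le32_to_host(uint32_t v)
	{
	#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
		return __builtin_bswap32(v);	/* swap on big-endian hosts */
	#else
		return v;			/* identity on little-endian */
	#endif
	}

	/* Add val to a little-endian storage location; the conversion
	 * is its own inverse, so one helper serves both directions. */
	static inline void le32_add(uint32_t *var, uint32_t val)
	{
		*var = le32_to_host(le32_to_host(*var) + val);
	}

	int main(void)
	{
		uint32_t consumer = 0;

		le32_add(&consumer, 1);
		return 0;
	}
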
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 72fccd9..0081aa3 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -1413,6 +1413,10 @@
 	unsigned long flags;
 	int nseg;
 
+	nseg = scsi_dma_map(cmd);
+	if (nseg < 0)
+		return SCSI_MLQUEUE_HOST_BUSY;
+
 	ahd_lock(ahd, &flags);
 
 	/*
@@ -1430,6 +1434,7 @@
 	if ((scb = ahd_get_scb(ahd, col_idx)) == NULL) {
 		ahd->flags |= AHD_RESOURCE_SHORTAGE;
 		ahd_unlock(ahd, &flags);
+		scsi_dma_unmap(cmd);
 		return SCSI_MLQUEUE_HOST_BUSY;
 	}
 
@@ -1485,8 +1490,6 @@
 	ahd_set_sense_residual(scb, 0);
 	scb->sg_count = 0;
 
-	nseg = scsi_dma_map(cmd);
-	BUG_ON(nseg < 0);
 	if (nseg > 0) {
 		void *sg = scb->sg_list;
 		struct scatterlist *cur_seg;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index 282aff6..42ad48e 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -1398,12 +1398,18 @@
 			return SCSI_MLQUEUE_DEVICE_BUSY;
 	}
 
+	nseg = scsi_dma_map(cmd);
+	if (nseg < 0)
+		return SCSI_MLQUEUE_HOST_BUSY;
+
 	/*
 	 * Get an scb to use.
 	 */
 	scb = ahc_get_scb(ahc);
-	if (!scb)
+	if (!scb) {
+		scsi_dma_unmap(cmd);
 		return SCSI_MLQUEUE_HOST_BUSY;
+	}
 
 	scb->io_ctx = cmd;
 	scb->platform_data->dev = dev;
@@ -1464,8 +1470,6 @@
 	ahc_set_sense_residual(scb, 0);
 	scb->sg_count = 0;
 
-	nseg = scsi_dma_map(cmd);
-	BUG_ON(nseg < 0);
 	if (nseg > 0) {
 		struct	ahc_dma_seg *sg;
 		struct	scatterlist *cur_seg;
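
Both aic7xxx queuecommand paths get the same restructuring: scsi_dma_map() moves to the top, a mapping failure now returns SCSI_MLQUEUE_HOST_BUSY instead of tripping BUG_ON(), and the later SCB-allocation failure path gains the scsi_dma_unmap() it needs to unwind. A compilable userspace model of the fixed flow; the types and helpers are stand-ins and the busy value is illustrative:

	#include <stddef.h>

	#define MLQUEUE_HOST_BUSY 0x1055	/* illustrative constant */

	struct cmd { int mapped; };
	struct scb { int used; };

	static int dma_map(struct cmd *c)    { c->mapped = 1; return 1; }
	static void dma_unmap(struct cmd *c) { c->mapped = 0; }
	static struct scb *get_scb(void)     { static struct scb s; return &s; }

	static int queuecommand(struct cmd *cmd)
	{
		struct scb *scb;
		int nseg = dma_map(cmd);

		if (nseg < 0)			/* mapping failed: retry later */
			return MLQUEUE_HOST_BUSY;

		scb = get_scb();
		if (!scb) {
			dma_unmap(cmd);		/* unwind what we own */
			return MLQUEUE_HOST_BUSY;
		}
		/* ... build and post the SCB using nseg segments ... */
		return 0;
	}

	int main(void)
	{
		struct cmd c = { 0 };

		return queuecommand(&c);
	}
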
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
index 6066998..702e2db 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
@@ -1837,7 +1837,7 @@
 	int and_op;
 
 	and_op = FALSE;
-	if (opcode == AIC_OP_AND || opcode == AIC_OP_JNZ || AIC_OP_JZ)
+	if (opcode == AIC_OP_AND || opcode == AIC_OP_JNZ || opcode == AIC_OP_JZ)
 		and_op = TRUE;
 
 	/*
diff --git a/drivers/scsi/aic94xx/aic94xx.h b/drivers/scsi/aic94xx/aic94xx.h
index eb8efdc..2ef459e 100644
--- a/drivers/scsi/aic94xx/aic94xx.h
+++ b/drivers/scsi/aic94xx/aic94xx.h
@@ -58,7 +58,6 @@
 
 extern struct kmem_cache *asd_dma_token_cache;
 extern struct kmem_cache *asd_ascb_cache;
-extern char sas_addr_str[2*SAS_ADDR_SIZE + 1];
 
 static inline void asd_stringify_sas_addr(char *p, const u8 *sas_addr)
 {
@@ -68,21 +67,6 @@
 	*p = '\0';
 }
 
-static inline void asd_destringify_sas_addr(u8 *sas_addr, const char *p)
-{
-	int i;
-	for (i = 0; i < SAS_ADDR_SIZE; i++) {
-		u8 h, l;
-		if (!*p)
-			break;
-		h = isdigit(*p) ? *p-'0' : *p-'A'+10;
-		p++;
-		l = isdigit(*p) ? *p-'0' : *p-'A'+10;
-		p++;
-		sas_addr[i] = (h<<4) | l;
-	}
-}
-
 struct asd_ha_struct;
 struct asd_ascb;
 
diff --git a/drivers/scsi/aic94xx/aic94xx_dev.c b/drivers/scsi/aic94xx/aic94xx_dev.c
index 72042ca..2e2ddec 100644
--- a/drivers/scsi/aic94xx/aic94xx_dev.c
+++ b/drivers/scsi/aic94xx/aic94xx_dev.c
@@ -35,7 +35,7 @@
 #define SET_DDB(_ddb, _ha) set_bit(_ddb, (_ha)->hw_prof.ddb_bitmap)
 #define CLEAR_DDB(_ddb, _ha) clear_bit(_ddb, (_ha)->hw_prof.ddb_bitmap)
 
-static inline int asd_get_ddb(struct asd_ha_struct *asd_ha)
+static int asd_get_ddb(struct asd_ha_struct *asd_ha)
 {
 	int ddb, i;
 
@@ -71,7 +71,7 @@
 #define NCQ_DATA_SCB_PTR offsetof(struct asd_ddb_stp_sata_target_port, ncq_data_scb_ptr)
 #define ITNL_TIMEOUT    offsetof(struct asd_ddb_ssp_smp_target_port, itnl_timeout)
 
-static inline void asd_free_ddb(struct asd_ha_struct *asd_ha, int ddb)
+static void asd_free_ddb(struct asd_ha_struct *asd_ha, int ddb)
 {
 	if (!ddb || ddb >= 0xFFFF)
 		return;
@@ -79,7 +79,7 @@
 	CLEAR_DDB(ddb, asd_ha);
 }
 
-static inline void asd_set_ddb_type(struct domain_device *dev)
+static void asd_set_ddb_type(struct domain_device *dev)
 {
 	struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
 	int ddb = (int) (unsigned long) dev->lldd_dev;
@@ -109,7 +109,7 @@
 	return 0;
 }
 
-static inline int asd_init_sata(struct domain_device *dev)
+static int asd_init_sata(struct domain_device *dev)
 {
 	struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
 	int ddb = (int) (unsigned long) dev->lldd_dev;
diff --git a/drivers/scsi/aic94xx/aic94xx_dump.c b/drivers/scsi/aic94xx/aic94xx_dump.c
index 3d8c4ff..67eeba3 100644
--- a/drivers/scsi/aic94xx/aic94xx_dump.c
+++ b/drivers/scsi/aic94xx/aic94xx_dump.c
@@ -738,6 +738,8 @@
 	PRINT_LMIP_dword(asd_ha, lseq, DEV_PRES_TIMER_TERM_TS);
 }
 
+#if 0
+
 /**
  * asd_dump_ddb_site -- dump a CSEQ DDB site
  * @asd_ha: pointer to host adapter structure
@@ -880,6 +882,8 @@
 	}
 }
 
+#endif  /*  0  */
+
 /**
  * ads_dump_seq_state -- dump CSEQ and LSEQ states
  * @asd_ha: pointer to host adapter structure
@@ -922,7 +926,9 @@
 	spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
 }
 
-static inline void asd_dump_scb(struct asd_ascb *ascb, int ind)
+#if 0
+
+static void asd_dump_scb(struct asd_ascb *ascb, int ind)
 {
 	asd_printk("scb%d: vaddr: 0x%p, dma_handle: 0x%llx, next: 0x%llx, "
 		   "index:%d, opcode:0x%02x\n",
@@ -956,4 +962,6 @@
 	}
 }
 
+#endif  /*  0  */
+
 #endif /* ASD_DEBUG */
diff --git a/drivers/scsi/aic94xx/aic94xx_dump.h b/drivers/scsi/aic94xx/aic94xx_dump.h
index 0c388e7..191a753 100644
--- a/drivers/scsi/aic94xx/aic94xx_dump.h
+++ b/drivers/scsi/aic94xx/aic94xx_dump.h
@@ -29,24 +29,15 @@
 
 #ifdef ASD_DEBUG
 
-void asd_dump_ddb_0(struct asd_ha_struct *asd_ha);
-void asd_dump_target_ddb(struct asd_ha_struct *asd_ha, u16 site_no);
-void asd_dump_scb_sites(struct asd_ha_struct *asd_ha);
 void asd_dump_seq_state(struct asd_ha_struct *asd_ha, u8 lseq_mask);
 void asd_dump_frame_rcvd(struct asd_phy *phy,
 			 struct done_list_struct *dl);
-void asd_dump_scb_list(struct asd_ascb *ascb, int num);
 #else /* ASD_DEBUG */
 
-static inline void asd_dump_ddb_0(struct asd_ha_struct *asd_ha) { }
-static inline void asd_dump_target_ddb(struct asd_ha_struct *asd_ha,
-				     u16 site_no) { }
-static inline void asd_dump_scb_sites(struct asd_ha_struct *asd_ha) { }
 static inline void asd_dump_seq_state(struct asd_ha_struct *asd_ha,
 				      u8 lseq_mask) { }
 static inline void asd_dump_frame_rcvd(struct asd_phy *phy,
 				       struct done_list_struct *dl) { }
-static inline void asd_dump_scb_list(struct asd_ascb *ascb, int num) { }
 #endif /* ASD_DEBUG */
 
 #endif /* _AIC94XX_DUMP_H_ */
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
index 098b5f3..83a7822 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.c
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -27,6 +27,7 @@
 #include <linux/pci.h>
 #include <linux/delay.h>
 #include <linux/module.h>
+#include <linux/firmware.h>
 
 #include "aic94xx.h"
 #include "aic94xx_reg.h"
@@ -38,16 +39,14 @@
 
 /* ---------- Initialization ---------- */
 
-static void asd_get_user_sas_addr(struct asd_ha_struct *asd_ha)
+static int asd_get_user_sas_addr(struct asd_ha_struct *asd_ha)
 {
-	extern char sas_addr_str[];
-	/* If the user has specified a WWN it overrides other settings
-	 */
-	if (sas_addr_str[0] != '\0')
-		asd_destringify_sas_addr(asd_ha->hw_prof.sas_addr,
-					 sas_addr_str);
-	else if (asd_ha->hw_prof.sas_addr[0] != 0)
-		asd_stringify_sas_addr(sas_addr_str, asd_ha->hw_prof.sas_addr);
+	/* adapter came with a sas address */
+	if (asd_ha->hw_prof.sas_addr[0])
+		return 0;
+
+	return sas_request_addr(asd_ha->sas_ha.core.shost,
+				asd_ha->hw_prof.sas_addr);
 }
 
 static void asd_propagate_sas_addr(struct asd_ha_struct *asd_ha)
@@ -251,7 +250,7 @@
 	return 0;
 }
 
-static inline void asd_get_max_scb_ddb(struct asd_ha_struct *asd_ha)
+static void asd_get_max_scb_ddb(struct asd_ha_struct *asd_ha)
 {
 	asd_ha->hw_prof.max_scbs = asd_get_cmdctx_size(asd_ha)/ASD_SCB_SIZE;
 	asd_ha->hw_prof.max_ddbs = asd_get_devctx_size(asd_ha)/ASD_DDB_SIZE;
@@ -657,8 +656,7 @@
 
 	asd_init_ctxmem(asd_ha);
 
-	asd_get_user_sas_addr(asd_ha);
-	if (!asd_ha->hw_prof.sas_addr[0]) {
+	if (asd_get_user_sas_addr(asd_ha)) {
 		asd_printk("No SAS Address provided for %s\n",
 			   pci_name(asd_ha->pcidev));
 		err = -ENODEV;
@@ -773,7 +771,7 @@
  * asd_process_donelist_isr -- schedule processing of done list entries
  * @asd_ha: pointer to host adapter structure
  */
-static inline void asd_process_donelist_isr(struct asd_ha_struct *asd_ha)
+static void asd_process_donelist_isr(struct asd_ha_struct *asd_ha)
 {
 	tasklet_schedule(&asd_ha->seq.dl_tasklet);
 }
@@ -782,7 +780,7 @@
  * asd_com_sas_isr -- process device communication interrupt (COMINT)
  * @asd_ha: pointer to host adapter structure
  */
-static inline void asd_com_sas_isr(struct asd_ha_struct *asd_ha)
+static void asd_com_sas_isr(struct asd_ha_struct *asd_ha)
 {
 	u32 comstat = asd_read_reg_dword(asd_ha, COMSTAT);
 
@@ -821,7 +819,7 @@
 	asd_chip_reset(asd_ha);
 }
 
-static inline void asd_arp2_err(struct asd_ha_struct *asd_ha, u32 dchstatus)
+static void asd_arp2_err(struct asd_ha_struct *asd_ha, u32 dchstatus)
 {
 	static const char *halt_code[256] = {
 		"UNEXPECTED_INTERRUPT0",
@@ -908,7 +906,7 @@
  * asd_dch_sas_isr -- process device channel interrupt (DEVINT)
  * @asd_ha: pointer to host adapter structure
  */
-static inline void asd_dch_sas_isr(struct asd_ha_struct *asd_ha)
+static void asd_dch_sas_isr(struct asd_ha_struct *asd_ha)
 {
 	u32 dchstatus = asd_read_reg_dword(asd_ha, DCHSTATUS);
 
@@ -923,7 +921,7 @@
  * ads_rbi_exsi_isr -- process external system interface interrupt (INITERR)
  * @asd_ha: pointer to host adapter structure
  */
-static inline void asd_rbi_exsi_isr(struct asd_ha_struct *asd_ha)
+static void asd_rbi_exsi_isr(struct asd_ha_struct *asd_ha)
 {
 	u32 stat0r = asd_read_reg_dword(asd_ha, ASISTAT0R);
 
@@ -971,7 +969,7 @@
  *
  * Asserted on PCIX errors: target abort, etc.
  */
-static inline void asd_hst_pcix_isr(struct asd_ha_struct *asd_ha)
+static void asd_hst_pcix_isr(struct asd_ha_struct *asd_ha)
 {
 	u16 status;
 	u32 pcix_status;
@@ -1044,8 +1042,8 @@
 
 /* ---------- SCB handling ---------- */
 
-static inline struct asd_ascb *asd_ascb_alloc(struct asd_ha_struct *asd_ha,
-					      gfp_t gfp_flags)
+static struct asd_ascb *asd_ascb_alloc(struct asd_ha_struct *asd_ha,
+				       gfp_t gfp_flags)
 {
 	extern struct kmem_cache *asd_ascb_cache;
 	struct asd_seq_data *seq = &asd_ha->seq;
@@ -1144,8 +1142,8 @@
  *
  * LOCKING: called with the pending list lock held.
  */
-static inline void asd_swap_head_scb(struct asd_ha_struct *asd_ha,
-				     struct asd_ascb *ascb)
+static void asd_swap_head_scb(struct asd_ha_struct *asd_ha,
+			      struct asd_ascb *ascb)
 {
 	struct asd_seq_data *seq = &asd_ha->seq;
 	struct asd_ascb *last = list_entry(ascb->list.prev,
@@ -1171,7 +1169,7 @@
  * intended to be called from asd_post_ascb_list(), just prior to
  * posting the SCBs to the sequencer.
  */
-static inline void asd_start_scb_timers(struct list_head *list)
+static void asd_start_scb_timers(struct list_head *list)
 {
 	struct asd_ascb *ascb;
 	list_for_each_entry(ascb, list, list) {
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.h b/drivers/scsi/aic94xx/aic94xx_hwi.h
index abc7575..8c1c282 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.h
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.h
@@ -391,8 +391,6 @@
 void asd_control_led(struct asd_ha_struct *asd_ha, int phy_id, int op);
 void asd_turn_led(struct asd_ha_struct *asd_ha, int phy_id, int op);
 int  asd_enable_phys(struct asd_ha_struct *asd_ha, const u8 phy_mask);
-void asd_build_initiate_link_adm_task(struct asd_ascb *ascb, int phy_id,
-				      u8 subfunc);
 
 void asd_ascb_timedout(unsigned long data);
 int  asd_chip_hardrst(struct asd_ha_struct *asd_ha);
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 88d1e73..90f5e0a 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -56,8 +56,6 @@
 	"\tThe aic94xx SAS LLDD supports both modes.\n"
 	"\tDefault: 0 (Direct Mode).\n");
 
-char sas_addr_str[2*SAS_ADDR_SIZE + 1] = "";
-
 static struct scsi_transport_template *aic94xx_transport_template;
 static int asd_scan_finished(struct Scsi_Host *, unsigned long);
 static void asd_scan_start(struct Scsi_Host *);
@@ -547,7 +545,7 @@
 	},
 };
 
-static inline int asd_create_ha_caches(struct asd_ha_struct *asd_ha)
+static int asd_create_ha_caches(struct asd_ha_struct *asd_ha)
 {
 	asd_ha->scb_pool = dma_pool_create(ASD_DRIVER_NAME "_scb_pool",
 					   &asd_ha->pcidev->dev,
@@ -565,7 +563,7 @@
  * asd_free_edbs -- free empty data buffers
  * asd_ha: pointer to host adapter structure
  */
-static inline void asd_free_edbs(struct asd_ha_struct *asd_ha)
+static void asd_free_edbs(struct asd_ha_struct *asd_ha)
 {
 	struct asd_seq_data *seq = &asd_ha->seq;
 	int i;
@@ -576,7 +574,7 @@
 	seq->edb_arr = NULL;
 }
 
-static inline void asd_free_escbs(struct asd_ha_struct *asd_ha)
+static void asd_free_escbs(struct asd_ha_struct *asd_ha)
 {
 	struct asd_seq_data *seq = &asd_ha->seq;
 	int i;
@@ -591,7 +589,7 @@
 	seq->escb_arr = NULL;
 }
 
-static inline void asd_destroy_ha_caches(struct asd_ha_struct *asd_ha)
+static void asd_destroy_ha_caches(struct asd_ha_struct *asd_ha)
 {
 	int i;
 
diff --git a/drivers/scsi/aic94xx/aic94xx_reg.c b/drivers/scsi/aic94xx/aic94xx_reg.c
index f210dac..56b17c2 100644
--- a/drivers/scsi/aic94xx/aic94xx_reg.c
+++ b/drivers/scsi/aic94xx/aic94xx_reg.c
@@ -32,8 +32,8 @@
  * Offset comes before value to remind that the operation of
  * this function is *offs = val.
  */
-static inline void asd_write_byte(struct asd_ha_struct *asd_ha,
-				  unsigned long offs, u8 val)
+static void asd_write_byte(struct asd_ha_struct *asd_ha,
+			   unsigned long offs, u8 val)
 {
 	if (unlikely(asd_ha->iospace))
 		outb(val,
@@ -43,8 +43,8 @@
 	wmb();
 }
 
-static inline void asd_write_word(struct asd_ha_struct *asd_ha,
-				  unsigned long offs, u16 val)
+static void asd_write_word(struct asd_ha_struct *asd_ha,
+			   unsigned long offs, u16 val)
 {
 	if (unlikely(asd_ha->iospace))
 		outw(val,
@@ -54,8 +54,8 @@
 	wmb();
 }
 
-static inline void asd_write_dword(struct asd_ha_struct *asd_ha,
-				   unsigned long offs, u32 val)
+static void asd_write_dword(struct asd_ha_struct *asd_ha,
+			    unsigned long offs, u32 val)
 {
 	if (unlikely(asd_ha->iospace))
 		outl(val,
@@ -67,8 +67,7 @@
 
 /* Reading from device address space.
  */
-static inline u8 asd_read_byte(struct asd_ha_struct *asd_ha,
-			       unsigned long offs)
+static u8 asd_read_byte(struct asd_ha_struct *asd_ha, unsigned long offs)
 {
 	u8 val;
 	if (unlikely(asd_ha->iospace))
@@ -80,8 +79,8 @@
 	return val;
 }
 
-static inline u16 asd_read_word(struct asd_ha_struct *asd_ha,
-				unsigned long offs)
+static u16 asd_read_word(struct asd_ha_struct *asd_ha,
+			 unsigned long offs)
 {
 	u16 val;
 	if (unlikely(asd_ha->iospace))
@@ -93,8 +92,8 @@
 	return val;
 }
 
-static inline u32 asd_read_dword(struct asd_ha_struct *asd_ha,
-				 unsigned long offs)
+static u32 asd_read_dword(struct asd_ha_struct *asd_ha,
+			  unsigned long offs)
 {
 	u32 val;
 	if (unlikely(asd_ha->iospace))
@@ -124,22 +123,22 @@
 /* We know that the register wanted is in the range
  * of the sliding window.
  */
-#define ASD_READ_SW(ww, type, ord)                                     \
-static inline type asd_read_##ww##_##ord (struct asd_ha_struct *asd_ha,\
-					  u32 reg)                     \
-{                                                                      \
-	struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[0];    \
-	u32 map_offs=(reg - io_handle-> ww##_base )+asd_mem_offs_##ww ();\
-	return asd_read_##ord (asd_ha, (unsigned long) map_offs);      \
+#define ASD_READ_SW(ww, type, ord)					\
+static type asd_read_##ww##_##ord(struct asd_ha_struct *asd_ha,		\
+				   u32 reg)				\
+{									\
+	struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[0];	\
+	u32 map_offs = (reg - io_handle->ww##_base) + asd_mem_offs_##ww();\
+	return asd_read_##ord(asd_ha, (unsigned long)map_offs);	\
 }
 
-#define ASD_WRITE_SW(ww, type, ord)                                    \
-static inline void asd_write_##ww##_##ord (struct asd_ha_struct *asd_ha,\
-				  u32 reg, type val)                   \
-{                                                                      \
-	struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[0];    \
-	u32 map_offs=(reg - io_handle-> ww##_base )+asd_mem_offs_##ww ();\
-	asd_write_##ord (asd_ha, (unsigned long) map_offs, val);       \
+#define ASD_WRITE_SW(ww, type, ord)					\
+static void asd_write_##ww##_##ord(struct asd_ha_struct *asd_ha,	\
+				    u32 reg, type val)			\
+{									\
+	struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[0];	\
+	u32 map_offs = (reg - io_handle->ww##_base) + asd_mem_offs_##ww();\
+	asd_write_##ord(asd_ha, (unsigned long)map_offs, val);		\
 }
 
 ASD_READ_SW(swa, u8,  byte);
@@ -186,7 +185,7 @@
  * @asd_ha: pointer to host adapter structure
  * @reg: register desired to be within range of the new window
  */
-static inline void asd_move_swb(struct asd_ha_struct *asd_ha, u32 reg)
+static void asd_move_swb(struct asd_ha_struct *asd_ha, u32 reg)
 {
 	u32 base = reg & ~(MBAR0_SWB_SIZE-1);
 	pci_write_config_dword(asd_ha->pcidev, PCI_CONF_MBAR0_SWB, base);
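
The ASD_READ_SW/ASD_WRITE_SW rewrite above is whitespace cleanup plus dropping `inline`, but the pattern deserves a gloss: token pasting (`##`) stamps out one correctly-typed accessor per sliding-window/width pair. A self-contained model of the same technique; the window names, bases, and read helpers here are invented:

	#include <stdint.h>
	#include <stdio.h>

	static uint8_t  read_byte(unsigned long off) { return (uint8_t)off; }
	static uint16_t read_word(unsigned long off) { return (uint16_t)off; }

	static const uint32_t swa_base = 0x100;
	static const uint32_t swb_base = 0x200;

	/* One invocation per (window, type, width) generates a typed
	 * function that applies that window's base offset. */
	#define READ_SW(ww, type, ord)					\
	static type read_##ww##_##ord(uint32_t reg)			\
	{								\
		uint32_t map_offs = reg - ww##_base;			\
		return read_##ord(map_offs);				\
	}

	READ_SW(swa, uint8_t,  byte)	/* defines read_swa_byte() */
	READ_SW(swb, uint16_t, word)	/* defines read_swb_word() */

	int main(void)
	{
		printf("%u %u\n", read_swa_byte(0x104), read_swb_word(0x210));
		return 0;
	}
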
diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c
index ab35050..4664331 100644
--- a/drivers/scsi/aic94xx/aic94xx_scb.c
+++ b/drivers/scsi/aic94xx/aic94xx_scb.c
@@ -50,7 +50,7 @@
 			   | CURRENT_SPINUP_HOLD | CURRENT_GTO_TIMEOUT \
 			   | CURRENT_OOB_ERROR)
 
-static inline void get_lrate_mode(struct asd_phy *phy, u8 oob_mode)
+static void get_lrate_mode(struct asd_phy *phy, u8 oob_mode)
 {
 	struct sas_phy *sas_phy = phy->sas_phy.phy;
 
@@ -81,7 +81,7 @@
 		phy->sas_phy.oob_mode = SATA_OOB_MODE;
 }
 
-static inline void asd_phy_event_tasklet(struct asd_ascb *ascb,
+static void asd_phy_event_tasklet(struct asd_ascb *ascb,
 					 struct done_list_struct *dl)
 {
 	struct asd_ha_struct *asd_ha = ascb->ha;
@@ -125,8 +125,7 @@
 }
 
 /* If phys are enabled sparsely, this will do the right thing. */
-static inline unsigned ord_phy(struct asd_ha_struct *asd_ha,
-			       struct asd_phy *phy)
+static unsigned ord_phy(struct asd_ha_struct *asd_ha, struct asd_phy *phy)
 {
 	u8 enabled_mask = asd_ha->hw_prof.enabled_phys;
 	int i, k = 0;
@@ -151,7 +150,7 @@
  * LOCKING: the frame_rcvd_lock needs to be held since this parses the frame
  * buffer.
  */
-static inline void asd_get_attached_sas_addr(struct asd_phy *phy, u8 *sas_addr)
+static void asd_get_attached_sas_addr(struct asd_phy *phy, u8 *sas_addr)
 {
 	if (phy->sas_phy.frame_rcvd[0] == 0x34
 	    && phy->sas_phy.oob_mode == SATA_OOB_MODE) {
@@ -232,9 +231,9 @@
 	spin_unlock_irqrestore(&asd_ha->asd_ports_lock, flags);
 }
 
-static inline void asd_bytes_dmaed_tasklet(struct asd_ascb *ascb,
-					   struct done_list_struct *dl,
-					   int edb_id, int phy_id)
+static void asd_bytes_dmaed_tasklet(struct asd_ascb *ascb,
+				    struct done_list_struct *dl,
+				    int edb_id, int phy_id)
 {
 	unsigned long flags;
 	int edb_el = edb_id + ascb->edb_index;
@@ -255,9 +254,9 @@
 	sas_ha->notify_port_event(&phy->sas_phy, PORTE_BYTES_DMAED);
 }
 
-static inline void asd_link_reset_err_tasklet(struct asd_ascb *ascb,
-					      struct done_list_struct *dl,
-					      int phy_id)
+static void asd_link_reset_err_tasklet(struct asd_ascb *ascb,
+				       struct done_list_struct *dl,
+				       int phy_id)
 {
 	struct asd_ha_struct *asd_ha = ascb->ha;
 	struct sas_ha_struct *sas_ha = &asd_ha->sas_ha;
@@ -308,9 +307,9 @@
 	;
 }
 
-static inline void asd_primitive_rcvd_tasklet(struct asd_ascb *ascb,
-					      struct done_list_struct *dl,
-					      int phy_id)
+static void asd_primitive_rcvd_tasklet(struct asd_ascb *ascb,
+				       struct done_list_struct *dl,
+				       int phy_id)
 {
 	unsigned long flags;
 	struct sas_ha_struct *sas_ha = &ascb->ha->sas_ha;
@@ -715,7 +714,7 @@
 	asd_ascb_free(ascb);
 }
 
-static inline void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd)
+static void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd)
 {
 	/* disable all speeds, then enable defaults */
 	*speed_mask = SAS_SPEED_60_DIS | SAS_SPEED_30_DIS | SAS_SPEED_15_DIS
@@ -820,6 +819,8 @@
 
 /* ---------- INITIATE LINK ADM TASK ---------- */
 
+#if 0
+
 static void link_adm_tasklet_complete(struct asd_ascb *ascb,
 				      struct done_list_struct *dl)
 {
@@ -852,6 +853,8 @@
 	ascb->tasklet_complete = link_adm_tasklet_complete;
 }
 
+#endif  /*  0  */
+
 /* ---------- SCB timer ---------- */
 
 /**
diff --git a/drivers/scsi/aic94xx/aic94xx_sds.c b/drivers/scsi/aic94xx/aic94xx_sds.c
index 2a4c933..4446e3d 100644
--- a/drivers/scsi/aic94xx/aic94xx_sds.c
+++ b/drivers/scsi/aic94xx/aic94xx_sds.c
@@ -590,8 +590,8 @@
 	return err;
 }
 
-static inline int asd_read_flash_seg(struct asd_ha_struct *asd_ha,
-				     void *buffer, u32 offs, int size)
+static int asd_read_flash_seg(struct asd_ha_struct *asd_ha,
+			      void *buffer, u32 offs, int size)
 {
 	asd_read_reg_string(asd_ha, buffer, asd_ha->hw_prof.flash.bar+offs,
 			    size);
diff --git a/drivers/scsi/aic94xx/aic94xx_seq.c b/drivers/scsi/aic94xx/aic94xx_seq.c
index c750fbf..f4272ac 100644
--- a/drivers/scsi/aic94xx/aic94xx_seq.c
+++ b/drivers/scsi/aic94xx/aic94xx_seq.c
@@ -60,7 +60,7 @@
  *
  * Return 0 on success, negative on failure.
  */
-int asd_pause_cseq(struct asd_ha_struct *asd_ha)
+static int asd_pause_cseq(struct asd_ha_struct *asd_ha)
 {
 	int	count = PAUSE_TRIES;
 	u32	arp2ctl;
@@ -87,7 +87,7 @@
  *
  * Return 0 on success, negative on error.
  */
-int asd_unpause_cseq(struct asd_ha_struct *asd_ha)
+static int asd_unpause_cseq(struct asd_ha_struct *asd_ha)
 {
 	u32	arp2ctl;
 	int	count = PAUSE_TRIES;
@@ -115,7 +115,7 @@
  *
  * Return 0 on success, negative on error.
  */
-static inline int asd_seq_pause_lseq(struct asd_ha_struct *asd_ha, int lseq)
+static int asd_seq_pause_lseq(struct asd_ha_struct *asd_ha, int lseq)
 {
 	u32    arp2ctl;
 	int    count = PAUSE_TRIES;
@@ -143,7 +143,7 @@
  *
  * Return 0 on success, negative on failure.
  */
-int asd_pause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask)
+static int asd_pause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask)
 {
 	int lseq;
 	int err = 0;
@@ -164,7 +164,7 @@
  *
  * Return 0 on success, negative on error.
  */
-static inline int asd_seq_unpause_lseq(struct asd_ha_struct *asd_ha, int lseq)
+static int asd_seq_unpause_lseq(struct asd_ha_struct *asd_ha, int lseq)
 {
 	u32 arp2ctl;
 	int count = PAUSE_TRIES;
@@ -186,27 +186,6 @@
 }
 
 
-/**
- * asd_unpause_lseq - unpause the link sequencer(s)
- * @asd_ha: pointer to host adapter structure
- * @lseq_mask: mask of link sequencers of interest
- *
- * Return 0 on success, negative on failure.
- */
-int asd_unpause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask)
-{
-	int lseq;
-	int err = 0;
-
-	for_each_sequencer(lseq_mask, lseq_mask, lseq) {
-		err = asd_seq_unpause_lseq(asd_ha, lseq);
-		if (err)
-			return err;
-	}
-
-	return err;
-}
-
 /* ---------- Downloading CSEQ/LSEQ microcode ---------- */
 
 static int asd_verify_cseq(struct asd_ha_struct *asd_ha, const u8 *_prog,
diff --git a/drivers/scsi/aic94xx/aic94xx_seq.h b/drivers/scsi/aic94xx/aic94xx_seq.h
index 2ea6a0d..ad787c5 100644
--- a/drivers/scsi/aic94xx/aic94xx_seq.h
+++ b/drivers/scsi/aic94xx/aic94xx_seq.h
@@ -58,10 +58,6 @@
 } __attribute__((packed));
 
 #ifdef __KERNEL__
-int asd_pause_cseq(struct asd_ha_struct *asd_ha);
-int asd_unpause_cseq(struct asd_ha_struct *asd_ha);
-int asd_pause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask);
-int asd_unpause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask);
 int asd_init_seqs(struct asd_ha_struct *asd_ha);
 int asd_start_seqs(struct asd_ha_struct *asd_ha);
 int asd_release_firmware(void);
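
Two related cleanups run through these aic94xx hunks. First, inline markers are dropped from functions in .c files: gcc inlines small static functions on its own, and a forced hint mostly hides unused-function warnings and can grow the image. Second, the sequencer pause/unpause helpers whose callers all live in aic94xx_seq.c become static, their prototypes leave aic94xx_seq.h, and asd_unpause_lseq(), which ends up with no callers at all, is removed entirely. A minimal sketch of the visibility change, with hypothetical names:

struct foo_ha;			/* opaque host-adapter type */

/* before (foo.h): exported to every user of the header
 *	int foo_pause(struct foo_ha *ha);
 */

/* after (foo.c only): file-local, prototype dropped from the
 * header; the compiler may inline it where profitable and will
 * warn if it ever becomes unused */
static int foo_pause(struct foo_ha *ha)
{
	return 0;		/* body unchanged by the cleanup */
}
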
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
index 008df9a..326765c 100644
--- a/drivers/scsi/aic94xx/aic94xx_task.c
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -33,7 +33,7 @@
 static void asd_unbuild_smp_ascb(struct asd_ascb *a);
 static void asd_unbuild_ssp_ascb(struct asd_ascb *a);
 
-static inline void asd_can_dequeue(struct asd_ha_struct *asd_ha, int num)
+static void asd_can_dequeue(struct asd_ha_struct *asd_ha, int num)
 {
 	unsigned long flags;
 
@@ -51,9 +51,9 @@
 	[PCI_DMA_NONE]          = DATA_DIR_NONE, /* NO TRANSFER */
 };
 
-static inline int asd_map_scatterlist(struct sas_task *task,
-				      struct sg_el *sg_arr,
-				      gfp_t gfp_flags)
+static int asd_map_scatterlist(struct sas_task *task,
+			       struct sg_el *sg_arr,
+			       gfp_t gfp_flags)
 {
 	struct asd_ascb *ascb = task->lldd_task;
 	struct asd_ha_struct *asd_ha = ascb->ha;
@@ -131,7 +131,7 @@
 	return res;
 }
 
-static inline void asd_unmap_scatterlist(struct asd_ascb *ascb)
+static void asd_unmap_scatterlist(struct asd_ascb *ascb)
 {
 	struct asd_ha_struct *asd_ha = ascb->ha;
 	struct sas_task *task = ascb->uldd_task;
@@ -527,7 +527,7 @@
 
 /* ---------- Execute Task ---------- */
 
-static inline int asd_can_queue(struct asd_ha_struct *asd_ha, int num)
+static int asd_can_queue(struct asd_ha_struct *asd_ha, int num)
 {
 	int res = 0;
 	unsigned long flags;
diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c
index b9ac8f7..633ff40 100644
--- a/drivers/scsi/aic94xx/aic94xx_tmf.c
+++ b/drivers/scsi/aic94xx/aic94xx_tmf.c
@@ -336,7 +336,7 @@
 	asd_ascb_free(ascb);
 }
 
-static inline int asd_clear_nexus(struct sas_task *task)
+static int asd_clear_nexus(struct sas_task *task)
 {
 	int res = TMF_RESP_FUNC_FAILED;
 	int leftover;
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index 3bedf24..8e53f02 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -2983,7 +2983,6 @@
 	.this_id		= 7,
 	.sg_tablesize		= SG_ALL,
 	.cmd_per_lun		= 2,
-	.unchecked_isa_dma	= 0,
 	.use_clustering		= DISABLE_CLUSTERING,
 	.proc_name		= "acornscsi",
 };
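
This hunk, like the cumana_1, dc395x and hptiop template hunks below (and eata_pio's runtime equivalent), deletes assignments that only restated a default. For the host templates the relevant rule is C's designated-initializer semantics: members not named in the initializer are zero-filled (C99 6.7.8), and objects with static storage duration start out zeroed anyway, so an explicit .unchecked_isa_dma = 0 carries no information. Illustrated with a cut-down, hypothetical template:

struct example_template {
	int this_id;
	int unchecked_isa_dma;
	int cmd_per_lun;
};

/* unchecked_isa_dma is implicitly zero here: designated
 * initializers zero-fill every member they do not mention */
static struct example_template example = {
	.this_id	= 7,
	.cmd_per_lun	= 2,
};
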
diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c
index 49d838e..a3398fe 100644
--- a/drivers/scsi/arm/cumana_1.c
+++ b/drivers/scsi/arm/cumana_1.c
@@ -222,7 +222,6 @@
 	.this_id		= 7,
 	.sg_tablesize		= SG_ALL,
 	.cmd_per_lun		= 2,
-	.unchecked_isa_dma	= 0,
 	.use_clustering		= DISABLE_CLUSTERING,
 	.proc_name		= "CumanaSCSI-1",
 };
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index 7aad154..92d1cb1 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -113,7 +113,7 @@
 	unsigned char  asc;
 	unsigned char  ascq;
 	int	       errno;
-} err[] = {
+} ch_err[] = {
 /* Just filled in what looks right. Haven't checked any standard paper for
    these errno assignments, so they may be wrong... */
 	{
@@ -155,11 +155,11 @@
 	/* Check to see if additional sense information is available */
 	if (scsi_sense_valid(sshdr) &&
 	    sshdr->asc != 0) {
-		for (i = 0; err[i].errno != 0; i++) {
-			if (err[i].sense == sshdr->sense_key &&
-			    err[i].asc   == sshdr->asc &&
-			    err[i].ascq  == sshdr->ascq) {
-				errno = -err[i].errno;
+		for (i = 0; ch_err[i].errno != 0; i++) {
+			if (ch_err[i].sense == sshdr->sense_key &&
+			    ch_err[i].asc   == sshdr->asc &&
+			    ch_err[i].ascq  == sshdr->ascq) {
+				errno = -ch_err[i].errno;
 				break;
 			}
 		}
@@ -721,8 +721,8 @@
 	case CHIOGELEM:
 	{
 		struct changer_get_element cge;
-		u_char  cmd[12];
-		u_char  *buffer;
+		u_char ch_cmd[12];
+		u_char *buffer;
 		unsigned int elem;
 		int     result,i;
 
@@ -739,17 +739,18 @@
 		mutex_lock(&ch->lock);
 
 	voltag_retry:
-		memset(cmd,0,sizeof(cmd));
-		cmd[0] = READ_ELEMENT_STATUS;
-		cmd[1] = (ch->device->lun << 5) |
+		memset(ch_cmd, 0, sizeof(ch_cmd));
+		ch_cmd[0] = READ_ELEMENT_STATUS;
+		ch_cmd[1] = (ch->device->lun << 5) |
 			(ch->voltags ? 0x10 : 0) |
 			ch_elem_to_typecode(ch,elem);
-		cmd[2] = (elem >> 8) & 0xff;
-		cmd[3] = elem        & 0xff;
-		cmd[5] = 1;
-		cmd[9] = 255;
+		ch_cmd[2] = (elem >> 8) & 0xff;
+		ch_cmd[3] = elem        & 0xff;
+		ch_cmd[5] = 1;
+		ch_cmd[9] = 255;
 
-		if (0 == (result = ch_do_scsi(ch, cmd, buffer, 256, DMA_FROM_DEVICE))) {
+		result = ch_do_scsi(ch, ch_cmd, buffer, 256, DMA_FROM_DEVICE);
+		if (!result) {
 			cge.cge_status = buffer[18];
 			cge.cge_flags = 0;
 			if (buffer[18] & CESTATUS_EXCEPT) {
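
Two small things are going on in the ch.c hunks. The file-scope sense-translation table err[] becomes ch_err[] and the local cmd[] becomes ch_cmd[], presumably so these very generic names cannot collide with, or shadow, identically named symbols from shared headers; and the `if (0 == (result = ...))` idiom is unfolded so the call and the test read as separate statements. A sketch of why driver-prefixed names are safer:

/* a file-scope table named plain "err" is one shared header away
 * from a redeclaration clash; prefixing it with the driver name
 * keeps the symbol out of the way */
static struct {
	unsigned char sense, asc, ascq;
	int err_val;
} ch_err[] = {
	{ 0x05, 0x21, 0x01, 22 },	/* illustrative entry */
	{ 0, 0, 0, 0 }			/* terminator ends the lookup loop */
};
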
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index e351db6..075e239 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -4761,7 +4761,6 @@
 	.cmd_per_lun            = DC395x_MAX_CMD_PER_LUN,
 	.eh_abort_handler       = dc395x_eh_abort,
 	.eh_bus_reset_handler   = dc395x_eh_bus_reset,
-	.unchecked_isa_dma      = 0,
 	.use_clustering         = DISABLE_CLUSTERING,
 };
 
diff --git a/drivers/scsi/eata_pio.c b/drivers/scsi/eata_pio.c
index b5a6092..952505c 100644
--- a/drivers/scsi/eata_pio.c
+++ b/drivers/scsi/eata_pio.c
@@ -815,8 +815,6 @@
 	else
 		hd->primary = 1;
 
-	sh->unchecked_isa_dma = 0;	/* We can only do PIO */
-
 	hd->next = NULL;	/* build a linked list of all HBAs */
 	hd->prev = last_HBA;
 	if (hd->prev != NULL)
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 0b2080d..c6d6e7c 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -85,10 +85,10 @@
 
 /* The meaning of the Scsi_Pointer members in this driver is as follows:
  * ptr:                     Chaining
- * this_residual:           gdth_bufflen
- * buffer:                  gdth_sglist
+ * this_residual:           unused
+ * buffer:                  unused
  * dma_handle:              unused
- * buffers_residual:        gdth_sg_count
+ * buffers_residual:        unused
  * Status:                  unused
  * Message:                 unused
  * have_data_in:            unused
@@ -372,47 +372,6 @@
     .release = gdth_close,
 };
 
-/*
- * gdth scsi_command access wrappers.
- *   The six functions below are used throughout the driver to access a
- *   scsi_cmnd's I/O parameters. We do not use the regular accessors from
- *   scsi_cmnd.h because of gdth_execute(): it is not recommended for LLDs
- *   to set scsi_cmnd's I/O members directly, so this driver uses the SCp
- *   members for I/O parameters and copies scsi_cmnd's members into them
- *   in queuecommand. For internal commands issued through gdth_execute(),
- *   the SCp members are set directly.
- */
-static inline unsigned gdth_bufflen(struct scsi_cmnd *cmd)
-{
-	return (unsigned)cmd->SCp.this_residual;
-}
-
-static inline void gdth_set_bufflen(struct scsi_cmnd *cmd, unsigned bufflen)
-{
-	cmd->SCp.this_residual = bufflen;
-}
-
-static inline unsigned gdth_sg_count(struct scsi_cmnd *cmd)
-{
-	return (unsigned)cmd->SCp.buffers_residual;
-}
-
-static inline void gdth_set_sg_count(struct scsi_cmnd *cmd, unsigned sg_count)
-{
-	cmd->SCp.buffers_residual = sg_count;
-}
-
-static inline struct scatterlist *gdth_sglist(struct scsi_cmnd *cmd)
-{
-	return cmd->SCp.buffer;
-}
-
-static inline void gdth_set_sglist(struct scsi_cmnd *cmd,
-                                   struct scatterlist *sglist)
-{
-	cmd->SCp.buffer = sglist;
-}
-
 #include "gdth_proc.h"
 #include "gdth_proc.c"
 
@@ -591,125 +550,111 @@
 #endif /* CONFIG_ISA */
 
 #ifdef CONFIG_PCI
-static void gdth_search_dev(gdth_pci_str *pcistr, ushort *cnt,
-                            ushort vendor, ushort dev);
+static bool gdth_pci_registered;
 
-static int __init gdth_search_pci(gdth_pci_str *pcistr)
+static bool gdth_search_vortex(ushort device)
 {
-    ushort device, cnt;
-    
-    TRACE(("gdth_search_pci()\n"));
-
-    cnt = 0;
-    for (device = 0; device <= PCI_DEVICE_ID_VORTEX_GDT6555; ++device)
-        gdth_search_dev(pcistr, &cnt, PCI_VENDOR_ID_VORTEX, device);
-    for (device = PCI_DEVICE_ID_VORTEX_GDT6x17RP; 
-         device <= PCI_DEVICE_ID_VORTEX_GDTMAXRP; ++device)
-        gdth_search_dev(pcistr, &cnt, PCI_VENDOR_ID_VORTEX, device);
-    gdth_search_dev(pcistr, &cnt, PCI_VENDOR_ID_VORTEX, 
-                    PCI_DEVICE_ID_VORTEX_GDTNEWRX);
-    gdth_search_dev(pcistr, &cnt, PCI_VENDOR_ID_VORTEX, 
-                    PCI_DEVICE_ID_VORTEX_GDTNEWRX2);
-    gdth_search_dev(pcistr, &cnt, PCI_VENDOR_ID_INTEL,
-                    PCI_DEVICE_ID_INTEL_SRC);
-    gdth_search_dev(pcistr, &cnt, PCI_VENDOR_ID_INTEL,
-                    PCI_DEVICE_ID_INTEL_SRC_XSCALE);
-    return cnt;
+	if (device <= PCI_DEVICE_ID_VORTEX_GDT6555)
+		return true;
+	if (device >= PCI_DEVICE_ID_VORTEX_GDT6x17RP &&
+	    device <= PCI_DEVICE_ID_VORTEX_GDTMAXRP)
+		return true;
+	if (device == PCI_DEVICE_ID_VORTEX_GDTNEWRX ||
+	    device == PCI_DEVICE_ID_VORTEX_GDTNEWRX2)
+		return true;
+	return false;
 }
 
+static int gdth_pci_probe_one(gdth_pci_str *pcistr, gdth_ha_str **ha_out);
+static int gdth_pci_init_one(struct pci_dev *pdev,
+			     const struct pci_device_id *ent);
+static void gdth_pci_remove_one(struct pci_dev *pdev);
+static void gdth_remove_one(gdth_ha_str *ha);
+
 /* Vortex only makes RAID controllers.
  * We do not really want to specify all 550 ids here, so wildcard match.
  */
-static struct pci_device_id gdthtable[] __maybe_unused = {
-    {PCI_VENDOR_ID_VORTEX,PCI_ANY_ID,PCI_ANY_ID, PCI_ANY_ID},
-    {PCI_VENDOR_ID_INTEL,PCI_DEVICE_ID_INTEL_SRC,PCI_ANY_ID,PCI_ANY_ID}, 
-    {PCI_VENDOR_ID_INTEL,PCI_DEVICE_ID_INTEL_SRC_XSCALE,PCI_ANY_ID,PCI_ANY_ID}, 
-    {0}
+static const struct pci_device_id gdthtable[] = {
+	{ PCI_VDEVICE(VORTEX, PCI_ANY_ID) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SRC) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SRC_XSCALE) },
+	{ }	/* terminate list */
 };
-MODULE_DEVICE_TABLE(pci,gdthtable);
+MODULE_DEVICE_TABLE(pci, gdthtable);
 
-static void __init gdth_search_dev(gdth_pci_str *pcistr, ushort *cnt,
-                                   ushort vendor, ushort device)
+static struct pci_driver gdth_pci_driver = {
+	.name		= "gdth",
+	.id_table	= gdthtable,
+	.probe		= gdth_pci_init_one,
+	.remove		= gdth_pci_remove_one,
+};
+
+static void gdth_pci_remove_one(struct pci_dev *pdev)
 {
-    ulong base0, base1, base2;
-    struct pci_dev *pdev;
-    
-    TRACE(("gdth_search_dev() cnt %d vendor %x device %x\n",
-          *cnt, vendor, device));
+	gdth_ha_str *ha = pci_get_drvdata(pdev);
 
-    pdev = NULL;
-    while ((pdev = pci_get_device(vendor, device, pdev))
-           != NULL) {
-        if (pci_enable_device(pdev))
-            continue;
-        if (*cnt >= MAXHA) {
-            pci_dev_put(pdev);
-            return;
-        }
+	pci_set_drvdata(pdev, NULL);
+
+	list_del(&ha->list);
+	gdth_remove_one(ha);
+
+	pci_disable_device(pdev);
+}
+
+static int gdth_pci_init_one(struct pci_dev *pdev,
+				       const struct pci_device_id *ent)
+{
+	ushort vendor = pdev->vendor;
+	ushort device = pdev->device;
+	ulong base0, base1, base2;
+	int rc;
+	gdth_pci_str gdth_pcistr;
+	gdth_ha_str *ha = NULL;
+    
+	TRACE(("gdth_search_dev() cnt %d vendor %x device %x\n",
+	       gdth_ctr_count, vendor, device));
+
+	memset(&gdth_pcistr, 0, sizeof(gdth_pcistr));
+
+	if (vendor == PCI_VENDOR_ID_VORTEX && !gdth_search_vortex(device))
+		return -ENODEV;
+
+	rc = pci_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	if (gdth_ctr_count >= MAXHA)
+		return -EBUSY;
 
         /* GDT PCI controller found, resources are already in pdev */
-        pcistr[*cnt].pdev = pdev;
-        pcistr[*cnt].irq = pdev->irq;
+	gdth_pcistr.pdev = pdev;
         base0 = pci_resource_flags(pdev, 0);
         base1 = pci_resource_flags(pdev, 1);
         base2 = pci_resource_flags(pdev, 2);
         if (device <= PCI_DEVICE_ID_VORTEX_GDT6000B ||   /* GDT6000/B */
             device >= PCI_DEVICE_ID_VORTEX_GDT6x17RP) {  /* MPR */
             if (!(base0 & IORESOURCE_MEM)) 
-                continue;
-            pcistr[*cnt].dpmem = pci_resource_start(pdev, 0);
+		return -ENODEV;
+	    gdth_pcistr.dpmem = pci_resource_start(pdev, 0);
         } else {                                  /* GDT6110, GDT6120, .. */
             if (!(base0 & IORESOURCE_MEM) ||
                 !(base2 & IORESOURCE_MEM) ||
                 !(base1 & IORESOURCE_IO)) 
-                continue;
-            pcistr[*cnt].dpmem = pci_resource_start(pdev, 2);
-            pcistr[*cnt].io_mm = pci_resource_start(pdev, 0);
-            pcistr[*cnt].io    = pci_resource_start(pdev, 1);
+		return -ENODEV;
+	    gdth_pcistr.dpmem = pci_resource_start(pdev, 2);
+	    gdth_pcistr.io    = pci_resource_start(pdev, 1);
         }
         TRACE2(("Controller found at %d/%d, irq %d, dpmem 0x%lx\n",
-                pcistr[*cnt].pdev->bus->number,
-		PCI_SLOT(pcistr[*cnt].pdev->devfn),
-                pcistr[*cnt].irq, pcistr[*cnt].dpmem));
-        (*cnt)++;
-    }       
-}   
+		gdth_pcistr.pdev->bus->number,
+		PCI_SLOT(gdth_pcistr.pdev->devfn),
+		gdth_pcistr.irq,
+		gdth_pcistr.dpmem));
 
-static void __init gdth_sort_pci(gdth_pci_str *pcistr, int cnt)
-{    
-    gdth_pci_str temp;
-    int i, changed;
-    
-    TRACE(("gdth_sort_pci() cnt %d\n",cnt));
-    if (cnt == 0)
-        return;
+	rc = gdth_pci_probe_one(&gdth_pcistr, &ha);
+	if (rc)
+		return rc;
 
-    do {
-        changed = FALSE;
-        for (i = 0; i < cnt-1; ++i) {
-            if (!reverse_scan) {
-                if ((pcistr[i].pdev->bus->number > pcistr[i+1].pdev->bus->number) ||
-                    (pcistr[i].pdev->bus->number == pcistr[i+1].pdev->bus->number &&
-                     PCI_SLOT(pcistr[i].pdev->devfn) >
-                     PCI_SLOT(pcistr[i+1].pdev->devfn))) {
-                    temp = pcistr[i];
-                    pcistr[i] = pcistr[i+1];
-                    pcistr[i+1] = temp;
-                    changed = TRUE;
-                }
-            } else {
-                if ((pcistr[i].pdev->bus->number < pcistr[i+1].pdev->bus->number) ||
-                    (pcistr[i].pdev->bus->number == pcistr[i+1].pdev->bus->number &&
-                     PCI_SLOT(pcistr[i].pdev->devfn) <
-                     PCI_SLOT(pcistr[i+1].pdev->devfn))) {
-                    temp = pcistr[i];
-                    pcistr[i] = pcistr[i+1];
-                    pcistr[i+1] = temp;
-                    changed = TRUE;
-                }
-            }
-        }
-    } while (changed);
+	return 0;
 }
 #endif /* CONFIG_PCI */
 
@@ -909,7 +854,8 @@
 #endif /* CONFIG_ISA */
 
 #ifdef CONFIG_PCI
-static int __init gdth_init_pci(gdth_pci_str *pcistr,gdth_ha_str *ha)
+static int gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
+				   gdth_ha_str *ha)
 {
     register gdt6_dpram_str __iomem *dp6_ptr;
     register gdt6c_dpram_str __iomem *dp6c_ptr;
@@ -921,14 +867,14 @@
 
     TRACE(("gdth_init_pci()\n"));
 
-    if (pcistr->pdev->vendor == PCI_VENDOR_ID_INTEL)
+    if (pdev->vendor == PCI_VENDOR_ID_INTEL)
         ha->oem_id = OEM_ID_INTEL;
     else
         ha->oem_id = OEM_ID_ICP;
-    ha->brd_phys = (pcistr->pdev->bus->number << 8) | (pcistr->pdev->devfn & 0xf8);
-    ha->stype = (ulong32)pcistr->pdev->device;
-    ha->irq = pcistr->irq;
-    ha->pdev = pcistr->pdev;
+    ha->brd_phys = (pdev->bus->number << 8) | (pdev->devfn & 0xf8);
+    ha->stype = (ulong32)pdev->device;
+    ha->irq = pdev->irq;
+    ha->pdev = pdev;
     
     if (ha->pdev->device <= PCI_DEVICE_ID_VORTEX_GDT6000B) {  /* GDT6000/B */
         TRACE2(("init_pci() dpmem %lx irq %d\n",pcistr->dpmem,ha->irq));
@@ -956,8 +902,7 @@
                     continue;
                 }
                 iounmap(ha->brd);
-                pci_write_config_dword(pcistr->pdev, 
-                                       PCI_BASE_ADDRESS_0, i);
+		pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, i);
                 ha->brd = ioremap(i, sizeof(gdt6_dpram_str)); 
                 if (ha->brd == NULL) {
                     printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
@@ -1066,8 +1011,7 @@
                     continue;
                 }
                 iounmap(ha->brd);
-                pci_write_config_dword(pcistr->pdev, 
-                                       PCI_BASE_ADDRESS_2, i);
+		pci_write_config_dword(pdev, PCI_BASE_ADDRESS_2, i);
                 ha->brd = ioremap(i, sizeof(gdt6c_dpram_str)); 
                 if (ha->brd == NULL) {
                     printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
@@ -1159,16 +1103,16 @@
         }
 
         /* manipulate config. space to enable DPMEM, start RP controller */
-        pci_read_config_word(pcistr->pdev, PCI_COMMAND, &command);
+	pci_read_config_word(pdev, PCI_COMMAND, &command);
         command |= 6;
-        pci_write_config_word(pcistr->pdev, PCI_COMMAND, command);
-        if (pci_resource_start(pcistr->pdev, 8) == 1UL)
-            pci_resource_start(pcistr->pdev, 8) = 0UL;
+	pci_write_config_word(pdev, PCI_COMMAND, command);
+	if (pci_resource_start(pdev, 8) == 1UL)
+	    pci_resource_start(pdev, 8) = 0UL;
         i = 0xFEFF0001UL;
-        pci_write_config_dword(pcistr->pdev, PCI_ROM_ADDRESS, i);
+	pci_write_config_dword(pdev, PCI_ROM_ADDRESS, i);
         gdth_delay(1);
-        pci_write_config_dword(pcistr->pdev, PCI_ROM_ADDRESS,
-                               pci_resource_start(pcistr->pdev, 8));
+	pci_write_config_dword(pdev, PCI_ROM_ADDRESS,
+			       pci_resource_start(pdev, 8));
         
         dp6m_ptr = ha->brd;
 
@@ -1195,8 +1139,7 @@
                     continue;
                 }
                 iounmap(ha->brd);
-                pci_write_config_dword(pcistr->pdev, 
-                                       PCI_BASE_ADDRESS_0, i);
+		pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, i);
                 ha->brd = ioremap(i, sizeof(gdt6m_dpram_str)); 
                 if (ha->brd == NULL) {
                     printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
@@ -2353,12 +2296,12 @@
 static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
                                     char *buffer, ushort count)
 {
-    ushort cpcount,i, max_sg = gdth_sg_count(scp);
+    ushort cpcount,i, max_sg = scsi_sg_count(scp);
     ushort cpsum,cpnow;
     struct scatterlist *sl;
     char *address;
 
-    cpcount = min_t(ushort, count, gdth_bufflen(scp));
+    cpcount = min_t(ushort, count, scsi_bufflen(scp));
 
     if (cpcount) {
         cpsum=0;
@@ -2366,7 +2309,7 @@
             unsigned long flags;
             cpnow = (ushort)sl->length;
             TRACE(("copy_internal() now %d sum %d count %d %d\n",
-                          cpnow, cpsum, cpcount, gdth_bufflen(scp)));
+                          cpnow, cpsum, cpcount, scsi_bufflen(scp)));
             if (cpsum+cpnow > cpcount) 
                 cpnow = cpcount - cpsum;
             cpsum += cpnow;
@@ -2589,10 +2532,10 @@
             cmdp->u.cache.BlockCnt = blockcnt;
         }
 
-        if (gdth_bufflen(scp)) {
+        if (scsi_bufflen(scp)) {
             cmndinfo->dma_dir = (read_write == 1 ?
                 PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);   
-            sgcnt = pci_map_sg(ha->pdev, gdth_sglist(scp), gdth_sg_count(scp),
+            sgcnt = pci_map_sg(ha->pdev, scsi_sglist(scp), scsi_sg_count(scp),
                                cmndinfo->dma_dir);
             if (mode64) {
                 struct scatterlist *sl;
@@ -2739,7 +2682,7 @@
             cmdp->u.raw64.lun        = l;
             cmdp->u.raw64.bus        = b;
             cmdp->u.raw64.priority   = 0;
-            cmdp->u.raw64.sdlen      = gdth_bufflen(scp);
+            cmdp->u.raw64.sdlen      = scsi_bufflen(scp);
             cmdp->u.raw64.sense_len  = 16;
             cmdp->u.raw64.sense_data = sense_paddr;
             cmdp->u.raw64.direction  = 
@@ -2756,7 +2699,7 @@
             cmdp->u.raw.bus        = b;
             cmdp->u.raw.priority   = 0;
             cmdp->u.raw.link_p     = 0;
-            cmdp->u.raw.sdlen      = gdth_bufflen(scp);
+            cmdp->u.raw.sdlen      = scsi_bufflen(scp);
             cmdp->u.raw.sense_len  = 16;
             cmdp->u.raw.sense_data = sense_paddr;
             cmdp->u.raw.direction  = 
@@ -2765,9 +2708,9 @@
             cmdp->u.raw.sg_ranz    = 0;
         }
 
-        if (gdth_bufflen(scp)) {
+        if (scsi_bufflen(scp)) {
             cmndinfo->dma_dir = PCI_DMA_BIDIRECTIONAL;
-            sgcnt = pci_map_sg(ha->pdev, gdth_sglist(scp), gdth_sg_count(scp),
+            sgcnt = pci_map_sg(ha->pdev, scsi_sglist(scp), scsi_sg_count(scp),
                                cmndinfo->dma_dir);
             if (mode64) {
                 struct scatterlist *sl;
@@ -3388,8 +3331,8 @@
             /* retry */
             return 2;
         }
-        if (gdth_bufflen(scp))
-            pci_unmap_sg(ha->pdev, gdth_sglist(scp), gdth_sg_count(scp),
+        if (scsi_bufflen(scp))
+            pci_unmap_sg(ha->pdev, scsi_sglist(scp), scsi_sg_count(scp),
                          cmndinfo->dma_dir);
 
         if (cmndinfo->sense_paddr)
@@ -4031,10 +3974,6 @@
     gdth_update_timeout(scp, scp->timeout_per_command * 6);
     cmndinfo->priority = DEFAULT_PRI;
 
-    gdth_set_bufflen(scp, scsi_bufflen(scp));
-    gdth_set_sg_count(scp, scsi_sg_count(scp));
-    gdth_set_sglist(scp, scsi_sglist(scp));
-
     return __gdth_queuecommand(ha, scp, cmndinfo);
 }
 
@@ -4955,12 +4894,16 @@
 #endif /* CONFIG_EISA */
 
 #ifdef CONFIG_PCI
-static int __init gdth_pci_probe_one(gdth_pci_str *pcistr, int ctr)
+static int gdth_pci_probe_one(gdth_pci_str *pcistr,
+			     gdth_ha_str **ha_out)
 {
 	struct Scsi_Host *shp;
 	gdth_ha_str *ha;
 	dma_addr_t scratch_dma_handle = 0;
 	int error, i;
+	struct pci_dev *pdev = pcistr->pdev;
+
+	*ha_out = NULL;
 
 	shp = scsi_host_alloc(&gdth_template, sizeof(gdth_ha_str));
 	if (!shp)
@@ -4968,13 +4911,13 @@
 	ha = shost_priv(shp);
 
 	error = -ENODEV;
-	if (!gdth_init_pci(&pcistr[ctr],ha))
+	if (!gdth_init_pci(pdev, pcistr, ha))
 		goto out_host_put;
 
 	/* controller found and initialized */
 	printk("Configuring GDT-PCI HA at %d/%d IRQ %u\n",
-		pcistr[ctr].pdev->bus->number,
-		PCI_SLOT(pcistr[ctr].pdev->devfn),
+		pdev->bus->number,
+		PCI_SLOT(pdev->devfn),
 		ha->irq);
 
 	error = request_irq(ha->irq, gdth_interrupt,
@@ -5019,7 +4962,7 @@
 
 	ha->scratch_busy = FALSE;
 	ha->req_first = NULL;
-	ha->tid_cnt = pcistr[ctr].pdev->device >= 0x200 ? MAXID : MAX_HDRIVES;
+	ha->tid_cnt = pdev->device >= 0x200 ? MAXID : MAX_HDRIVES;
 	if (max_ids > 0 && max_ids < ha->tid_cnt)
 		ha->tid_cnt = max_ids;
 	for (i = 0; i < GDTH_MAXCMDS; ++i)
@@ -5039,16 +4982,16 @@
 	/* 64-bit DMA only supported from FW >= x.43 */
 	if (!(ha->cache_feat & ha->raw_feat & ha->screen_feat & GDT_64BIT) ||
 	    !ha->dma64_support) {
-		if (pci_set_dma_mask(pcistr[ctr].pdev, DMA_32BIT_MASK)) {
+		if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
 			printk(KERN_WARNING "GDT-PCI %d: "
 				"Unable to set 32-bit DMA\n", ha->hanum);
 				goto out_free_coal_stat;
 		}
 	} else {
 		shp->max_cmd_len = 16;
-		if (!pci_set_dma_mask(pcistr[ctr].pdev, DMA_64BIT_MASK)) {
+		if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
 			printk("GDT-PCI %d: 64-bit DMA enabled\n", ha->hanum);
-		} else if (pci_set_dma_mask(pcistr[ctr].pdev, DMA_32BIT_MASK)) {
+		} else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
 			printk(KERN_WARNING "GDT-PCI %d: "
 				"Unable to set 64/32-bit DMA\n", ha->hanum);
 			goto out_free_coal_stat;
@@ -5062,13 +5005,17 @@
 	spin_lock_init(&ha->smp_lock);
 	gdth_enable_int(ha);
 
-	error = scsi_add_host(shp, &pcistr[ctr].pdev->dev);
+	error = scsi_add_host(shp, &pdev->dev);
 	if (error)
 		goto out_free_coal_stat;
 	list_add_tail(&ha->list, &gdth_instances);
 
+	pci_set_drvdata(ha->pdev, ha);
+
 	scsi_scan_host(shp);
 
+	*ha_out = ha;
+
 	return 0;
 
  out_free_coal_stat:
@@ -5185,16 +5132,8 @@
 
 #ifdef CONFIG_PCI
 	/* scanning for PCI controllers */
-	{
-		gdth_pci_str pcistr[MAXHA];
-		int cnt,ctr;
-
-		cnt = gdth_search_pci(pcistr);
-		printk("GDT-HA: Found %d PCI Storage RAID Controllers\n", cnt);
-		gdth_sort_pci(pcistr,cnt);
-		for (ctr = 0; ctr < cnt; ++ctr)
-			gdth_pci_probe_one(pcistr, ctr);
-	}
+	if (pci_register_driver(&gdth_pci_driver) == 0)
+		gdth_pci_registered = true;
 #endif /* CONFIG_PCI */
 
 	TRACE2(("gdth_detect() %d controller detected\n", gdth_ctr_count));
@@ -5227,6 +5166,11 @@
 	del_timer_sync(&gdth_timer);
 #endif
 
+#ifdef CONFIG_PCI
+	if (gdth_pci_registered)
+		pci_unregister_driver(&gdth_pci_driver);
+#endif
+
 	list_for_each_entry(ha, &gdth_instances, list)
 		gdth_remove_one(ha);
 }
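
The big gdth rework above replaces a hand-rolled PCI scan (gdth_search_pci() iterating vendor/device IDs into a fixed pcistr[MAXHA] array, then gdth_sort_pci() bubble-sorting it to honor reverse_scan) with the standard hotplug model: an id_table plus probe/remove callbacks handed to pci_register_driver(), after which the PCI core owns enumeration and hot-plug, and per-device state travels via pci_set_drvdata(). A minimal skeleton of that model, with hypothetical names:

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>

struct example_ha {
	int dummy;			/* stand-in per-adapter state */
};

static const struct pci_device_id example_ids[] = {
	{ PCI_VDEVICE(VORTEX, PCI_ANY_ID) },	/* wildcard one vendor */
	{ }					/* terminate list */
};
MODULE_DEVICE_TABLE(pci, example_ids);

static int example_probe(struct pci_dev *pdev,
			 const struct pci_device_id *ent)
{
	struct example_ha *ha;
	int rc;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	ha = kzalloc(sizeof(*ha), GFP_KERNEL);
	if (!ha) {
		pci_disable_device(pdev);
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, ha);	/* fetched back in remove() */
	return 0;
}

static void example_remove(struct pci_dev *pdev)
{
	struct example_ha *ha = pci_get_drvdata(pdev);

	kfree(ha);			/* tear down in reverse of probe */
	pci_disable_device(pdev);
}

static struct pci_driver example_driver = {
	.name		= "example",
	.id_table	= example_ids,
	.probe		= example_probe,
	.remove		= example_remove,
};

static int __init example_init(void)
{
	return pci_register_driver(&example_driver);
}

static void __exit example_exit(void)
{
	pci_unregister_driver(&example_driver);
}

module_init(example_init);
module_exit(example_exit);

gdth itself keeps a gdth_pci_registered flag because registration happens inside the legacy gdth_detect() path, where a pci_register_driver() failure must not abort the ISA/EISA probing, and the exit path only unregisters what was actually registered.
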
diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h
index 26e4e92..ca92476 100644
--- a/drivers/scsi/gdth.h
+++ b/drivers/scsi/gdth.h
@@ -839,8 +839,6 @@
     struct pci_dev      *pdev;
     ulong               dpmem;                  /* DPRAM address */
     ulong               io;                     /* IO address */
-    ulong               io_mm;                  /* IO address mem. mapped */
-    unchar              irq;                    /* IRQ */
 } gdth_pci_str;
 
 
diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c
index 91f8522..ca73637 100644
--- a/drivers/scsi/gvp11.c
+++ b/drivers/scsi/gvp11.c
@@ -322,6 +322,9 @@
 	 */
 	regs.SASR = &(DMA(instance)->SASR);
 	regs.SCMD = &(DMA(instance)->SCMD);
+	HDATA(instance)->no_sync = 0xff;
+	HDATA(instance)->fast = 0;
+	HDATA(instance)->dma_mode = CTRL_DMA;
 	wd33c93_init(instance, regs, dma_setup, dma_stop,
 		     (epc & GVP_SCSICLKMASK) ? WD33C93_FS_8_10
 					     : WD33C93_FS_12_15);
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index ed7e0a1..1592640 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -347,7 +347,6 @@
 	shost->unchecked_isa_dma = sht->unchecked_isa_dma;
 	shost->use_clustering = sht->use_clustering;
 	shost->ordered_tag = sht->ordered_tag;
-	shost->active_mode = sht->supported_mode;
 
 	if (sht->supported_mode == MODE_UNKNOWN)
 		/* means we didn't set it ... default to INITIATOR */
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index ff149ad..beecda9 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -338,7 +338,8 @@
 	req->header.size =
 		cpu_to_le32(sizeof(struct hpt_iop_request_get_config));
 	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
-	req->header.context = cpu_to_le64(IOP_REQUEST_TYPE_GET_CONFIG<<5);
+	req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG<<5);
+	req->header.context_hi32 = 0;
 
 	if (iop_send_sync_request_mv(hba, 0, 20000)) {
 		dprintk("Get config send cmd failed\n");
@@ -392,7 +393,8 @@
 	req->header.size =
 		cpu_to_le32(sizeof(struct hpt_iop_request_set_config));
 	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
-	req->header.context = cpu_to_le64(IOP_REQUEST_TYPE_SET_CONFIG<<5);
+	req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG<<5);
+	req->header.context_hi32 = 0;
 
 	if (iop_send_sync_request_mv(hba, 0, 20000)) {
 		dprintk("Set config send cmd failed\n");
@@ -903,7 +905,6 @@
 	.eh_device_reset_handler    = hptiop_reset,
 	.eh_bus_reset_handler       = hptiop_reset,
 	.info                       = hptiop_info,
-	.unchecked_isa_dma          = 0,
 	.emulated                   = 0,
 	.use_clustering             = ENABLE_CLUSTERING,
 	.proc_name                  = driver_name,
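
The two hptiop hunks correct a conversion-width bug rather than a cosmetic issue: header.context is, by the look of the fix, a 32-bit little-endian field paired with a separate context_hi32, so building it with cpu_to_le64() stored a 64-bit byte-swap into a 32-bit slot, and on big-endian machines the payload would land in the wrong half. A sketch of the corrected pattern, with the field types assumed:

#include <linux/types.h>
#include <asm/byteorder.h>

struct req_header {			/* assumed layout */
	__le32 context;
	__le32 context_hi32;
};

static void set_context(struct req_header *h, u32 tag)
{
	/* match the conversion width to the field width */
	h->context	= cpu_to_le32(tag);
	h->context_hi32 = 0;		/* all-zero is endian-neutral */
}
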
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index 68e5c63..93c3fc2 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -60,31 +60,6 @@
 
 #define IDESCSI_DEBUG_LOG		0
 
-typedef struct idescsi_pc_s {
-	u8 c[12];				/* Actual packet bytes */
-	int request_transfer;			/* Bytes to transfer */
-	int actually_transferred;		/* Bytes actually transferred */
-	int buffer_size;			/* Size of our data buffer */
-	struct request *rq;			/* The corresponding request */
-	u8 *buffer;				/* Data buffer */
-	u8 *current_position;			/* Pointer into the above buffer */
-	struct scatterlist *sg;			/* Scatter gather table */
-	unsigned int sg_cnt;			/* Number of entries in sg */
-	int b_count;				/* Bytes transferred from current entry */
-	struct scsi_cmnd *scsi_cmd;		/* SCSI command */
-	void (*done)(struct scsi_cmnd *);	/* Scsi completion routine */
-	unsigned long flags;			/* Status/Action flags */
-	unsigned long timeout;			/* Command timeout */
-} idescsi_pc_t;
-
-/*
- *	Packet command status bits.
- */
-#define PC_DMA_IN_PROGRESS		0	/* 1 while DMA in progress */
-#define PC_WRITING			1	/* Data direction */
-#define PC_TIMEDOUT			3	/* command timed out */
-#define PC_DMA_OK			4	/* Use DMA */
-
 /*
  *	SCSI command transformation layer
  */
@@ -101,14 +76,15 @@
 	struct gendisk		*disk;
 	struct Scsi_Host	*host;
 
-	idescsi_pc_t *pc;			/* Current packet command */
+	struct ide_atapi_pc *pc;		/* Current packet command */
 	unsigned long flags;			/* Status/Action flags */
 	unsigned long transform;		/* SCSI cmd translation layer */
 	unsigned long log;			/* log flags */
 } idescsi_scsi_t;
 
 static DEFINE_MUTEX(idescsi_ref_mutex);
-static int idescsi_nocd;			/* Set by module param to skip cd */
+/* Set by module param to skip cd */
+static int idescsi_nocd;
 
 #define ide_scsi_g(disk) \
 	container_of((disk)->private_data, struct ide_scsi_obj, driver)
@@ -152,22 +128,11 @@
  */
 #define IDESCSI_PC_RQ			90
 
-static void idescsi_discard_data (ide_drive_t *drive, unsigned int bcount)
-{
-	while (bcount--)
-		(void) HWIF(drive)->INB(IDE_DATA_REG);
-}
-
-static void idescsi_output_zeros (ide_drive_t *drive, unsigned int bcount)
-{
-	while (bcount--)
-		HWIF(drive)->OUTB(0, IDE_DATA_REG);
-}
-
 /*
  *	PIO data transfer routines using the scatter gather table.
  */
-static void idescsi_input_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsigned int bcount)
+static void idescsi_input_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
+		unsigned int bcount)
 {
 	int count;
 	char *buf;
@@ -200,11 +165,12 @@
 
 	if (bcount) {
 		printk (KERN_ERR "ide-scsi: scatter gather table too small, discarding data\n");
-		idescsi_discard_data (drive, bcount);
+		ide_atapi_discard_data(drive, bcount);
 	}
 }
 
-static void idescsi_output_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsigned int bcount)
+static void idescsi_output_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
+		unsigned int bcount)
 {
 	int count;
 	char *buf;
@@ -237,7 +203,7 @@
 
 	if (bcount) {
 		printk (KERN_ERR "ide-scsi: scatter gather table too small, padding with zeros\n");
-		idescsi_output_zeros (drive, bcount);
+		ide_atapi_write_zeros(drive, bcount);
 	}
 }
 
@@ -246,15 +212,16 @@
 	print_hex_dump(KERN_CONT, "", DUMP_PREFIX_NONE, 16, 1, data, len, 0);
 }
 
-static int idescsi_check_condition(ide_drive_t *drive, struct request *failed_command)
+static int idescsi_check_condition(ide_drive_t *drive,
+		struct request *failed_cmd)
 {
 	idescsi_scsi_t *scsi = drive_to_idescsi(drive);
-	idescsi_pc_t   *pc;
+	struct ide_atapi_pc   *pc;
 	struct request *rq;
 	u8             *buf;
 
 	/* stuff a sense request in front of our current request */
-	pc = kzalloc(sizeof(idescsi_pc_t), GFP_ATOMIC);
+	pc = kzalloc(sizeof(struct ide_atapi_pc), GFP_ATOMIC);
 	rq = kmalloc(sizeof(struct request), GFP_ATOMIC);
 	buf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_ATOMIC);
 	if (!pc || !rq || !buf) {
@@ -266,14 +233,14 @@
 	ide_init_drive_cmd(rq);
 	rq->special = (char *) pc;
 	pc->rq = rq;
-	pc->buffer = buf;
+	pc->buf = buf;
 	pc->c[0] = REQUEST_SENSE;
-	pc->c[4] = pc->request_transfer = pc->buffer_size = SCSI_SENSE_BUFFERSIZE;
+	pc->c[4] = pc->req_xfer = pc->buf_size = SCSI_SENSE_BUFFERSIZE;
 	rq->cmd_type = REQ_TYPE_SENSE;
 	pc->timeout = jiffies + WAIT_READY;
 	/* NOTE! Save the failed packet command in "rq->buffer" */
-	rq->buffer = (void *) failed_command->special;
-	pc->scsi_cmd = ((idescsi_pc_t *) failed_command->special)->scsi_cmd;
+	rq->buffer = (void *) failed_cmd->special;
+	pc->scsi_cmd = ((struct ide_atapi_pc *) failed_cmd->special)->scsi_cmd;
 	if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) {
 		printk ("ide-scsi: %s: queue cmd = ", drive->name);
 		ide_scsi_hex_dump(pc->c, 6);
@@ -287,9 +254,12 @@
 static ide_startstop_t
 idescsi_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
 {
+	ide_hwif_t *hwif = drive->hwif;
+
 	if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT))
 		/* force an abort */
-		HWIF(drive)->OUTB(WIN_IDLEIMMEDIATE,IDE_COMMAND_REG);
+		hwif->OUTB(WIN_IDLEIMMEDIATE,
+			   hwif->io_ports[IDE_COMMAND_OFFSET]);
 
 	rq->errors++;
 
@@ -303,7 +273,7 @@
 {
 #if IDESCSI_DEBUG_LOG
 	printk(KERN_WARNING "idescsi_atapi_abort called for %lu\n",
-			((idescsi_pc_t *) rq->special)->scsi_cmd->serial_number);
+		((struct ide_atapi_pc *) rq->special)->scsi_cmd->serial_number);
 #endif
 	rq->errors |= ERROR_MAX;
 
@@ -316,7 +286,7 @@
 {
 	idescsi_scsi_t *scsi = drive_to_idescsi(drive);
 	struct request *rq = HWGROUP(drive)->rq;
-	idescsi_pc_t *pc = (idescsi_pc_t *) rq->special;
+	struct ide_atapi_pc *pc = (struct ide_atapi_pc *) rq->special;
 	int log = test_bit(IDESCSI_LOG_CMD, &scsi->log);
 	struct Scsi_Host *host;
 	int errors = rq->errors;
@@ -328,20 +298,23 @@
 	}
 	ide_end_drive_cmd (drive, 0, 0);
 	if (blk_sense_request(rq)) {
-		idescsi_pc_t *opc = (idescsi_pc_t *) rq->buffer;
+		struct ide_atapi_pc *opc = (struct ide_atapi_pc *) rq->buffer;
 		if (log) {
 			printk ("ide-scsi: %s: wrap up check %lu, rst = ", drive->name, opc->scsi_cmd->serial_number);
-			ide_scsi_hex_dump(pc->buffer, 16);
+			ide_scsi_hex_dump(pc->buf, 16);
 		}
-		memcpy((void *) opc->scsi_cmd->sense_buffer, pc->buffer, SCSI_SENSE_BUFFERSIZE);
-		kfree(pc->buffer);
+		memcpy((void *) opc->scsi_cmd->sense_buffer, pc->buf,
+			SCSI_SENSE_BUFFERSIZE);
+		kfree(pc->buf);
 		kfree(pc);
 		kfree(rq);
 		pc = opc;
 		rq = pc->rq;
 		pc->scsi_cmd->result = (CHECK_CONDITION << 1) |
-					((test_bit(PC_TIMEDOUT, &pc->flags)?DID_TIME_OUT:DID_OK) << 16);
-	} else if (test_bit(PC_TIMEDOUT, &pc->flags)) {
+				(((pc->flags & PC_FLAG_TIMEDOUT) ?
+				  DID_TIME_OUT :
+				  DID_OK) << 16);
+	} else if (pc->flags & PC_FLAG_TIMEDOUT) {
 		if (log)
 			printk (KERN_WARNING "ide-scsi: %s: timed out for %lu\n",
 					drive->name, pc->scsi_cmd->serial_number);
@@ -370,7 +343,7 @@
 	return 0;
 }
 
-static inline unsigned long get_timeout(idescsi_pc_t *pc)
+static inline unsigned long get_timeout(struct ide_atapi_pc *pc)
 {
 	return max_t(unsigned long, WAIT_CMD, pc->timeout - jiffies);
 }
@@ -378,12 +351,12 @@
 static int idescsi_expiry(ide_drive_t *drive)
 {
 	idescsi_scsi_t *scsi = drive_to_idescsi(drive);
-	idescsi_pc_t   *pc   = scsi->pc;
+	struct ide_atapi_pc   *pc   = scsi->pc;
 
 #if IDESCSI_DEBUG_LOG
 	printk(KERN_WARNING "idescsi_expiry called for %lu at %lu\n", pc->scsi_cmd->serial_number, jiffies);
 #endif
-	set_bit(PC_TIMEDOUT, &pc->flags);
+	pc->flags |= PC_FLAG_TIMEDOUT;
 
 	return 0;					/* we do not want the ide subsystem to retry */
 }
@@ -395,7 +368,7 @@
 {
 	idescsi_scsi_t *scsi = drive_to_idescsi(drive);
 	ide_hwif_t *hwif = drive->hwif;
-	idescsi_pc_t *pc = scsi->pc;
+	struct ide_atapi_pc *pc = scsi->pc;
 	struct request *rq = pc->rq;
 	unsigned int temp;
 	u16 bcount;
@@ -405,7 +378,7 @@
 	printk (KERN_INFO "ide-scsi: Reached idescsi_pc_intr interrupt handler\n");
 #endif /* IDESCSI_DEBUG_LOG */
 
-	if (test_bit(PC_TIMEDOUT, &pc->flags)){
+	if (pc->flags & PC_FLAG_TIMEDOUT) {
 #if IDESCSI_DEBUG_LOG
 		printk(KERN_WARNING "idescsi_pc_intr: got timed out packet  %lu at %lu\n",
 				pc->scsi_cmd->serial_number, jiffies);
@@ -414,11 +387,12 @@
 		idescsi_end_request (drive, 1, 0);
 		return ide_stopped;
 	}
-	if (test_and_clear_bit (PC_DMA_IN_PROGRESS, &pc->flags)) {
+	if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
+		pc->flags &= ~PC_FLAG_DMA_IN_PROGRESS;
 #if IDESCSI_DEBUG_LOG
 		printk ("ide-scsi: %s: DMA complete\n", drive->name);
 #endif /* IDESCSI_DEBUG_LOG */
-		pc->actually_transferred=pc->request_transfer;
+		pc->xferred = pc->req_xfer;
 		(void) HWIF(drive)->ide_dma_end(drive);
 	}
 
@@ -428,42 +402,44 @@
 	if ((stat & DRQ_STAT) == 0) {
 		/* No more interrupts */
 		if (test_bit(IDESCSI_LOG_CMD, &scsi->log))
-			printk (KERN_INFO "Packet command completed, %d bytes transferred\n", pc->actually_transferred);
+			printk(KERN_INFO "Packet command completed, %d bytes"
+					" transferred\n", pc->xferred);
 		local_irq_enable_in_hardirq();
 		if (stat & ERR_STAT)
 			rq->errors++;
 		idescsi_end_request (drive, 1, 0);
 		return ide_stopped;
 	}
-	bcount = (hwif->INB(IDE_BCOUNTH_REG) << 8) |
-		  hwif->INB(IDE_BCOUNTL_REG);
-	ireason = hwif->INB(IDE_IREASON_REG);
+	bcount = (hwif->INB(hwif->io_ports[IDE_BCOUNTH_OFFSET]) << 8) |
+		  hwif->INB(hwif->io_ports[IDE_BCOUNTL_OFFSET]);
+	ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
 
 	if (ireason & CD) {
 		printk(KERN_ERR "ide-scsi: CoD != 0 in idescsi_pc_intr\n");
 		return ide_do_reset (drive);
 	}
 	if (ireason & IO) {
-		temp = pc->actually_transferred + bcount;
-		if (temp > pc->request_transfer) {
-			if (temp > pc->buffer_size) {
+		temp = pc->xferred + bcount;
+		if (temp > pc->req_xfer) {
+			if (temp > pc->buf_size) {
 				printk(KERN_ERR "ide-scsi: The scsi wants to "
 					"send us more data than expected "
 					"- discarding data\n");
-				temp = pc->buffer_size - pc->actually_transferred;
+				temp = pc->buf_size - pc->xferred;
 				if (temp) {
-					clear_bit(PC_WRITING, &pc->flags);
+					pc->flags &= ~PC_FLAG_WRITING;
 					if (pc->sg)
-						idescsi_input_buffers(drive, pc, temp);
+						idescsi_input_buffers(drive, pc,
+									temp);
 					else
-						drive->hwif->atapi_input_bytes(drive, pc->current_position, temp);
+						drive->hwif->atapi_input_bytes(drive, pc->cur_pos, temp);
 					printk(KERN_ERR "ide-scsi: transferred"
 							" %d of %d bytes\n",
 							temp, bcount);
 				}
-				pc->actually_transferred += temp;
-				pc->current_position += temp;
-				idescsi_discard_data(drive, bcount - temp);
+				pc->xferred += temp;
+				pc->cur_pos += temp;
+				ide_atapi_discard_data(drive, bcount - temp);
 				ide_set_handler(drive, &idescsi_pc_intr, get_timeout(pc), idescsi_expiry);
 				return ide_started;
 			}
@@ -473,23 +449,23 @@
 		}
 	}
 	if (ireason & IO) {
-		clear_bit(PC_WRITING, &pc->flags);
+		pc->flags &= ~PC_FLAG_WRITING;
 		if (pc->sg)
 			idescsi_input_buffers(drive, pc, bcount);
 		else
-			hwif->atapi_input_bytes(drive, pc->current_position,
+			hwif->atapi_input_bytes(drive, pc->cur_pos,
 						bcount);
 	} else {
-		set_bit(PC_WRITING, &pc->flags);
+		pc->flags |= PC_FLAG_WRITING;
 		if (pc->sg)
 			idescsi_output_buffers(drive, pc, bcount);
 		else
-			hwif->atapi_output_bytes(drive, pc->current_position,
+			hwif->atapi_output_bytes(drive, pc->cur_pos,
 						 bcount);
 	}
 	/* Update the current position */
-	pc->actually_transferred += bcount;
-	pc->current_position += bcount;
+	pc->xferred += bcount;
+	pc->cur_pos += bcount;
 
 	/* And set the interrupt handler again */
 	ide_set_handler(drive, &idescsi_pc_intr, get_timeout(pc), idescsi_expiry);
@@ -500,7 +476,7 @@
 {
 	ide_hwif_t *hwif = drive->hwif;
 	idescsi_scsi_t *scsi = drive_to_idescsi(drive);
-	idescsi_pc_t *pc = scsi->pc;
+	struct ide_atapi_pc *pc = scsi->pc;
 	ide_startstop_t startstop;
 	u8 ireason;
 
@@ -509,7 +485,7 @@
 			"initiated yet DRQ isn't asserted\n");
 		return startstop;
 	}
-	ireason = hwif->INB(IDE_IREASON_REG);
+	ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
 	if ((ireason & CD) == 0 || (ireason & IO)) {
 		printk(KERN_ERR "ide-scsi: (IO,CoD) != (0,1) while "
 				"issuing a packet command\n");
@@ -520,34 +496,34 @@
 	ide_set_handler(drive, &idescsi_pc_intr, get_timeout(pc), idescsi_expiry);
 	/* Send the actual packet */
 	drive->hwif->atapi_output_bytes(drive, scsi->pc->c, 12);
-	if (test_bit (PC_DMA_OK, &pc->flags)) {
-		set_bit (PC_DMA_IN_PROGRESS, &pc->flags);
+	if (pc->flags & PC_FLAG_DMA_OK) {
+		pc->flags |= PC_FLAG_DMA_IN_PROGRESS;
 		hwif->dma_start(drive);
 	}
 	return ide_started;
 }
 
-static inline int idescsi_set_direction(idescsi_pc_t *pc)
+static inline int idescsi_set_direction(struct ide_atapi_pc *pc)
 {
 	switch (pc->c[0]) {
 		case READ_6: case READ_10: case READ_12:
-			clear_bit(PC_WRITING, &pc->flags);
+			pc->flags &= ~PC_FLAG_WRITING;
 			return 0;
 		case WRITE_6: case WRITE_10: case WRITE_12:
-			set_bit(PC_WRITING, &pc->flags);
+			pc->flags |= PC_FLAG_WRITING;
 			return 0;
 		default:
 			return 1;
 	}
 }
 
-static int idescsi_map_sg(ide_drive_t *drive, idescsi_pc_t *pc)
+static int idescsi_map_sg(ide_drive_t *drive, struct ide_atapi_pc *pc)
 {
 	ide_hwif_t *hwif = drive->hwif;
 	struct scatterlist *sg, *scsi_sg;
 	int segments;
 
-	if (!pc->request_transfer || pc->request_transfer % 1024)
+	if (!pc->req_xfer || pc->req_xfer % 1024)
 		return 1;
 
 	if (idescsi_set_direction(pc))
@@ -566,21 +542,21 @@
 	return 0;
 }
 
-/*
- *	Issue a packet command
- */
-static ide_startstop_t idescsi_issue_pc (ide_drive_t *drive, idescsi_pc_t *pc)
+static ide_startstop_t idescsi_issue_pc(ide_drive_t *drive,
+		struct ide_atapi_pc *pc)
 {
 	idescsi_scsi_t *scsi = drive_to_idescsi(drive);
 	ide_hwif_t *hwif = drive->hwif;
 	u16 bcount;
 	u8 dma = 0;
 
-	scsi->pc=pc;							/* Set the current packet command */
-	pc->actually_transferred=0;					/* We haven't transferred any data yet */
-	pc->current_position=pc->buffer;
+	/* Set the current packet command */
+	scsi->pc = pc;
+	/* We haven't transferred any data yet */
+	pc->xferred = 0;
+	pc->cur_pos = pc->buf;
 	/* Request to transfer the entire buffer at once */
-	bcount = min(pc->request_transfer, 63 * 1024);
+	bcount = min(pc->req_xfer, 63 * 1024);
 
 	if (drive->using_dma && !idescsi_map_sg(drive, pc)) {
 		hwif->sg_mapped = 1;
@@ -591,7 +567,7 @@
 	ide_pktcmd_tf_load(drive, IDE_TFLAG_NO_SELECT_MASK, bcount, dma);
 
 	if (dma)
-		set_bit(PC_DMA_OK, &pc->flags);
+		pc->flags |= PC_FLAG_DMA_OK;
 
 	if (test_bit(IDESCSI_DRQ_INTERRUPT, &scsi->flags)) {
 		ide_execute_command(drive, WIN_PACKETCMD, &idescsi_transfer_pc,
@@ -599,7 +575,7 @@
 		return ide_started;
 	} else {
 		/* Issue the packet command */
-		HWIF(drive)->OUTB(WIN_PACKETCMD, IDE_COMMAND_REG);
+		hwif->OUTB(WIN_PACKETCMD, hwif->io_ports[IDE_COMMAND_OFFSET]);
 		return idescsi_transfer_pc(drive);
 	}
 }
@@ -615,7 +591,8 @@
 #endif /* IDESCSI_DEBUG_LOG */
 
 	if (blk_sense_request(rq) || blk_special_request(rq)) {
-		return idescsi_issue_pc (drive, (idescsi_pc_t *) rq->special);
+		return idescsi_issue_pc(drive,
+				(struct ide_atapi_pc *) rq->special);
 	}
 	blk_dump_rq_flags(rq, "ide-scsi: unsup command");
 	idescsi_end_request (drive, 0, 0);
@@ -773,15 +750,15 @@
 	idescsi_scsi_t *scsi = scsihost_to_idescsi(host);
 	ide_drive_t *drive = scsi->drive;
 	struct request *rq = NULL;
-	idescsi_pc_t *pc = NULL;
+	struct ide_atapi_pc *pc = NULL;
 
 	if (!drive) {
 		scmd_printk (KERN_ERR, cmd, "drive not present\n");
 		goto abort;
 	}
 	scsi = drive_to_idescsi(drive);
-	pc = kmalloc (sizeof (idescsi_pc_t), GFP_ATOMIC);
-	rq = kmalloc (sizeof (struct request), GFP_ATOMIC);
+	pc = kmalloc(sizeof(struct ide_atapi_pc), GFP_ATOMIC);
+	rq = kmalloc(sizeof(struct request), GFP_ATOMIC);
 	if (rq == NULL || pc == NULL) {
 		printk (KERN_ERR "ide-scsi: %s: out of memory\n", drive->name);
 		goto abort;
@@ -791,11 +768,11 @@
 	pc->flags = 0;
 	pc->rq = rq;
 	memcpy (pc->c, cmd->cmnd, cmd->cmd_len);
-	pc->buffer = NULL;
+	pc->buf = NULL;
 	pc->sg = scsi_sglist(cmd);
 	pc->sg_cnt = scsi_sg_count(cmd);
 	pc->b_count = 0;
-	pc->request_transfer = pc->buffer_size = scsi_bufflen(cmd);
+	pc->req_xfer = pc->buf_size = scsi_bufflen(cmd);
 	pc->scsi_cmd = cmd;
 	pc->done = done;
 	pc->timeout = jiffies + cmd->timeout_per_command;
@@ -866,7 +843,7 @@
 		printk (KERN_ERR "ide-scsi: cmd aborted!\n");
 
 		if (blk_sense_request(scsi->pc->rq))
-			kfree(scsi->pc->buffer);
+			kfree(scsi->pc->buf);
 		kfree(scsi->pc->rq);
 		kfree(scsi->pc);
 		scsi->pc = NULL;
@@ -916,7 +893,7 @@
 	if (__blk_end_request(req, -EIO, 0))
 		BUG();
 	if (blk_sense_request(req))
-		kfree(scsi->pc->buffer);
+		kfree(scsi->pc->buf);
 	kfree(scsi->pc);
 	scsi->pc = NULL;
 	kfree(req);
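
Beyond moving ide-scsi onto the shared struct ide_atapi_pc (buf/buf_size/req_xfer/xferred/cur_pos replacing the private field names), these hunks also change how the status flags are kept: the atomic set_bit()/clear_bit()/test_bit() calls on pc->flags become plain mask operations on PC_FLAG_* values. The atomic bitops imply locked read-modify-write cycles, which buy nothing when a single context owns the flags word, and masks let several flags be set, cleared or tested in one expression. A small sketch (flag values illustrative, not the real PC_FLAG_* definitions):

#define EX_FLAG_DMA_OK		(1 << 0)
#define EX_FLAG_WRITING		(1 << 1)
#define EX_FLAG_TIMEDOUT	(1 << 2)

static void example_mark_dma_write(unsigned long *flags)
{
	/* plain RMW: no locked bitop needed for a single-owner word,
	 * and multiple flags fit in one statement */
	*flags |= EX_FLAG_DMA_OK | EX_FLAG_WRITING;
	*flags &= ~EX_FLAG_TIMEDOUT;
}
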
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index 0cc8868..dbae3fd 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -2581,8 +2581,8 @@
 	/* Map the sense buffer into bus memory */
 	dma_addr = dma_map_single(&host->pci_dev->dev, cmnd->sense_buffer,
 				  SENSE_SIZE, DMA_FROM_DEVICE);
-	cblk->senseptr = cpu_to_le32((u32)dma_addr);
-	cblk->senselen = cpu_to_le32(SENSE_SIZE);
+	cblk->senseptr = (u32)dma_addr;
+	cblk->senselen = SENSE_SIZE;
 	cmnd->SCp.ptr = (char *)(unsigned long)dma_addr;
 	cblk->cdblen = cmnd->cmd_len;
 
@@ -2606,7 +2606,7 @@
 		dma_addr = dma_map_single(&host->pci_dev->dev, &cblk->sglist[0],
 					  sizeof(struct sg_entry) * TOTAL_SG_ENTRY,
 					  DMA_BIDIRECTIONAL);
-		cblk->bufptr = cpu_to_le32((u32)dma_addr);
+		cblk->bufptr = (u32)dma_addr;
 		cmnd->SCp.dma_handle = dma_addr;
 
 		cblk->sglen = nseg;
@@ -2616,7 +2616,8 @@
 		sg = &cblk->sglist[0];
 		scsi_for_each_sg(cmnd, sglist, cblk->sglen, i) {
 			sg->data = cpu_to_le32((u32)sg_dma_address(sglist));
-			total_len += sg->len = cpu_to_le32((u32)sg_dma_len(sglist));
+			sg->len = cpu_to_le32((u32)sg_dma_len(sglist));
+			total_len += sg_dma_len(sglist);
 			++sg;
 		}
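
The last initio hunk fixes a genuine endianness bug hiding in a chained assignment: total_len += sg->len = cpu_to_le32(len) accumulates the little-endian on-wire representation into a CPU-order counter, which is only correct on little-endian hosts. (The earlier hunks in this file drop cpu_to_le32() from senseptr/senselen/bufptr, leaving those fields in CPU byte order.) The corrected accumulation, sketched with an assumed sg_entry layout:

#include <linux/types.h>
#include <asm/byteorder.h>

struct sg_entry {			/* assumed layout */
	__le32 data;
	__le32 len;
};

static u32 fill_sg_entry(struct sg_entry *sg, u32 dma_addr, u32 dma_len,
			 u32 total_len)
{
	/* store the device-visible fields little-endian, but do
	 * the arithmetic on the native-endian value */
	sg->data = cpu_to_le32(dma_addr);
	sg->len	 = cpu_to_le32(dma_len);
	return total_len + dma_len;
}
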
 
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index c72014a..65dc18d 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -3937,7 +3937,7 @@
 	if (ipr_is_gata(res) && res->sata_port) {
 		ap = res->sata_port->ap;
 		spin_unlock_irq(scsi_cmd->device->host->host_lock);
-		ata_do_eh(ap, NULL, NULL, ipr_sata_reset, NULL);
+		ata_std_error_handler(ap);
 		spin_lock_irq(scsi_cmd->device->host->host_lock);
 
 		list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
@@ -5041,33 +5041,6 @@
 }
 
 /**
- * ipr_tf_read - Read the current ATA taskfile for the ATA port
- * @ap:	ATA port
- * @tf:	destination ATA taskfile
- *
- * Return value:
- * 	none
- **/
-static void ipr_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
-{
-	struct ipr_sata_port *sata_port = ap->private_data;
-	struct ipr_ioasa_gata *g = &sata_port->ioasa;
-
-	tf->feature = g->error;
-	tf->nsect = g->nsect;
-	tf->lbal = g->lbal;
-	tf->lbam = g->lbam;
-	tf->lbah = g->lbah;
-	tf->device = g->device;
-	tf->command = g->status;
-	tf->hob_nsect = g->hob_nsect;
-	tf->hob_lbal = g->hob_lbal;
-	tf->hob_lbam = g->hob_lbam;
-	tf->hob_lbah = g->hob_lbah;
-	tf->ctl = g->alt_status;
-}
-
-/**
  * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
  * @regs:	destination
  * @tf:	source ATA taskfile
@@ -5245,40 +5218,41 @@
 }
 
 /**
- * ipr_ata_check_status - Return last ATA status
- * @ap:	ATA port
+ * ipr_qc_fill_rtf - Read result TF
+ * @qc: ATA queued command
  *
  * Return value:
- * 	ATA status
+ * 	true
  **/
-static u8 ipr_ata_check_status(struct ata_port *ap)
+static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
 {
-	struct ipr_sata_port *sata_port = ap->private_data;
-	return sata_port->ioasa.status;
-}
+	struct ipr_sata_port *sata_port = qc->ap->private_data;
+	struct ipr_ioasa_gata *g = &sata_port->ioasa;
+	struct ata_taskfile *tf = &qc->result_tf;
 
-/**
- * ipr_ata_check_altstatus - Return last ATA altstatus
- * @ap:	ATA port
- *
- * Return value:
- * 	Alt ATA status
- **/
-static u8 ipr_ata_check_altstatus(struct ata_port *ap)
-{
-	struct ipr_sata_port *sata_port = ap->private_data;
-	return sata_port->ioasa.alt_status;
+	tf->feature = g->error;
+	tf->nsect = g->nsect;
+	tf->lbal = g->lbal;
+	tf->lbam = g->lbam;
+	tf->lbah = g->lbah;
+	tf->device = g->device;
+	tf->command = g->status;
+	tf->hob_nsect = g->hob_nsect;
+	tf->hob_lbal = g->hob_lbal;
+	tf->hob_lbam = g->hob_lbam;
+	tf->hob_lbah = g->hob_lbah;
+	tf->ctl = g->alt_status;
+
+	return true;
 }
 
 static struct ata_port_operations ipr_sata_ops = {
-	.check_status = ipr_ata_check_status,
-	.check_altstatus = ipr_ata_check_altstatus,
-	.dev_select = ata_noop_dev_select,
 	.phy_reset = ipr_ata_phy_reset,
+	.hardreset = ipr_sata_reset,
 	.post_internal_cmd = ipr_ata_post_internal,
-	.tf_read = ipr_tf_read,
 	.qc_prep = ata_noop_qc_prep,
 	.qc_issue = ipr_qc_issue,
+	.qc_fill_rtf = ipr_qc_fill_rtf,
 	.port_start = ata_sas_port_start,
 	.port_stop = ata_sas_port_stop
 };
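
The ipr changes track a libata interface rework: the per-register .tf_read/.check_status/.check_altstatus callbacks are gone, replaced by a single .qc_fill_rtf() that copies the whole result taskfile into qc->result_tf, and the driver's reset routine now lives in .hardreset so the generic ata_std_error_handler() can run EH instead of an explicit ata_do_eh(..., ipr_sata_reset, ...) call. The minimal shape of the new callback, with hypothetical driver helpers:

#include <linux/libata.h>

/* hypothetical stand-ins for driver specifics */
static void example_read_hw_tf(struct ata_port *ap, struct ata_taskfile *tf);
static unsigned int example_qc_issue(struct ata_queued_cmd *qc);
static int example_hardreset(struct ata_link *link, unsigned int *class,
			     unsigned long deadline);

static bool example_qc_fill_rtf(struct ata_queued_cmd *qc)
{
	/* one call replaces the old register-at-a-time reads;
	 * returning true tells libata that result_tf is valid */
	example_read_hw_tf(qc->ap, &qc->result_tf);
	return true;
}

static struct ata_port_operations example_sata_ops = {
	.hardreset	= example_hardreset,	/* was passed to ata_do_eh() */
	.qc_issue	= example_qc_issue,
	.qc_fill_rtf	= example_qc_fill_rtf,
};
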
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 7ed568f..7c615c7 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -2377,7 +2377,7 @@
 			if (inb(ha->io_addr + IPS_REG_FLDP) != 0x55)
 				return;
 
-			outl(cpu_to_le32(1), ha->io_addr + IPS_REG_FLAP);
+			outl(1, ha->io_addr + IPS_REG_FLAP);
 			if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
 				udelay(25);	/* 25 us */
 
@@ -2385,21 +2385,21 @@
 				return;
 
 			/* Get Major version */
-			outl(cpu_to_le32(0x1FF), ha->io_addr + IPS_REG_FLAP);
+			outl(0x1FF, ha->io_addr + IPS_REG_FLAP);
 			if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
 				udelay(25);	/* 25 us */
 
 			major = inb(ha->io_addr + IPS_REG_FLDP);
 
 			/* Get Minor version */
-			outl(cpu_to_le32(0x1FE), ha->io_addr + IPS_REG_FLAP);
+			outl(0x1FE, ha->io_addr + IPS_REG_FLAP);
 			if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
 				udelay(25);	/* 25 us */
 
 			minor = inb(ha->io_addr + IPS_REG_FLDP);
 
 			/* Get SubMinor version */
-			outl(cpu_to_le32(0x1FD), ha->io_addr + IPS_REG_FLAP);
+			outl(0x1FD, ha->io_addr + IPS_REG_FLAP);
 			if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
 				udelay(25);	/* 25 us */
 
@@ -3502,27 +3502,11 @@
 static void
 ips_scmd_buf_write(struct scsi_cmnd *scmd, void *data, unsigned int count)
 {
-        int i;
-        unsigned int min_cnt, xfer_cnt;
-        char *cdata = (char *) data;
-        unsigned char *buffer;
-        unsigned long flags;
-        struct scatterlist *sg = scsi_sglist(scmd);
+	unsigned long flags;
 
-        for (i = 0, xfer_cnt = 0;
-             (i < scsi_sg_count(scmd)) && (xfer_cnt < count); i++) {
-                min_cnt = min(count - xfer_cnt, sg[i].length);
-
-                /* kmap_atomic() ensures addressability of the data buffer.*/
-                /* local_irq_save() protects the KM_IRQ0 address slot.     */
-                local_irq_save(flags);
-                buffer = kmap_atomic(sg_page(&sg[i]), KM_IRQ0) + sg[i].offset;
-                memcpy(buffer, &cdata[xfer_cnt], min_cnt);
-                kunmap_atomic(buffer - sg[i].offset, KM_IRQ0);
-                local_irq_restore(flags);
-
-                xfer_cnt += min_cnt;
-        }
+	local_irq_save(flags);
+	scsi_sg_copy_from_buffer(scmd, data, count);
+	local_irq_restore(flags);
 }
 
 /****************************************************************************/
@@ -3535,27 +3519,11 @@
 static void
 ips_scmd_buf_read(struct scsi_cmnd *scmd, void *data, unsigned int count)
 {
-        int i;
-        unsigned int min_cnt, xfer_cnt;
-        char *cdata = (char *) data;
-        unsigned char *buffer;
-        unsigned long flags;
-        struct scatterlist *sg = scsi_sglist(scmd);
+	unsigned long flags;
 
-        for (i = 0, xfer_cnt = 0;
-             (i < scsi_sg_count(scmd)) && (xfer_cnt < count); i++) {
-                min_cnt = min(count - xfer_cnt, sg[i].length);
-
-                /* kmap_atomic() ensures addressability of the data buffer.*/
-                /* local_irq_save() protects the KM_IRQ0 address slot.     */
-                local_irq_save(flags);
-                buffer = kmap_atomic(sg_page(&sg[i]), KM_IRQ0) + sg[i].offset;
-                memcpy(&cdata[xfer_cnt], buffer, min_cnt);
-                kunmap_atomic(buffer - sg[i].offset, KM_IRQ0);
-                local_irq_restore(flags);
-
-                xfer_cnt += min_cnt;
-        }
+	local_irq_save(flags);
+	scsi_sg_copy_to_buffer(scmd, data, count);
+	local_irq_restore(flags);
 }
 
 /****************************************************************************/
@@ -3696,9 +3664,7 @@
 			scb->cmd.basic_io.sg_count = scb->sg_len;
 
 			if (scb->cmd.basic_io.lba)
-				scb->cmd.basic_io.lba =
-				    cpu_to_le32(le32_to_cpu
-						(scb->cmd.basic_io.lba) +
+				le32_add_cpu(&scb->cmd.basic_io.lba,
 						le16_to_cpu(scb->cmd.basic_io.
 							    sector_count));
 			else
@@ -3744,9 +3710,7 @@
 			scb->cmd.basic_io.sg_count = scb->sg_len;
 
 			if (scb->cmd.basic_io.lba)
-				scb->cmd.basic_io.lba =
-				    cpu_to_le32(le32_to_cpu
-						(scb->cmd.basic_io.lba) +
+				le32_add_cpu(&scb->cmd.basic_io.lba,
 						le16_to_cpu(scb->cmd.basic_io.
 							    sector_count));
 			else
@@ -4888,7 +4852,7 @@
 		return (0);
 
 	/* setup CCCR */
-	outl(cpu_to_le32(0x1010), ha->io_addr + IPS_REG_CCCR);
+	outl(0x1010, ha->io_addr + IPS_REG_CCCR);
 
 	/* Enable busmastering */
 	outb(IPS_BIT_EBM, ha->io_addr + IPS_REG_SCPR);
@@ -5270,12 +5234,12 @@
 	ha->adapt->p_status_tail = ha->adapt->status;
 
 	phys_status_start = ha->adapt->hw_status_start;
-	outl(cpu_to_le32(phys_status_start), ha->io_addr + IPS_REG_SQSR);
-	outl(cpu_to_le32(phys_status_start + IPS_STATUS_Q_SIZE),
+	outl(phys_status_start, ha->io_addr + IPS_REG_SQSR);
+	outl(phys_status_start + IPS_STATUS_Q_SIZE,
 	     ha->io_addr + IPS_REG_SQER);
-	outl(cpu_to_le32(phys_status_start + IPS_STATUS_SIZE),
+	outl(phys_status_start + IPS_STATUS_SIZE,
 	     ha->io_addr + IPS_REG_SQHR);
-	outl(cpu_to_le32(phys_status_start), ha->io_addr + IPS_REG_SQTR);
+	outl(phys_status_start, ha->io_addr + IPS_REG_SQTR);
 
 	ha->adapt->hw_status_tail = phys_status_start;
 }
@@ -5332,7 +5296,7 @@
 		ha->adapt->hw_status_tail = ha->adapt->hw_status_start;
 	}
 
-	outl(cpu_to_le32(ha->adapt->hw_status_tail),
+	outl(ha->adapt->hw_status_tail,
 	     ha->io_addr + IPS_REG_SQTR);
 
 	return (ha->adapt->p_status_tail->value);
@@ -5434,8 +5398,8 @@
 		}		/* end if */
 	}			/* end while */
 
-	outl(cpu_to_le32(scb->scb_busaddr), ha->io_addr + IPS_REG_CCSAR);
-	outw(cpu_to_le32(IPS_BIT_START_CMD), ha->io_addr + IPS_REG_CCCR);
+	outl(scb->scb_busaddr, ha->io_addr + IPS_REG_CCSAR);
+	outw(IPS_BIT_START_CMD, ha->io_addr + IPS_REG_CCCR);
 
 	return (IPS_SUCCESS);
 }
@@ -5520,7 +5484,7 @@
 			  ips_name, ha->host_num, scb->cmd.basic_io.command_id);
 	}
 
-	outl(cpu_to_le32(scb->scb_busaddr), ha->io_addr + IPS_REG_I2O_INMSGQ);
+	outl(scb->scb_busaddr, ha->io_addr + IPS_REG_I2O_INMSGQ);
 
 	return (IPS_SUCCESS);
 }
@@ -6412,7 +6376,7 @@
 
 	for (i = 0; i < buffersize; i++) {
 		/* write a byte */
-		outl(cpu_to_le32(i + offset), ha->io_addr + IPS_REG_FLAP);
+		outl(i + offset, ha->io_addr + IPS_REG_FLAP);
 		if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
 			udelay(25);	/* 25 us */
 
@@ -6597,7 +6561,7 @@
 	if (inb(ha->io_addr + IPS_REG_FLDP) != 0x55)
 		return (1);
 
-	outl(cpu_to_le32(1), ha->io_addr + IPS_REG_FLAP);
+	outl(1, ha->io_addr + IPS_REG_FLAP);
 	if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
 		udelay(25);	/* 25 us */
 	if (inb(ha->io_addr + IPS_REG_FLDP) != 0xAA)
@@ -6606,7 +6570,7 @@
 	checksum = 0xff;
 	for (i = 2; i < buffersize; i++) {
 
-		outl(cpu_to_le32(i + offset), ha->io_addr + IPS_REG_FLAP);
+		outl(i + offset, ha->io_addr + IPS_REG_FLAP);
 		if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
 			udelay(25);	/* 25 us */
 
@@ -6842,7 +6806,6 @@
 	sh->sg_tablesize = sh->hostt->sg_tablesize;
 	sh->can_queue = sh->hostt->can_queue;
 	sh->cmd_per_lun = sh->hostt->cmd_per_lun;
-	sh->unchecked_isa_dma = sh->hostt->unchecked_isa_dma;
 	sh->use_clustering = sh->hostt->use_clustering;
 	sh->max_sectors = 128;
 
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 8a178674..72b9b2a 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -528,6 +528,7 @@
 	struct iscsi_session *session = conn->session;
 	struct scsi_cmnd *sc = ctask->sc;
 	int datasn = be32_to_cpu(rhdr->datasn);
+	unsigned total_in_length = scsi_in(sc)->length;
 
 	iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
 	if (tcp_conn->in.datalen == 0)
@@ -542,10 +543,10 @@
 	tcp_ctask->exp_datasn++;
 
 	tcp_ctask->data_offset = be32_to_cpu(rhdr->offset);
-	if (tcp_ctask->data_offset + tcp_conn->in.datalen > scsi_bufflen(sc)) {
+	if (tcp_ctask->data_offset + tcp_conn->in.datalen > total_in_length) {
 		debug_tcp("%s: data_offset(%d) + data_len(%d) > total_length_in(%d)\n",
 		          __FUNCTION__, tcp_ctask->data_offset,
-		          tcp_conn->in.datalen, scsi_bufflen(sc));
+		          tcp_conn->in.datalen, total_in_length);
 		return ISCSI_ERR_DATA_OFFSET;
 	}
 
@@ -558,8 +559,8 @@
 
 			if (res_count > 0 &&
 			    (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
-			     res_count <= scsi_bufflen(sc)))
-				scsi_set_resid(sc, res_count);
+			     res_count <= total_in_length))
+				scsi_in(sc)->resid = res_count;
 			else
 				sc->result = (DID_BAD_TARGET << 16) |
 					rhdr->cmd_status;
@@ -670,11 +671,11 @@
 			    r2t->data_length, session->max_burst);
 
 	r2t->data_offset = be32_to_cpu(rhdr->data_offset);
-	if (r2t->data_offset + r2t->data_length > scsi_bufflen(ctask->sc)) {
+	if (r2t->data_offset + r2t->data_length > scsi_out(ctask->sc)->length) {
 		iscsi_conn_printk(KERN_ERR, conn,
 				  "invalid R2T with data len %u at offset %u "
 				  "and total length %d\n", r2t->data_length,
-				  r2t->data_offset, scsi_bufflen(ctask->sc));
+				  r2t->data_offset, scsi_out(ctask->sc)->length);
 		__kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
 			    sizeof(void*));
 		return ISCSI_ERR_DATALEN;
@@ -771,6 +772,7 @@
 		if (tcp_conn->in.datalen) {
 			struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
 			struct hash_desc *rx_hash = NULL;
+			struct scsi_data_buffer *sdb = scsi_in(ctask->sc);
 
 			/*
 			 * Setup copy of Data-In into the Scsi_Cmnd
@@ -788,8 +790,8 @@
 				  tcp_ctask->data_offset,
 				  tcp_conn->in.datalen);
 			return iscsi_segment_seek_sg(&tcp_conn->in.segment,
-						     scsi_sglist(ctask->sc),
-						     scsi_sg_count(ctask->sc),
+						     sdb->table.sgl,
+						     sdb->table.nents,
 						     tcp_ctask->data_offset,
 						     tcp_conn->in.datalen,
 						     iscsi_tcp_process_data_in,
@@ -1332,7 +1334,8 @@
 		return 0;
 
 	/* If we have immediate data, attach a payload */
-	err = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc), scsi_sg_count(sc),
+	err = iscsi_tcp_send_data_prep(conn, scsi_out(sc)->table.sgl,
+				       scsi_out(sc)->table.nents,
 				       0, ctask->imm_count);
 	if (err)
 		return err;
@@ -1386,6 +1389,7 @@
 {
 	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
 	struct scsi_cmnd *sc = ctask->sc;
+	struct scsi_data_buffer *sdb = scsi_out(sc);
 	int rc = 0;
 
 flush:
@@ -1412,9 +1416,8 @@
 				ctask->itt, tcp_ctask->sent, ctask->data_count);
 
 		iscsi_tcp_send_hdr_prep(conn, hdr, sizeof(*hdr));
-		rc = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc),
-					      scsi_sg_count(sc),
-					      tcp_ctask->sent,
+		rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl,
+					      sdb->table.nents, tcp_ctask->sent,
 					      ctask->data_count);
 		if (rc)
 			goto fail;
@@ -1460,8 +1463,8 @@
 		iscsi_tcp_send_hdr_prep(conn, &r2t->dtask.hdr,
 					sizeof(struct iscsi_hdr));
 
-		rc = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc),
-					      scsi_sg_count(sc),
+		rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl,
+					      sdb->table.nents,
 					      r2t->data_offset + r2t->sent,
 					      r2t->data_count);
 		if (rc)
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index bdd7de7..010c1b9 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -137,6 +137,70 @@
 	return 0;
 }
 
+/*
+ * make an extended cdb AHS
+ */
+static int iscsi_prep_ecdb_ahs(struct iscsi_cmd_task *ctask)
+{
+	struct scsi_cmnd *cmd = ctask->sc;
+	unsigned rlen, pad_len;
+	unsigned short ahslength;
+	struct iscsi_ecdb_ahdr *ecdb_ahdr;
+	int rc;
+
+	ecdb_ahdr = iscsi_next_hdr(ctask);
+	rlen = cmd->cmd_len - ISCSI_CDB_SIZE;
+
+	BUG_ON(rlen > sizeof(ecdb_ahdr->ecdb));
+	ahslength = rlen + sizeof(ecdb_ahdr->reserved);
+
+	pad_len = iscsi_padding(rlen);
+
+	rc = iscsi_add_hdr(ctask, sizeof(ecdb_ahdr->ahslength) +
+	                   sizeof(ecdb_ahdr->ahstype) + ahslength + pad_len);
+	if (rc)
+		return rc;
+
+	if (pad_len)
+		memset(&ecdb_ahdr->ecdb[rlen], 0, pad_len);
+
+	ecdb_ahdr->ahslength = cpu_to_be16(ahslength);
+	ecdb_ahdr->ahstype = ISCSI_AHSTYPE_CDB;
+	ecdb_ahdr->reserved = 0;
+	memcpy(ecdb_ahdr->ecdb, cmd->cmnd + ISCSI_CDB_SIZE, rlen);
+
+	debug_scsi("iscsi_prep_ecdb_ahs: varlen_cdb_len %d "
+		   "rlen %d pad_len %d ahs_length %d iscsi_headers_size %u\n",
+		   cmd->cmd_len, rlen, pad_len, ahslength, ctask->hdr_len);
+
+	return 0;
+}
+
+static int iscsi_prep_bidi_ahs(struct iscsi_cmd_task *ctask)
+{
+	struct scsi_cmnd *sc = ctask->sc;
+	struct iscsi_rlength_ahdr *rlen_ahdr;
+	int rc;
+
+	rlen_ahdr = iscsi_next_hdr(ctask);
+	rc = iscsi_add_hdr(ctask, sizeof(*rlen_ahdr));
+	if (rc)
+		return rc;
+
+	rlen_ahdr->ahslength =
+		cpu_to_be16(sizeof(rlen_ahdr->read_length) +
+						  sizeof(rlen_ahdr->reserved));
+	rlen_ahdr->ahstype = ISCSI_AHSTYPE_RLENGTH;
+	rlen_ahdr->reserved = 0;
+	rlen_ahdr->read_length = cpu_to_be32(scsi_in(sc)->length);
+
+	debug_scsi("bidi-in rlen_ahdr->read_length(%d) "
+		   "rlen_ahdr->ahslength(%d)\n",
+		   be32_to_cpu(rlen_ahdr->read_length),
+		   be16_to_cpu(rlen_ahdr->ahslength));
+	return 0;
+}
+
 /**
  * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu
  * @ctask: iscsi cmd task
@@ -150,7 +214,7 @@
 	struct iscsi_session *session = conn->session;
 	struct iscsi_cmd *hdr = ctask->hdr;
 	struct scsi_cmnd *sc = ctask->sc;
-	unsigned hdrlength;
+	unsigned hdrlength, cmd_len;
 	int rc;
 
 	ctask->hdr_len = 0;
@@ -161,17 +225,30 @@
 	hdr->flags = ISCSI_ATTR_SIMPLE;
 	int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
 	hdr->itt = build_itt(ctask->itt, session->age);
-	hdr->data_length = cpu_to_be32(scsi_bufflen(sc));
 	hdr->cmdsn = cpu_to_be32(session->cmdsn);
 	session->cmdsn++;
 	hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
-	memcpy(hdr->cdb, sc->cmnd, sc->cmd_len);
-	if (sc->cmd_len < MAX_COMMAND_SIZE)
-		memset(&hdr->cdb[sc->cmd_len], 0,
-			MAX_COMMAND_SIZE - sc->cmd_len);
+	cmd_len = sc->cmd_len;
+	if (cmd_len < ISCSI_CDB_SIZE)
+		memset(&hdr->cdb[cmd_len], 0, ISCSI_CDB_SIZE - cmd_len);
+	else if (cmd_len > ISCSI_CDB_SIZE) {
+		rc = iscsi_prep_ecdb_ahs(ctask);
+		if (rc)
+			return rc;
+		cmd_len = ISCSI_CDB_SIZE;
+	}
+	memcpy(hdr->cdb, sc->cmnd, cmd_len);
 
 	ctask->imm_count = 0;
+	if (scsi_bidi_cmnd(sc)) {
+		hdr->flags |= ISCSI_FLAG_CMD_READ;
+		rc = iscsi_prep_bidi_ahs(ctask);
+		if (rc)
+			return rc;
+	}
 	if (sc->sc_data_direction == DMA_TO_DEVICE) {
+		unsigned out_len = scsi_out(sc)->length;
+		hdr->data_length = cpu_to_be32(out_len);
 		hdr->flags |= ISCSI_FLAG_CMD_WRITE;
 		/*
 		 * Write counters:
@@ -192,19 +269,19 @@
 		ctask->unsol_datasn = 0;
 
 		if (session->imm_data_en) {
-			if (scsi_bufflen(sc) >= session->first_burst)
+			if (out_len >= session->first_burst)
 				ctask->imm_count = min(session->first_burst,
 							conn->max_xmit_dlength);
 			else
-				ctask->imm_count = min(scsi_bufflen(sc),
+				ctask->imm_count = min(out_len,
 							conn->max_xmit_dlength);
 			hton24(hdr->dlength, ctask->imm_count);
 		} else
 			zero_data(hdr->dlength);
 
 		if (!session->initial_r2t_en) {
-			ctask->unsol_count = min((session->first_burst),
-				(scsi_bufflen(sc))) - ctask->imm_count;
+			ctask->unsol_count = min(session->first_burst, out_len)
+							     - ctask->imm_count;
 			ctask->unsol_offset = ctask->imm_count;
 		}
 
@@ -214,6 +291,7 @@
 	} else {
 		hdr->flags |= ISCSI_FLAG_CMD_FINAL;
 		zero_data(hdr->dlength);
+		hdr->data_length = cpu_to_be32(scsi_in(sc)->length);
 
 		if (sc->sc_data_direction == DMA_FROM_DEVICE)
 			hdr->flags |= ISCSI_FLAG_CMD_READ;
@@ -232,10 +310,12 @@
 		return EIO;
 
 	conn->scsicmd_pdus_cnt++;
-	debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x len %d "
-		"cmdsn %d win %d]\n",
-		sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read",
-		conn->id, sc, sc->cmnd[0], ctask->itt, scsi_bufflen(sc),
+	debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x "
+		"len %d bidi_len %d cmdsn %d win %d]\n",
+		scsi_bidi_cmnd(sc) ? "bidirectional" :
+		     sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read",
+		conn->id, sc, sc->cmnd[0], ctask->itt,
+		scsi_bufflen(sc), scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0,
 		session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
 	return 0;
 }
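
A worked example for iscsi_prep_ecdb_ahs() above, assuming the usual
iscsi_ecdb_ahdr layout (2-byte ahslength, 1-byte ahstype, 1-byte reserved):
a 32-byte CDB leaves rlen = 32 - ISCSI_CDB_SIZE = 16 spill bytes,
pad_len = iscsi_padding(16) = 0 and ahslength = 16 + 1 = 17, so
iscsi_add_hdr() reserves 2 + 1 + 17 + 0 = 20 bytes and the PDU stays 4-byte
aligned. The same arithmetic as a sketch:

	/* AHS bytes added for an extended CDB (layout as assumed above). */
	static unsigned int ecdb_ahs_size(unsigned int cmd_len)
	{
		unsigned int rlen = cmd_len - 16;	 /* past ISCSI_CDB_SIZE */
		unsigned int pad = (4 - (rlen & 3)) & 3; /* iscsi_padding()   */

		/* ahslength(2) + ahstype(1) + reserved(1) + ecdb + pad */
		return 2 + 1 + 1 + rlen + pad;
	}
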
@@ -298,7 +378,12 @@
 		conn->session->tt->cleanup_cmd_task(conn, ctask);
 
 	sc->result = err;
-	scsi_set_resid(sc, scsi_bufflen(sc));
+	if (!scsi_bidi_cmnd(sc))
+		scsi_set_resid(sc, scsi_bufflen(sc));
+	else {
+		scsi_out(sc)->resid = scsi_out(sc)->length;
+		scsi_in(sc)->resid = scsi_in(sc)->length;
+	}
 	if (conn->ctask == ctask)
 		conn->ctask = NULL;
 	/* release ref from queuecommand */
@@ -433,6 +518,18 @@
 			   min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE));
 	}
 
+	if (rhdr->flags & (ISCSI_FLAG_CMD_BIDI_UNDERFLOW |
+			   ISCSI_FLAG_CMD_BIDI_OVERFLOW)) {
+		int res_count = be32_to_cpu(rhdr->bi_residual_count);
+
+		if (scsi_bidi_cmnd(sc) && res_count > 0 &&
+				(rhdr->flags & ISCSI_FLAG_CMD_BIDI_OVERFLOW ||
+				 res_count <= scsi_in(sc)->length))
+			scsi_in(sc)->resid = res_count;
+		else
+			sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
+	}
+
 	if (rhdr->flags & (ISCSI_FLAG_CMD_UNDERFLOW |
 	                   ISCSI_FLAG_CMD_OVERFLOW)) {
 		int res_count = be32_to_cpu(rhdr->residual_count);
@@ -440,13 +537,11 @@
 		if (res_count > 0 &&
 		    (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
 		     res_count <= scsi_bufflen(sc)))
+			/* write side for bidi, or whole residual for uni-directional I/O */
 			scsi_set_resid(sc, res_count);
 		else
 			sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
-	} else if (rhdr->flags & (ISCSI_FLAG_CMD_BIDI_UNDERFLOW |
-	                          ISCSI_FLAG_CMD_BIDI_OVERFLOW))
-		sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
-
+	}
 out:
 	debug_scsi("done [sc %lx res %d itt 0x%x]\n",
 		   (long)sc, sc->result, ctask->itt);
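
With bidirectional commands the response PDU can carry two residuals, and
the rewritten block above keeps them separate: bi_residual_count describes
the Data-In half and lands in scsi_in(sc)->resid, while residual_count
covers the write half (or the only direction of a uni-directional command)
via scsi_set_resid(). The consumer view, sketched:

	#include <scsi/scsi_cmnd.h>

	/* After completion each direction reports its own shortfall. */
	static void my_report_residuals(struct scsi_cmnd *sc)
	{
		int in_short = scsi_in(sc)->resid;   /* Data-In not filled */
		int out_short = scsi_out(sc)->resid; /* Data-Out not sent  */

		printk(KERN_DEBUG "resid in %d out %d\n", in_short, out_short);
	}
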
@@ -1102,7 +1197,12 @@
 fault:
 	spin_unlock(&session->lock);
 	debug_scsi("iscsi: cmd 0x%x is not queued (%d)\n", sc->cmnd[0], reason);
-	scsi_set_resid(sc, scsi_bufflen(sc));
+	if (!scsi_bidi_cmnd(sc))
+		scsi_set_resid(sc, scsi_bufflen(sc));
+	else {
+		scsi_out(sc)->resid = scsi_out(sc)->length;
+		scsi_in(sc)->resid = scsi_in(sc)->length;
+	}
 	sc->scsi_done(sc);
 	spin_lock(host->host_lock);
 	return 0;
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index b0e5ac3..744f06d 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -225,10 +225,12 @@
 	return 0;
 }
 
-static u8 sas_ata_check_status(struct ata_port *ap)
+static bool sas_ata_qc_fill_rtf(struct ata_queued_cmd *qc)
 {
-	struct domain_device *dev = ap->private_data;
-	return dev->sata_dev.tf.command;
+	struct domain_device *dev = qc->ap->private_data;
+
+	memcpy(&qc->result_tf, &dev->sata_dev.tf, sizeof(qc->result_tf));
+	return true;
 }
 
 static void sas_ata_phy_reset(struct ata_port *ap)
@@ -292,12 +294,6 @@
 	}
 }
 
-static void sas_ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
-{
-	struct domain_device *dev = ap->private_data;
-	memcpy(tf, &dev->sata_dev.tf, sizeof (*tf));
-}
-
 static int sas_ata_scr_write(struct ata_port *ap, unsigned int sc_reg_in,
 			      u32 val)
 {
@@ -348,14 +344,11 @@
 }
 
 static struct ata_port_operations sas_sata_ops = {
-	.check_status		= sas_ata_check_status,
-	.check_altstatus	= sas_ata_check_status,
-	.dev_select		= ata_noop_dev_select,
 	.phy_reset		= sas_ata_phy_reset,
 	.post_internal_cmd	= sas_ata_post_internal,
-	.tf_read		= sas_ata_tf_read,
 	.qc_prep		= ata_noop_qc_prep,
 	.qc_issue		= sas_ata_qc_issue,
+	.qc_fill_rtf		= sas_ata_qc_fill_rtf,
 	.port_start		= ata_sas_port_start,
 	.port_stop		= ata_sas_port_stop,
 	.scr_read		= sas_ata_scr_read,
@@ -698,7 +691,7 @@
 		/* incomplete response */
 		SAS_DPRINTK("sending SET FEATURE/PUP_STBY_SPIN_UP to "
 			    "dev %llx\n", SAS_ADDR(dev->sas_addr));
-		if (!le16_to_cpu(identify_x[83] & (1<<6)))
+		if (!(identify_x[83] & cpu_to_le16(1<<6)))
 			goto cont1;
 		res = sas_issue_ata_cmd(dev, ATA_SET_FEATURES,
 					ATA_FEATURE_PUP_STBY_SPIN_UP,
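
Two independent fixes close out sas_ata.c: libata now collects the result
taskfile through the single ->qc_fill_rtf() hook (the cached D2H FIS image
is copied into qc->result_tf), replacing the emulated check_status/tf_read
register reads; and the IDENTIFY word test swaps the compile-time constant
instead of mis-swapping an already-masked datum. The endian-safe flag test,
sketched:

	#include <asm/byteorder.h>
	#include <linux/types.h>

	/* Swap the constant once at build time; the data stays __le16. */
	static inline bool my_word83_bit6(const __le16 *id)
	{
		return (id[83] & cpu_to_le16(1 << 6)) != 0;
	}
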
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 1f82415..601ec5b 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -24,6 +24,8 @@
  */
 
 #include <linux/kthread.h>
+#include <linux/firmware.h>
+#include <linux/ctype.h>
 
 #include "sas_internal.h"
 
@@ -1064,6 +1066,45 @@
 	return;
 }
 
+static void sas_parse_addr(u8 *sas_addr, const char *p)
+{
+	int i;
+	for (i = 0; i < SAS_ADDR_SIZE; i++) {
+		u8 h, l;
+		if (!*p)
+			break;
+		h = isdigit(*p) ? *p-'0' : toupper(*p)-'A'+10;
+		p++;
+		l = isdigit(*p) ? *p-'0' : toupper(*p)-'A'+10;
+		p++;
+		sas_addr[i] = (h<<4) | l;
+	}
+}
+
+#define SAS_STRING_ADDR_SIZE	16
+
+int sas_request_addr(struct Scsi_Host *shost, u8 *addr)
+{
+	int res;
+	const struct firmware *fw;
+
+	res = request_firmware(&fw, "sas_addr", &shost->shost_gendev);
+	if (res)
+		return res;
+
+	if (fw->size < SAS_STRING_ADDR_SIZE) {
+		res = -ENODEV;
+		goto out;
+	}
+
+	sas_parse_addr(addr, fw->data);
+
+out:
+	release_firmware(fw);
+	return res;
+}
+EXPORT_SYMBOL_GPL(sas_request_addr);
+
 EXPORT_SYMBOL_GPL(sas_queuecommand);
 EXPORT_SYMBOL_GPL(sas_target_alloc);
 EXPORT_SYMBOL_GPL(sas_slave_configure);
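
sas_request_addr() gives an LLDD with no on-board address storage a way to
obtain its WWN from userspace: the "sas_addr" firmware blob must hold at
least 16 hex digits, which sas_parse_addr() packs into SAS_ADDR_SIZE bytes.
A hypothetical probe-time caller (my_probe_addr and the fallback address
are illustrative):

	#include <linux/string.h>
	#include <scsi/libsas.h>

	static int my_probe_addr(struct Scsi_Host *shost,
				 u8 sas_addr[SAS_ADDR_SIZE])
	{
		int err = sas_request_addr(shost, sas_addr);

		if (err)	/* no "sas_addr" blob was provided */
			memcpy(sas_addr, "\x50\x01\x23\x45\x67\x89\xab\xcd",
			       SAS_ADDR_SIZE);
		return 0;
	}
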
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 2ab2d24..ec0b0f6 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -23,7 +23,7 @@
 
 struct lpfc_sli2_slim;
 
-#define LPFC_MAX_TARGET		256	/* max number of targets supported */
+#define LPFC_MAX_TARGET		4096	/* max number of targets supported */
 #define LPFC_MAX_DISC_THREADS	64	/* max outstanding discovery els
 					   requests */
 #define LPFC_MAX_NS_RETRY	3	/* Number of retry attempts to contact
@@ -268,7 +268,6 @@
 #define FC_NLP_MORE             0x40	 /* More node to process in node tbl */
 #define FC_OFFLINE_MODE         0x80	 /* Interface is offline for diag */
 #define FC_FABRIC               0x100	 /* We are fabric attached */
-#define FC_ESTABLISH_LINK       0x200	 /* Reestablish Link */
 #define FC_RSCN_DISCOVERY       0x400	 /* Auth all devices after RSCN */
 #define FC_SCSI_SCAN_TMO        0x4000	 /* scsi scan timer running */
 #define FC_ABORT_DISCOVERY      0x8000	 /* we want to abort discovery */
@@ -433,8 +432,6 @@
 
 	uint32_t fc_eventTag;	/* event tag for link attention */
 
-
-	struct timer_list fc_estabtmo;	/* link establishment timer */
 	/* These fields used to be binfo */
 	uint32_t fc_pref_DID;	/* preferred D_ID */
 	uint8_t  fc_pref_ALPA;	/* preferred AL_PA */
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index b12a841..74c9fc2 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1954,7 +1954,9 @@
 			(phba->sysfs_mbox.mbox->mb.mbxCommand !=
 				MBX_DUMP_MEMORY &&
 			 phba->sysfs_mbox.mbox->mb.mbxCommand !=
-				MBX_RESTART)) {
+				MBX_RESTART &&
+			 phba->sysfs_mbox.mbox->mb.mbxCommand !=
+				MBX_WRITE_VPARMS)) {
 			sysfs_mbox_idle(phba);
 			spin_unlock_irq(&phba->hbalock);
 			return -EPERM;
@@ -1962,7 +1964,11 @@
 
 		phba->sysfs_mbox.mbox->vport = vport;
 
-		if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
+		/* Don't allow mailbox commands to be sent when blocked
+		 * or when in the middle of discovery
+		 */
+		if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO ||
+		    vport->fc_flag & FC_NDISC_ACTIVE) {
 			sysfs_mbox_idle(phba);
 			spin_unlock_irq(&phba->hbalock);
 			return  -EAGAIN;
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 3d0ccd9..153afae 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -63,7 +63,7 @@
 {
 	if (!mp) {
 		lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-				"0146 Ignoring unsolicted CT No HBQ "
+				"0146 Ignoring unsolicited CT No HBQ "
 				"status = x%x\n",
 				piocbq->iocb.ulpStatus);
 	}
@@ -438,7 +438,7 @@
 				    (!(vport->ct_flags & FC_CT_RFF_ID)) ||
 				    (!vport->cfg_restrict_login)) {
 					ndlp = lpfc_setup_disc_node(vport, Did);
-					if (ndlp) {
+					if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
 						lpfc_debugfs_disc_trc(vport,
 						LPFC_DISC_TRC_CT,
 						"Parse GID_FTrsp: "
@@ -543,7 +543,7 @@
 	struct lpfc_dmabuf *outp;
 	struct lpfc_sli_ct_request *CTrsp;
 	struct lpfc_nodelist *ndlp;
-	int rc, retry;
+	int rc;
 
 	/* First save ndlp, before we overwrite it */
 	ndlp = cmdiocb->context_un.ndlp;
@@ -563,45 +563,29 @@
 	if (vport->load_flag & FC_UNLOADING)
 		goto out;
 
-	if (lpfc_els_chk_latt(vport) || lpfc_error_lost_link(irsp)) {
+	if (lpfc_els_chk_latt(vport)) {
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
 				 "0216 Link event during NS query\n");
 		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
 		goto out;
 	}
-
+	if (lpfc_error_lost_link(irsp)) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+				 "0226 NS query failed due to link event\n");
+		goto out;
+	}
 	if (irsp->ulpStatus) {
 		/* Check for retry */
 		if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
-			retry = 1;
-			if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
-				switch (irsp->un.ulpWord[4]) {
-				case IOERR_NO_RESOURCES:
-					/* We don't increment the retry
-					 * count for this case.
-					 */
-					break;
-				case IOERR_LINK_DOWN:
-				case IOERR_SLI_ABORTED:
-				case IOERR_SLI_DOWN:
-					retry = 0;
-					break;
-				default:
-					vport->fc_ns_retry++;
-				}
-			}
-			else
+			if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT ||
+			    irsp->un.ulpWord[4] != IOERR_NO_RESOURCES)
 				vport->fc_ns_retry++;
 
-			if (retry) {
-				/* CT command is being retried */
-				rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
+			/* CT command is being retried */
+			rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
 					 vport->fc_ns_retry, 0);
-				if (rc == 0) {
-					/* success */
-					goto out;
-				}
-			}
+			if (rc == 0)
+				goto out;
 		}
 		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
@@ -780,7 +764,7 @@
 
 	/* This is a target port, unregistered port, or the GFF_ID failed */
 	ndlp = lpfc_setup_disc_node(vport, did);
-	if (ndlp) {
+	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
 				 "0242 Process x%x GFF "
 				 "NameServer Rsp Data: x%x x%x x%x\n",
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 783d1ee..90272e6 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -503,6 +503,8 @@
 				ndlp->nlp_sid);
 		if (ndlp->nlp_type & NLP_FCP_INITIATOR)
 			len +=  snprintf(buf+len, size-len, "FCP_INITIATOR ");
+		len += snprintf(buf+len, size-len, "usgmap:%x ",
+			ndlp->nlp_usg_map);
 		len += snprintf(buf+len, size-len, "refcnt:%x",
 			atomic_read(&ndlp->kref.refcount));
 		len +=  snprintf(buf+len, size-len, "\n");
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index cbb68a9..886c5f1 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -719,9 +719,9 @@
 		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR &&
 		    icmd->un.elsreq64.bdl.ulpIoTag32) {
 			ndlp = (struct lpfc_nodelist *)(iocb->context1);
-			if (ndlp && (ndlp->nlp_DID == Fabric_DID)) {
+			if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+			    (ndlp->nlp_DID == Fabric_DID))
 				lpfc_sli_issue_abort_iotag(phba, pring, iocb);
-			}
 		}
 	}
 	spin_unlock_irq(&phba->hbalock);
@@ -829,7 +829,7 @@
 	struct fc_rport *rport;
 	struct serv_parm *sp;
 	uint8_t  name[sizeof(struct lpfc_name)];
-	uint32_t rc;
+	uint32_t rc, keepDID = 0;
 
 	/* Fabric nodes can have the same WWPN so we don't bother searching
 	 * by WWPN.  Just return the ndlp that was given to us.
@@ -858,11 +858,17 @@
 			return ndlp;
 		lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID);
 	} else if (!NLP_CHK_NODE_ACT(new_ndlp)) {
+		rc = memcmp(&ndlp->nlp_portname, name,
+			    sizeof(struct lpfc_name));
+		if (!rc)
+			return ndlp;
 		new_ndlp = lpfc_enable_node(vport, new_ndlp,
 						NLP_STE_UNUSED_NODE);
 		if (!new_ndlp)
 			return ndlp;
-	}
+		keepDID = new_ndlp->nlp_DID;
+	} else
+		keepDID = new_ndlp->nlp_DID;
 
 	lpfc_unreg_rpi(vport, new_ndlp);
 	new_ndlp->nlp_DID = ndlp->nlp_DID;
@@ -893,12 +899,24 @@
 			}
 			new_ndlp->nlp_type = ndlp->nlp_type;
 		}
+		/* Actually free an ndlp whose nlp_DID and nlp_portname
+		 * fields are both 0, so that an unusable ndlp does not
+		 * linger on the nodelist.
+		 */
+		if (ndlp->nlp_DID == 0) {
+			spin_lock_irq(&phba->ndlp_lock);
+			NLP_SET_FREE_REQ(ndlp);
+			spin_unlock_irq(&phba->ndlp_lock);
+		}
 
+		/* Two ndlps cannot have the same did on the nodelist */
+		ndlp->nlp_DID = keepDID;
 		lpfc_drop_node(vport, ndlp);
 	}
 	else {
 		lpfc_unreg_rpi(vport, ndlp);
-		ndlp->nlp_DID = 0; /* Two ndlps cannot have the same did */
+		/* Two ndlps cannot have the same did */
+		ndlp->nlp_DID = keepDID;
 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
 	}
 	return new_ndlp;
@@ -2091,7 +2109,7 @@
 		}
 
 		phba->fc_stat.elsXmitRetry++;
-		if (ndlp && delay) {
+		if (ndlp && NLP_CHK_NODE_ACT(ndlp) && delay) {
 			phba->fc_stat.elsDelayRetry++;
 			ndlp->nlp_retry = cmdiocb->retry;
 
@@ -2121,7 +2139,7 @@
 			lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry);
 			return 1;
 		case ELS_CMD_PLOGI:
-			if (ndlp) {
+			if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
 				ndlp->nlp_prev_state = ndlp->nlp_state;
 				lpfc_nlp_set_state(vport, ndlp,
 						   NLP_STE_PLOGI_ISSUE);
@@ -2302,7 +2320,7 @@
 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
 	kfree(mp);
 	mempool_free(pmb, phba->mbox_mem_pool);
-	if (ndlp) {
+	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
 		lpfc_nlp_put(ndlp);
 		/* This is the end of the default RPI cleanup logic for this
 		 * ndlp. If no other discovery threads are using this ndlp.
@@ -2335,7 +2353,8 @@
 	 * function can have cmdiocb->context1 (ndlp) field set to NULL.
 	 */
 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
-	if (ndlp && (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
+	if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+	    (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
 		/* An LS_RJT associated with Default RPI cleanup has its own
 		 * separate code path.
 		 */
@@ -2344,7 +2363,7 @@
 	}
 
 	/* Check to see if link went down during discovery */
-	if (!ndlp || lpfc_els_chk_latt(vport)) {
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || lpfc_els_chk_latt(vport)) {
 		if (mbox) {
 			mp = (struct lpfc_dmabuf *) mbox->context1;
 			if (mp) {
@@ -2353,7 +2372,8 @@
 			}
 			mempool_free(mbox, phba->mbox_mem_pool);
 		}
-		if (ndlp && (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
+		if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+		    (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
 			if (lpfc_nlp_not_used(ndlp)) {
 				ndlp = NULL;
 				/* Indicate the node has already released,
@@ -2443,7 +2463,7 @@
 		mempool_free(mbox, phba->mbox_mem_pool);
 	}
 out:
-	if (ndlp) {
+	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
 		spin_lock_irq(shost->host_lock);
 		ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
 		spin_unlock_irq(shost->host_lock);
@@ -3139,6 +3159,8 @@
 		/* Another thread is walking fc_rscn_id_list on this vport */
 		spin_unlock_irq(shost->host_lock);
 		vport->fc_flag |= FC_RSCN_DISCOVERY;
+		/* Send back ACC */
+		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
 		return 0;
 	}
 	/* Indicate we are walking fc_rscn_id_list on this vport */
@@ -3928,7 +3950,7 @@
 		else {
 			struct lpfc_nodelist *ndlp;
 			ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext);
-			if (ndlp)
+			if (ndlp && NLP_CHK_NODE_ACT(ndlp))
 				remote_ID = ndlp->nlp_DID;
 		}
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
@@ -4097,21 +4119,22 @@
 		newnode = 1;
 		if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
 			ndlp->nlp_type |= NLP_FABRIC;
-	} else {
-		if (!NLP_CHK_NODE_ACT(ndlp)) {
-			ndlp = lpfc_enable_node(vport, ndlp,
-						NLP_STE_UNUSED_NODE);
-			if (!ndlp)
-				goto dropit;
-		}
-		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
-			/* This is simular to the new node path */
-			ndlp = lpfc_nlp_get(ndlp);
-			if (!ndlp)
-				goto dropit;
-			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
-			newnode = 1;
-		}
+	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
+		ndlp = lpfc_enable_node(vport, ndlp,
+					NLP_STE_UNUSED_NODE);
+		if (!ndlp)
+			goto dropit;
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+		newnode = 1;
+		if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
+			ndlp->nlp_type |= NLP_FABRIC;
+	} else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
+		/* This is similar to the new node path */
+		ndlp = lpfc_nlp_get(ndlp);
+		if (!ndlp)
+			goto dropit;
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+		newnode = 1;
 	}
 
 	phba->fc_stat.elsRcvFrame++;
@@ -4451,7 +4474,6 @@
 			return;
 		}
 		lpfc_nlp_init(vport, ndlp, NameServer_DID);
-		ndlp->nlp_type |= NLP_FABRIC;
 	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
 		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
 		if (!ndlp) {
@@ -4465,6 +4487,7 @@
 			return;
 		}
 	}
+	ndlp->nlp_type |= NLP_FABRIC;
 
 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
 
@@ -4481,8 +4504,8 @@
 		if (ndlp_fdmi) {
 			lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID);
 			ndlp_fdmi->nlp_type |= NLP_FABRIC;
-			ndlp_fdmi->nlp_state =
-				NLP_STE_PLOGI_ISSUE;
+			lpfc_nlp_set_state(vport, ndlp_fdmi,
+				NLP_STE_PLOGI_ISSUE);
 			lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID,
 					     0);
 		}
@@ -5074,39 +5097,3 @@
 		(piocb->iocb_cmpl) (phba, piocb, piocb);
 	}
 }
-
-
-#if 0
-void lpfc_fabric_abort_flogi(struct lpfc_hba *phba)
-{
-	LIST_HEAD(completions);
-	struct lpfc_iocbq *tmp_iocb, *piocb;
-	IOCB_t *cmd;
-	struct lpfc_nodelist *ndlp;
-
-	spin_lock_irq(&phba->hbalock);
-	list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
-				 list) {
-
-		cmd = &piocb->iocb;
-		ndlp = (struct lpfc_nodelist *) piocb->context1;
-		if (cmd->ulpCommand == CMD_ELS_REQUEST64_CR &&
-		    ndlp != NULL &&
-		    ndlp->nlp_DID == Fabric_DID)
-			list_move_tail(&piocb->list, &completions);
-	}
-	spin_unlock_irq(&phba->hbalock);
-
-	while (!list_empty(&completions)) {
-		piocb = list_get_first(&completions, struct lpfc_iocbq, list);
-		list_del_init(&piocb->list);
-
-		cmd = &piocb->iocb;
-		cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
-		cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
-		(piocb->iocb_cmpl) (phba, piocb, piocb);
-	}
-}
-#endif  /*  0  */
-
-
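
The change repeated throughout the lpfc hunks in this merge is the
NLP_CHK_NODE_ACT() guard: an ndlp pointer obtained from a lookup or from
rport data may now reference an inactive node that is queued for release,
so using it without the check races with node recycling. The pattern, as a
fragment:

	/* Treat an inactive ndlp exactly like a failed lookup (sketch). */
	ndlp = lpfc_findnode_did(vport, did);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
		return;		/* node gone or being recycled */
	/* ... ndlp is safe to dereference from here on ... */
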
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 9766534..7cb68fe 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -69,7 +69,7 @@
 	rdata = rport->dd_data;
 	ndlp = rdata->pnode;
 
-	if (!ndlp) {
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
 		if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
 			printk(KERN_ERR "Cannot find remote node"
 			" to terminate I/O Data x%x\n",
@@ -114,7 +114,7 @@
 
 	rdata = rport->dd_data;
 	ndlp = rdata->pnode;
-	if (!ndlp)
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
 		return;
 
 	vport = ndlp->vport;
@@ -243,8 +243,8 @@
 	if (warn_on) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
 				 "0203 Devloss timeout on "
-				 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
-				 "NPort x%x Data: x%x x%x x%x\n",
+				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
+				 "NPort x%06x Data: x%x x%x x%x\n",
 				 *name, *(name+1), *(name+2), *(name+3),
 				 *(name+4), *(name+5), *(name+6), *(name+7),
 				 ndlp->nlp_DID, ndlp->nlp_flag,
@@ -252,8 +252,8 @@
 	} else {
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
 				 "0204 Devloss timeout on "
-				 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
-				 "NPort x%x Data: x%x x%x x%x\n",
+				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
+				 "NPort x%06x Data: x%x x%x x%x\n",
 				 *name, *(name+1), *(name+2), *(name+3),
 				 *(name+4), *(name+5), *(name+6), *(name+7),
 				 ndlp->nlp_DID, ndlp->nlp_flag,
@@ -399,7 +399,10 @@
 				vport = vports[i];
 			if (vport == NULL)
 				break;
+			spin_lock_irq(&vport->work_port_lock);
 			work_port_events = vport->work_port_events;
+			vport->work_port_events &= ~work_port_events;
+			spin_unlock_irq(&vport->work_port_lock);
 			if (work_port_events & WORKER_DISC_TMO)
 				lpfc_disc_timeout_handler(vport);
 			if (work_port_events & WORKER_ELS_TMO)
@@ -416,9 +419,6 @@
 				lpfc_ramp_down_queue_handler(phba);
 			if (work_port_events & WORKER_RAMP_UP_QUEUE)
 				lpfc_ramp_up_queue_handler(phba);
-			spin_lock_irq(&vport->work_port_lock);
-			vport->work_port_events &= ~work_port_events;
-			spin_unlock_irq(&vport->work_port_lock);
 		}
 	lpfc_destroy_vport_work_array(phba, vports);
 
@@ -430,10 +430,10 @@
 		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
 			pring->flag |= LPFC_DEFERRED_RING_EVENT;
 		} else {
+			pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
 			lpfc_sli_handle_slow_ring_event(phba, pring,
 							(status &
 							 HA_RXMASK));
-			pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
 		}
 		/*
 		 * Turn on Ring interrupts
@@ -519,7 +519,9 @@
 			schedule();
 		}
 	}
+	spin_lock_irq(&phba->hbalock);
 	phba->work_wait = NULL;
+	spin_unlock_irq(&phba->hbalock);
 	return 0;
 }
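
Clearing phba->work_wait on worker exit is now done under hbalock, pairing
with the locked wake-up added to lpfc_hb_timeout() in lpfc_init.c below;
unserialized, a timer could dereference a waiter pointer mid-teardown. The
generic shape of the fix (my_lock and my_waiter are illustrative):

	/* Publication/retraction and the wake-up share one lock. */
	spin_lock_irq(&my_lock);
	my_waiter = NULL;		/* retract under the lock ...      */
	spin_unlock_irq(&my_lock);

	spin_lock_irq(&my_lock);	/* ... so the waker never sees a   */
	if (my_waiter)			/* pointer that is being torn down */
		wake_up(my_waiter);
	spin_unlock_irq(&my_lock);
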
 
@@ -809,11 +811,9 @@
 	mempool_free(pmb, phba->mbox_mem_pool);
 
 	spin_lock_irq(shost->host_lock);
-	vport->fc_flag &= ~(FC_ABORT_DISCOVERY | FC_ESTABLISH_LINK);
+	vport->fc_flag &= ~FC_ABORT_DISCOVERY;
 	spin_unlock_irq(shost->host_lock);
 
-	del_timer_sync(&phba->fc_estabtmo);
-
 	lpfc_can_disctmo(vport);
 
 	/* turn on Link Attention interrupts */
@@ -1340,10 +1340,14 @@
 			    i++) {
 				if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
 					continue;
+				if (phba->fc_topology == TOPOLOGY_LOOP) {
+					lpfc_vport_set_state(vports[i],
+							FC_VPORT_LINKDOWN);
+					continue;
+				}
 				if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
 					lpfc_initial_fdisc(vports[i]);
-				else if (phba->sli3_options &
-						LPFC_SLI3_NPIV_ENABLED) {
+				else {
 					lpfc_vport_set_state(vports[i],
 						FC_VPORT_NO_FABRIC_SUPP);
 					lpfc_printf_vlog(vport, KERN_ERR,
@@ -2190,10 +2194,6 @@
 	if (did == Bcast_DID)
 		return 0;
 
-	if (ndlp->nlp_DID == 0) {
-		return 0;
-	}
-
 	/* First check for Direct match */
 	if (ndlp->nlp_DID == did)
 		return 1;
@@ -2301,7 +2301,8 @@
 		return ndlp;
 	}
 
-	if (vport->fc_flag & FC_RSCN_MODE) {
+	if ((vport->fc_flag & FC_RSCN_MODE) &&
+	    !(vport->fc_flag & FC_NDISC_ACTIVE)) {
 		if (lpfc_rscn_payload_check(vport, did)) {
 			/* If we've already received a PLOGI from this NPort
 			 * we don't need to try to discover it again.
@@ -2947,24 +2948,6 @@
 	return NULL;
 }
 
-#if 0
-/*
- * Search node lists for a remote port matching filter criteria
- * Caller needs to hold host_lock before calling this routine.
- */
-struct lpfc_nodelist *
-lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
-{
-	struct Scsi_Host     *shost = lpfc_shost_from_vport(vport);
-	struct lpfc_nodelist *ndlp;
-
-	spin_lock_irq(shost->host_lock);
-	ndlp = __lpfc_find_node(vport, filter, param);
-	spin_unlock_irq(shost->host_lock);
-	return ndlp;
-}
-#endif  /*  0  */
-
 /*
  * This routine looks up the ndlp lists for the given RPI. If rpi found it
  * returns the node list element pointer else return NULL.
@@ -2975,20 +2958,6 @@
 	return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
 }
 
-#if 0
-struct lpfc_nodelist *
-lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
-{
-	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-	struct lpfc_nodelist *ndlp;
-
-	spin_lock_irq(shost->host_lock);
-	ndlp = __lpfc_findnode_rpi(vport, rpi);
-	spin_unlock_irq(shost->host_lock);
-	return ndlp;
-}
-#endif  /*  0  */
-
 /*
  * This routine looks up the ndlp lists for the given WWPN. If WWPN found it
  * returns the node element list pointer else return NULL.
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 2284375..fa757b2 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -559,8 +559,10 @@
 		phba->pport->work_port_events |= WORKER_HB_TMO;
 	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
 
+	spin_lock_irqsave(&phba->hbalock, iflag);
 	if (phba->work_wait)
 		wake_up(phba->work_wait);
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
 	return;
 }
 
@@ -714,12 +716,10 @@
 	struct lpfc_vport *vport = phba->pport;
 	struct lpfc_sli   *psli = &phba->sli;
 	struct lpfc_sli_ring  *pring;
-	struct lpfc_vport **vports;
 	uint32_t event_data;
 	unsigned long temperature;
 	struct temp_event temp_event_data;
 	struct Scsi_Host  *shost;
-	int i;
 
 	/* If the pci channel is offline, ignore possible errors,
 	 * since we cannot communicate with the pci card anyway. */
@@ -729,25 +729,14 @@
 	if (!phba->cfg_enable_hba_reset)
 		return;
 
-	if (phba->work_hs & HS_FFER6 ||
-	    phba->work_hs & HS_FFER5) {
+	if (phba->work_hs & HS_FFER6) {
 		/* Re-establishing Link */
 		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
 				"1301 Re-establishing Link "
 				"Data: x%x x%x x%x\n",
 				phba->work_hs,
 				phba->work_status[0], phba->work_status[1]);
-		vports = lpfc_create_vport_work_array(phba);
-		if (vports != NULL)
-			for(i = 0;
-			    i <= phba->max_vpi && vports[i] != NULL;
-			    i++){
-				shost = lpfc_shost_from_vport(vports[i]);
-				spin_lock_irq(shost->host_lock);
-				vports[i]->fc_flag |= FC_ESTABLISH_LINK;
-				spin_unlock_irq(shost->host_lock);
-			}
-		lpfc_destroy_vport_work_array(phba, vports);
+
 		spin_lock_irq(&phba->hbalock);
 		psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
 		spin_unlock_irq(&phba->hbalock);
@@ -761,7 +750,6 @@
 		pring = &psli->ring[psli->fcp_ring];
 		lpfc_sli_abort_iocb_ring(phba, pring);
 
-
 		/*
 		 * There was a firmware error.  Take the hba offline and then
 		 * attempt to restart it.
@@ -770,7 +758,6 @@
 		lpfc_offline(phba);
 		lpfc_sli_brdrestart(phba);
 		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
-			mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
 			lpfc_unblock_mgmt_io(phba);
 			return;
 		}
@@ -1454,6 +1441,13 @@
 			NLP_SET_FREE_REQ(ndlp);
 		spin_unlock_irq(&phba->ndlp_lock);
 
+		if (vport->port_type != LPFC_PHYSICAL_PORT &&
+		    ndlp->nlp_DID == Fabric_DID) {
+			/* Just free up ndlp with Fabric_DID for vports */
+			lpfc_nlp_put(ndlp);
+			continue;
+		}
+
 		if (ndlp->nlp_type & NLP_FABRIC)
 			lpfc_disc_state_machine(vport, ndlp, NULL,
 					NLP_EVT_DEVICE_RECOVERY);
@@ -1491,31 +1485,6 @@
 	return;
 }
 
-static void
-lpfc_establish_link_tmo(unsigned long ptr)
-{
-	struct lpfc_hba   *phba = (struct lpfc_hba *) ptr;
-	struct lpfc_vport **vports;
-	unsigned long iflag;
-	int i;
-
-	/* Re-establishing Link, timer expired */
-	lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
-			"1300 Re-establishing Link, timer expired "
-			"Data: x%x x%x\n",
-			phba->pport->fc_flag, phba->pport->port_state);
-	vports = lpfc_create_vport_work_array(phba);
-	if (vports != NULL)
-		for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
-			struct Scsi_Host *shost;
-			shost = lpfc_shost_from_vport(vports[i]);
-			spin_lock_irqsave(shost->host_lock, iflag);
-			vports[i]->fc_flag &= ~FC_ESTABLISH_LINK;
-			spin_unlock_irqrestore(shost->host_lock, iflag);
-		}
-	lpfc_destroy_vport_work_array(phba, vports);
-}
-
 void
 lpfc_stop_vport_timers(struct lpfc_vport *vport)
 {
@@ -1529,7 +1498,6 @@
 lpfc_stop_phba_timers(struct lpfc_hba *phba)
 {
 	del_timer_sync(&phba->fcp_poll_timer);
-	del_timer_sync(&phba->fc_estabtmo);
 	lpfc_stop_vport_timers(phba->pport);
 	del_timer_sync(&phba->sli.mbox_tmo);
 	del_timer_sync(&phba->fabric_block_timer);
@@ -2005,10 +1973,6 @@
 	phba->max_vpi = LPFC_MAX_VPI;
 
 	/* Initialize timers used by driver */
-	init_timer(&phba->fc_estabtmo);
-	phba->fc_estabtmo.function = lpfc_establish_link_tmo;
-	phba->fc_estabtmo.data = (unsigned long)phba;
-
 	init_timer(&phba->hb_tmofunc);
 	phba->hb_tmofunc.function = lpfc_hb_timeout;
 	phba->hb_tmofunc.data = (unsigned long)phba;
@@ -2406,6 +2370,7 @@
 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
 	struct lpfc_sli *psli = &phba->sli;
+	int error, retval;
 
 	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
 	if (pci_enable_device_mem(pdev)) {
@@ -2416,15 +2381,40 @@
 
 	pci_set_master(pdev);
 
-	/* Re-establishing Link */
-	spin_lock_irq(shost->host_lock);
-	phba->pport->fc_flag |= FC_ESTABLISH_LINK;
-	spin_unlock_irq(shost->host_lock);
-
 	spin_lock_irq(&phba->hbalock);
 	psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
 	spin_unlock_irq(&phba->hbalock);
 
+	/* Enable configured interrupt method */
+	phba->intr_type = NONE;
+	if (phba->cfg_use_msi == 2) {
+		error = lpfc_enable_msix(phba);
+		if (!error)
+			phba->intr_type = MSIX;
+	}
+
+	/* Fallback to MSI if MSI-X initialization failed */
+	if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) {
+		retval = pci_enable_msi(phba->pcidev);
+		if (!retval)
+			phba->intr_type = MSI;
+		else
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"0470 Enable MSI failed, continuing "
+					"with IRQ\n");
+	}
+
+	/* MSI-X is the only case that doesn't need to call request_irq */
+	if (phba->intr_type != MSIX) {
+		retval = request_irq(phba->pcidev->irq, lpfc_intr_handler,
+				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
+		if (retval) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0471 Enable interrupt handler "
+					"failed\n");
+		} else if (phba->intr_type != MSI)
+			phba->intr_type = INTx;
+	}
 
 	/* Take device offline; this will perform cleanup */
 	lpfc_offline(phba);
@@ -2445,9 +2435,7 @@
 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
 
-	if (lpfc_online(phba) == 0) {
-		mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
-	}
+	lpfc_online(phba);
 }
 
 static struct pci_device_id lpfc_id_table[] = {
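
lpfc_io_slot_reset() now restores interrupts with the same fallback chain
the driver uses at probe: MSI-X when cfg_use_msi == 2, then MSI when
cfg_use_msi >= 1, then a shared legacy IRQ; only the MSI-X path skips
request_irq() because lpfc_enable_msix() registers its own vectors.
Condensed sketch (my_setup_irq is illustrative; error logging elided):

	static void my_setup_irq(struct lpfc_hba *phba)
	{
		phba->intr_type = NONE;

		if (phba->cfg_use_msi == 2 && !lpfc_enable_msix(phba))
			phba->intr_type = MSIX;	/* vectors already requested */
		else if (phba->cfg_use_msi >= 1 &&
			 !pci_enable_msi(phba->pcidev))
			phba->intr_type = MSI;

		/* MSI and INTx still need a handler on the PCI irq line. */
		if (phba->intr_type != MSIX &&
		    !request_irq(phba->pcidev->irq, lpfc_intr_handler,
				 IRQF_SHARED, LPFC_DRIVER_NAME, phba) &&
		    phba->intr_type != MSI)
			phba->intr_type = INTx;
	}
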
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index d513813..d08c4c8 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -451,7 +451,7 @@
 			spin_unlock_irq(shost->host_lock);
 
 			if ((ndlp->nlp_flag & NLP_ADISC_SND) &&
-				(vport->num_disc_nodes)) {
+			    (vport->num_disc_nodes)) {
 				/* Check to see if there are more
 				 * ADISCs to be sent
 				 */
@@ -469,20 +469,23 @@
 					lpfc_end_rscn(vport);
 				}
 			}
-			else if (vport->num_disc_nodes) {
-				/* Check to see if there are more
-				 * PLOGIs to be sent
-				 */
-				lpfc_more_plogi(vport);
-
-				if (vport->num_disc_nodes == 0) {
-					spin_lock_irq(shost->host_lock);
-					vport->fc_flag &= ~FC_NDISC_ACTIVE;
-					spin_unlock_irq(shost->host_lock);
-					lpfc_can_disctmo(vport);
-					lpfc_end_rscn(vport);
-				}
-			}
+		}
+	} else if ((ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) &&
+		   (ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
+		   (vport->num_disc_nodes)) {
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+		spin_unlock_irq(shost->host_lock);
+		/* Check to see if there are more
+		 * PLOGIs to be sent
+		 */
+		lpfc_more_plogi(vport);
+		if (vport->num_disc_nodes == 0) {
+			spin_lock_irq(shost->host_lock);
+			vport->fc_flag &= ~FC_NDISC_ACTIVE;
+			spin_unlock_irq(shost->host_lock);
+			lpfc_can_disctmo(vport);
+			lpfc_end_rscn(vport);
 		}
 	}
 
@@ -869,8 +872,11 @@
 
 	lp = (uint32_t *) prsp->virt;
 	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
-	if (wwn_to_u64(sp->portName.u.wwn) == 0 ||
-	    wwn_to_u64(sp->nodeName.u.wwn) == 0) {
+
+	/* Some switches have FDMI servers returning 0 for WWN */
+	if ((ndlp->nlp_DID != FDMI_DID) &&
+		(wwn_to_u64(sp->portName.u.wwn) == 0 ||
+		wwn_to_u64(sp->nodeName.u.wwn) == 0)) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
 				 "0142 PLOGI RSP: Invalid WWN.\n");
 		goto out;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 70255c1..0910a9a 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -169,6 +169,9 @@
 		for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
 			shost = lpfc_shost_from_vport(vports[i]);
 			shost_for_each_device(sdev, shost) {
+				if (vports[i]->cfg_lun_queue_depth <=
+				    sdev->queue_depth)
+					continue;
 				if (sdev->ordered_tags)
 					scsi_adjust_queue_depth(sdev,
 							MSG_ORDERED_TAG,
@@ -578,14 +581,14 @@
 			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
 			    lpfc_cmd->result == RJT_LOGIN_REQUIRED) {
 				cmd->result = ScsiResult(DID_REQUEUE, 0);
-			break;
-		} /* else: fall through */
+				break;
+			} /* else: fall through */
 		default:
 			cmd->result = ScsiResult(DID_ERROR, 0);
 			break;
 		}
 
-		if ((pnode == NULL )
+		if (!pnode || !NLP_CHK_NODE_ACT(pnode)
 		    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
 			cmd->result = ScsiResult(DID_BUS_BUSY, SAM_STAT_BUSY);
 	} else {
@@ -606,6 +609,9 @@
 	result = cmd->result;
 	sdev = cmd->device;
 	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
+	spin_lock_irqsave(sdev->host->host_lock, flags);
+	lpfc_cmd->pCmd = NULL;	/* This must be done before scsi_done */
+	spin_unlock_irqrestore(sdev->host->host_lock, flags);
 	cmd->scsi_done(cmd);
 
 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
@@ -614,7 +620,6 @@
 		 * wake up the thread.
 		 */
 		spin_lock_irqsave(sdev->host->host_lock, flags);
-		lpfc_cmd->pCmd = NULL;
 		if (lpfc_cmd->waitq)
 			wake_up(lpfc_cmd->waitq);
 		spin_unlock_irqrestore(sdev->host->host_lock, flags);
@@ -626,7 +631,7 @@
 	if (!result)
 		lpfc_rampup_queue_depth(vport, sdev);
 
-	if (!result && pnode != NULL &&
+	if (!result && pnode && NLP_CHK_NODE_ACT(pnode) &&
 	   ((jiffies - pnode->last_ramp_up_time) >
 		LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
 	   ((jiffies - pnode->last_q_full_time) >
@@ -654,7 +659,8 @@
 	 * Check for queue full.  If the lun is reporting queue full, then
 	 * back off the lun queue depth to prevent target overloads.
 	 */
-	if (result == SAM_STAT_TASK_SET_FULL && pnode != NULL) {
+	if (result == SAM_STAT_TASK_SET_FULL && pnode &&
+	    NLP_CHK_NODE_ACT(pnode)) {
 		pnode->last_q_full_time = jiffies;
 
 		shost_for_each_device(tmp_sdev, sdev->host) {
@@ -684,7 +690,6 @@
 	 * wake up the thread.
 	 */
 	spin_lock_irqsave(sdev->host->host_lock, flags);
-	lpfc_cmd->pCmd = NULL;
 	if (lpfc_cmd->waitq)
 		wake_up(lpfc_cmd->waitq);
 	spin_unlock_irqrestore(sdev->host->host_lock, flags);
@@ -704,6 +709,9 @@
 	int datadir = scsi_cmnd->sc_data_direction;
 	char tag[2];
 
+	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
+		return;
+
 	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
 	/* clear task management bits */
 	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
@@ -785,9 +793,9 @@
 	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
 	struct lpfc_nodelist *ndlp = rdata->pnode;
 
-	if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
+	    ndlp->nlp_state != NLP_STE_MAPPED_NODE)
 		return 0;
-	}
 
 	piocbq = &(lpfc_cmd->cur_iocbq);
 	piocbq->vport = vport;
@@ -842,7 +850,7 @@
 	struct lpfc_iocbq *iocbqrsp;
 	int ret;
 
-	if (!rdata->pnode)
+	if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
 		return FAILED;
 
 	lpfc_cmd->rdata = rdata;
@@ -959,7 +967,7 @@
 	 * Catch race where our node has transitioned, but the
 	 * transport is still transitioning.
 	 */
-	if (!ndlp) {
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
 		cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
 		goto out_fail_command;
 	}
@@ -1146,7 +1154,7 @@
 	 * target is rediscovered or devloss timeout expires.
 	 */
 	while (1) {
-		if (!pnode)
+		if (!pnode || !NLP_CHK_NODE_ACT(pnode))
 			goto out;
 
 		if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
@@ -1162,7 +1170,7 @@
 				goto out;
 			}
 			pnode = rdata->pnode;
-			if (!pnode)
+			if (!pnode || !NLP_CHK_NODE_ACT(pnode))
 				goto out;
 		}
 		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index fc0d950..70a0a9e 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -2648,7 +2648,6 @@
 	spin_unlock_irq(&phba->pport->work_port_lock);
 	spin_lock_irq(&phba->hbalock);
 	phba->link_state = LPFC_LINK_UNKNOWN;
-	phba->pport->fc_flag |= FC_ESTABLISH_LINK;
 	psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
 	spin_unlock_irq(&phba->hbalock);
 
@@ -2669,8 +2668,7 @@
 	lpfc_offline_prep(phba);
 	lpfc_offline(phba);
 	lpfc_sli_brdrestart(phba);
-	if (lpfc_online(phba) == 0)		/* Initialize the HBA */
-		mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
+	lpfc_online(phba);
 	lpfc_unblock_mgmt_io(phba);
 	return;
 }
@@ -2687,28 +2685,41 @@
 	unsigned long drvr_flag = 0;
 	volatile uint32_t word0, ldata;
 	void __iomem *to_slim;
+	int processing_queue = 0;
+
+	spin_lock_irqsave(&phba->hbalock, drvr_flag);
+	if (!pmbox) {
+		/* processing mbox queue from intr_handler */
+		processing_queue = 1;
+		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+		pmbox = lpfc_mbox_get(phba);
+		if (!pmbox) {
+			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+			return MBX_SUCCESS;
+		}
+	}
 
 	if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
 		pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
 		if(!pmbox->vport) {
+			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
 			lpfc_printf_log(phba, KERN_ERR,
 					LOG_MBOX | LOG_VPORT,
 					"1806 Mbox x%x failed. No vport\n",
 					pmbox->mb.mbxCommand);
 			dump_stack();
-			return MBX_NOT_FINISHED;
+			goto out_not_finished;
 		}
 	}
 
-
 	/* If the PCI channel is in offline state, do not post mbox. */
-	if (unlikely(pci_channel_offline(phba->pcidev)))
-		return MBX_NOT_FINISHED;
+	if (unlikely(pci_channel_offline(phba->pcidev))) {
+		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+		goto out_not_finished;
+	}
 
-	spin_lock_irqsave(&phba->hbalock, drvr_flag);
 	psli = &phba->sli;
 
-
 	mb = &pmbox->mb;
 	status = MBX_SUCCESS;
 
@@ -2717,14 +2728,14 @@
 
 		/* Mbox command <mbxCommand> cannot issue */
 		LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
-		return MBX_NOT_FINISHED;
+		goto out_not_finished;
 	}
 
 	if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
 	    !(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
 		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
 		LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
-		return MBX_NOT_FINISHED;
+		goto out_not_finished;
 	}
 
 	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
@@ -2738,14 +2749,14 @@
 
 			/* Mbox command <mbxCommand> cannot issue */
 			LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
-			return MBX_NOT_FINISHED;
+			goto out_not_finished;
 		}
 
 		if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
 			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
 			/* Mbox command <mbxCommand> cannot issue */
 			LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
-			return MBX_NOT_FINISHED;
+			goto out_not_finished;
 		}
 
 		/* Another mailbox command is still being processed, queue this
@@ -2792,7 +2803,7 @@
 			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
 			/* Mbox command <mbxCommand> cannot issue */
 			LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
-			return MBX_NOT_FINISHED;
+			goto out_not_finished;
 		}
 		/* timeout active mbox command */
 		mod_timer(&psli->mbox_tmo, (jiffies +
@@ -2900,7 +2911,7 @@
 				psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
 				spin_unlock_irqrestore(&phba->hbalock,
 						       drvr_flag);
-				return MBX_NOT_FINISHED;
+				goto out_not_finished;
 			}
 
 			/* Check if we took a mbox interrupt while we were
@@ -2967,6 +2978,13 @@
 
 	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
 	return status;
+
+out_not_finished:
+	if (processing_queue) {
+		pmbox->mb.mbxStatus = MBX_NOT_FINISHED;
+		lpfc_mbox_cmpl_put(phba, pmbox);
+	}
+	return MBX_NOT_FINISHED;
 }
 
 /*
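
lpfc_sli_issue_mbox() now accepts pmbox == NULL to mean "dequeue and issue
the next queued mailbox command", which lets the interrupt handler drain
the queue in a loop; on failure the command is completed through
lpfc_mbox_cmpl_put() with mbxStatus MBX_NOT_FINISHED instead of being
handed back to a caller that no longer owns it. The drain loop, as it
appears later in this diff:

	/* Drain queued mailbox commands from interrupt context. */
	do {
		rc = lpfc_sli_issue_mbox(phba, NULL, MBX_NOWAIT);
	} while (rc == MBX_NOT_FINISHED);
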
@@ -3463,26 +3481,21 @@
 	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
 	spin_unlock(&phba->pport->work_port_lock);
 
+	/* Return any pending or completed mbox cmds */
+	list_splice_init(&phba->sli.mboxq, &completions);
 	if (psli->mbox_active) {
 		list_add_tail(&psli->mbox_active->list, &completions);
 		psli->mbox_active = NULL;
 		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
 	}
-
-	/* Return any pending or completed mbox cmds */
-	list_splice_init(&phba->sli.mboxq, &completions);
 	list_splice_init(&phba->sli.mboxq_cmpl, &completions);
-	INIT_LIST_HEAD(&psli->mboxq);
-	INIT_LIST_HEAD(&psli->mboxq_cmpl);
-
 	spin_unlock_irqrestore(&phba->hbalock, flags);
 
 	while (!list_empty(&completions)) {
 		list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
 		pmb->mb.mbxStatus = MBX_NOT_FINISHED;
-		if (pmb->mbox_cmpl) {
+		if (pmb->mbox_cmpl)
 			pmb->mbox_cmpl(phba,pmb);
-		}
 	}
 	return 1;
 }
@@ -3613,6 +3626,15 @@
 				irsp->ulpStatus, irsp->un.ulpWord[4]);
 
 		/*
+		 * If the iocb is not found in the firmware queue, it
+		 * might have completed already.  Do not free it again.
+		 */
+		if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
+			spin_unlock_irq(&phba->hbalock);
+			lpfc_sli_release_iocbq(phba, cmdiocb);
+			return;
+		}
+		/*
 		 * make sure we have the right iocbq before taking it
 		 * off the txcmplq and try to call completion routine.
 		 */
@@ -4174,6 +4196,7 @@
 			phba->pport->stopped = 1;
 		}
 
+		spin_lock(&phba->hbalock);
 		if ((work_ha_copy & HA_MBATT) &&
 		    (phba->sli.mbox_active)) {
 			pmb = phba->sli.mbox_active;
@@ -4184,6 +4207,7 @@
 			/* First check out the status word */
 			lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
 			if (pmbox->mbxOwner != OWN_HOST) {
+				spin_unlock(&phba->hbalock);
 				/*
 				 * Stray Mailbox Interrupt, mbxCommand <cmd>
 				 * mbxStatus <status>
@@ -4199,10 +4223,10 @@
 				/* clear mailbox attention bit */
 				work_ha_copy &= ~HA_MBATT;
 			} else {
+				phba->sli.mbox_active = NULL;
+				spin_unlock(&phba->hbalock);
 				phba->last_completion_time = jiffies;
 				del_timer(&phba->sli.mbox_tmo);
-
-				phba->sli.mbox_active = NULL;
 				if (pmb->mbox_cmpl) {
 					lpfc_sli_pcimem_bcopy(mbox, pmbox,
 							MAILBOX_CMD_SIZE);
@@ -4237,10 +4261,15 @@
 						pmb->context1 = mp;
 						pmb->context2 = ndlp;
 						pmb->vport = vport;
-						spin_lock(&phba->hbalock);
-						phba->sli.sli_flag &=
-							~LPFC_SLI_MBOX_ACTIVE;
-						spin_unlock(&phba->hbalock);
+						rc = lpfc_sli_issue_mbox(phba,
+								pmb,
+								MBX_NOWAIT);
+						if (rc != MBX_BUSY)
+							lpfc_printf_log(phba,
+							KERN_ERR,
+							LOG_MBOX | LOG_SLI,
+							"0306 rc should have "
+							"been MBX_BUSY\n");
 						goto send_current_mbox;
 					}
 				}
@@ -4250,25 +4279,20 @@
 				spin_unlock(&phba->pport->work_port_lock);
 				lpfc_mbox_cmpl_put(phba, pmb);
 			}
-		}
+		} else
+			spin_unlock(&phba->hbalock);
 		if ((work_ha_copy & HA_MBATT) &&
 		    (phba->sli.mbox_active == NULL)) {
-send_next_mbox:
-			spin_lock(&phba->hbalock);
-			phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
-			pmb = lpfc_mbox_get(phba);
-			spin_unlock(&phba->hbalock);
 send_current_mbox:
 			/* Process next mailbox command if there is one */
-			if (pmb != NULL) {
-				rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
-				if (rc == MBX_NOT_FINISHED) {
-					pmb->mb.mbxStatus = MBX_NOT_FINISHED;
-					lpfc_mbox_cmpl_put(phba, pmb);
-					goto send_next_mbox;
-				}
-			}
-
+			do {
+				rc = lpfc_sli_issue_mbox(phba, NULL,
+							 MBX_NOWAIT);
+			} while (rc == MBX_NOT_FINISHED);
+			if (rc != MBX_SUCCESS)
+				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
+						LOG_SLI, "0349 rc should be "
+						"MBX_SUCCESS\n");
 		}
 
 		spin_lock(&phba->hbalock);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index ca540d1..b22b893 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.2.5"
+#define LPFC_DRIVER_VERSION "8.2.6"
 
 #define LPFC_DRIVER_NAME "lpfc"
 
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 86d05be..6feaf59 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -538,7 +538,8 @@
 	/* Otherwise, we will perform fabric logo as needed */
 	if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
 	    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
-	    phba->link_state >= LPFC_LINK_UP) {
+	    phba->link_state >= LPFC_LINK_UP &&
+	    phba->fc_topology != TOPOLOGY_LOOP) {
 		if (vport->cfg_enable_da_id) {
 			timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
 			if (!lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0))
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index 3b09ab2..0248919 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -592,7 +592,6 @@
 	.this_id			= 7,
 	.sg_tablesize			= SG_ALL,
 	.cmd_per_lun			= CMD_PER_LUN,
-	.unchecked_isa_dma		= 0,
 	.use_clustering			= DISABLE_CLUSTERING
 };
 
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index 77a62a1..b937e9c 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -68,6 +68,8 @@
 	/* xscale IOP */
 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
 	/* ppc IOP */
+	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)},
+	/* ppc IOP */
 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
 	/* xscale IOP, vega */
 	{PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
@@ -488,12 +490,13 @@
 
  /**
  * megasas_get_frame_count - Computes the number of frames
+ * @frame_type		: type of frame - io or pthru frame
  * @sge_count		: number of sg elements
  *
 * Returns the number of frames required for the number of sge's (sge_count)
  */
 
-static u32 megasas_get_frame_count(u8 sge_count)
+static u32 megasas_get_frame_count(u8 sge_count, u8 frame_type)
 {
 	int num_cnt;
 	int sge_bytes;
@@ -504,13 +507,22 @@
 	    sizeof(struct megasas_sge32);
 
 	/*
-	* Main frame can contain 2 SGEs for 64-bit SGLs and
-	* 3 SGEs for 32-bit SGLs
-	*/
-	if (IS_DMA64)
-		num_cnt = sge_count - 2;
-	else
-		num_cnt = sge_count - 3;
+	 * The main frame can contain 2 SGEs for 64-bit SGLs and
+	 * 3 SGEs for 32-bit SGLs for an ldio frame, and
+	 * 1 SGE for 64-bit SGLs and
+	 * 2 SGEs for 32-bit SGLs for a pthru frame.
+	 */
+	if (unlikely(frame_type == PTHRU_FRAME)) {
+		if (IS_DMA64)
+			num_cnt = sge_count - 1;
+		else
+			num_cnt = sge_count - 2;
+	} else {
+		if (IS_DMA64)
+			num_cnt = sge_count - 2;
+		else
+			num_cnt = sge_count - 3;
+	}
 
 	if(num_cnt>0){
 		sge_bytes = sge_sz * num_cnt;
@@ -592,7 +604,8 @@
 	 * Compute the total number of frames this command consumes. FW uses
 	 * this number to pull sufficient number of frames from host memory.
 	 */
-	cmd->frame_count = megasas_get_frame_count(pthru->sge_count);
+	cmd->frame_count = megasas_get_frame_count(pthru->sge_count,
+							PTHRU_FRAME);
 
 	return cmd->frame_count;
 }
@@ -709,7 +722,7 @@
 	 * Compute the total number of frames this command consumes. FW uses
 	 * this number to pull sufficient number of frames from host memory.
 	 */
-	cmd->frame_count = megasas_get_frame_count(ldio->sge_count);
+	cmd->frame_count = megasas_get_frame_count(ldio->sge_count, IO_FRAME);
 
 	return cmd->frame_count;
 }
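
Worked example for megasas_get_frame_count(), assuming 16-byte
megasas_sge64 entries and the 64-byte MEGAMFI_FRAME_SIZE: a pass-through
command with sge_count = 10 on 64-bit SGLs overflows num_cnt = 10 - 1 = 9
SGEs past the main frame, sge_bytes = 144, so ceil(144/64) = 3 extra frames
are needed on top of the main frame, four in all. The arithmetic, sketched:

	/* frames = 1 main frame + ceil(overflow SGE bytes / frame size) */
	static u32 my_frame_count(u32 overflow_sges, u32 sge_sz, u32 frame_sz)
	{
		u32 bytes = overflow_sges * sge_sz;

		return 1 + bytes / frame_sz + (bytes % frame_sz ? 1 : 0);
	}
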
@@ -1460,7 +1473,7 @@
 			instance->instancet->disable_intr(instance->reg_set);
 			writel(MFI_RESET_FLAGS, &instance->reg_set->inbound_doorbell);
 
-			max_wait = 10;
+			max_wait = 60;
 			cur_state = MFI_STATE_OPERATIONAL;
 			break;
 
@@ -1980,7 +1993,8 @@
 
 	switch(instance->pdev->device)
 	{
-		case PCI_DEVICE_ID_LSI_SAS1078R:	
+		case PCI_DEVICE_ID_LSI_SAS1078R:
+		case PCI_DEVICE_ID_LSI_SAS1078DE:
 			instance->instancet = &megasas_instance_template_ppc;
 			break;
 		case PCI_DEVICE_ID_LSI_SAS1064R:
@@ -2909,7 +2923,6 @@
 	void *sense = NULL;
 	dma_addr_t sense_handle;
 	u32 *sense_ptr;
-	unsigned long *sense_buff;
 
 	memset(kbuff_arr, 0, sizeof(kbuff_arr));
 
@@ -3014,14 +3027,14 @@
 	 */
 	if (ioc->sense_len) {
 		/*
-		 * sense_buff points to the location that has the user
+		 * sense_ptr points to the location that has the user
 		 * sense buffer address
 		 */
-		sense_buff = (unsigned long *) ((unsigned long)ioc->frame.raw +
-								ioc->sense_off);
+		sense_ptr = (u32 *) ((unsigned long)ioc->frame.raw +
+				     ioc->sense_off);
 
-		if (copy_to_user((void __user *)(unsigned long)(*sense_buff),
-				sense, ioc->sense_len)) {
+		if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)),
+				 sense, ioc->sense_len)) {
 			printk(KERN_ERR "megasas: Failed to copy out to user "
 					"sense data\n");
 			error = -EFAULT;
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 6466bdf..3a997eb 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -26,6 +26,7 @@
  * Device IDs
  */
 #define	PCI_DEVICE_ID_LSI_SAS1078R		0x0060
+#define	PCI_DEVICE_ID_LSI_SAS1078DE		0x007C
 #define	PCI_DEVICE_ID_LSI_VERDE_ZCR		0x0413
 
 /*
@@ -542,6 +543,10 @@
 
 #define MEGASAS_FW_BUSY				1
 
+/* Frame Type */
+#define IO_FRAME				0
+#define PTHRU_FRAME				1
+
 /*
  * When SCSI mid-layer calls driver's reset routine, driver waits for
  * MEGASAS_RESET_WAIT_TIME seconds for all outstanding IO to complete. Note
diff --git a/drivers/scsi/mvme147.c b/drivers/scsi/mvme147.c
index be41aad..d722235 100644
--- a/drivers/scsi/mvme147.c
+++ b/drivers/scsi/mvme147.c
@@ -82,6 +82,9 @@
     mvme147_host->irq = MVME147_IRQ_SCSI_PORT;
     regs.SASR = (volatile unsigned char *)0xfffe4000;
     regs.SCMD = (volatile unsigned char *)0xfffe4001;
+    HDATA(mvme147_host)->no_sync = 0xff;
+    HDATA(mvme147_host)->fast = 0;
+    HDATA(mvme147_host)->dma_mode = CTRL_DMA;
     wd33c93_init(mvme147_host, regs, dma_setup, dma_stop, WD33C93_FS_8_10);
 
     if (request_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr, 0, "MVME147 SCSI PORT", mvme147_intr))
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c
index fad6cb5..ce48e2d 100644
--- a/drivers/scsi/ps3rom.c
+++ b/drivers/scsi/ps3rom.c
@@ -26,6 +26,7 @@
 #include <scsi/scsi_dbg.h>
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
+#include <scsi/scsi_eh.h>
 
 #include <asm/lv1call.h>
 #include <asm/ps3stor.h>
@@ -90,78 +91,6 @@
 	return 0;
 }
 
-/*
- * copy data from device into scatter/gather buffer
- */
-static int fill_from_dev_buffer(struct scsi_cmnd *cmd, const void *buf)
-{
-	int k, req_len, act_len, len, active;
-	void *kaddr;
-	struct scatterlist *sgpnt;
-	unsigned int buflen;
-
-	buflen = scsi_bufflen(cmd);
-	if (!buflen)
-		return 0;
-
-	if (!scsi_sglist(cmd))
-		return -1;
-
-	active = 1;
-	req_len = act_len = 0;
-	scsi_for_each_sg(cmd, sgpnt, scsi_sg_count(cmd), k) {
-		if (active) {
-			kaddr = kmap_atomic(sg_page(sgpnt), KM_IRQ0);
-			len = sgpnt->length;
-			if ((req_len + len) > buflen) {
-				active = 0;
-				len = buflen - req_len;
-			}
-			memcpy(kaddr + sgpnt->offset, buf + req_len, len);
-			flush_kernel_dcache_page(sg_page(sgpnt));
-			kunmap_atomic(kaddr, KM_IRQ0);
-			act_len += len;
-		}
-		req_len += sgpnt->length;
-	}
-	scsi_set_resid(cmd, buflen - act_len);
-	return 0;
-}
-
-/*
- * copy data from scatter/gather into device's buffer
- */
-static int fetch_to_dev_buffer(struct scsi_cmnd *cmd, void *buf)
-{
-	int k, req_len, len, fin;
-	void *kaddr;
-	struct scatterlist *sgpnt;
-	unsigned int buflen;
-
-	buflen = scsi_bufflen(cmd);
-	if (!buflen)
-		return 0;
-
-	if (!scsi_sglist(cmd))
-		return -1;
-
-	req_len = fin = 0;
-	scsi_for_each_sg(cmd, sgpnt, scsi_sg_count(cmd), k) {
-		kaddr = kmap_atomic(sg_page(sgpnt), KM_IRQ0);
-		len = sgpnt->length;
-		if ((req_len + len) > buflen) {
-			len = buflen - req_len;
-			fin = 1;
-		}
-		memcpy(buf + req_len, kaddr + sgpnt->offset, len);
-		kunmap_atomic(kaddr, KM_IRQ0);
-		if (fin)
-			return req_len + len;
-		req_len += sgpnt->length;
-	}
-	return req_len;
-}
-
 static int ps3rom_atapi_request(struct ps3_storage_device *dev,
 				struct scsi_cmnd *cmd)
 {
@@ -195,9 +124,7 @@
 		else
 			atapi_cmnd.proto = PIO_DATA_OUT_PROTO;
 		atapi_cmnd.in_out = DIR_WRITE;
-		res = fetch_to_dev_buffer(cmd, dev->bounce_buf);
-		if (res < 0)
-			return DID_ERROR << 16;
+		scsi_sg_copy_to_buffer(cmd, dev->bounce_buf, dev->bounce_size);
 		break;
 
 	default:
@@ -269,9 +196,7 @@
 	dev_dbg(&dev->sbd.core, "%s:%u: write %u sectors starting at %u\n",
 		__func__, __LINE__, sectors, start_sector);
 
-	res = fetch_to_dev_buffer(cmd, dev->bounce_buf);
-	if (res < 0)
-		return DID_ERROR << 16;
+	scsi_sg_copy_to_buffer(cmd, dev->bounce_buf, dev->bounce_size);
 
 	res = lv1_storage_write(dev->sbd.dev_id,
 				dev->regions[dev->region_idx].id, start_sector,
@@ -381,11 +306,13 @@
 	if (!status) {
 		/* OK, completed */
 		if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
-			res = fill_from_dev_buffer(cmd, dev->bounce_buf);
-			if (res) {
-				cmd->result = DID_ERROR << 16;
-				goto done;
-			}
+			int len;
+
+			len = scsi_sg_copy_from_buffer(cmd,
+						       dev->bounce_buf,
+						       dev->bounce_size);
+
+			scsi_set_resid(cmd, scsi_bufflen(cmd) - len);
 		}
 		cmd->result = DID_OK << 16;
 		goto done;
@@ -404,11 +331,7 @@
 		goto done;
 	}
 
-	cmd->sense_buffer[0]  = 0x70;
-	cmd->sense_buffer[2]  = sense_key;
-	cmd->sense_buffer[7]  = 16 - 6;
-	cmd->sense_buffer[12] = asc;
-	cmd->sense_buffer[13] = ascq;
+	scsi_build_sense_buffer(0, cmd->sense_buffer, sense_key, asc, ascq);
 	cmd->result = SAM_STAT_CHECK_CONDITION;
 
 done:
@@ -427,7 +350,7 @@
 	.cmd_per_lun =		1,
 	.emulated =             1,		/* only sg driver uses this */
 	.max_sectors =		PS3ROM_MAX_SECTORS,
-	.use_clustering =	DISABLE_CLUSTERING,
+	.use_clustering =	ENABLE_CLUSTERING,
 	.module =		THIS_MODULE,
 };
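
The two removed ps3rom helpers hand-rolled the scatter/gather walk that scsi_sg_copy_to_buffer()/scsi_sg_copy_from_buffer() now provide: copy at most the bounce buffer's worth of data and report how many bytes actually moved, so the completion path can derive a residual. A userspace analogue of that bounded copy-plus-residual logic follows, with a toy segment type standing in for struct scatterlist.

#include <stdio.h>
#include <string.h>

/* Toy stand-in for a scatterlist segment. */
struct seg {
	char		*addr;
	unsigned int	len;
};

/*
 * Copy at most buflen bytes from buf into the segments, in the spirit of
 * scsi_sg_copy_from_buffer(); returns the byte count actually copied.
 */
static unsigned int copy_from_bounce(struct seg *sg, int nseg,
				     const char *buf, unsigned int buflen)
{
	unsigned int copied = 0;
	int i;

	for (i = 0; i < nseg && copied < buflen; i++) {
		unsigned int len = sg[i].len;

		if (len > buflen - copied)
			len = buflen - copied;
		memcpy(sg[i].addr, buf + copied, len);
		copied += len;
	}
	return copied;
}

int main(void)
{
	char a[4], b[4];
	struct seg sg[] = { { a, sizeof(a) }, { b, sizeof(b) } };
	unsigned int request = sizeof(a) + sizeof(b);	/* scsi_bufflen() */
	unsigned int len = copy_from_bounce(sg, 2, "bounce", 6);

	/* the difference is what the driver feeds to scsi_set_resid() */
	printf("copied %u, residual %u\n", len, request - len);
	return 0;
}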
 
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 68c0d09..09ab3ea 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -333,7 +333,6 @@
 
 #include <linux/module.h>
 
-#include <linux/version.h>
 #include <linux/types.h>
 #include <linux/string.h>
 #include <linux/errno.h>
@@ -367,10 +366,6 @@
 #include <asm/sn/io.h>
 #endif
 
-#if LINUX_VERSION_CODE < 0x020600
-#error "Kernels older than 2.6.0 are no longer supported"
-#endif
-
 
 /*
  * Compile time Options:
diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig
index 8c865b9..6208d56 100644
--- a/drivers/scsi/qla2xxx/Kconfig
+++ b/drivers/scsi/qla2xxx/Kconfig
@@ -16,7 +16,8 @@
 	22xx              ql2200_fw.bin
 	2300, 2312, 6312  ql2300_fw.bin
 	2322, 6322        ql2322_fw.bin
-	24xx              ql2400_fw.bin
+	24xx, 54xx        ql2400_fw.bin
+	25xx              ql2500_fw.bin
 
 	Upon request, the driver caches the firmware image until
 	the driver is unloaded.
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 4894dc8..413d8cd 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2005 QLogic Corporation
+ * Copyright (c)  2003-2008 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -849,20 +849,20 @@
 qla2x00_get_host_speed(struct Scsi_Host *shost)
 {
 	scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost));
-	uint32_t speed = 0;
+	u32 speed = FC_PORTSPEED_UNKNOWN;
 
 	switch (ha->link_data_rate) {
 	case PORT_SPEED_1GB:
-		speed = 1;
+		speed = FC_PORTSPEED_1GBIT;
 		break;
 	case PORT_SPEED_2GB:
-		speed = 2;
+		speed = FC_PORTSPEED_2GBIT;
 		break;
 	case PORT_SPEED_4GB:
-		speed = 4;
+		speed = FC_PORTSPEED_4GBIT;
 		break;
 	case PORT_SPEED_8GB:
-		speed = 8;
+		speed = FC_PORTSPEED_8GBIT;
 		break;
 	}
 	fc_host_speed(shost) = speed;
@@ -900,7 +900,8 @@
 	u64 node_name = 0;
 
 	list_for_each_entry(fcport, &ha->fcports, list) {
-		if (starget->id == fcport->os_target_id) {
+		if (fcport->rport &&
+		    starget->id == fcport->rport->scsi_target_id) {
 			node_name = wwn_to_u64(fcport->node_name);
 			break;
 		}
@@ -918,7 +919,8 @@
 	u64 port_name = 0;
 
 	list_for_each_entry(fcport, &ha->fcports, list) {
-		if (starget->id == fcport->os_target_id) {
+		if (fcport->rport &&
+		    starget->id == fcport->rport->scsi_target_id) {
 			port_name = wwn_to_u64(fcport->port_name);
 			break;
 		}
@@ -936,7 +938,8 @@
 	uint32_t port_id = ~0U;
 
 	list_for_each_entry(fcport, &ha->fcports, list) {
-		if (starget->id == fcport->os_target_id) {
+		if (fcport->rport &&
+		    starget->id == fcport->rport->scsi_target_id) {
 			port_id = fcport->d_id.b.domain << 16 |
 			    fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
 			break;
@@ -1196,6 +1199,7 @@
 	.show_host_node_name = 1,
 	.show_host_port_name = 1,
 	.show_host_supported_classes = 1,
+	.show_host_supported_speeds = 1,
 
 	.get_host_port_id = qla2x00_get_host_port_id,
 	.show_host_port_id = 1,
@@ -1276,9 +1280,23 @@
 void
 qla2x00_init_host_attr(scsi_qla_host_t *ha)
 {
+	u32 speed = FC_PORTSPEED_UNKNOWN;
+
 	fc_host_node_name(ha->host) = wwn_to_u64(ha->node_name);
 	fc_host_port_name(ha->host) = wwn_to_u64(ha->port_name);
 	fc_host_supported_classes(ha->host) = FC_COS_CLASS3;
 	fc_host_max_npiv_vports(ha->host) = ha->max_npiv_vports;
 	fc_host_npiv_vports_inuse(ha->host) = ha->cur_vport_count;
+
+	if (IS_QLA25XX(ha))
+		speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
+		    FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
+	else if (IS_QLA24XX_TYPE(ha))
+		speed = FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
+		    FC_PORTSPEED_1GBIT;
+	else if (IS_QLA23XX(ha))
+		speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
+	else
+		speed = FC_PORTSPEED_1GBIT;
+	fc_host_supported_speeds(ha->host) = speed;
 }
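
Both hunks above move the driver from reporting raw gigabit numbers to the FC transport class's FC_PORTSPEED_* bitmask vocabulary, which lets a single field describe a whole set of supported speeds. A compact standalone sketch of the same per-family mask construction; the flag values here are illustrative stand-ins, the real ones come from scsi_transport_fc.h.

#include <stdio.h>

/* Illustrative flag values; not the real scsi_transport_fc.h constants. */
enum {
	SPEED_1GBIT	= 1 << 0,
	SPEED_2GBIT	= 1 << 1,
	SPEED_4GBIT	= 1 << 2,
	SPEED_8GBIT	= 1 << 3,
};

enum chip { QLA23XX, QLA24XX_TYPE, QLA25XX, OTHER };

/* Same shape as the qla2x00_init_host_attr() hunk above. */
static unsigned int supported_speeds(enum chip c)
{
	switch (c) {
	case QLA25XX:
		return SPEED_8GBIT | SPEED_4GBIT | SPEED_2GBIT | SPEED_1GBIT;
	case QLA24XX_TYPE:
		return SPEED_4GBIT | SPEED_2GBIT | SPEED_1GBIT;
	case QLA23XX:
		return SPEED_2GBIT | SPEED_1GBIT;
	default:
		return SPEED_1GBIT;
	}
}

int main(void)
{
	printf("25xx supported-speed mask: 0x%x\n", supported_speeds(QLA25XX));
	return 0;
}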
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index d88e98c..9d12d9f 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2005 QLogic Corporation
+ * Copyright (c)  2003-2008 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -1410,125 +1410,3 @@
 	if (cnt % 16)
 		printk("\n");
 }
-
-/**************************************************************************
- *   qla2x00_print_scsi_cmd
- *	 Dumps out info about the scsi cmd and srb.
- *   Input
- *	 cmd : struct scsi_cmnd
- **************************************************************************/
-void
-qla2x00_print_scsi_cmd(struct scsi_cmnd * cmd)
-{
-	int i;
-	struct scsi_qla_host *ha;
-	srb_t *sp;
-
-	ha = shost_priv(cmd->device->host);
-
-	sp = (srb_t *) cmd->SCp.ptr;
-	printk("SCSI Command @=0x%p, Handle=0x%p\n", cmd, cmd->host_scribble);
-	printk("  chan=0x%02x, target=0x%02x, lun=0x%02x, cmd_len=0x%02x\n",
-	    cmd->device->channel, cmd->device->id, cmd->device->lun,
-	    cmd->cmd_len);
-	printk(" CDB: ");
-	for (i = 0; i < cmd->cmd_len; i++) {
-		printk("0x%02x ", cmd->cmnd[i]);
-	}
-	printk("\n  seg_cnt=%d, allowed=%d, retries=%d\n",
-	       scsi_sg_count(cmd), cmd->allowed, cmd->retries);
-	printk("  request buffer=0x%p, request buffer len=0x%x\n",
-	       scsi_sglist(cmd), scsi_bufflen(cmd));
-	printk("  tag=%d, transfersize=0x%x\n",
-	    cmd->tag, cmd->transfersize);
-	printk("  serial_number=%lx, SP=%p\n", cmd->serial_number, sp);
-	printk("  data direction=%d\n", cmd->sc_data_direction);
-
-	if (!sp)
-		return;
-
-	printk("  sp flags=0x%x\n", sp->flags);
-}
-
-#if defined(QL_DEBUG_ROUTINES)
-/*
- * qla2x00_formatted_dump_buffer
- *       Prints string plus buffer.
- *
- * Input:
- *       string  = Null terminated string (no newline at end).
- *       buffer  = buffer address.
- *       wd_size = word size 8, 16, 32 or 64 bits
- *       count   = number of words.
- */
-void
-qla2x00_formatted_dump_buffer(char *string, uint8_t * buffer,
-				uint8_t wd_size, uint32_t count)
-{
-	uint32_t cnt;
-	uint16_t *buf16;
-	uint32_t *buf32;
-
-	if (strcmp(string, "") != 0)
-		printk("%s\n",string);
-
-	switch (wd_size) {
-		case 8:
-			printk(" 0    1    2    3    4    5    6    7    "
-				"8    9    Ah   Bh   Ch   Dh   Eh   Fh\n");
-			printk("-----------------------------------------"
-				"-------------------------------------\n");
-
-			for (cnt = 1; cnt <= count; cnt++, buffer++) {
-				printk("%02x",*buffer);
-				if (cnt % 16 == 0)
-					printk("\n");
-				else
-					printk("  ");
-			}
-			if (cnt % 16 != 0)
-				printk("\n");
-			break;
-		case 16:
-			printk("   0      2      4      6      8      Ah "
-				"	Ch     Eh\n");
-			printk("-----------------------------------------"
-				"-------------\n");
-
-			buf16 = (uint16_t *) buffer;
-			for (cnt = 1; cnt <= count; cnt++, buf16++) {
-				printk("%4x",*buf16);
-
-				if (cnt % 8 == 0)
-					printk("\n");
-				else if (*buf16 < 10)
-					printk("   ");
-				else
-					printk("  ");
-			}
-			if (cnt % 8 != 0)
-				printk("\n");
-			break;
-		case 32:
-			printk("       0          4          8          Ch\n");
-			printk("------------------------------------------\n");
-
-			buf32 = (uint32_t *) buffer;
-			for (cnt = 1; cnt <= count; cnt++, buf32++) {
-				printk("%8x", *buf32);
-
-				if (cnt % 4 == 0)
-					printk("\n");
-				else if (*buf32 < 10)
-					printk("   ");
-				else
-					printk("  ");
-			}
-			if (cnt % 4 != 0)
-				printk("\n");
-			break;
-		default:
-			break;
-	}
-}
-#endif
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 524598a..2e9c0c0 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2005 QLogic Corporation
+ * Copyright (c)  2003-2008 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -22,19 +22,7 @@
 /* #define QL_DEBUG_LEVEL_13 */ /* Output fdmi function trace msgs */
 /* #define QL_DEBUG_LEVEL_14 */ /* Output RSCN trace msgs */
 /* #define QL_DEBUG_LEVEL_15 */ /* Output NPIV trace msgs */
-/*
- *  Local Macro Definitions.
- */
-#if defined(QL_DEBUG_LEVEL_1)  || defined(QL_DEBUG_LEVEL_2) || \
-    defined(QL_DEBUG_LEVEL_3)  || defined(QL_DEBUG_LEVEL_4) || \
-    defined(QL_DEBUG_LEVEL_5)  || defined(QL_DEBUG_LEVEL_6) || \
-    defined(QL_DEBUG_LEVEL_7)  || defined(QL_DEBUG_LEVEL_8) || \
-    defined(QL_DEBUG_LEVEL_9)  || defined(QL_DEBUG_LEVEL_10) || \
-    defined(QL_DEBUG_LEVEL_11) || defined(QL_DEBUG_LEVEL_12) || \
-    defined(QL_DEBUG_LEVEL_13) || defined(QL_DEBUG_LEVEL_14) || \
-    defined(QL_DEBUG_LEVEL_15)
-    #define QL_DEBUG_ROUTINES
-#endif
+/* #define QL_DEBUG_LEVEL_16 */ /* Output ISP84XX trace msgs */
 
 /*
 * Macros use for debugging the driver.
@@ -54,6 +42,7 @@
 #define DEBUG2_9_10(x)	do { if (ql2xextended_error_logging) { x; } } while (0)
 #define DEBUG2_11(x)	do { if (ql2xextended_error_logging) { x; } } while (0)
 #define DEBUG2_13(x)	do { if (ql2xextended_error_logging) { x; } } while (0)
+#define DEBUG2_16(x)	do { if (ql2xextended_error_logging) { x; } } while (0)
 
 #if defined(QL_DEBUG_LEVEL_3)
 #define DEBUG3(x)	do {x;} while (0)
@@ -133,6 +122,12 @@
 #define DEBUG15(x)	do {} while (0)
 #endif
 
+#if defined(QL_DEBUG_LEVEL_16)
+#define DEBUG16(x)	do {x;} while (0)
+#else
+#define DEBUG16(x)	do {} while (0)
+#endif
+
 /*
  * Firmware Dump structure definition
  */
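
The DEBUG16() addition follows the file's existing pattern: each level expands to its statement only when the matching QL_DEBUG_LEVEL_n gate is defined, and the do { } while (0) wrapper keeps the macro behaving as one statement inside unbraced if/else bodies. A self-contained illustration of the idiom; the gate name is reused here purely for the example.

#include <stdio.h>

#define QL_DEBUG_LEVEL_16	/* comment this out to compile the trace away */

#if defined(QL_DEBUG_LEVEL_16)
#define DEBUG16(x)	do { x; } while (0)
#else
#define DEBUG16(x)	do { } while (0)
#endif

int main(void)
{
	if (1)
		DEBUG16(printf("ISP84XX trace enabled\n"));	/* safe in if/else */
	else
		printf("never reached\n");
	return 0;
}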
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 3750319..094d95f 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2005 QLogic Corporation
+ * Copyright (c)  2003-2008 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -24,6 +24,7 @@
 #include <linux/workqueue.h>
 #include <linux/firmware.h>
 #include <linux/aer.h>
+#include <linux/mutex.h>
 #include <asm/semaphore.h>
 
 #include <scsi/scsi.h>
@@ -192,9 +193,6 @@
 
 	uint16_t flags;
 
-	/* Single transfer DMA context */
-	dma_addr_t dma_handle;
-
 	uint32_t request_sense_length;
 	uint8_t *request_sense_ptr;
 } srb_t;
@@ -1542,8 +1540,6 @@
 	atomic_t state;
 	uint32_t flags;
 
-	unsigned int os_target_id;
-
 	int port_login_retry_count;
 	int login_retry;
 	atomic_t port_down_timer;
@@ -1613,6 +1609,7 @@
 #define CT_ACCEPT_RESPONSE	0x8002
 #define CT_REASON_INVALID_COMMAND_CODE	0x01
 #define CT_REASON_CANNOT_PERFORM	0x09
+#define CT_REASON_COMMAND_UNSUPPORTED	0x0b
 #define CT_EXPL_ALREADY_REGISTERED	0x10
 
 #define NS_N_PORT_TYPE	0x01
@@ -2063,7 +2060,8 @@
 	void (*disable_intrs) (struct scsi_qla_host *);
 
 	int (*abort_command) (struct scsi_qla_host *, srb_t *);
-	int (*abort_target) (struct fc_port *);
+	int (*target_reset) (struct fc_port *, unsigned int);
+	int (*lun_reset) (struct fc_port *, unsigned int);
 	int (*fabric_login) (struct scsi_qla_host *, uint16_t, uint8_t,
 		uint8_t, uint8_t, uint16_t *, uint8_t);
 	int (*fabric_logout) (struct scsi_qla_host *, uint16_t, uint8_t,
@@ -2117,6 +2115,46 @@
 
 #define	WATCH_INTERVAL		1       /* number of seconds */
 
+/* Work events.  */
+enum qla_work_type {
+	QLA_EVT_AEN,
+	QLA_EVT_HWE_LOG,
+};
+
+
+struct qla_work_evt {
+	struct list_head	list;
+	enum qla_work_type	type;
+	u32			flags;
+#define QLA_EVT_FLAG_FREE	0x1
+
+	union {
+		struct {
+			enum fc_host_event_code code;
+			u32 data;
+		} aen;
+		struct {
+			uint16_t code;
+			uint16_t d1, d2, d3;
+		} hwe;
+	} u;
+};
+
+struct qla_chip_state_84xx {
+	struct list_head list;
+	struct kref kref;
+
+	void *bus;
+	spinlock_t access_lock;
+	struct mutex fw_update_mutex;
+	uint32_t fw_update;
+	uint32_t op_fw_version;
+	uint32_t op_fw_size;
+	uint32_t op_fw_seq_size;
+	uint32_t diag_fw_version;
+	uint32_t gold_fw_version;
+};
+
 /*
  * Linux Host Adapter structure
  */
@@ -2155,6 +2193,7 @@
 		uint32_t        vsan_enabled            :1;
 		uint32_t	npiv_supported		:1;
 		uint32_t	fce_enabled		:1;
+		uint32_t	hw_event_marker_found	:1;
 	} flags;
 
 	atomic_t	loop_state;
@@ -2204,6 +2243,7 @@
 #define	DFLG_NO_CABLE			BIT_4
 
 #define PCI_DEVICE_ID_QLOGIC_ISP2532	0x2532
+#define PCI_DEVICE_ID_QLOGIC_ISP8432	0x8432
 	uint32_t	device_type;
 #define DT_ISP2100			BIT_0
 #define DT_ISP2200			BIT_1
@@ -2217,7 +2257,8 @@
 #define DT_ISP5422			BIT_9
 #define DT_ISP5432			BIT_10
 #define DT_ISP2532			BIT_11
-#define DT_ISP_LAST			(DT_ISP2532 << 1)
+#define DT_ISP8432			BIT_12
+#define DT_ISP_LAST			(DT_ISP8432 << 1)
 
 #define DT_IIDMA			BIT_26
 #define DT_FWI2				BIT_27
@@ -2239,12 +2280,16 @@
 #define IS_QLA5422(ha)	(DT_MASK(ha) & DT_ISP5422)
 #define IS_QLA5432(ha)	(DT_MASK(ha) & DT_ISP5432)
 #define IS_QLA2532(ha)	(DT_MASK(ha) & DT_ISP2532)
+#define IS_QLA8432(ha)	(DT_MASK(ha) & DT_ISP8432)
 
 #define IS_QLA23XX(ha)	(IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
     			 IS_QLA6312(ha) || IS_QLA6322(ha))
 #define IS_QLA24XX(ha)	(IS_QLA2422(ha) || IS_QLA2432(ha))
 #define IS_QLA54XX(ha)	(IS_QLA5422(ha) || IS_QLA5432(ha))
 #define IS_QLA25XX(ha)	(IS_QLA2532(ha))
+#define IS_QLA84XX(ha)	(IS_QLA8432(ha))
+#define IS_QLA24XX_TYPE(ha)	(IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
+			IS_QLA84XX(ha))
 
 #define IS_IIDMA_CAPABLE(ha)	((ha)->device_type & DT_IIDMA)
 #define IS_FWI2_CAPABLE(ha)	((ha)->device_type & DT_FWI2)
@@ -2356,6 +2401,8 @@
         uint32_t	login_retry_count;
 	int		max_q_depth;
 
+	struct list_head	work_list;
+
 	/* Fibre Channel Device List. */
 	struct list_head	fcports;
 
@@ -2423,8 +2470,6 @@
 #define  MBX_TIMEDOUT		BIT_5
 #define  MBX_ACCESS_TIMEDOUT	BIT_6
 
-	mbx_cmd_t 	mc;
-
 	/* Basic firmware related information. */
 	uint16_t	fw_major_version;
 	uint16_t	fw_minor_version;
@@ -2458,6 +2503,10 @@
 	uint64_t	fce_wr, fce_rd;
 	struct mutex	fce_mutex;
 
+	uint32_t	hw_event_start;
+	uint32_t	hw_event_ptr;
+	uint32_t	hw_event_pause_errors;
+
 	uint8_t		host_str[16];
 	uint32_t	pci_attr;
 	uint16_t	chip_revision;
@@ -2493,6 +2542,13 @@
 	uint8_t		fcode_revision[16];
 	uint32_t	fw_revision[4];
 
+	uint16_t	fdt_odd_index;
+	uint32_t	fdt_wrt_disable;
+	uint32_t	fdt_erase_cmd;
+	uint32_t	fdt_block_size;
+	uint32_t	fdt_unprotect_sec_cmd;
+	uint32_t	fdt_protect_sec_cmd;
+
 	/* Needed for BEACON */
 	uint16_t	beacon_blink_led;
 	uint8_t		beacon_color_state;
@@ -2538,6 +2594,8 @@
 #define VP_ERR_ADAP_NORESOURCES	5
 	uint16_t	max_npiv_vports;	/* 63 or 125 per topology */
 	int		cur_vport_count;
+
+	struct qla_chip_state_84xx *cs84xx;
 } scsi_qla_host_t;
 
 
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 2cd899b..561a441 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2005 QLogic Corporation
+ * Copyright (c)  2003-2008 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 9337e13..078f2a1 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2005 QLogic Corporation
+ * Copyright (c)  2003-2008 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -719,7 +719,7 @@
 
 	uint16_t timeout;		/* Command timeout. */
 
-	uint8_t lun[8];			/* FCP LUN (BE). */
+	struct scsi_lun lun;		/* FCP LUN (BE). */
 
 	uint32_t control_flags;		/* Control Flags. */
 #define TCF_NOTMCMD_TO_TARGET	BIT_31
@@ -793,7 +793,19 @@
 #define FA_VPD_NVRAM_ADDR	0x48000
 #define FA_FEATURE_ADDR		0x4C000
 #define FA_FLASH_DESCR_ADDR	0x50000
-#define FA_HW_EVENT_ADDR	0x54000
+#define FA_HW_EVENT0_ADDR	0x54000
+#define FA_HW_EVENT1_ADDR	0x54200
+#define FA_HW_EVENT_SIZE	0x200
+#define FA_HW_EVENT_ENTRY_SIZE	4
+/*
+ * Flash Error Log Event Codes.
+ */
+#define HW_EVENT_RESET_ERR	0xF00B
+#define HW_EVENT_ISP_ERR	0xF020
+#define HW_EVENT_PARITY_ERR	0xF022
+#define HW_EVENT_NVRAM_CHKSUM_ERR	0xF023
+#define HW_EVENT_FLASH_FW_ERR	0xF024
+
 #define FA_BOOT_LOG_ADDR	0x58000
 #define FA_FW_DUMP0_ADDR	0x60000
 #define FA_FW_DUMP1_ADDR	0x70000
@@ -1174,4 +1186,159 @@
 };
 
 /* END MID Support ***********************************************************/
+
+/* Flash Description Table ***************************************************/
+
+struct qla_fdt_layout {
+	uint8_t sig[4];
+	uint16_t version;
+	uint16_t len;
+	uint16_t checksum;
+	uint8_t unused1[2];
+	uint8_t model[16];
+	uint16_t man_id;
+	uint16_t id;
+	uint8_t flags;
+	uint8_t erase_cmd;
+	uint8_t alt_erase_cmd;
+	uint8_t wrt_enable_cmd;
+	uint8_t wrt_enable_bits;
+	uint8_t wrt_sts_reg_cmd;
+	uint8_t unprotect_sec_cmd;
+	uint8_t read_man_id_cmd;
+	uint32_t block_size;
+	uint32_t alt_block_size;
+	uint32_t flash_size;
+	uint32_t wrt_enable_data;
+	uint8_t read_id_addr_len;
+	uint8_t wrt_disable_bits;
+	uint8_t read_dev_id_len;
+	uint8_t chip_erase_cmd;
+	uint16_t read_timeout;
+	uint8_t protect_sec_cmd;
+	uint8_t unused2[65];
+};
+
+/* 84XX Support **************************************************************/
+
+#define MBA_ISP84XX_ALERT	0x800f  /* Alert Notification. */
+#define A84_PANIC_RECOVERY	0x1
+#define A84_OP_LOGIN_COMPLETE	0x2
+#define A84_DIAG_LOGIN_COMPLETE	0x3
+#define A84_GOLD_LOGIN_COMPLETE	0x4
+
+#define MBC_ISP84XX_RESET	0x3a    /* Reset. */
+
+#define FSTATE_REMOTE_FC_DOWN	BIT_0
+#define FSTATE_NSL_LINK_DOWN	BIT_1
+#define FSTATE_IS_DIAG_FW	BIT_2
+#define FSTATE_LOGGED_IN	BIT_3
+#define FSTATE_WAITING_FOR_VERIFY	BIT_4
+
+#define VERIFY_CHIP_IOCB_TYPE	0x1B
+struct verify_chip_entry_84xx {
+	uint8_t entry_type;
+	uint8_t entry_count;
+	uint8_t sys_defined;
+	uint8_t entry_status;
+
+	uint32_t handle;
+
+	uint16_t options;
+#define VCO_DONT_UPDATE_FW	BIT_0
+#define VCO_FORCE_UPDATE	BIT_1
+#define VCO_DONT_RESET_UPDATE	BIT_2
+#define VCO_DIAG_FW		BIT_3
+#define VCO_END_OF_DATA		BIT_14
+#define VCO_ENABLE_DSD		BIT_15
+
+	uint16_t reserved_1;
+
+	uint16_t data_seg_cnt;
+	uint16_t reserved_2[3];
+
+	uint32_t fw_ver;
+	uint32_t exchange_address;
+
+	uint32_t reserved_3[3];
+	uint32_t fw_size;
+	uint32_t fw_seq_size;
+	uint32_t relative_offset;
+
+	uint32_t dseg_address[2];
+	uint32_t dseg_length;
+};
+
+struct verify_chip_rsp_84xx {
+	uint8_t entry_type;
+	uint8_t entry_count;
+	uint8_t sys_defined;
+	uint8_t entry_status;
+
+	uint32_t handle;
+
+	uint16_t comp_status;
+#define CS_VCS_CHIP_FAILURE	0x3
+#define CS_VCS_BAD_EXCHANGE	0x8
+#define CS_VCS_SEQ_COMPLETE	0x40
+
+	uint16_t failure_code;
+#define VFC_CHECKSUM_ERROR	0x1
+#define VFC_INVALID_LEN		0x2
+#define VFC_ALREADY_IN_PROGRESS	0x8
+
+	uint16_t reserved_1[4];
+
+	uint32_t fw_ver;
+	uint32_t exchange_address;
+
+	uint32_t reserved_2[6];
+};
+
+#define ACCESS_CHIP_IOCB_TYPE	0x2B
+struct access_chip_84xx {
+	uint8_t entry_type;
+	uint8_t entry_count;
+	uint8_t sys_defined;
+	uint8_t entry_status;
+
+	uint32_t handle;
+
+	uint16_t options;
+#define ACO_DUMP_MEMORY		0x0
+#define ACO_LOAD_MEMORY		0x1
+#define ACO_CHANGE_CONFIG_PARAM	0x2
+#define ACO_REQUEST_INFO	0x3
+
+	uint16_t reserved1;
+
+	uint16_t dseg_count;
+	uint16_t reserved2[3];
+
+	uint32_t parameter1;
+	uint32_t parameter2;
+	uint32_t parameter3;
+
+	uint32_t reserved3[3];
+	uint32_t total_byte_cnt;
+	uint32_t reserved4;
+
+	uint32_t dseg_address[2];
+	uint32_t dseg_length;
+};
+
+struct access_chip_rsp_84xx {
+	uint8_t entry_type;
+	uint8_t entry_count;
+	uint8_t sys_defined;
+	uint8_t entry_status;
+
+	uint32_t handle;
+
+	uint16_t comp_status;
+	uint16_t failure_code;
+	uint32_t residual_count;
+
+	uint32_t reserved[12];
+};
 #endif
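
The new 84XX IOCBs are laid out as fixed 64-byte request-queue entries, the usual FWI-2 slot size (stated here as an assumption, since the diff itself does not spell it out). A standalone compile-time check of the verify_chip_entry_84xx layout above, restated with standard fixed-width types:

#include <stdint.h>

/* Field-for-field copy of struct verify_chip_entry_84xx from the hunk above. */
struct verify_chip_entry_84xx {
	uint8_t entry_type;
	uint8_t entry_count;
	uint8_t sys_defined;
	uint8_t entry_status;
	uint32_t handle;
	uint16_t options;
	uint16_t reserved_1;
	uint16_t data_seg_cnt;
	uint16_t reserved_2[3];
	uint32_t fw_ver;
	uint32_t exchange_address;
	uint32_t reserved_3[3];
	uint32_t fw_size;
	uint32_t fw_seq_size;
	uint32_t relative_offset;
	uint32_t dseg_address[2];
	uint32_t dseg_length;
};

/* Every request-queue entry must occupy exactly one 64-byte IOCB slot. */
_Static_assert(sizeof(struct verify_chip_entry_84xx) == 64,
	       "verify-chip IOCB must occupy one 64-byte slot");

int main(void) { return 0; }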
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 193f688..a9571c2 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2005 QLogic Corporation
+ * Copyright (c)  2003-2008 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -38,9 +38,6 @@
 extern int qla2x00_fabric_login(scsi_qla_host_t *, fc_port_t *, uint16_t *);
 extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *);
 
-extern void qla2x00_restart_queues(scsi_qla_host_t *, uint8_t);
-
-extern void qla2x00_rescan_fcports(scsi_qla_host_t *);
 extern void qla2x00_update_fcports(scsi_qla_host_t *);
 
 extern int qla2x00_abort_isp(scsi_qla_host_t *);
@@ -50,6 +47,8 @@
 extern void qla2x00_alloc_fw_dump(scsi_qla_host_t *);
 extern void qla2x00_try_to_stop_firmware(scsi_qla_host_t *);
 
+extern void qla84xx_put_chip(struct scsi_qla_host *);
+
 /*
  * Global Data in qla_os.c source file.
  */
@@ -67,6 +66,10 @@
 
 extern int qla2x00_loop_reset(scsi_qla_host_t *);
 extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
+extern int qla2x00_post_aen_work(struct scsi_qla_host *, enum
+    fc_host_event_code, u32);
+extern int qla2x00_post_hwe_work(struct scsi_qla_host *, uint16_t , uint16_t,
+    uint16_t, uint16_t);
 
 /*
  * Global Functions in qla_mid.c source file.
@@ -149,12 +152,17 @@
 qla2x00_issue_iocb(scsi_qla_host_t *, void *, dma_addr_t, size_t);
 
 extern int
+qla2x00_issue_iocb_timeout(scsi_qla_host_t *, void *, dma_addr_t, size_t,
+    uint32_t);
+
+extern int
 qla2x00_abort_command(scsi_qla_host_t *, srb_t *);
 
-#if USE_ABORT_TGT
 extern int
-qla2x00_abort_target(fc_port_t *);
-#endif
+qla2x00_abort_target(struct fc_port *, unsigned int);
+
+extern int
+qla2x00_lun_reset(struct fc_port *, unsigned int);
 
 extern int
 qla2x00_get_adapter_id(scsi_qla_host_t *, uint16_t *, uint8_t *, uint8_t *,
@@ -220,7 +228,8 @@
     dma_addr_t);
 
 extern int qla24xx_abort_command(scsi_qla_host_t *, srb_t *);
-extern int qla24xx_abort_target(fc_port_t *);
+extern int qla24xx_abort_target(struct fc_port *, unsigned int);
+extern int qla24xx_lun_reset(struct fc_port *, unsigned int);
 
 extern int
 qla2x00_set_serdes_params(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t);
@@ -246,6 +255,8 @@
 extern int
 qla2x00_set_idma_speed(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t *);
 
+extern int qla84xx_verify_chip(struct scsi_qla_host *, uint16_t *);
+
 /*
  * Global Function Prototypes in qla_isr.c source file.
  */
@@ -298,6 +309,11 @@
 extern int qla2x00_get_flash_version(scsi_qla_host_t *, void *);
 extern int qla24xx_get_flash_version(scsi_qla_host_t *, void *);
 
+extern int qla2xxx_hw_event_log(scsi_qla_host_t *, uint16_t , uint16_t,
+    uint16_t, uint16_t);
+
+extern void qla2xxx_get_flash_info(scsi_qla_host_t *);
+
 /*
  * Global Function Prototypes in qla_dbg.c source file.
  */
@@ -307,7 +323,6 @@
 extern void qla25xx_fw_dump(scsi_qla_host_t *, int);
 extern void qla2x00_dump_regs(scsi_qla_host_t *);
 extern void qla2x00_dump_buffer(uint8_t *, uint32_t);
-extern void qla2x00_print_scsi_cmd(struct scsi_cmnd *);
 
 /*
  * Global Function Prototypes in qla_gs.c source file.
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index c180876..750d7ef 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1,17 +1,11 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2005 QLogic Corporation
+ * Copyright (c)  2003-2008 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
 #include "qla_def.h"
 
-static inline struct ct_sns_req *
-qla2x00_prep_ct_req(struct ct_sns_req *, uint16_t, uint16_t);
-
-static inline struct sns_cmd_pkt *
-qla2x00_prep_sns_cmd(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t);
-
 static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *);
 static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *);
 static int qla2x00_sns_gpn_id(scsi_qla_host_t *, sw_info_t *);
@@ -1538,7 +1532,7 @@
 		eiter->a.sup_speed = __constant_cpu_to_be32(
 		    FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB|
 		    FDMI_PORT_SPEED_4GB|FDMI_PORT_SPEED_8GB);
-	else if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
+	else if (IS_QLA24XX_TYPE(ha))
 		eiter->a.sup_speed = __constant_cpu_to_be32(
 		    FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB|
 		    FDMI_PORT_SPEED_4GB);
@@ -1847,8 +1841,10 @@
 		    "GPSC")) != QLA_SUCCESS) {
 			/* FM command unsupported? */
 			if (rval == QLA_INVALID_COMMAND &&
-			    ct_rsp->header.reason_code ==
-			    CT_REASON_INVALID_COMMAND_CODE) {
+			    (ct_rsp->header.reason_code ==
+				CT_REASON_INVALID_COMMAND_CODE ||
+			     ct_rsp->header.reason_code ==
+				CT_REASON_COMMAND_UNSUPPORTED)) {
 				DEBUG2(printk("scsi(%ld): GPSC command "
 				    "unsupported, disabling query...\n",
 				    ha->host_no));
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 364be7d..01e2608 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2005 QLogic Corporation
+ * Copyright (c)  2003-2008 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -15,14 +15,6 @@
 #include <asm/prom.h>
 #endif
 
-/* XXX(hch): this is ugly, but we don't want to pull in exioctl.h */
-#ifndef EXT_IS_LUN_BIT_SET
-#define EXT_IS_LUN_BIT_SET(P,L) \
-    (((P)->mask[L/8] & (0x80 >> (L%8)))?1:0)
-#define EXT_SET_LUN_BIT(P,L) \
-    ((P)->mask[L/8] |= (0x80 >> (L%8)))
-#endif
-
 /*
 *  QLogic ISP2x00 Hardware Support Function Prototypes.
 */
@@ -45,6 +37,9 @@
 
 static int qla2x00_find_new_loop_id(scsi_qla_host_t *ha, fc_port_t *dev);
 
+static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
+static int qla84xx_init_chip(scsi_qla_host_t *);
+
 /****************************************************************************/
 /*                QLogic ISP2x00 Hardware Support Functions.                */
 /****************************************************************************/
@@ -114,6 +109,15 @@
 		rval = qla2x00_setup_chip(ha);
 		if (rval)
 			return (rval);
+		qla2xxx_get_flash_info(ha);
+	}
+	if (IS_QLA84XX(ha)) {
+		ha->cs84xx = qla84xx_get_chip(ha);
+		if (!ha->cs84xx) {
+			qla_printk(KERN_ERR, ha,
+			    "Unable to configure ISP84XX.\n");
+			return QLA_FUNCTION_FAILED;
+		}
 	}
 	rval = qla2x00_init_rings(ha);
 
@@ -500,6 +504,7 @@
 static inline void
 qla24xx_reset_risc(scsi_qla_host_t *ha)
 {
+	int hw_evt = 0;
 	unsigned long flags = 0;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 	uint32_t cnt, d2;
@@ -528,6 +533,8 @@
 		d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
 		barrier();
 	}
+	if (cnt == 0)
+		hw_evt = 1;
 
 	/* Wait for soft-reset to complete. */
 	d2 = RD_REG_DWORD(&reg->ctrl_status);
@@ -536,6 +543,10 @@
 		d2 = RD_REG_DWORD(&reg->ctrl_status);
 		barrier();
 	}
+	if (cnt == 0 || hw_evt)
+		qla2xxx_hw_event_log(ha, HW_EVENT_RESET_ERR,
+		    RD_REG_WORD(&reg->mailbox1), RD_REG_WORD(&reg->mailbox2),
+		    RD_REG_WORD(&reg->mailbox3));
 
 	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
 	RD_REG_DWORD(&reg->hccr);
@@ -1243,10 +1254,10 @@
 qla2x00_fw_ready(scsi_qla_host_t *ha)
 {
 	int		rval;
-	unsigned long	wtime, mtime;
+	unsigned long	wtime, mtime, cs84xx_time;
 	uint16_t	min_wait;	/* Minimum wait time if loop is down */
 	uint16_t	wait_time;	/* Wait time if loop is coming ready */
-	uint16_t	fw_state;
+	uint16_t	state[3];
 
 	rval = QLA_SUCCESS;
 
@@ -1275,12 +1286,34 @@
 	    ha->host_no));
 
 	do {
-		rval = qla2x00_get_firmware_state(ha, &fw_state);
+		rval = qla2x00_get_firmware_state(ha, state);
 		if (rval == QLA_SUCCESS) {
-			if (fw_state < FSTATE_LOSS_OF_SYNC) {
+			if (state[0] < FSTATE_LOSS_OF_SYNC) {
 				ha->device_flags &= ~DFLG_NO_CABLE;
 			}
-			if (fw_state == FSTATE_READY) {
+			if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
+				DEBUG16(printk("scsi(%ld): fw_state=%x "
+				    "84xx=%x.\n", ha->host_no, state[0],
+				    state[2]));
+				if ((state[2] & FSTATE_LOGGED_IN) &&
+				     (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
+					DEBUG16(printk("scsi(%ld): Sending "
+					    "verify iocb.\n", ha->host_no));
+
+					cs84xx_time = jiffies;
+					rval = qla84xx_init_chip(ha);
+					if (rval != QLA_SUCCESS)
+						break;
+
+					/* Add time taken to initialize. */
+					cs84xx_time = jiffies - cs84xx_time;
+					wtime += cs84xx_time;
+					mtime += cs84xx_time;
+					DEBUG16(printk("scsi(%ld): Increasing "
+					    "wait time by %ld. New time %ld\n",
+					    ha->host_no, cs84xx_time, wtime));
+				}
+			} else if (state[0] == FSTATE_READY) {
 				DEBUG(printk("scsi(%ld): F/W Ready - OK \n",
 				    ha->host_no));
 
@@ -1294,7 +1327,7 @@
 			rval = QLA_FUNCTION_FAILED;
 
 			if (atomic_read(&ha->loop_down_timer) &&
-			    fw_state != FSTATE_READY) {
+			    state[0] != FSTATE_READY) {
 				/* Loop down. Timeout on min_wait for states
 				 * other than Wait for Login.
 				 */
@@ -1319,11 +1352,11 @@
 		msleep(500);
 
 		DEBUG3(printk("scsi(%ld): fw_state=%x curr time=%lx.\n",
-		    ha->host_no, fw_state, jiffies));
+		    ha->host_no, state[0], jiffies));
 	} while (1);
 
 	DEBUG(printk("scsi(%ld): fw_state=%x curr time=%lx.\n",
-	    ha->host_no, fw_state, jiffies));
+	    ha->host_no, state[0], jiffies));
 
 	if (rval) {
 		DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n",
@@ -1555,6 +1588,10 @@
 		qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
 		    "invalid -- WWPN) defaults.\n");
 
+		if (chksum)
+			qla2xxx_hw_event_log(ha, HW_EVENT_NVRAM_CHKSUM_ERR, 0,
+			    MSW(chksum), LSW(chksum));
+
 		/*
 		 * Set default initialization control block.
 		 */
@@ -2165,20 +2202,6 @@
 }
 
 static void
-qla2x00_probe_for_all_luns(scsi_qla_host_t *ha)
-{
-	fc_port_t	*fcport;
-
-	qla2x00_mark_all_devices_lost(ha, 0);
- 	list_for_each_entry(fcport, &ha->fcports, list) {
-		if (fcport->port_type != FCT_TARGET)
-			continue;
-
-		qla2x00_update_fcport(ha, fcport);
-	}
-}
-
-static void
 qla2x00_iidma_fcport(scsi_qla_host_t *ha, fc_port_t *fcport)
 {
 #define LS_UNKNOWN      2
@@ -2251,10 +2274,6 @@
 	if (fcport->port_type == FCT_TARGET)
 		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
 	fc_remote_port_rolechg(rport, rport_ids.roles);
-
-	if (rport->scsi_target_id != -1 &&
-	    rport->scsi_target_id < ha->host->max_id)
-		fcport->os_target_id = rport->scsi_target_id;
 }
 
 /*
@@ -2434,7 +2453,8 @@
 
 			if (fcport->loop_id == FC_NO_LOOP_ID) {
 				fcport->loop_id = next_loopid;
-				rval = qla2x00_find_new_loop_id(ha, fcport);
+				rval = qla2x00_find_new_loop_id(
+				    to_qla_parent(ha), fcport);
 				if (rval != QLA_SUCCESS) {
 					/* Ran out of IDs to use */
 					break;
@@ -2459,7 +2479,8 @@
 
 			/* Find a new loop ID to use. */
 			fcport->loop_id = next_loopid;
-			rval = qla2x00_find_new_loop_id(ha, fcport);
+			rval = qla2x00_find_new_loop_id(to_qla_parent(ha),
+			    fcport);
 			if (rval != QLA_SUCCESS) {
 				/* Ran out of IDs to use */
 				break;
@@ -3193,25 +3214,6 @@
 }
 
 void
-qla2x00_rescan_fcports(scsi_qla_host_t *ha)
-{
-	int rescan_done;
-	fc_port_t *fcport;
-
-	rescan_done = 0;
-	list_for_each_entry(fcport, &ha->fcports, list) {
-		if ((fcport->flags & FCF_RESCAN_NEEDED) == 0)
-			continue;
-
-		qla2x00_update_fcport(ha, fcport);
-		fcport->flags &= ~FCF_RESCAN_NEEDED;
-
-		rescan_done = 1;
-	}
-	qla2x00_probe_for_all_luns(ha);
-}
-
-void
 qla2x00_update_fcports(scsi_qla_host_t *ha)
 {
 	fc_port_t *fcport;
@@ -4044,16 +4046,16 @@
 	if (!ha->parent)
 		return -EINVAL;
 
-	rval = qla2x00_fw_ready(ha);
+	rval = qla2x00_fw_ready(ha->parent);
 	if (rval == QLA_SUCCESS) {
 		clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
-		qla2x00_marker(ha, 0, 0, MK_SYNC_ALL);
+		qla2x00_marker(ha->parent, 0, 0, MK_SYNC_ALL);
 	}
 
 	ha->flags.management_server_logged_in = 0;
 
 	/* Login to SNS first */
-	qla24xx_login_fabric(ha, NPH_SNS, 0xff, 0xff, 0xfc,
+	qla24xx_login_fabric(ha->parent, NPH_SNS, 0xff, 0xff, 0xfc,
 	    mb, BIT_1);
 	if (mb[0] != MBS_COMMAND_COMPLETE) {
 		DEBUG15(qla_printk(KERN_INFO, ha,
@@ -4067,7 +4069,77 @@
 	atomic_set(&ha->loop_state, LOOP_UP);
 	set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
 	set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
-	rval = qla2x00_loop_resync(ha);
+	rval = qla2x00_loop_resync(ha->parent);
 
 	return rval;
 }
+
+/* 84XX Support **************************************************************/
+
+static LIST_HEAD(qla_cs84xx_list);
+static DEFINE_MUTEX(qla_cs84xx_mutex);
+
+static struct qla_chip_state_84xx *
+qla84xx_get_chip(struct scsi_qla_host *ha)
+{
+	struct qla_chip_state_84xx *cs84xx;
+
+	mutex_lock(&qla_cs84xx_mutex);
+
+	/* Find any shared 84xx chip. */
+	list_for_each_entry(cs84xx, &qla_cs84xx_list, list) {
+		if (cs84xx->bus == ha->pdev->bus) {
+			kref_get(&cs84xx->kref);
+			goto done;
+		}
+	}
+
+	cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL);
+	if (!cs84xx)
+		goto done;
+
+	kref_init(&cs84xx->kref);
+	spin_lock_init(&cs84xx->access_lock);
+	mutex_init(&cs84xx->fw_update_mutex);
+	cs84xx->bus = ha->pdev->bus;
+
+	list_add_tail(&cs84xx->list, &qla_cs84xx_list);
+done:
+	mutex_unlock(&qla_cs84xx_mutex);
+	return cs84xx;
+}
+
+static void
+__qla84xx_chip_release(struct kref *kref)
+{
+	struct qla_chip_state_84xx *cs84xx =
+	    container_of(kref, struct qla_chip_state_84xx, kref);
+
+	mutex_lock(&qla_cs84xx_mutex);
+	list_del(&cs84xx->list);
+	mutex_unlock(&qla_cs84xx_mutex);
+	kfree(cs84xx);
+}
+
+void
+qla84xx_put_chip(struct scsi_qla_host *ha)
+{
+	if (ha->cs84xx)
+		kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
+}
+
+static int
+qla84xx_init_chip(scsi_qla_host_t *ha)
+{
+	int rval;
+	uint16_t status[2];
+
+	mutex_lock(&ha->cs84xx->fw_update_mutex);
+
+	rval = qla84xx_verify_chip(ha, status);
+
+	mutex_unlock(&ha->cs84xx->fw_update_mutex);
+
+	return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED :
+	    QLA_SUCCESS;
+}
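
qla84xx_get_chip()/qla84xx_put_chip() implement a common kernel pattern: one refcounted state object shared by all functions on the same PCI bus, found or created under a global mutex and freed when the last reference drops. A userspace analogue of the pattern, with a plain counter and pthread mutex standing in for struct kref and the kernel mutex:

#include <pthread.h>
#include <stdlib.h>

struct chip_state {
	struct chip_state *next;
	void *bus;		/* sharing key, like ha->pdev->bus */
	unsigned int refcnt;
};

static struct chip_state *chips;
static pthread_mutex_t chips_lock = PTHREAD_MUTEX_INITIALIZER;

/* Find the state shared by this bus, or allocate one; cf. qla84xx_get_chip(). */
static struct chip_state *get_chip(void *bus)
{
	struct chip_state *cs;

	pthread_mutex_lock(&chips_lock);
	for (cs = chips; cs; cs = cs->next)
		if (cs->bus == bus) {
			cs->refcnt++;
			goto out;
		}
	cs = calloc(1, sizeof(*cs));
	if (cs) {
		cs->bus = bus;
		cs->refcnt = 1;
		cs->next = chips;
		chips = cs;
	}
out:
	pthread_mutex_unlock(&chips_lock);
	return cs;
}

/* Drop a reference; unlink and free on the last put, cf. qla84xx_put_chip(). */
static void put_chip(struct chip_state *cs)
{
	struct chip_state **p;

	pthread_mutex_lock(&chips_lock);
	if (--cs->refcnt == 0) {
		for (p = &chips; *p; p = &(*p)->next)
			if (*p == cs) {
				*p = cs->next;
				break;
			}
		free(cs);
	}
	pthread_mutex_unlock(&chips_lock);
}

int main(void)
{
	int bus;
	struct chip_state *a = get_chip(&bus);
	struct chip_state *b = get_chip(&bus);

	/* a == b: two functions of the same chip share one state object */
	put_chip(b);
	put_chip(a);
	return 0;
}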
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 5d1a3f7..e9bae27 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -1,11 +1,10 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2005 QLogic Corporation
+ * Copyright (c)  2003-2008 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
 
-static __inline__ uint16_t qla2x00_debounce_register(volatile uint16_t __iomem *);
 /*
  * qla2x00_debounce_register
  *      Debounce register.
@@ -32,94 +31,12 @@
 	return (first);
 }
 
-static __inline__ int qla2x00_normalize_dma_addr(
-    dma_addr_t *e_addr,  uint32_t *e_len,
-    dma_addr_t *ne_addr, uint32_t *ne_len);
-
-/**
- * qla2x00_normalize_dma_addr() - Normalize an DMA address.
- * @e_addr: Raw DMA address
- * @e_len: Raw DMA length
- * @ne_addr: Normalized second DMA address
- * @ne_len: Normalized second DMA length
- *
- * If the address does not span a 4GB page boundary, the contents of @ne_addr
- * and @ne_len are undefined.  @e_len is updated to reflect a normalization.
- *
- * Example:
- *
- * 	ffffabc0ffffeeee	(e_addr) start of DMA address
- * 	0000000020000000	(e_len)  length of DMA transfer
- *	ffffabc11fffeeed	end of DMA transfer
- *
- * Is the 4GB boundary crossed?
- *
- * 	ffffabc0ffffeeee	(e_addr)
- *	ffffabc11fffeeed	(e_addr + e_len - 1)
- *	00000001e0000003	((e_addr ^ (e_addr + e_len - 1))
- *	0000000100000000	((e_addr ^ (e_addr + e_len - 1)) & ~(0xffffffff)
- *
- * Compute start of second DMA segment:
- *
- * 	ffffabc0ffffeeee	(e_addr)
- *	ffffabc1ffffeeee	(0x100000000 + e_addr)
- *	ffffabc100000000	(0x100000000 + e_addr) & ~(0xffffffff)
- *	ffffabc100000000	(ne_addr)
- *
- * Compute length of second DMA segment:
- *
- *	00000000ffffeeee	(e_addr & 0xffffffff)
- *	0000000000001112	(0x100000000 - (e_addr & 0xffffffff))
- *	000000001fffeeee	(e_len - (0x100000000 - (e_addr & 0xffffffff))
- *	000000001fffeeee	(ne_len)
- *
- * Adjust length of first DMA segment
- *
- * 	0000000020000000	(e_len)
- *	0000000000001112	(e_len - ne_len)
- *	0000000000001112	(e_len)
- *
- * Returns non-zero if the specified address was normalized, else zero.
- */
-static __inline__ int
-qla2x00_normalize_dma_addr(
-    dma_addr_t *e_addr,  uint32_t *e_len,
-    dma_addr_t *ne_addr, uint32_t *ne_len)
-{
-	int normalized;
-
-	normalized = 0;
-	if ((*e_addr ^ (*e_addr + *e_len - 1)) & ~(0xFFFFFFFFULL)) {
-		/* Compute normalized crossed address and len */
-		*ne_addr = (0x100000000ULL + *e_addr) & ~(0xFFFFFFFFULL);
-		*ne_len = *e_len - (0x100000000ULL - (*e_addr & 0xFFFFFFFFULL));
-		*e_len -= *ne_len;
-
-		normalized++;
-	}
-	return (normalized);
-}
-
-static __inline__ void qla2x00_poll(scsi_qla_host_t *);
 static inline void
 qla2x00_poll(scsi_qla_host_t *ha)
 {
 	ha->isp_ops->intr_handler(0, ha);
 }
 
-static __inline__ void qla2x00_check_fabric_devices(scsi_qla_host_t *);
-/*
- * This routine will wait for fabric devices for
- * the reset delay.
- */
-static __inline__ void qla2x00_check_fabric_devices(scsi_qla_host_t *ha)
-{
-	uint16_t	fw_state;
-
-	qla2x00_get_firmware_state(ha, &fw_state);
-}
-
-static __inline__ scsi_qla_host_t * to_qla_parent(scsi_qla_host_t *);
 static __inline__ scsi_qla_host_t *
 to_qla_parent(scsi_qla_host_t *ha)
 {
@@ -152,7 +69,6 @@
 	return (QLA_SUCCESS);
 }
 
-static inline uint8_t *host_to_fcp_swap(uint8_t *, uint32_t);
 static inline uint8_t *
 host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
 {
@@ -166,7 +82,6 @@
        return fcp;
 }
 
-static inline int qla2x00_is_reserved_id(scsi_qla_host_t *, uint16_t);
 static inline int
 qla2x00_is_reserved_id(scsi_qla_host_t *ha, uint16_t loop_id)
 {
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 024c662..5489d50 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2005 QLogic Corporation
+ * Copyright (c)  2003-2008 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -11,9 +11,6 @@
 
 #include <scsi/scsi_tcq.h>
 
-static inline uint16_t qla2x00_get_cmd_direction(struct scsi_cmnd *cmd);
-static inline cont_entry_t *qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *);
-static inline cont_a64_entry_t *qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *);
 static request_t *qla2x00_req_pkt(scsi_qla_host_t *ha);
 static void qla2x00_isp_cmd(scsi_qla_host_t *ha);
 
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index f033703..285479b 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2005 QLogic Corporation
+ * Copyright (c)  2003-2008 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -14,9 +14,6 @@
 static void qla2x00_status_entry(scsi_qla_host_t *, void *);
 static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *);
 static void qla2x00_error_entry(scsi_qla_host_t *, sts_entry_t *);
-static void qla2x00_ms_entry(scsi_qla_host_t *, ms_iocb_entry_t *);
-
-static void qla24xx_ms_entry(scsi_qla_host_t *, struct ct_entry_24xx *);
 
 /**
  * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
@@ -33,7 +30,6 @@
 	scsi_qla_host_t	*ha;
 	struct device_reg_2xxx __iomem *reg;
 	int		status;
-	unsigned long	flags;
 	unsigned long	iter;
 	uint16_t	hccr;
 	uint16_t	mb[4];
@@ -48,7 +44,7 @@
 	reg = &ha->iobase->isp;
 	status = 0;
 
-	spin_lock_irqsave(&ha->hardware_lock, flags);
+	spin_lock(&ha->hardware_lock);
 	for (iter = 50; iter--; ) {
 		hccr = RD_REG_WORD(&reg->hccr);
 		if (hccr & HCCR_RISC_PAUSE) {
@@ -99,7 +95,7 @@
 			RD_REG_WORD(&reg->hccr);
 		}
 	}
-	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	spin_unlock(&ha->hardware_lock);
 
 	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
 	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
@@ -125,7 +121,6 @@
 	scsi_qla_host_t	*ha;
 	struct device_reg_2xxx __iomem *reg;
 	int		status;
-	unsigned long	flags;
 	unsigned long	iter;
 	uint32_t	stat;
 	uint16_t	hccr;
@@ -141,7 +136,7 @@
 	reg = &ha->iobase->isp;
 	status = 0;
 
-	spin_lock_irqsave(&ha->hardware_lock, flags);
+	spin_lock(&ha->hardware_lock);
 	for (iter = 50; iter--; ) {
 		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
 		if (stat & HSR_RISC_PAUSED) {
@@ -211,7 +206,7 @@
 		WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
 		RD_REG_WORD_RELAXED(&reg->hccr);
 	}
-	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	spin_unlock(&ha->hardware_lock);
 
 	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
 	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
@@ -276,6 +271,9 @@
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
 	uint32_t	rscn_entry, host_pid;
 	uint8_t		rscn_queue_index;
+	unsigned long	flags;
+	scsi_qla_host_t	*vha;
+	int		i;
 
 	/* Setup to process RIO completion. */
 	handle_cnt = 0;
@@ -351,6 +349,7 @@
 		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
 		    mb[1], mb[2], mb[3]);
 
+		qla2x00_post_hwe_work(ha, mb[0], mb[1], mb[2], mb[3]);
 		ha->isp_ops->fw_dump(ha, 1);
 
 		if (IS_FWI2_CAPABLE(ha)) {
@@ -375,6 +374,7 @@
 		    ha->host_no));
 		qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n");
 
+		qla2x00_post_hwe_work(ha, mb[0], mb[1], mb[2], mb[3]);
 		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
 		break;
 
@@ -383,6 +383,7 @@
 		    ha->host_no));
 		qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");
 
+		qla2x00_post_hwe_work(ha, mb[0], mb[1], mb[2], mb[3]);
 		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
 		break;
 
@@ -410,6 +411,7 @@
 		set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
 
 		ha->flags.management_server_logged_in = 0;
+		qla2x00_post_aen_work(ha, FCH_EVT_LIP, mb[1]);
 		break;
 
 	case MBA_LOOP_UP:		/* Loop Up Event */
@@ -429,12 +431,14 @@
 		    link_speed);
 
 		ha->flags.management_server_logged_in = 0;
+		qla2x00_post_aen_work(ha, FCH_EVT_LINKUP, ha->link_data_rate);
 		break;
 
 	case MBA_LOOP_DOWN:		/* Loop Down Event */
-		DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN (%x).\n",
-		    ha->host_no, mb[1]));
-		qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x).\n", mb[1]);
+		DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN "
+		    "(%x %x %x).\n", ha->host_no, mb[1], mb[2], mb[3]));
+		qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x %x %x).\n",
+		    mb[1], mb[2], mb[3]);
 
 		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
 			atomic_set(&ha->loop_state, LOOP_DOWN);
@@ -452,6 +456,7 @@
 		ha->link_data_rate = PORT_SPEED_UNKNOWN;
 		if (ql2xfdmienable)
 			set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
+		qla2x00_post_aen_work(ha, FCH_EVT_LINKDOWN, 0);
 		break;
 
 	case MBA_LIP_RESET:		/* LIP reset occurred */
@@ -475,6 +480,7 @@
 
 		ha->operating_mode = LOOP;
 		ha->flags.management_server_logged_in = 0;
+		qla2x00_post_aen_work(ha, FCH_EVT_LIPRESET, mb[1]);
 		break;
 
 	case MBA_POINT_TO_POINT:	/* Point-to-Point */
@@ -538,6 +544,18 @@
 		break;
 
 	case MBA_PORT_UPDATE:		/* Port database update */
+		if ((ha->flags.npiv_supported) && (ha->num_vhosts)) {
+			for_each_mapped_vp_idx(ha, i) {
+				list_for_each_entry(vha, &ha->vp_list,
+				    vp_list) {
+					if ((mb[3] & 0xff)
+					    == vha->vp_idx) {
+						ha = vha;
+						break;
+					}
+				}
+			}
+		}
 		/*
 		 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
 		 * event etc. earlier indicating loop is down) then process
@@ -572,12 +590,18 @@
 		break;
 
 	case MBA_RSCN_UPDATE:		/* State Change Registration */
-		/* Check if the Vport has issued a SCR */
-		if (ha->parent && test_bit(VP_SCR_NEEDED, &ha->vp_flags))
-			break;
-		/* Only handle SCNs for our Vport index. */
-		if (ha->flags.npiv_supported && ha->vp_idx != mb[3])
-			break;
+		if ((ha->flags.npiv_supported) && (ha->num_vhosts)) {
+			for_each_mapped_vp_idx(ha, i) {
+				list_for_each_entry(vha, &ha->vp_list,
+				    vp_list) {
+					if ((mb[3] & 0xff)
+					    == vha->vp_idx) {
+						ha = vha;
+						break;
+					}
+				}
+			}
+		}
 
 		DEBUG2(printk("scsi(%ld): Asynchronous RSCN UPDATE.\n",
 		    ha->host_no));
@@ -612,6 +636,7 @@
 
 		set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
 		set_bit(RSCN_UPDATE, &ha->dpc_flags);
+		qla2x00_post_aen_work(ha, FCH_EVT_RSCN, rscn_entry);
 		break;
 
 	/* case MBA_RIO_RESPONSE: */
@@ -637,6 +662,42 @@
 		DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n",
 		ha->host_no, mb[1], mb[2]));
 		break;
+
+	case MBA_ISP84XX_ALERT:
+		DEBUG2(printk("scsi(%ld): ISP84XX Alert Notification -- "
+		    "%04x %04x %04x\n", ha->host_no, mb[1], mb[2], mb[3]));
+
+		spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
+		switch (mb[1]) {
+		case A84_PANIC_RECOVERY:
+			qla_printk(KERN_INFO, ha, "Alert 84XX: panic recovery "
+			    "%04x %04x\n", mb[2], mb[3]);
+			break;
+		case A84_OP_LOGIN_COMPLETE:
+			ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
+			DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: "
+			    "firmware version %x\n", ha->cs84xx->op_fw_version));
+			break;
+		case A84_DIAG_LOGIN_COMPLETE:
+			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
+			DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: "
+			    "diagnostic firmware version %x\n",
+			    ha->cs84xx->diag_fw_version));
+			break;
+		case A84_GOLD_LOGIN_COMPLETE:
+			ha->cs84xx->gold_fw_version = mb[3] << 16 | mb[2];
+			ha->cs84xx->fw_update = 1;
+			DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: gold "
+			    "firmware version %x\n",
+			    ha->cs84xx->gold_fw_version));
+			break;
+		default:
+			qla_printk(KERN_ERR, ha,
+			    "Alert 84xx: Invalid Alert %04x %04x %04x\n",
+			    mb[1], mb[2], mb[3]);
+		}
+		spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
+		break;
 	}
 
 	if (!ha->parent && ha->num_vhosts)
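
Several alert cases above reassemble a 32-bit firmware version from two 16-bit mailbox registers, low half in mb[2] and high half in mb[3]. A one-file check of the packing, with hypothetical payload values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical alert payload, low half in mb2, high half in mb3 */
	uint16_t mb2 = 0x0005, mb3 = 0x0102;
	uint32_t fw_version = (uint32_t)mb3 << 16 | mb2;

	printf("firmware version %x\n", fw_version);	/* prints 1020005 */
	return 0;
}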
@@ -803,9 +864,6 @@
 		case STATUS_CONT_TYPE:
 			qla2x00_status_cont_entry(ha, (sts_cont_entry_t *)pkt);
 			break;
-		case MS_IOCB_TYPE:
-			qla2x00_ms_entry(ha, (ms_iocb_entry_t *)pkt);
-			break;
 		default:
 			/* Type Not Supported. */
 			DEBUG4(printk(KERN_WARNING
@@ -1340,44 +1398,6 @@
 }
 
 /**
- * qla2x00_ms_entry() - Process a Management Server entry.
- * @ha: SCSI driver HA context
- * @index: Response queue out pointer
- */
-static void
-qla2x00_ms_entry(scsi_qla_host_t *ha, ms_iocb_entry_t *pkt)
-{
-	srb_t          *sp;
-
-	DEBUG3(printk("%s(%ld): pkt=%p pkthandle=%d.\n",
-	    __func__, ha->host_no, pkt, pkt->handle1));
-
-	/* Validate handle. */
- 	if (pkt->handle1 < MAX_OUTSTANDING_COMMANDS)
- 		sp = ha->outstanding_cmds[pkt->handle1];
-	else
-		sp = NULL;
-
-	if (sp == NULL) {
-		DEBUG2(printk("scsi(%ld): MS entry - invalid handle\n",
-		    ha->host_no));
-		qla_printk(KERN_WARNING, ha, "MS entry - invalid handle\n");
-
-		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
-		return;
-	}
-
-	CMD_COMPL_STATUS(sp->cmd) = le16_to_cpu(pkt->status);
-	CMD_ENTRY_STATUS(sp->cmd) = pkt->entry_status;
-
-	/* Free outstanding command slot. */
-	ha->outstanding_cmds[pkt->handle1] = NULL;
-
-	qla2x00_sp_compl(ha, sp);
-}
-
-
-/**
  * qla24xx_mbx_completion() - Process mailbox command completions.
  * @ha: SCSI driver HA context
  * @mb0: Mailbox0 register
@@ -1449,9 +1469,6 @@
 		case STATUS_CONT_TYPE:
 			qla2x00_status_cont_entry(ha, (sts_cont_entry_t *)pkt);
 			break;
-		case MS_IOCB_TYPE:
-			qla24xx_ms_entry(ha, (struct ct_entry_24xx *)pkt);
-			break;
 		case VP_RPT_ID_IOCB_TYPE:
 			qla24xx_report_id_acquisition(ha,
 			    (struct vp_rpt_id_entry_24xx *)pkt);
@@ -1533,7 +1550,6 @@
 	scsi_qla_host_t	*ha;
 	struct device_reg_24xx __iomem *reg;
 	int		status;
-	unsigned long	flags;
 	unsigned long	iter;
 	uint32_t	stat;
 	uint32_t	hccr;
@@ -1549,13 +1565,19 @@
 	reg = &ha->iobase->isp24;
 	status = 0;
 
-	spin_lock_irqsave(&ha->hardware_lock, flags);
+	spin_lock(&ha->hardware_lock);
 	for (iter = 50; iter--; ) {
 		stat = RD_REG_DWORD(&reg->host_status);
 		if (stat & HSRX_RISC_PAUSED) {
 			if (pci_channel_offline(ha->pdev))
 				break;
 
+			if (ha->hw_event_pause_errors == 0)
+				qla2x00_post_hwe_work(ha, HW_EVENT_PARITY_ERR,
+				    0, MSW(stat), LSW(stat));
+			else if (ha->hw_event_pause_errors < 0xffffffff)
+				ha->hw_event_pause_errors++;
+
 			hccr = RD_REG_DWORD(&reg->hccr);
 
 			qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
@@ -1597,7 +1619,7 @@
 		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
 		RD_REG_DWORD_RELAXED(&reg->hccr);
 	}
-	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	spin_unlock(&ha->hardware_lock);
 
 	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
 	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
@@ -1608,66 +1630,21 @@
 	return IRQ_HANDLED;
 }
 
-/**
- * qla24xx_ms_entry() - Process a Management Server entry.
- * @ha: SCSI driver HA context
- * @index: Response queue out pointer
- */
-static void
-qla24xx_ms_entry(scsi_qla_host_t *ha, struct ct_entry_24xx *pkt)
-{
-	srb_t          *sp;
-
-	DEBUG3(printk("%s(%ld): pkt=%p pkthandle=%d.\n",
-	    __func__, ha->host_no, pkt, pkt->handle));
-
-	DEBUG9(printk("%s: ct pkt dump:\n", __func__));
-	DEBUG9(qla2x00_dump_buffer((void *)pkt, sizeof(struct ct_entry_24xx)));
-
-	/* Validate handle. */
- 	if (pkt->handle < MAX_OUTSTANDING_COMMANDS)
- 		sp = ha->outstanding_cmds[pkt->handle];
-	else
-		sp = NULL;
-
-	if (sp == NULL) {
-		DEBUG2(printk("scsi(%ld): MS entry - invalid handle\n",
-		    ha->host_no));
-		DEBUG10(printk("scsi(%ld): MS entry - invalid handle\n",
-		    ha->host_no));
-		qla_printk(KERN_WARNING, ha, "MS entry - invalid handle %d\n",
-		    pkt->handle);
-
-		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
-		return;
-	}
-
-	CMD_COMPL_STATUS(sp->cmd) = le16_to_cpu(pkt->comp_status);
-	CMD_ENTRY_STATUS(sp->cmd) = pkt->entry_status;
-
-	/* Free outstanding command slot. */
-	ha->outstanding_cmds[pkt->handle] = NULL;
-
-	qla2x00_sp_compl(ha, sp);
-}
-
 static irqreturn_t
 qla24xx_msix_rsp_q(int irq, void *dev_id)
 {
 	scsi_qla_host_t	*ha;
 	struct device_reg_24xx __iomem *reg;
-	unsigned long flags;
 
 	ha = dev_id;
 	reg = &ha->iobase->isp24;
 
-	spin_lock_irqsave(&ha->hardware_lock, flags);
+	spin_lock(&ha->hardware_lock);
 
 	qla24xx_process_response_queue(ha);
-
 	WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
 
-	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	spin_unlock(&ha->hardware_lock);
 
 	return IRQ_HANDLED;
 }
@@ -1678,7 +1655,6 @@
 	scsi_qla_host_t	*ha;
 	struct device_reg_24xx __iomem *reg;
 	int		status;
-	unsigned long	flags;
 	uint32_t	stat;
 	uint32_t	hccr;
 	uint16_t	mb[4];
@@ -1687,13 +1663,19 @@
 	reg = &ha->iobase->isp24;
 	status = 0;
 
-	spin_lock_irqsave(&ha->hardware_lock, flags);
+	spin_lock(&ha->hardware_lock);
 	do {
 		stat = RD_REG_DWORD(&reg->host_status);
 		if (stat & HSRX_RISC_PAUSED) {
 			if (pci_channel_offline(ha->pdev))
 				break;
 
+			if (ha->hw_event_pause_errors == 0)
+				qla2x00_post_hwe_work(ha, HW_EVENT_PARITY_ERR,
+				    0, MSW(stat), LSW(stat));
+			else if (ha->hw_event_pause_errors < 0xffffffff)
+				ha->hw_event_pause_errors++;
+
 			hccr = RD_REG_DWORD(&reg->hccr);
 
 			qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
@@ -1734,7 +1716,7 @@
 		}
 		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
 	} while (0);
-	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	spin_unlock(&ha->hardware_lock);
 
 	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
 	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
@@ -1821,10 +1803,9 @@
 {
 	int ret;
 	device_reg_t __iomem *reg = ha->iobase;
-	unsigned long flags;
 
 	/* If possible, enable MSI-X. */
-	if (!IS_QLA2432(ha) && !IS_QLA2532(ha))
+	if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha))
 		goto skip_msix;
 
         if (IS_QLA2432(ha) && (ha->chip_revision < QLA_MSIX_CHIP_REV_24XX ||
@@ -1859,7 +1840,7 @@
 	    "MSI-X: Falling back-to INTa mode -- %d.\n", ret);
 skip_msix:
 
-	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha))
+	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha))
 		goto skip_msi;
 
 	ret = pci_enable_msi(ha->pdev);
@@ -1882,7 +1863,7 @@
 clear_risc_ints:
 
 	ha->isp_ops->disable_intrs(ha);
-	spin_lock_irqsave(&ha->hardware_lock, flags);
+	spin_lock_irq(&ha->hardware_lock);
 	if (IS_FWI2_CAPABLE(ha)) {
 		WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
 		WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
@@ -1891,7 +1872,7 @@
 		WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
 		WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
 	}
-	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	spin_unlock_irq(&ha->hardware_lock);
 	ha->isp_ops->enable_intrs(ha);
 
 fail:
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index bb10358..7d0a8a4 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2005 QLogic Corporation
+ * Copyright (c)  2003-2008 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -310,7 +310,7 @@
 	}
 
 	mcp->in_mb = MBX_0;
-	mcp->tov = 30;
+	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
 	rval = qla2x00_mailbox_command(ha, mcp);
 
@@ -367,7 +367,7 @@
 		}
 	}
 
-	mcp->tov = 30;
+	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
 	rval = qla2x00_mailbox_command(ha, mcp);
 
@@ -417,7 +417,7 @@
 	mcp->out_mb = MBX_0;
 	mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
 	mcp->flags = 0;
-	mcp->tov = 30;
+	mcp->tov = MBX_TOV_SECONDS;
 	rval = qla2x00_mailbox_command(ha, mcp);
 
 	/* Return mailbox data. */
@@ -466,7 +466,7 @@
 	mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
 	mcp->out_mb = MBX_0;
 	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
-	mcp->tov = 30;
+	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
 	rval = qla2x00_mailbox_command(ha, mcp);
 
@@ -524,7 +524,7 @@
 		mcp->mb[12] = 0;	/* Undocumented, but used */
 		mcp->out_mb |= MBX_12|MBX_11|MBX_10;
 	}
-	mcp->tov = 30;
+	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
 	rval = qla2x00_mailbox_command(ha, mcp);
 
@@ -576,7 +576,7 @@
 	mcp->mb[7] = 0x2525;
 	mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
 	mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
-	mcp->tov = 30;
+	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
 	rval = qla2x00_mailbox_command(ha, mcp);
 
@@ -587,6 +587,14 @@
 		if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
 		    mcp->mb[7] != 0x2525)
 			rval = QLA_FUNCTION_FAILED;
+		if (rval == QLA_FUNCTION_FAILED) {
+			struct device_reg_24xx __iomem *reg =
+			    &ha->iobase->isp24;
+
+			qla2xxx_hw_event_log(ha, HW_EVENT_ISP_ERR, 0,
+			    LSW(RD_REG_DWORD(&reg->hccr)),
+			    LSW(RD_REG_DWORD(&reg->istatus)));
+		}
 	}
 
 	if (rval != QLA_SUCCESS) {
@@ -640,7 +648,7 @@
 		mcp->in_mb |= MBX_1;
 	}
 
-	mcp->tov = 30;
+	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
 	rval = qla2x00_mailbox_command(ha, mcp);
 
@@ -674,8 +682,8 @@
  *	Kernel context.
  */
 int
-qla2x00_issue_iocb(scsi_qla_host_t *ha, void*  buffer, dma_addr_t phys_addr,
-    size_t size)
+qla2x00_issue_iocb_timeout(scsi_qla_host_t *ha, void *buffer,
+    dma_addr_t phys_addr, size_t size, uint32_t tov)
 {
 	int		rval;
 	mbx_cmd_t	mc;
@@ -689,7 +697,7 @@
 	mcp->mb[7] = LSW(MSD(phys_addr));
 	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
 	mcp->in_mb = MBX_2|MBX_0;
-	mcp->tov = 30;
+	mcp->tov = tov;
 	mcp->flags = 0;
 	rval = qla2x00_mailbox_command(ha, mcp);
 
@@ -710,6 +718,14 @@
 	return rval;
 }
 
+int
+qla2x00_issue_iocb(scsi_qla_host_t *ha, void *buffer, dma_addr_t phys_addr,
+    size_t size)
+{
+	return qla2x00_issue_iocb_timeout(ha, buffer, phys_addr, size,
+	    MBX_TOV_SECONDS);
+}
+
 /*
  * qla2x00_abort_command
  *	Abort command aborts a specified IOCB.
@@ -760,7 +776,7 @@
 	mcp->mb[6] = (uint16_t)sp->cmd->device->lun;
 	mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
 	mcp->in_mb = MBX_0;
-	mcp->tov = 30;
+	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
 	rval = qla2x00_mailbox_command(ha, mcp);
 
@@ -776,36 +792,20 @@
 	return rval;
 }
 
-#if USE_ABORT_TGT
-/*
- * qla2x00_abort_target
- *	Issue abort target mailbox command.
- *
- * Input:
- *	ha = adapter block pointer.
- *
- * Returns:
- *	qla2x00 local function return status code.
- *
- * Context:
- *	Kernel context.
- */
 int
-qla2x00_abort_target(fc_port_t *fcport)
+qla2x00_abort_target(struct fc_port *fcport, unsigned int l)
 {
-	int        rval;
+	int rval, rval2;
 	mbx_cmd_t  mc;
 	mbx_cmd_t  *mcp = &mc;
 	scsi_qla_host_t *ha;
 
-	if (fcport == NULL)
-		return 0;
-
 	DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->ha->host_no));
 
+	l = l;	/* LUN is unused for a target reset */
 	ha = fcport->ha;
 	mcp->mb[0] = MBC_ABORT_TARGET;
-	mcp->out_mb = MBX_2|MBX_1|MBX_0;
+	mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
 	if (HAS_EXTENDED_IDS(ha)) {
 		mcp->mb[1] = fcport->loop_id;
 		mcp->mb[10] = 0;
@@ -814,27 +814,70 @@
 		mcp->mb[1] = fcport->loop_id << 8;
 	}
 	mcp->mb[2] = ha->loop_reset_delay;
+	mcp->mb[9] = ha->vp_idx;
 
 	mcp->in_mb = MBX_0;
-	mcp->tov = 30;
+	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
 	rval = qla2x00_mailbox_command(ha, mcp);
-
-	/* Issue marker command. */
-	ha->marker_needed = 1;
-
 	if (rval != QLA_SUCCESS) {
-		DEBUG2_3_11(printk("qla2x00_abort_target(%ld): failed=%x.\n",
+		DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
 		    ha->host_no, rval));
+	}
+
+	/* Issue marker IOCB. */
+	rval2 = qla2x00_marker(ha, fcport->loop_id, 0, MK_SYNC_ID);
+	if (rval2 != QLA_SUCCESS) {
+		DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB "
+		    "(%x).\n", __func__, ha->host_no, rval2));
 	} else {
-		/*EMPTY*/
-		DEBUG11(printk("qla2x00_abort_target(%ld): done.\n",
-		    ha->host_no));
+		DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
 	}
 
 	return rval;
 }
-#endif
+
+int
+qla2x00_lun_reset(struct fc_port *fcport, unsigned int l)
+{
+	int rval, rval2;
+	mbx_cmd_t  mc;
+	mbx_cmd_t  *mcp = &mc;
+	scsi_qla_host_t *ha;
+
+	DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->ha->host_no));
+
+	ha = fcport->ha;
+	mcp->mb[0] = MBC_LUN_RESET;
+	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
+	if (HAS_EXTENDED_IDS(ha))
+		mcp->mb[1] = fcport->loop_id;
+	else
+		mcp->mb[1] = fcport->loop_id << 8;
+	mcp->mb[2] = l;
+	mcp->mb[3] = 0;
+	mcp->mb[9] = ha->vp_idx;
+
+	mcp->in_mb = MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(ha, mcp);
+	if (rval != QLA_SUCCESS) {
+		DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
+		    ha->host_no, rval));
+	}
+
+	/* Issue marker IOCB. */
+	rval2 = qla2x00_marker(ha, fcport->loop_id, l, MK_SYNC_ID_LUN);
+	if (rval2 != QLA_SUCCESS) {
+		DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB "
+		    "(%x).\n", __func__, ha->host_no, rval2));
+	} else {
+		DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+	}
+
+	return rval;
+}
 
 /*
  * qla2x00_get_adapter_id
@@ -871,7 +914,7 @@
 	mcp->mb[9] = ha->vp_idx;
 	mcp->out_mb = MBX_9|MBX_0;
 	mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
-	mcp->tov = 30;
+	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
 	rval = qla2x00_mailbox_command(ha, mcp);
 	if (mcp->mb[0] == MBS_COMMAND_ERROR)
@@ -928,7 +971,7 @@
 	mcp->mb[0] = MBC_GET_RETRY_COUNT;
 	mcp->out_mb = MBX_0;
 	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
-	mcp->tov = 30;
+	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
 	rval = qla2x00_mailbox_command(ha, mcp);
 
@@ -995,7 +1038,7 @@
 	mcp->in_mb = MBX_5|MBX_4|MBX_0;
 	mcp->buf_size = size;
 	mcp->flags = MBX_DMA_OUT;
-	mcp->tov = 30;
+	mcp->tov = MBX_TOV_SECONDS;
 	rval = qla2x00_mailbox_command(ha, mcp);
 
 	if (rval != QLA_SUCCESS) {
@@ -1173,7 +1216,7 @@
  *	Kernel context.
  */
 int
-qla2x00_get_firmware_state(scsi_qla_host_t *ha, uint16_t *dptr)
+qla2x00_get_firmware_state(scsi_qla_host_t *ha, uint16_t *states)
 {
 	int rval;
 	mbx_cmd_t mc;
@@ -1184,13 +1227,15 @@
 
 	mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
 	mcp->out_mb = MBX_0;
-	mcp->in_mb = MBX_2|MBX_1|MBX_0;
-	mcp->tov = 30;
+	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
 	rval = qla2x00_mailbox_command(ha, mcp);
 
-	/* Return firmware state. */
-	*dptr = mcp->mb[1];
+	/* Return firmware states. */
+	states[0] = mcp->mb[1];
+	states[1] = mcp->mb[2];
+	states[2] = mcp->mb[3];
 
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
@@ -1246,7 +1291,7 @@
 	}
 
 	mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
-	mcp->tov = 30;
+	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
 	rval = qla2x00_mailbox_command(ha, mcp);
 
@@ -1318,7 +1363,7 @@
 		mcp->mb[3] = 0;
 	}
 	mcp->in_mb = MBX_0;
-	mcp->tov = 30;
+	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
 	rval = qla2x00_mailbox_command(ha, mcp);
 
@@ -1743,7 +1788,7 @@
 	}
 
 	mcp->in_mb = MBX_1|MBX_0;
-	mcp->tov = 30;
+	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
 	rval = qla2x00_mailbox_command(ha, mcp);
 
@@ -1791,7 +1836,7 @@
 	mcp->mb[3] = 0;
 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
 	mcp->in_mb = MBX_0;
-	mcp->tov = 30;
+	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
 	rval = qla2x00_mailbox_command(ha, mcp);
 
@@ -1852,7 +1897,7 @@
 		mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1;
 	}
 	mcp->in_mb = MBX_1|MBX_0;
-	mcp->tov = 30;
+	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
 	rval = qla2x00_mailbox_command(ha, mcp);
 
@@ -1896,7 +1941,7 @@
 	mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
 	mcp->out_mb = MBX_0;
 	mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
-	mcp->tov = 30;
+	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
 	rval = qla2x00_mailbox_command(ha, mcp);
 
@@ -2036,7 +2081,7 @@
 		mcp->mb[1] = loop_id << 8;
 		mcp->out_mb |= MBX_1;
 	}
-	mcp->tov = 30;
+	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = IOCTL_CMD;
 	rval = qla2x00_mailbox_command(ha, mcp);
 
@@ -2082,7 +2127,7 @@
 	mcp->mb[10] = 0;
 	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
-	mcp->tov = 30;
+	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = IOCTL_CMD;
 	rval = qla2x00_mailbox_command(ha, mcp);
 
@@ -2180,17 +2225,15 @@
 	} p;
 };
 
-int
-qla24xx_abort_target(fc_port_t *fcport)
+static int
+__qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
+    unsigned int l)
 {
-	int		rval;
+	int		rval, rval2;
 	struct tsk_mgmt_cmd *tsk;
 	dma_addr_t	tsk_dma;
 	scsi_qla_host_t *ha, *pha;
 
-	if (fcport == NULL)
-		return 0;
-
 	DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->ha->host_no));
 
 	ha = fcport->ha;
@@ -2207,47 +2250,61 @@
 	tsk->p.tsk.entry_count = 1;
 	tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
 	tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
-	tsk->p.tsk.control_flags = __constant_cpu_to_le32(TCF_TARGET_RESET);
+	tsk->p.tsk.control_flags = cpu_to_le32(type);
 	tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
 	tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
 	tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
 	tsk->p.tsk.vp_index = fcport->vp_idx;
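+	/* For a LUN reset, supply the LUN in byte-swapped FCP format. */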
+	if (type == TCF_LUN_RESET) {
+		int_to_scsilun(l, &tsk->p.tsk.lun);
+		host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
+		    sizeof(tsk->p.tsk.lun));
+	}
 
 	rval = qla2x00_issue_iocb(ha, tsk, tsk_dma, 0);
 	if (rval != QLA_SUCCESS) {
-		DEBUG2_3_11(printk("%s(%ld): failed to issue Target Reset IOCB "
-		    "(%x).\n", __func__, ha->host_no, rval));
-		goto atarget_done;
+		DEBUG2_3_11(printk("%s(%ld): failed to issue %s Reset IOCB "
+		    "(%x).\n", __func__, ha->host_no, name, rval));
 	} else if (tsk->p.sts.entry_status != 0) {
 		DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
 		    "-- error status (%x).\n", __func__, ha->host_no,
 		    tsk->p.sts.entry_status));
 		rval = QLA_FUNCTION_FAILED;
-		goto atarget_done;
 	} else if (tsk->p.sts.comp_status !=
 	    __constant_cpu_to_le16(CS_COMPLETE)) {
 		DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
 		    "-- completion status (%x).\n", __func__,
 		    ha->host_no, le16_to_cpu(tsk->p.sts.comp_status)));
 		rval = QLA_FUNCTION_FAILED;
-		goto atarget_done;
 	}
 
 	/* Issue marker IOCB. */
-	rval = qla2x00_marker(ha, fcport->loop_id, 0, MK_SYNC_ID);
-	if (rval != QLA_SUCCESS) {
+	rval2 = qla2x00_marker(ha, fcport->loop_id, l,
+	    type == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
+	if (rval2 != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB "
-		    "(%x).\n", __func__, ha->host_no, rval));
+		    "(%x).\n", __func__, ha->host_no, rval2));
 	} else {
 		DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
 	}
 
-atarget_done:
 	dma_pool_free(pha->s_dma_pool, tsk, tsk_dma);
 
 	return rval;
 }
 
+int
+qla24xx_abort_target(struct fc_port *fcport, unsigned int l)
+{
+	return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l);
+}
+
+int
+qla24xx_lun_reset(struct fc_port *fcport, unsigned int l)
+{
+	return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l);
+}
+
 #if 0
 
 int
@@ -2304,7 +2361,7 @@
 	mcp->mb[4] = sw_em_4g | BIT_15;
 	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
 	mcp->in_mb = MBX_0;
-	mcp->tov = 30;
+	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
 	rval = qla2x00_mailbox_command(ha, mcp);
 
@@ -2372,7 +2429,7 @@
 	mcp->mb[7] = TC_AEN_DISABLE;
 	mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
 	mcp->in_mb = MBX_1|MBX_0;
-	mcp->tov = 30;
+	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
 	rval = qla2x00_mailbox_command(ha, mcp);
 	if (rval != QLA_SUCCESS) {
@@ -2401,7 +2458,7 @@
 	mcp->mb[1] = TC_EFT_DISABLE;
 	mcp->out_mb = MBX_1|MBX_0;
 	mcp->in_mb = MBX_1|MBX_0;
-	mcp->tov = 30;
+	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
 	rval = qla2x00_mailbox_command(ha, mcp);
 	if (rval != QLA_SUCCESS) {
@@ -2441,7 +2498,7 @@
 	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
 	    MBX_1|MBX_0;
 	mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
-	mcp->tov = 30;
+	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
 	rval = qla2x00_mailbox_command(ha, mcp);
 	if (rval != QLA_SUCCESS) {
@@ -2477,7 +2534,7 @@
 	mcp->out_mb = MBX_2|MBX_1|MBX_0;
 	mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
 	    MBX_1|MBX_0;
-	mcp->tov = 30;
+	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
 	rval = qla2x00_mailbox_command(ha, mcp);
 	if (rval != QLA_SUCCESS) {
@@ -2525,7 +2582,7 @@
 	mcp->mb[10] = 0;
 	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
 	mcp->in_mb = MBX_0;
-	mcp->tov = 30;
+	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
 	rval = qla2x00_mailbox_command(ha, mcp);
 
@@ -2559,7 +2616,7 @@
 	mcp->mb[4] = mcp->mb[5] = 0;
 	mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
 	mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
-	mcp->tov = 30;
+	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
 	rval = qla2x00_mailbox_command(ha, mcp);
 
@@ -2877,7 +2934,7 @@
 	}
 
 	mcp->in_mb = MBX_0;
-	mcp->tov = 30;
+	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
 	rval = qla2x00_mailbox_command(ha, mcp);
 
@@ -2890,3 +2947,104 @@
 
 	return rval;
 }
+
+/* 84XX Support **************************************************************/
+
+struct cs84xx_mgmt_cmd {
+	union {
+		struct verify_chip_entry_84xx req;
+		struct verify_chip_rsp_84xx rsp;
+	} p;
+};
+
+int
+qla84xx_verify_chip(struct scsi_qla_host *ha, uint16_t *status)
+{
+	int rval, retry;
+	struct cs84xx_mgmt_cmd *mn;
+	dma_addr_t mn_dma;
+	uint16_t options;
+	unsigned long flags;
+
+	DEBUG16(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+
+	mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
+	if (mn == NULL) {
+		DEBUG2_3(printk("%s(%ld): failed to allocate Verify ISP84XX "
+		    "IOCB.\n", __func__, ha->host_no));
+		return QLA_MEMORY_ALLOC_FAILED;
+	}
+
+	/* Force Update? */
+	options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0;
+	/* Diagnostic firmware? */
+	/* options |= MENLO_DIAG_FW; */
+	/* We update the firmware with only one data sequence. */
+	options |= VCO_END_OF_DATA;
+
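+	/* On failure, retry once with the firmware-update option cleared. */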
+	retry = 0;
+	do {
+		memset(mn, 0, sizeof(*mn));
+		mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
+		mn->p.req.entry_count = 1;
+		mn->p.req.options = cpu_to_le16(options);
+
+		DEBUG16(printk("%s(%ld): Dump of Verify Request.\n", __func__,
+		    ha->host_no));
+		DEBUG16(qla2x00_dump_buffer((uint8_t *)mn,
+		    sizeof(*mn)));
+
+		rval = qla2x00_issue_iocb_timeout(ha, mn, mn_dma, 0, 120);
+		if (rval != QLA_SUCCESS) {
+			DEBUG2_16(printk("%s(%ld): failed to issue Verify "
+			    "IOCB (%x).\n", __func__, ha->host_no, rval));
+			goto verify_done;
+		}
+
+		DEBUG16(printk("%s(%ld): Dump of Verify Response.\n", __func__,
+		    ha->host_no));
+		DEBUG16(qla2x00_dump_buffer((uint8_t *)mn,
+		    sizeof(*mn)));
+
+		status[0] = le16_to_cpu(mn->p.rsp.comp_status);
+		status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
+		    le16_to_cpu(mn->p.rsp.failure_code) : 0;
+		DEBUG2_16(printk("%s(%ld): cs=%x fc=%x\n", __func__,
+		    ha->host_no, status[0], status[1]));
+
+		if (status[0] != CS_COMPLETE) {
+			rval = QLA_FUNCTION_FAILED;
+			if (!(options & VCO_DONT_UPDATE_FW)) {
+				DEBUG2_16(printk("%s(%ld): Firmware update "
+				    "failed. Retrying without firmware "
+				    "update.\n", __func__, ha->host_no));
+				options |= VCO_DONT_UPDATE_FW;
+				options &= ~VCO_FORCE_UPDATE;
+				retry = 1;
+			}
+		} else {
+			DEBUG2_16(printk("%s(%ld): firmware updated to %x.\n",
+			    __func__, ha->host_no,
+			    le32_to_cpu(mn->p.rsp.fw_ver)));
+
+			/* NOTE: we only update OP firmware. */
+			spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
+			ha->cs84xx->op_fw_version =
+			    le32_to_cpu(mn->p.rsp.fw_ver);
+			spin_unlock_irqrestore(&ha->cs84xx->access_lock,
+			    flags);
+		}
+	} while (retry);
+
+verify_done:
+	dma_pool_free(ha->s_dma_pool, mn, mn_dma);
+
+	if (rval != QLA_SUCCESS) {
+		DEBUG2_16(printk("%s(%ld): failed=%x.\n", __func__,
+		    ha->host_no, rval));
+	} else {
+		DEBUG16(printk("%s(%ld): done.\n", __func__, ha->host_no));
+	}
+
+	return rval;
+}
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index cf784cd..f2b0497 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -1,20 +1,8 @@
 /*
- *                  QLOGIC LINUX SOFTWARE
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c)  2003-2008 QLogic Corporation
  *
- * QLogic ISP2x00 device driver for Linux 2.6.x
- * Copyright (C) 2003-2005 QLogic Corporation
- * (www.qlogic.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
+ * See LICENSE.qla2xxx for copyright and licensing details.
  */
 #include "qla_def.h"
 
@@ -28,8 +16,6 @@
 #include <scsi/scsicam.h>
 #include <linux/delay.h>
 
-void qla2x00_vp_stop_timer(scsi_qla_host_t *);
-
 void
 qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
 {
@@ -268,9 +254,17 @@
 static int
 qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
 {
+	scsi_qla_host_t *ha = vha->parent;
+
 	if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
 		/* VP acquired. complete port configuration */
-		qla24xx_configure_vp(vha);
+		if (atomic_read(&ha->loop_state) == LOOP_READY) {
+			qla24xx_configure_vp(vha);
+		} else {
+			set_bit(VP_IDX_ACQUIRED, &vha->vp_flags);
+			set_bit(VP_DPC_NEEDED, &ha->dpc_flags);
+		}
+
 		return 0;
 	}
 
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 3c1b433..8b33b16 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2005 QLogic Corporation
+ * Copyright (c)  2003-2008 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -26,9 +26,6 @@
  */
 static struct kmem_cache *srb_cachep;
 
-/*
- * Ioctl related information.
- */
 int num_hosts;
 int ql2xlogintimeout = 20;
 module_param(ql2xlogintimeout, int, S_IRUGO|S_IRUSR);
@@ -103,9 +100,9 @@
 		void (*fn)(struct scsi_cmnd *));
 static int qla2xxx_eh_abort(struct scsi_cmnd *);
 static int qla2xxx_eh_device_reset(struct scsi_cmnd *);
+static int qla2xxx_eh_target_reset(struct scsi_cmnd *);
 static int qla2xxx_eh_bus_reset(struct scsi_cmnd *);
 static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
-static int qla2x00_device_reset(scsi_qla_host_t *, fc_port_t *);
 
 static int qla2x00_change_queue_depth(struct scsi_device *, int);
 static int qla2x00_change_queue_type(struct scsi_device *, int);
@@ -117,6 +114,7 @@
 
 	.eh_abort_handler	= qla2xxx_eh_abort,
 	.eh_device_reset_handler = qla2xxx_eh_device_reset,
+	.eh_target_reset_handler = qla2xxx_eh_target_reset,
 	.eh_bus_reset_handler	= qla2xxx_eh_bus_reset,
 	.eh_host_reset_handler	= qla2xxx_eh_host_reset,
 
@@ -148,6 +146,7 @@
 
 	.eh_abort_handler	= qla2xxx_eh_abort,
 	.eh_device_reset_handler = qla2xxx_eh_device_reset,
+	.eh_target_reset_handler = qla2xxx_eh_target_reset,
 	.eh_bus_reset_handler	= qla2xxx_eh_bus_reset,
 	.eh_host_reset_handler	= qla2xxx_eh_host_reset,
 
@@ -253,9 +252,9 @@
 
 		strcpy(str, "PCIe (");
 		if (lspeed == 1)
-			strcat(str, "2.5Gb/s ");
+			strcat(str, "2.5GT/s ");
 		else if (lspeed == 2)
-			strcat(str, "5.0Gb/s ");
+			strcat(str, "5.0GT/s ");
 		else
 			strcat(str, "<unknown> ");
 		snprintf(lwstr, sizeof(lwstr), "x%d)", lwidth);
@@ -340,6 +339,8 @@
 		strcat(str, "[T10 CRC] ");
 	if (ha->fw_attributes & BIT_5)
 		strcat(str, "[VI] ");
+	if (ha->fw_attributes & BIT_10)
+		strcat(str, "[84XX] ");
 	if (ha->fw_attributes & BIT_13)
 		strcat(str, "[Experimental]");
 	return str;
@@ -570,8 +571,6 @@
 	else
 		return_status = QLA_FUNCTION_FAILED;
 
-	DEBUG2(printk("%s return_status=%d\n",__func__,return_status));
-
 	return (return_status);
 }
 
@@ -685,7 +684,6 @@
 
 		DEBUG2(printk("%s(%ld): aborting sp %p from RISC. pid=%ld.\n",
 		    __func__, ha->host_no, sp, serial));
-		DEBUG3(qla2x00_print_scsi_cmd(cmd));
 
 		spin_unlock_irqrestore(&pha->hardware_lock, flags);
 		if (ha->isp_ops->abort_command(ha, sp)) {
@@ -719,190 +717,122 @@
 	return ret;
 }
 
-/**************************************************************************
-* qla2x00_eh_wait_for_pending_target_commands
-*
-* Description:
-*    Waits for all the commands to come back from the specified target.
-*
-* Input:
-*    ha - pointer to scsi_qla_host structure.
-*    t  - target
-* Returns:
-*    Either SUCCESS or FAILED.
-*
-* Note:
-**************************************************************************/
+enum nexus_wait_type {
+	WAIT_HOST = 0,
+	WAIT_TARGET,
+	WAIT_LUN,
+};
+
 static int
-qla2x00_eh_wait_for_pending_target_commands(scsi_qla_host_t *ha, unsigned int t)
+qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *ha, unsigned int t,
+    unsigned int l, enum nexus_wait_type type)
 {
-	int	cnt;
-	int	status;
-	srb_t		*sp;
-	struct scsi_cmnd *cmd;
+	int cnt, match, status;
+	srb_t *sp;
 	unsigned long flags;
 	scsi_qla_host_t *pha = to_qla_parent(ha);
 
-	status = 0;
-
-	/*
-	 * Waiting for all commands for the designated target in the active
-	 * array
-	 */
-	for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
-		spin_lock_irqsave(&pha->hardware_lock, flags);
+	status = QLA_SUCCESS;
+	spin_lock_irqsave(&pha->hardware_lock, flags);
+	for (cnt = 1; status == QLA_SUCCESS && cnt < MAX_OUTSTANDING_COMMANDS;
+	    cnt++) {
 		sp = pha->outstanding_cmds[cnt];
-		if (sp) {
-			cmd = sp->cmd;
-			spin_unlock_irqrestore(&pha->hardware_lock, flags);
-			if (cmd->device->id == t &&
-			    ha->vp_idx == sp->ha->vp_idx) {
-				if (!qla2x00_eh_wait_on_command(ha, cmd)) {
-					status = 1;
-					break;
-				}
-			}
-		} else {
-			spin_unlock_irqrestore(&pha->hardware_lock, flags);
+		if (!sp)
+			continue;
+		if (ha->vp_idx != sp->ha->vp_idx)
+			continue;
+		match = 0;
+		switch (type) {
+		case WAIT_HOST:
+			match = 1;
+			break;
+		case WAIT_TARGET:
+			match = sp->cmd->device->id == t;
+			break;
+		case WAIT_LUN:
+			match = (sp->cmd->device->id == t &&
+			    sp->cmd->device->lun == l);
+			break;
 		}
+		if (!match)
+			continue;
+
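+		/* Drop the lock while waiting for this command to complete. */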
+		spin_unlock_irqrestore(&pha->hardware_lock, flags);
+		status = qla2x00_eh_wait_on_command(ha, sp->cmd);
+		spin_lock_irqsave(&pha->hardware_lock, flags);
 	}
-	return (status);
+	spin_unlock_irqrestore(&pha->hardware_lock, flags);
+
+	return status;
 }
 
+static char *reset_errors[] = {
+	"HBA not online",
+	"HBA not ready",
+	"Task management failed",
+	"Waiting for command completions",
+};
 
-/**************************************************************************
-* qla2xxx_eh_device_reset
-*
-* Description:
-*    The device reset function will reset the target and abort any
-*    executing commands.
-*
-*    NOTE: The use of SP is undefined within this context.  Do *NOT*
-*          attempt to use this value, even if you determine it is
-*          non-null.
-*
-* Input:
-*    cmd = Linux SCSI command packet of the command that cause the
-*          bus device reset.
-*
-* Returns:
-*    SUCCESS/FAILURE (defined as macro in scsi.h).
-*
-**************************************************************************/
+static int
+__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
+    struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int))
+{
+	scsi_qla_host_t *ha = shost_priv(cmd->device->host);
+	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
+	int err;
+
+	qla2x00_block_error_handler(cmd);
+
+	if (!fcport)
+		return FAILED;
+
+	qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d): %s RESET ISSUED.\n",
+	    ha->host_no, cmd->device->id, cmd->device->lun, name);
+
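+	/* 'err' indexes reset_errors[] if a step below fails. */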
+	err = 0;
+	if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS)
+		goto eh_reset_failed;
+	err = 1;
+	if (qla2x00_wait_for_loop_ready(ha) != QLA_SUCCESS)
+		goto eh_reset_failed;
+	err = 2;
+	if (do_reset(fcport, cmd->device->lun) != QLA_SUCCESS)
+		goto eh_reset_failed;
+	err = 3;
+	if (qla2x00_eh_wait_for_pending_commands(ha, cmd->device->id,
+	    cmd->device->lun, type) != QLA_SUCCESS)
+		goto eh_reset_failed;
+
+	qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d): %s RESET SUCCEEDED.\n",
+	    ha->host_no, cmd->device->id, cmd->device->lun, name);
+
+	return SUCCESS;
+
+ eh_reset_failed:
+	qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d): %s RESET FAILED: %s.\n",
+	    ha->host_no, cmd->device->id, cmd->device->lun, name,
+	    reset_errors[err]);
+	return FAILED;
+}
+
 static int
 qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
 {
 	scsi_qla_host_t *ha = shost_priv(cmd->device->host);
-	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
-	int ret = FAILED;
-	unsigned int id, lun;
-	unsigned long serial;
 
-	qla2x00_block_error_handler(cmd);
-
-	id = cmd->device->id;
-	lun = cmd->device->lun;
-	serial = cmd->serial_number;
-
-	if (!fcport)
-		return ret;
-
-	qla_printk(KERN_INFO, ha,
-	    "scsi(%ld:%d:%d): DEVICE RESET ISSUED.\n", ha->host_no, id, lun);
-
-	if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS)
-		goto eh_dev_reset_done;
-
-	if (qla2x00_wait_for_loop_ready(ha) == QLA_SUCCESS) {
-		if (qla2x00_device_reset(ha, fcport) == 0)
-			ret = SUCCESS;
-
-#if defined(LOGOUT_AFTER_DEVICE_RESET)
-		if (ret == SUCCESS) {
-			if (fcport->flags & FC_FABRIC_DEVICE) {
-				ha->isp_ops->fabric_logout(ha, fcport->loop_id);
-				qla2x00_mark_device_lost(ha, fcport, 0, 0);
-			}
-		}
-#endif
-	} else {
-		DEBUG2(printk(KERN_INFO
-		    "%s failed: loop not ready\n",__func__));
-	}
-
-	if (ret == FAILED) {
-		DEBUG3(printk("%s(%ld): device reset failed\n",
-		    __func__, ha->host_no));
-		qla_printk(KERN_INFO, ha, "%s: device reset failed\n",
-		    __func__);
-
-		goto eh_dev_reset_done;
-	}
-
-	/* Flush outstanding commands. */
-	if (qla2x00_eh_wait_for_pending_target_commands(ha, id))
-		ret = FAILED;
-	if (ret == FAILED) {
-		DEBUG3(printk("%s(%ld): failed while waiting for commands\n",
-		    __func__, ha->host_no));
-		qla_printk(KERN_INFO, ha,
-		    "%s: failed while waiting for commands\n", __func__);
-	} else
-		qla_printk(KERN_INFO, ha,
-		    "scsi(%ld:%d:%d): DEVICE RESET SUCCEEDED.\n", ha->host_no,
-		    id, lun);
- eh_dev_reset_done:
-	return ret;
+	return __qla2xxx_eh_generic_reset("DEVICE", WAIT_LUN, cmd,
+	    ha->isp_ops->lun_reset);
 }
 
-/**************************************************************************
-* qla2x00_eh_wait_for_pending_commands
-*
-* Description:
-*    Waits for all the commands to come back from the specified host.
-*
-* Input:
-*    ha - pointer to scsi_qla_host structure.
-*
-* Returns:
-*    1 : SUCCESS
-*    0 : FAILED
-*
-* Note:
-**************************************************************************/
 static int
-qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *ha)
+qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
 {
-	int	cnt;
-	int	status;
-	srb_t		*sp;
-	struct scsi_cmnd *cmd;
-	unsigned long flags;
+	scsi_qla_host_t *ha = shost_priv(cmd->device->host);
 
-	status = 1;
-
-	/*
-	 * Waiting for all commands for the designated target in the active
-	 * array
-	 */
-	for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
-		spin_lock_irqsave(&ha->hardware_lock, flags);
-		sp = ha->outstanding_cmds[cnt];
-		if (sp) {
-			cmd = sp->cmd;
-			spin_unlock_irqrestore(&ha->hardware_lock, flags);
-			status = qla2x00_eh_wait_on_command(ha, cmd);
-			if (status == 0)
-				break;
-		}
-		else {
-			spin_unlock_irqrestore(&ha->hardware_lock, flags);
-		}
-	}
-	return (status);
+	return __qla2xxx_eh_generic_reset("TARGET", WAIT_TARGET, cmd,
+	    ha->isp_ops->target_reset);
 }
 
-
 /**************************************************************************
 * qla2xxx_eh_bus_reset
 *
@@ -953,7 +883,8 @@
 		goto eh_bus_reset_done;
 
 	/* Flush outstanding commands. */
-	if (!qla2x00_eh_wait_for_pending_commands(pha))
+	if (qla2x00_eh_wait_for_pending_commands(pha, 0, 0, WAIT_HOST) !=
+	    QLA_SUCCESS)
 		ret = FAILED;
 
 eh_bus_reset_done:
@@ -1024,7 +955,8 @@
 	clear_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags);
 
 	/* Waiting for our command in done_queue to be returned to OS.*/
-	if (qla2x00_eh_wait_for_pending_commands(pha))
+	if (qla2x00_eh_wait_for_pending_commands(pha, 0, 0, WAIT_HOST) ==
+	    QLA_SUCCESS)
 		ret = SUCCESS;
 
 	if (ha->parent)
@@ -1080,7 +1012,7 @@
 			if (fcport->port_type != FCT_TARGET)
 				continue;
 
-			ret = qla2x00_device_reset(ha, fcport);
+			ret = ha->isp_ops->target_reset(fcport, 0);
 			if (ret != QLA_SUCCESS) {
 				DEBUG2_3(printk("%s(%ld): bus_reset failed: "
 				    "target_reset=%d d_id=%x.\n", __func__,
@@ -1095,26 +1027,6 @@
 	return QLA_SUCCESS;
 }
 
-/*
- * qla2x00_device_reset
- *	Issue bus device reset message to the target.
- *
- * Input:
- *	ha = adapter block pointer.
- *	t = SCSI ID.
- *	TARGET_QUEUE_LOCK must be released.
- *	ADAPTER_STATE_LOCK must be released.
- *
- * Context:
- *	Kernel context.
- */
-static int
-qla2x00_device_reset(scsi_qla_host_t *ha, fc_port_t *reset_fcport)
-{
-	/* Abort Target command will clear Reservation */
-	return ha->isp_ops->abort_target(reset_fcport);
-}
-
 void
 qla2x00_abort_all_cmds(scsi_qla_host_t *ha, int res)
 {
@@ -1292,7 +1204,8 @@
 	.enable_intrs		= qla2x00_enable_intrs,
 	.disable_intrs		= qla2x00_disable_intrs,
 	.abort_command		= qla2x00_abort_command,
-	.abort_target		= qla2x00_abort_target,
+	.target_reset		= qla2x00_abort_target,
+	.lun_reset		= qla2x00_lun_reset,
 	.fabric_login		= qla2x00_login_fabric,
 	.fabric_logout		= qla2x00_fabric_logout,
 	.calc_req_entries	= qla2x00_calc_iocbs_32,
@@ -1325,7 +1238,8 @@
 	.enable_intrs		= qla2x00_enable_intrs,
 	.disable_intrs		= qla2x00_disable_intrs,
 	.abort_command		= qla2x00_abort_command,
-	.abort_target		= qla2x00_abort_target,
+	.target_reset		= qla2x00_abort_target,
+	.lun_reset		= qla2x00_lun_reset,
 	.fabric_login		= qla2x00_login_fabric,
 	.fabric_logout		= qla2x00_fabric_logout,
 	.calc_req_entries	= qla2x00_calc_iocbs_32,
@@ -1358,7 +1272,8 @@
 	.enable_intrs		= qla24xx_enable_intrs,
 	.disable_intrs		= qla24xx_disable_intrs,
 	.abort_command		= qla24xx_abort_command,
-	.abort_target		= qla24xx_abort_target,
+	.target_reset		= qla24xx_abort_target,
+	.lun_reset		= qla24xx_lun_reset,
 	.fabric_login		= qla24xx_login_fabric,
 	.fabric_logout		= qla24xx_fabric_logout,
 	.calc_req_entries	= NULL,
@@ -1391,7 +1306,8 @@
 	.enable_intrs		= qla24xx_enable_intrs,
 	.disable_intrs		= qla24xx_disable_intrs,
 	.abort_command		= qla24xx_abort_command,
-	.abort_target		= qla24xx_abort_target,
+	.target_reset		= qla24xx_abort_target,
+	.lun_reset		= qla24xx_lun_reset,
 	.fabric_login		= qla24xx_login_fabric,
 	.fabric_logout		= qla24xx_fabric_logout,
 	.calc_req_entries	= NULL,
@@ -1464,6 +1380,13 @@
 		ha->device_type |= DT_IIDMA;
 		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
 		break;
+	case PCI_DEVICE_ID_QLOGIC_ISP8432:
+		ha->device_type |= DT_ISP8432;
+		ha->device_type |= DT_ZIO_SUPPORTED;
+		ha->device_type |= DT_FWI2;
+		ha->device_type |= DT_IIDMA;
+		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+		break;
 	case PCI_DEVICE_ID_QLOGIC_ISP5422:
 		ha->device_type |= DT_ISP5422;
 		ha->device_type |= DT_FWI2;
@@ -1587,6 +1510,7 @@
 	sht = &qla2x00_driver_template;
 	if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 ||
 	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 ||
+	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8432 ||
 	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 ||
 	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 ||
 	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532) {
@@ -1677,7 +1601,7 @@
 		if (IS_QLA2322(ha) || IS_QLA6322(ha))
 			ha->optrom_size = OPTROM_SIZE_2322;
 		ha->isp_ops = &qla2300_isp_ops;
-	} else if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
+	} else if (IS_QLA24XX_TYPE(ha)) {
 		host->max_id = MAX_TARGETS_2200;
 		ha->mbx_count = MAILBOX_REGISTER_COUNT;
 		ha->request_q_length = REQUEST_ENTRY_CNT_24XX;
@@ -1699,6 +1623,8 @@
 		ha->gid_list_info_size = 8;
 		ha->optrom_size = OPTROM_SIZE_25XX;
 		ha->isp_ops = &qla25xx_isp_ops;
+		ha->hw_event_start = PCI_FUNC(pdev->devfn) ?
+		    FA_HW_EVENT1_ADDR : FA_HW_EVENT0_ADDR;
 	}
 	host->can_queue = ha->request_q_length + 128;
 
@@ -1713,6 +1639,7 @@
 	INIT_LIST_HEAD(&ha->list);
 	INIT_LIST_HEAD(&ha->fcports);
 	INIT_LIST_HEAD(&ha->vp_list);
+	INIT_LIST_HEAD(&ha->work_list);
 
 	set_bit(0, (unsigned long *) ha->vp_idx_map);
 
@@ -1819,6 +1746,8 @@
 
 	qla2x00_dfs_remove(ha);
 
+	qla84xx_put_chip(ha);
+
 	qla2x00_free_sysfs_attr(ha);
 
 	fc_remove_host(ha->host);
@@ -2206,6 +2135,97 @@
 	kfree(ha->nvram);
 }
 
+struct qla_work_evt *
+qla2x00_alloc_work(struct scsi_qla_host *ha, enum qla_work_type type,
+    int locked)
+{
+	struct qla_work_evt *e;
+
+	e = kzalloc(sizeof(struct qla_work_evt), locked ? GFP_ATOMIC :
+	    GFP_KERNEL);
+	if (!e)
+		return NULL;
+
+	INIT_LIST_HEAD(&e->list);
+	e->type = type;
+	e->flags = QLA_EVT_FLAG_FREE;
+	return e;
+}
+
+int
+qla2x00_post_work(struct scsi_qla_host *ha, struct qla_work_evt *e, int locked)
+{
+	unsigned long flags;
+
+	if (!locked)
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+	list_add_tail(&e->list, &ha->work_list);
+	qla2xxx_wake_dpc(ha);
+	if (!locked)
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	return QLA_SUCCESS;
+}
+
+int
+qla2x00_post_aen_work(struct scsi_qla_host *ha, enum fc_host_event_code code,
+    u32 data)
+{
+	struct qla_work_evt *e;
+
+	e = qla2x00_alloc_work(ha, QLA_EVT_AEN, 1);
+	if (!e)
+		return QLA_FUNCTION_FAILED;
+
+	e->u.aen.code = code;
+	e->u.aen.data = data;
+	return qla2x00_post_work(ha, e, 1);
+}
+
+int
+qla2x00_post_hwe_work(struct scsi_qla_host *ha, uint16_t code, uint16_t d1,
+    uint16_t d2, uint16_t d3)
+{
+	struct qla_work_evt *e;
+
+	e = qla2x00_alloc_work(ha, QLA_EVT_HWE_LOG, 1);
+	if (!e)
+		return QLA_FUNCTION_FAILED;
+
+	e->u.hwe.code = code;
+	e->u.hwe.d1 = d1;
+	e->u.hwe.d2 = d2;
+	e->u.hwe.d3 = d3;
+	return qla2x00_post_work(ha, e, 1);
+}
+
+static void
+qla2x00_do_work(struct scsi_qla_host *ha)
+{
+	struct qla_work_evt *e;
+
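+	/* Drain the work list, releasing the lock around each handler. */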
+	spin_lock_irq(&ha->hardware_lock);
+	while (!list_empty(&ha->work_list)) {
+		e = list_entry(ha->work_list.next, struct qla_work_evt, list);
+		list_del_init(&e->list);
+		spin_unlock_irq(&ha->hardware_lock);
+
+		switch (e->type) {
+		case QLA_EVT_AEN:
+			fc_host_post_event(ha->host, fc_get_event_number(),
+			    e->u.aen.code, e->u.aen.data);
+			break;
+		case QLA_EVT_HWE_LOG:
+			qla2xxx_hw_event_log(ha, e->u.hwe.code, e->u.hwe.d1,
+			    e->u.hwe.d2, e->u.hwe.d3);
+			break;
+		}
+		if (e->flags & QLA_EVT_FLAG_FREE)
+			kfree(e);
+		spin_lock_irq(&ha->hardware_lock);
+	}
+	spin_unlock_irq(&ha->hardware_lock);
+}
+
 /**************************************************************************
 * qla2x00_do_dpc
 *   This kernel thread is a task that is schedule by the interrupt handler
@@ -2257,6 +2277,8 @@
 			continue;
 		}
 
+		qla2x00_do_work(ha);
+
 		if (test_and_clear_bit(ISP_ABORT_NEEDED, &ha->dpc_flags)) {
 
 			DEBUG(printk("scsi(%ld): dpc: sched "
@@ -2291,12 +2313,6 @@
 		if (test_and_clear_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags))
 			qla2x00_update_fcports(ha);
 
-		if (test_and_clear_bit(LOOP_RESET_NEEDED, &ha->dpc_flags)) {
-			DEBUG(printk("scsi(%ld): dpc: sched loop_reset()\n",
-			    ha->host_no));
-			qla2x00_loop_reset(ha);
-		}
-
 		if (test_and_clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) &&
 		    (!(test_and_set_bit(RESET_ACTIVE, &ha->dpc_flags)))) {
 
@@ -2367,19 +2383,6 @@
 			    ha->host_no));
 		}
 
-		if ((test_bit(LOGIN_RETRY_NEEDED, &ha->dpc_flags)) &&
-		    atomic_read(&ha->loop_state) != LOOP_DOWN) {
-
-			clear_bit(LOGIN_RETRY_NEEDED, &ha->dpc_flags);
-			DEBUG(printk("scsi(%ld): qla2x00_login_retry()\n",
-			    ha->host_no));
-
-			set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
-
-			DEBUG(printk("scsi(%ld): qla2x00_login_retry - end\n",
-			    ha->host_no));
-		}
-
 		if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) {
 
 			DEBUG(printk("scsi(%ld): qla2x00_loop_resync()\n",
@@ -2397,18 +2400,6 @@
 			    ha->host_no));
 		}
 
-		if (test_and_clear_bit(FCPORT_RESCAN_NEEDED, &ha->dpc_flags)) {
-
-			DEBUG(printk("scsi(%ld): Rescan flagged fcports...\n",
-			    ha->host_no));
-
-			qla2x00_rescan_fcports(ha);
-
-			DEBUG(printk("scsi(%ld): Rescan flagged fcports..."
-			    "end.\n",
-			    ha->host_no));
-		}
-
 		if (!ha->interrupts_on)
 			ha->isp_ops->enable_intrs(ha);
 
@@ -2586,7 +2577,8 @@
 			set_bit(RESTART_QUEUES_NEEDED, &ha->dpc_flags);
 			start_dpc++;
 
-			if (!(ha->device_flags & DFLG_NO_CABLE)) {
+			if (!(ha->device_flags & DFLG_NO_CABLE) &&
+			    !ha->parent) {
 				DEBUG(printk("scsi(%ld): Loop down - "
 				    "aborting ISP.\n",
 				    ha->host_no));
@@ -2610,10 +2602,8 @@
 	/* Schedule the DPC routine if needed */
 	if ((test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) ||
 	    test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) ||
-	    test_bit(LOOP_RESET_NEEDED, &ha->dpc_flags) ||
 	    test_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags) ||
 	    start_dpc ||
-	    test_bit(LOGIN_RETRY_NEEDED, &ha->dpc_flags) ||
 	    test_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) ||
 	    test_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags) ||
 	    test_bit(VP_DPC_NEEDED, &ha->dpc_flags) ||
@@ -2665,7 +2655,7 @@
 		blob = &qla_fw_blobs[FW_ISP2300];
 	} else if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
 		blob = &qla_fw_blobs[FW_ISP2322];
-	} else if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
+	} else if (IS_QLA24XX_TYPE(ha)) {
 		blob = &qla_fw_blobs[FW_ISP24XX];
 	} else if (IS_QLA25XX(ha)) {
 		blob = &qla_fw_blobs[FW_ISP25XX];
@@ -2815,6 +2805,7 @@
 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6322) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2422) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2432) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8432) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) },
diff --git a/drivers/scsi/qla2xxx/qla_settings.h b/drivers/scsi/qla2xxx/qla_settings.h
index 249e4d9..2801c26 100644
--- a/drivers/scsi/qla2xxx/qla_settings.h
+++ b/drivers/scsi/qla2xxx/qla_settings.h
@@ -1,26 +1,12 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2005 QLogic Corporation
+ * Copyright (c)  2003-2008 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
-/*
- * Compile time Options:
- *     0 - Disable and 1 - Enable
- */
-#define DEBUG_QLA2100		0	/* For Debug of qla2x00 */
-
-#define USE_ABORT_TGT		1	/* Use Abort Target mbx cmd */
-
 #define MAX_RETRIES_OF_ISP_ABORT	5
 
 /* Max time to wait for the loop to be in LOOP_READY state */
 #define MAX_LOOP_TIMEOUT	(60 * 5)
 
-/*
- * Some vendor subsystems do not recover properly after a device reset.  Define
- * the following to force a logout after a successful device reset.
- */
-#undef LOGOUT_AFTER_DEVICE_RESET
-
 #include "qla_version.h"
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 26822c8..1728ab3 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2005 QLogic Corporation
+ * Copyright (c)  2003-2008 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -543,16 +543,149 @@
 	}
 }
 
+void
+qla2xxx_get_flash_info(scsi_qla_host_t *ha)
+{
+#define FLASH_BLK_SIZE_32K	0x8000
+#define FLASH_BLK_SIZE_64K	0x10000
+	uint16_t cnt, chksum;
+	uint16_t *wptr;
+	struct qla_fdt_layout *fdt;
+	uint8_t	man_id, flash_id;
+
+	if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
+		return;
+
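+	/* Borrow the request ring as a scratch buffer for the FDT read. */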
+	wptr = (uint16_t *)ha->request_ring;
+	fdt = (struct qla_fdt_layout *)ha->request_ring;
+	ha->isp_ops->read_optrom(ha, (uint8_t *)ha->request_ring,
+	    FA_FLASH_DESCR_ADDR << 2, OPTROM_BURST_SIZE);
+	if (*wptr == __constant_cpu_to_le16(0xffff))
+		goto no_flash_data;
+	if (fdt->sig[0] != 'Q' || fdt->sig[1] != 'L' || fdt->sig[2] != 'I' ||
+	    fdt->sig[3] != 'D')
+		goto no_flash_data;
+
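+	/* A valid FDT checksums to zero over its 16-bit words. */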
+	for (cnt = 0, chksum = 0; cnt < sizeof(struct qla_fdt_layout) >> 1;
+	    cnt++)
+		chksum += le16_to_cpu(*wptr++);
+	if (chksum) {
+		DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent FDT detected: "
+		    "checksum=0x%x id=%c version=0x%x.\n", chksum, fdt->sig[0],
+		    le16_to_cpu(fdt->version)));
+		DEBUG9(qla2x00_dump_buffer((uint8_t *)fdt, sizeof(*fdt)));
+		goto no_flash_data;
+	}
+
+	ha->fdt_odd_index = le16_to_cpu(fdt->man_id) == 0x1f;
+	ha->fdt_wrt_disable = fdt->wrt_disable_bits;
+	ha->fdt_erase_cmd = flash_conf_to_access_addr(0x0300 | fdt->erase_cmd);
+	ha->fdt_block_size = le32_to_cpu(fdt->block_size);
+	if (fdt->unprotect_sec_cmd) {
+		ha->fdt_unprotect_sec_cmd = flash_conf_to_access_addr(0x0300 |
+		    fdt->unprotect_sec_cmd);
+		ha->fdt_protect_sec_cmd = fdt->protect_sec_cmd ?
+		    flash_conf_to_access_addr(0x0300 | fdt->protect_sec_cmd) :
+		    flash_conf_to_access_addr(0x0336);
+	}
+
+	DEBUG2(qla_printk(KERN_DEBUG, ha, "Flash[FDT]: (0x%x/0x%x) erase=0x%x "
+	    "pro=%x upro=%x idx=%d wrtd=0x%x blk=0x%x.\n",
+	    le16_to_cpu(fdt->man_id), le16_to_cpu(fdt->id), ha->fdt_erase_cmd,
+	    ha->fdt_protect_sec_cmd, ha->fdt_unprotect_sec_cmd,
+	    ha->fdt_odd_index, ha->fdt_wrt_disable, ha->fdt_block_size));
+	return;
+
+no_flash_data:
+	qla24xx_get_flash_manufacturer(ha, &man_id, &flash_id);
+	ha->fdt_wrt_disable = 0x9c;
+	ha->fdt_erase_cmd = flash_conf_to_access_addr(0x03d8);
+	switch (man_id) {
+	case 0xbf: /* STT flash. */
+		if (flash_id == 0x8e)
+			ha->fdt_block_size = FLASH_BLK_SIZE_64K;
+		else
+			ha->fdt_block_size = FLASH_BLK_SIZE_32K;
+
+		if (flash_id == 0x80)
+			ha->fdt_erase_cmd = flash_conf_to_access_addr(0x0352);
+		break;
+	case 0x13: /* ST M25P80. */
+		ha->fdt_block_size = FLASH_BLK_SIZE_64K;
+		break;
+	case 0x1f: /* Atmel 26DF081A. */
+		ha->fdt_odd_index = 1;
+		ha->fdt_block_size = FLASH_BLK_SIZE_64K;
+		ha->fdt_erase_cmd = flash_conf_to_access_addr(0x0320);
+		ha->fdt_unprotect_sec_cmd = flash_conf_to_access_addr(0x0339);
+		ha->fdt_protect_sec_cmd = flash_conf_to_access_addr(0x0336);
+		break;
+	default:
+		/* Default to 64 kb sector size. */
+		ha->fdt_block_size = FLASH_BLK_SIZE_64K;
+		break;
+	}
+
+	DEBUG2(qla_printk(KERN_DEBUG, ha, "Flash[MID]: (0x%x/0x%x) erase=0x%x "
+	    "pro=%x upro=%x idx=%d wrtd=0x%x blk=0x%x.\n", man_id, flash_id,
+	    ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd,
+	    ha->fdt_unprotect_sec_cmd, ha->fdt_odd_index, ha->fdt_wrt_disable,
+	    ha->fdt_block_size));
+}
+
+static void
+qla24xx_unprotect_flash(scsi_qla_host_t *ha)
+{
+	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+
+	/* Enable flash write. */
+	WRT_REG_DWORD(&reg->ctrl_status,
+	    RD_REG_DWORD(&reg->ctrl_status) | CSRX_FLASH_ENABLE);
+	RD_REG_DWORD(&reg->ctrl_status);	/* PCI Posting. */
+
+	if (!ha->fdt_wrt_disable)
+		return;
+
+	/* Disable flash write-protection. */
+	qla24xx_write_flash_dword(ha, flash_conf_to_access_addr(0x101), 0);
+	/* Some flash parts need an additional zero-write to clear bits. */
+	qla24xx_write_flash_dword(ha, flash_conf_to_access_addr(0x101), 0);
+}
+
+static void
+qla24xx_protect_flash(scsi_qla_host_t *ha)
+{
+	uint32_t cnt;
+	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+
+	if (!ha->fdt_wrt_disable)
+		goto skip_wrt_protect;
+
+	/* Enable flash write-protection and wait for completion. */
+	qla24xx_write_flash_dword(ha, flash_conf_to_access_addr(0x101),
+	    ha->fdt_wrt_disable);
+	for (cnt = 300; cnt &&
+	    qla24xx_read_flash_dword(ha,
+		    flash_conf_to_access_addr(0x005)) & BIT_0;
+	    cnt--) {
+		udelay(10);
+	}
+
+skip_wrt_protect:
+	/* Disable flash write. */
+	WRT_REG_DWORD(&reg->ctrl_status,
+	    RD_REG_DWORD(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE);
+	RD_REG_DWORD(&reg->ctrl_status);	/* PCI Posting. */
+}
+
 static int
 qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
     uint32_t dwords)
 {
 	int ret;
 	uint32_t liter, miter;
-	uint32_t sec_mask, rest_addr, conf_addr;
-	uint32_t fdata, findex, cnt;
-	uint8_t	man_id, flash_id;
-	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+	uint32_t sec_mask, rest_addr;
+	uint32_t fdata, findex;
 	dma_addr_t optrom_dma;
 	void *optrom = NULL;
 	uint32_t *s, *d;
@@ -571,51 +704,13 @@
 		}
 	}
 
-	qla24xx_get_flash_manufacturer(ha, &man_id, &flash_id);
-	DEBUG9(printk("%s(%ld): Flash man_id=%d flash_id=%d\n", __func__,
-	    ha->host_no, man_id, flash_id));
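+	/* Derive dword-based sector geometry from the FDT block size. */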
+	rest_addr = (ha->fdt_block_size >> 2) - 1;
+	sec_mask = 0x80000 - (ha->fdt_block_size >> 2);
 
-	conf_addr = flash_conf_to_access_addr(0x03d8);
-	switch (man_id) {
-	case 0xbf: /* STT flash. */
-		if (flash_id == 0x8e) {
-			rest_addr = 0x3fff;
-			sec_mask = 0x7c000;
-		} else {
-			rest_addr = 0x1fff;
-			sec_mask = 0x7e000;
-		}
-		if (flash_id == 0x80)
-			conf_addr = flash_conf_to_access_addr(0x0352);
-		break;
-	case 0x13: /* ST M25P80. */
-		rest_addr = 0x3fff;
-		sec_mask = 0x7c000;
-		break;
-	case 0x1f: // Atmel 26DF081A
-		rest_addr = 0x3fff;
-		sec_mask = 0x7c000;
-		conf_addr = flash_conf_to_access_addr(0x0320);
-		break;
-	default:
-		/* Default to 64 kb sector size. */
-		rest_addr = 0x3fff;
-		sec_mask = 0x7c000;
-		break;
-	}
-
-	/* Enable flash write. */
-	WRT_REG_DWORD(&reg->ctrl_status,
-	    RD_REG_DWORD(&reg->ctrl_status) | CSRX_FLASH_ENABLE);
-	RD_REG_DWORD(&reg->ctrl_status);	/* PCI Posting. */
-
-	/* Disable flash write-protection. */
-	qla24xx_write_flash_dword(ha, flash_conf_to_access_addr(0x101), 0);
-	/* Some flash parts need an additional zero-write to clear bits.*/
-	qla24xx_write_flash_dword(ha, flash_conf_to_access_addr(0x101), 0);
+	qla24xx_unprotect_flash(ha);
 
 	for (liter = 0; liter < dwords; liter++, faddr++, dwptr++) {
-		if (man_id == 0x1f) {
+		if (ha->fdt_odd_index) {
 			findex = faddr << 2;
 			fdata = findex & sec_mask;
 		} else {
@@ -625,13 +720,13 @@
 
 		/* Are we at the beginning of a sector? */
 		if ((findex & rest_addr) == 0) {
-			/* Do sector unprotect at 4K boundry for Atmel part. */
-			if (man_id == 0x1f)
+			/* Do sector unprotect. */
+			if (ha->fdt_unprotect_sec_cmd)
 				qla24xx_write_flash_dword(ha,
-				    flash_conf_to_access_addr(0x0339),
+				    ha->fdt_unprotect_sec_cmd,
 				    (fdata & 0xff00) | ((fdata << 16) &
 				    0xff0000) | ((fdata >> 16) & 0xff));
-			ret = qla24xx_write_flash_dword(ha, conf_addr,
+			ret = qla24xx_write_flash_dword(ha, ha->fdt_erase_cmd,
 			    (fdata & 0xff00) |((fdata << 16) &
 			    0xff0000) | ((fdata >> 16) & 0xff));
 			if (ret != QLA_SUCCESS) {
@@ -681,28 +776,16 @@
 			break;
 		}
 
-		/* Do sector protect at 4K boundry for Atmel part. */
-		if (man_id == 0x1f &&
+		/* Do sector protect. */
+		if (ha->fdt_unprotect_sec_cmd &&
 		    ((faddr & rest_addr) == rest_addr))
 			qla24xx_write_flash_dword(ha,
-			    flash_conf_to_access_addr(0x0336),
+			    ha->fdt_protect_sec_cmd,
 			    (fdata & 0xff00) | ((fdata << 16) &
 			    0xff0000) | ((fdata >> 16) & 0xff));
 	}
 
-	/* Enable flash write-protection and wait for completion. */
-	qla24xx_write_flash_dword(ha, flash_conf_to_access_addr(0x101), 0x9c);
-	for (cnt = 300; cnt &&
-	    qla24xx_read_flash_dword(ha,
-		    flash_conf_to_access_addr(0x005)) & BIT_0;
-	    cnt--) {
-		udelay(10);
-	}
-
-	/* Disable flash write. */
-	WRT_REG_DWORD(&reg->ctrl_status,
-	    RD_REG_DWORD(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE);
-	RD_REG_DWORD(&reg->ctrl_status);	/* PCI Posting. */
+	qla24xx_protect_flash(ha);
 
 	if (optrom)
 		dma_free_coherent(&ha->pdev->dev,
@@ -2221,3 +2304,107 @@
 
 	return ret;
 }
+
+static int
+qla2xxx_hw_event_store(scsi_qla_host_t *ha, uint32_t *fdata)
+{
+	uint32_t d[2], faddr;
+
+	/* Locate first empty entry. */
+	for (;;) {
+		if (ha->hw_event_ptr >=
+		    ha->hw_event_start + FA_HW_EVENT_SIZE) {
+			DEBUG2(qla_printk(KERN_WARNING, ha,
+			    "HW event -- Log Full!\n"));
+			return QLA_MEMORY_ALLOC_FAILED;
+		}
+
+		qla24xx_read_flash_data(ha, d, ha->hw_event_ptr, 2);
+		faddr = flash_data_to_access_addr(ha->hw_event_ptr);
+		ha->hw_event_ptr += FA_HW_EVENT_ENTRY_SIZE;
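+		/* Erased flash reads back as all ones -- this slot is free. */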
+		if (d[0] == __constant_cpu_to_le32(0xffffffff) &&
+		    d[1] == __constant_cpu_to_le32(0xffffffff)) {
+			qla24xx_unprotect_flash(ha);
+
+			qla24xx_write_flash_dword(ha, faddr++,
+			    cpu_to_le32(jiffies));
+			qla24xx_write_flash_dword(ha, faddr++, 0);
+			qla24xx_write_flash_dword(ha, faddr++, *fdata++);
+			qla24xx_write_flash_dword(ha, faddr++, *fdata);
+
+			qla24xx_protect_flash(ha);
+			break;
+		}
+	}
+	return QLA_SUCCESS;
+}
+
+int
+qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1,
+    uint16_t d2, uint16_t d3)
+{
+#define QMARK(a, b, c, d) \
+    cpu_to_le32(LSB(a) << 24 | LSB(b) << 16 | LSB(c) << 8 | LSB(d))
+
+	int rval;
+	uint32_t marker[2], fdata[4];
+
+	if (ha->hw_event_start == 0)
+		return QLA_FUNCTION_FAILED;
+
+	DEBUG2(qla_printk(KERN_WARNING, ha,
+	    "HW event -- code=%x, d1=%x, d2=%x, d3=%x.\n", code, d1, d2, d3));
+
+	/* If the marker has not been found yet, locate it or write it. */
+	if (!ha->flags.hw_event_marker_found) {
+		/* Create marker. */
+		marker[0] = QMARK('L', ha->fw_major_version,
+		    ha->fw_minor_version, ha->fw_subminor_version);
+		marker[1] = QMARK(QLA_DRIVER_MAJOR_VER, QLA_DRIVER_MINOR_VER,
+		    QLA_DRIVER_PATCH_VER, QLA_DRIVER_BETA_VER);
+
+		/* Locate marker. */
+		ha->hw_event_ptr = ha->hw_event_start;
+		for (;;) {
+			qla24xx_read_flash_data(ha, fdata, ha->hw_event_ptr,
+			    4);
+			if (fdata[0] == __constant_cpu_to_le32(0xffffffff) &&
+			    fdata[1] == __constant_cpu_to_le32(0xffffffff))
+				break;
+			ha->hw_event_ptr += FA_HW_EVENT_ENTRY_SIZE;
+			if (ha->hw_event_ptr >=
+			    ha->hw_event_start + FA_HW_EVENT_SIZE) {
+				DEBUG2(qla_printk(KERN_WARNING, ha,
+				    "HW event -- Log Full!\n"));
+				return QLA_MEMORY_ALLOC_FAILED;
+			}
+			if (fdata[2] == marker[0] && fdata[3] == marker[1]) {
+				ha->flags.hw_event_marker_found = 1;
+				break;
+			}
+		}
+		/* No marker, write it. */
+		if (!ha->flags.hw_event_marker_found) {
+			rval = qla2xxx_hw_event_store(ha, marker);
+			if (rval != QLA_SUCCESS) {
+				DEBUG2(qla_printk(KERN_WARNING, ha,
+				    "HW event -- Failed marker write=%x!\n",
+				    rval));
+				return rval;
+			}
+			ha->flags.hw_event_marker_found = 1;
+		}
+	}
+
+	/* Store the error. */
+	fdata[0] = cpu_to_le32(code << 16 | d1);
+	fdata[1] = cpu_to_le32(d2 << 16 | d3);
+	rval = qla2xxx_hw_event_store(ha, fdata);
+	if (rval != QLA_SUCCESS) {
+		DEBUG2(qla_printk(KERN_WARNING, ha,
+		    "HW event -- Failed error write=%x!\n",
+		    rval));
+	}
+
+	return rval;
+}
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index ea08a12..f42f17a 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -1,15 +1,15 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2005 QLogic Corporation
+ * Copyright (c)  2003-2008 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "8.02.00-k9"
+#define QLA2XXX_VERSION      "8.02.01-k1"
 
 #define QLA_DRIVER_MAJOR_VER	8
 #define QLA_DRIVER_MINOR_VER	2
-#define QLA_DRIVER_PATCH_VER	0
+#define QLA_DRIVER_PATCH_VER	1
 #define QLA_DRIVER_BETA_VER	0
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index fe415ec..1b667a7 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -216,6 +216,7 @@
 #define MBOX_CMD_ABOUT_FW			0x0009
 #define MBOX_CMD_PING				0x000B
 #define MBOX_CMD_LUN_RESET			0x0016
+#define MBOX_CMD_TARGET_WARM_RESET		0x0017
 #define MBOX_CMD_GET_MANAGEMENT_DATA		0x001E
 #define MBOX_CMD_GET_FW_STATUS			0x001F
 #define MBOX_CMD_SET_ISNS_SERVICE		0x0021
@@ -677,7 +678,8 @@
 	uint32_t system_defined; /* 04-07 */
 	uint16_t target;	/* 08-09 */
 	uint16_t modifier;	/* 0A-0B */
-#define MM_LUN_RESET	     0
+#define MM_LUN_RESET		0
+#define MM_TGT_WARM_RESET	1
 
 	uint16_t flags;		/* 0C-0D */
 	uint16_t reserved1;	/* 0E-0F */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index a3608e0..96ebfb0 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -27,6 +27,8 @@
 			   struct ddb_entry * ddb_entry);
 int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry,
 		      int lun);
+int qla4xxx_reset_target(struct scsi_qla_host * ha,
+			 struct ddb_entry * ddb_entry);
 int qla4xxx_get_flash(struct scsi_qla_host * ha, dma_addr_t dma_addr,
 		      uint32_t offset, uint32_t len);
 int qla4xxx_get_firmware_status(struct scsi_qla_host * ha);
@@ -68,6 +70,8 @@
 int qla4xxx_process_ddb_changed(struct scsi_qla_host * ha,
 				uint32_t fw_ddb_index, uint32_t state);
 void qla4xxx_dump_buffer(void *b, uint32_t size);
+int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
+	struct ddb_entry *ddb_entry, int lun, uint16_t mrkr_mod);
 
 extern int ql4xextended_error_logging;
 extern int ql4xdiscoverywait;
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index e4461b5..912a674 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -66,8 +66,8 @@
  *
  * This routine issues a marker IOCB.
  **/
-static int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
-				    struct ddb_entry *ddb_entry, int lun)
+int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
+	struct ddb_entry *ddb_entry, int lun, uint16_t mrkr_mod)
 {
 	struct qla4_marker_entry *marker_entry;
 	unsigned long flags = 0;
@@ -87,7 +87,7 @@
 	marker_entry->hdr.entryType = ET_MARKER;
 	marker_entry->hdr.entryCount = 1;
 	marker_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);
-	marker_entry->modifier = cpu_to_le16(MM_LUN_RESET);
+	marker_entry->modifier = cpu_to_le16(mrkr_mod);
 	int_to_scsilun(lun, &marker_entry->lun);
 	wmb();
 
@@ -210,14 +210,6 @@
 	/* Get real lun and adapter */
 	ddb_entry = srb->ddb;
 
-	/* Send marker(s) if needed. */
-	if (ha->marker_needed == 1) {
-		if (qla4xxx_send_marker_iocb(ha, ddb_entry,
-					     cmd->device->lun) != QLA_SUCCESS)
-			return QLA_ERROR;
-
-		ha->marker_needed = 0;
-	}
 	tot_dsds = 0;
 
 	/* Acquire hardware specific lock */
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index fc84db4..a91a57c 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -11,28 +11,6 @@
 #include "ql4_inline.h"
 
 /**
- * qla2x00_process_completed_request() - Process a Fast Post response.
- * @ha: SCSI driver HA context
- * @index: SRB index
- **/
-static void qla4xxx_process_completed_request(struct scsi_qla_host *ha,
-					      uint32_t index)
-{
-	struct srb *srb;
-
-	srb = qla4xxx_del_from_active_array(ha, index);
-	if (srb) {
-		/* Save ISP completion status */
-		srb->cmd->result = DID_OK << 16;
-		qla4xxx_srb_compl(ha, srb);
-	} else {
-		DEBUG2(printk("scsi%ld: Invalid ISP SCSI completion handle = "
-			      "%d\n", ha->host_no, index));
-		set_bit(DPC_RESET_HA, &ha->dpc_flags);
-	}
-}
-
-/**
  * qla4xxx_status_entry - processes status IOCBs
  * @ha: Pointer to host adapter structure.
  * @sts_entry: Pointer to status entry structure.
@@ -47,14 +25,6 @@
 	uint32_t residual;
 	uint16_t sensebytecnt;
 
-	if (sts_entry->completionStatus == SCS_COMPLETE &&
-	    sts_entry->scsiStatus == 0) {
-		qla4xxx_process_completed_request(ha,
-						  le32_to_cpu(sts_entry->
-							      handle));
-		return;
-	}
-
 	srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle));
 	if (!srb) {
 		/* FIXMEdg: Don't we need to reset ISP in this case??? */
@@ -62,6 +32,9 @@
 			      "handle 0x%x, sp=%p. This cmd may have already "
 			      "been completed.\n", ha->host_no, __func__,
 			      le32_to_cpu(sts_entry->handle), srb));
+		dev_warn(&ha->pdev->dev, "%s invalid status entry:"
+			" handle=0x%08x\n", __func__,
+			le32_to_cpu(sts_entry->handle));
+		set_bit(DPC_RESET_HA, &ha->dpc_flags);
 		return;
 	}
 
@@ -88,10 +61,6 @@
 	scsi_status = sts_entry->scsiStatus;
 	switch (sts_entry->completionStatus) {
 	case SCS_COMPLETE:
-		if (scsi_status == 0) {
-			cmd->result = DID_OK << 16;
-			break;
-		}
 
 		if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) {
 			cmd->result = DID_ERROR << 16;
@@ -100,7 +69,8 @@
 
-		if (sts_entry->iscsiFlags &ISCSI_FLAG_RESIDUAL_UNDER) {
+		if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_UNDER) {
 			scsi_set_resid(cmd, residual);
-			if ((scsi_bufflen(cmd) - residual) < cmd->underflow) {
+			if (!scsi_status && ((scsi_bufflen(cmd) - residual) <
+				cmd->underflow)) {
 
 				cmd->result = DID_ERROR << 16;
 
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 35cd73c..c577d79 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -713,6 +713,45 @@
 	return status;
 }
 
+/**
+ * qla4xxx_reset_target - issues a target reset
+ * @ha: Pointer to host adapter structure.
+ * @ddb_entry: Pointer to device database entry
+ *
+ * This routine performs a TARGET RESET on the specified target.
+ * The caller must ensure that the ddb_entry pointer is valid
+ * before calling this routine.
+ **/
+int qla4xxx_reset_target(struct scsi_qla_host *ha,
+			 struct ddb_entry *ddb_entry)
+{
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+	int status = QLA_SUCCESS;
+
+	DEBUG2(printk("scsi%ld:%d: target reset issued\n", ha->host_no,
+		      ddb_entry->os_target_id));
+
+	/*
+	 * Send target reset command to ISP, so that the ISP will return all
+	 * outstanding requests with RESET status
+	 */
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+	mbox_cmd[0] = MBOX_CMD_TARGET_WARM_RESET;
+	mbox_cmd[1] = ddb_entry->fw_ddb_index;
+	mbox_cmd[5] = 0x01;	/* Immediate Command Enable */
+
+	qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
+				&mbox_sts[0]);
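+	/* COMMAND_ERROR from the firmware is also treated as success here. */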
+	if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE &&
+	    mbox_sts[0] != MBOX_STS_COMMAND_ERROR)
+		status = QLA_ERROR;
+
+	return status;
+}
 
 int qla4xxx_get_flash(struct scsi_qla_host * ha, dma_addr_t dma_addr,
 		      uint32_t offset, uint32_t len)
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 8b92f34..0c78694 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -71,6 +71,7 @@
 static int qla4xxx_queuecommand(struct scsi_cmnd *cmd,
 				void (*done) (struct scsi_cmnd *));
 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd);
+static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd);
 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
 static int qla4xxx_slave_alloc(struct scsi_device *device);
 static int qla4xxx_slave_configure(struct scsi_device *device);
@@ -84,6 +85,7 @@
 	.queuecommand		= qla4xxx_queuecommand,
 
 	.eh_device_reset_handler = qla4xxx_eh_device_reset,
+	.eh_target_reset_handler = qla4xxx_eh_target_reset,
 	.eh_host_reset_handler	= qla4xxx_eh_host_reset,
 
 	.slave_configure	= qla4xxx_slave_configure,
@@ -1482,7 +1484,7 @@
 }
 
 /**
- * qla4xxx_eh_wait_for_active_target_commands - wait for active cmds to finish.
+ * qla4xxx_eh_wait_for_commands - wait for active cmds to finish.
- * @ha: pointer to to HBA
- * @t: target id
- * @l: lun id
+ * @ha: pointer to HBA
+ * @stgt: pointer to the SCSI target
+ * @sdev: pointer to the SCSI device; NULL waits on the whole target
@@ -1490,20 +1492,22 @@
- * This function waits for all outstanding commands to a lun to complete. It
+ * This function waits for all outstanding commands to the target (or to a
+ * single device when @sdev is set) to complete. It
  * returns 0 if all pending commands are returned and 1 otherwise.
  **/
-static int qla4xxx_eh_wait_for_active_target_commands(struct scsi_qla_host *ha,
-						 int t, int l)
+static int qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha,
+					struct scsi_target *stgt,
+					struct scsi_device *sdev)
 {
 	int cnt;
 	int status = 0;
 	struct scsi_cmnd *cmd;
 
 	/*
-	 * Waiting for all commands for the designated target in the active
-	 * array
+	 * Waiting for all commands for the designated target or dev
+	 * in the active array
 	 */
 	for (cnt = 0; cnt < ha->host->can_queue; cnt++) {
 		cmd = scsi_host_find_tag(ha->host, cnt);
-		if (cmd && cmd->device->id == t && cmd->device->lun == l) {
+		if (cmd && stgt == scsi_target(cmd->device) &&
+		    (!sdev || sdev == cmd->device)) {
 			if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
 				status++;
 				break;
@@ -1548,24 +1552,19 @@
 		goto eh_dev_reset_done;
 	}
 
-	/* Send marker. */
-	ha->marker_needed = 1;
-
-	/*
-	 * If we are coming down the EH path, wait for all commands to complete
-	 * for the device.
-	 */
-	if (cmd->device->host->shost_state == SHOST_RECOVERY) {
-		if (qla4xxx_eh_wait_for_active_target_commands(ha,
-							  cmd->device->id,
-							  cmd->device->lun)){
-			dev_info(&ha->pdev->dev,
-				   "DEVICE RESET FAILED - waiting for "
-				   "commands.\n");
-			goto eh_dev_reset_done;
-		}
+	if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
+					 cmd->device)) {
+		dev_info(&ha->pdev->dev,
+			   "DEVICE RESET FAILED - waiting for "
+			   "commands.\n");
+		goto eh_dev_reset_done;
 	}
 
+	/* Send marker. */
+	if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
+		MM_LUN_RESET) != QLA_SUCCESS)
+		goto eh_dev_reset_done;
+
 	dev_info(&ha->pdev->dev,
 		   "scsi(%ld:%d:%d:%d): DEVICE RESET SUCCEEDED.\n",
 		   ha->host_no, cmd->device->channel, cmd->device->id,
@@ -1579,6 +1578,59 @@
 }
 
 /**
+ * qla4xxx_eh_target_reset - callback for target reset.
+ * @cmd: Pointer to Linux's SCSI command structure
+ *
+ * This routine is called by the Linux OS to reset the target.
+ **/
+static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
+{
+	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
+	struct ddb_entry *ddb_entry = cmd->device->hostdata;
+	int stat;
+
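+	/* Sequence: warm-reset the target, wait for its outstanding
+	 * commands to drain, then send a marker IOCB. */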
+	if (!ddb_entry)
+		return FAILED;
+
+	starget_printk(KERN_INFO, scsi_target(cmd->device),
+		       "WARM TARGET RESET ISSUED.\n");
+
+	DEBUG2(printk(KERN_INFO
+		      "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
+		      "to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
+		      ha->host_no, cmd, jiffies, cmd->timeout_per_command / HZ,
+		      ha->dpc_flags, cmd->result, cmd->allowed));
+
+	stat = qla4xxx_reset_target(ha, ddb_entry);
+	if (stat != QLA_SUCCESS) {
+		starget_printk(KERN_INFO, scsi_target(cmd->device),
+			       "WARM TARGET RESET FAILED.\n");
+		return FAILED;
+	}
+
+	if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
+					 NULL)) {
+		starget_printk(KERN_INFO, scsi_target(cmd->device),
+			       "WARM TARGET DEVICE RESET FAILED - "
+			       "waiting for commands.\n");
+		return FAILED;
+	}
+
+	/* Send marker. */
+	if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
+		MM_TGT_WARM_RESET) != QLA_SUCCESS) {
+		starget_printk(KERN_INFO, scsi_target(cmd->device),
+			       "WARM TARGET DEVICE RESET FAILED - "
+			       "marker iocb failed.\n");
+		return FAILED;
+	}
+
+	starget_printk(KERN_INFO, scsi_target(cmd->device),
+		       "WARM TARGET RESET SUCCEEDED.\n");
+	return SUCCESS;
+}
+
+/**
  * qla4xxx_eh_host_reset - kernel callback
  * @cmd: Pointer to Linux's SCSI command structure
  *
diff --git a/drivers/scsi/raid_class.c b/drivers/scsi/raid_class.c
index 86e1318..52182a7 100644
--- a/drivers/scsi/raid_class.c
+++ b/drivers/scsi/raid_class.c
@@ -289,7 +289,7 @@
 {
 	struct raid_internal *i = to_raid_internal(r);
 
-	attribute_container_unregister(&i->r.raid_attrs.ac);
+	BUG_ON(attribute_container_unregister(&i->r.raid_attrs.ac));
 
 	kfree(i);
 }
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index c78b836..f6980bd 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -166,6 +166,51 @@
 static DEFINE_MUTEX(host_cmd_pool_mutex);
 
 /**
+ * scsi_pool_alloc_command - internal function to get a fully allocated command
+ * @pool:	slab pool to allocate the command from
+ * @gfp_mask:	mask for the allocation
+ *
+ * Returns a fully allocated command (with the associated sense buffer) or
+ * NULL on failure
+ */
+static struct scsi_cmnd *
+scsi_pool_alloc_command(struct scsi_host_cmd_pool *pool, gfp_t gfp_mask)
+{
+	struct scsi_cmnd *cmd;
+
+	cmd = kmem_cache_alloc(pool->cmd_slab, gfp_mask | pool->gfp_mask);
+	if (!cmd)
+		return NULL;
+
+	memset(cmd, 0, sizeof(*cmd));
+
+	cmd->sense_buffer = kmem_cache_alloc(pool->sense_slab,
+					     gfp_mask | pool->gfp_mask);
+	if (!cmd->sense_buffer) {
+		kmem_cache_free(pool->cmd_slab, cmd);
+		return NULL;
+	}
+
+	return cmd;
+}
+
+/**
+ * scsi_pool_free_command - internal function to release a command
+ * @pool:	slab pool to return the command to
+ * @cmd:	command to release
+ *
+ * The command must previously have been allocated by
+ * scsi_pool_alloc_command.
+ */
+static void
+scsi_pool_free_command(struct scsi_host_cmd_pool *pool,
+			 struct scsi_cmnd *cmd)
+{
+	kmem_cache_free(pool->sense_slab, cmd->sense_buffer);
+	kmem_cache_free(pool->cmd_slab, cmd);
+}
+
+/**
  * __scsi_get_command - Allocate a struct scsi_cmnd
  * @shost: host to transmit command
  * @gfp_mask: allocation mask
@@ -178,20 +223,7 @@
 	struct scsi_cmnd *cmd;
 	unsigned char *buf;
 
-	cmd = kmem_cache_alloc(shost->cmd_pool->cmd_slab,
-			       gfp_mask | shost->cmd_pool->gfp_mask);
-
-	if (likely(cmd)) {
-		buf = kmem_cache_alloc(shost->cmd_pool->sense_slab,
-				       gfp_mask | shost->cmd_pool->gfp_mask);
-		if (likely(buf)) {
-			memset(cmd, 0, sizeof(*cmd));
-			cmd->sense_buffer = buf;
-		} else {
-			kmem_cache_free(shost->cmd_pool->cmd_slab, cmd);
-			cmd = NULL;
-		}
-	}
+	cmd = scsi_pool_alloc_command(shost->cmd_pool, gfp_mask);
 
 	if (unlikely(!cmd)) {
 		unsigned long flags;
@@ -268,11 +300,8 @@
 	}
 	spin_unlock_irqrestore(&shost->free_list_lock, flags);
 
-	if (likely(cmd != NULL)) {
-		kmem_cache_free(shost->cmd_pool->sense_slab,
-				cmd->sense_buffer);
-		kmem_cache_free(shost->cmd_pool->cmd_slab, cmd);
-	}
+	if (likely(cmd != NULL))
+		scsi_pool_free_command(shost->cmd_pool, cmd);
 
 	put_device(dev);
 }
@@ -301,30 +330,16 @@
 }
 EXPORT_SYMBOL(scsi_put_command);
 
-/**
- * scsi_setup_command_freelist - Setup the command freelist for a scsi host.
- * @shost: host to allocate the freelist for.
- *
- * Description: The command freelist protects against system-wide out of memory
- * deadlock by preallocating one SCSI command structure for each host, so the
- * system can always write to a swap file on a device associated with that host.
- *
- * Returns:	Nothing.
- */
-int scsi_setup_command_freelist(struct Scsi_Host *shost)
+static struct scsi_host_cmd_pool *scsi_get_host_cmd_pool(gfp_t gfp_mask)
 {
-	struct scsi_host_cmd_pool *pool;
-	struct scsi_cmnd *cmd;
-
-	spin_lock_init(&shost->free_list_lock);
-	INIT_LIST_HEAD(&shost->free_list);
-
+	struct scsi_host_cmd_pool *retval = NULL, *pool;
 	/*
 	 * Select a command slab for this host and create it if not
 	 * yet existent.
 	 */
 	mutex_lock(&host_cmd_pool_mutex);
-	pool = (shost->unchecked_isa_dma ? &scsi_cmd_dma_pool : &scsi_cmd_pool);
+	pool = (gfp_mask & __GFP_DMA) ? &scsi_cmd_dma_pool :
+		&scsi_cmd_pool;
 	if (!pool->users) {
 		pool->cmd_slab = kmem_cache_create(pool->cmd_name,
 						   sizeof(struct scsi_cmnd), 0,
@@ -342,37 +357,122 @@
 	}
 
 	pool->users++;
-	shost->cmd_pool = pool;
+	retval = pool;
+ fail:
 	mutex_unlock(&host_cmd_pool_mutex);
+	return retval;
+}
 
-	/*
-	 * Get one backup command for this host.
-	 */
-	cmd = kmem_cache_alloc(shost->cmd_pool->cmd_slab,
-			       GFP_KERNEL | shost->cmd_pool->gfp_mask);
-	if (!cmd)
-		goto fail2;
+static void scsi_put_host_cmd_pool(gfp_t gfp_mask)
+{
+	struct scsi_host_cmd_pool *pool;
 
-	cmd->sense_buffer = kmem_cache_alloc(shost->cmd_pool->sense_slab,
-					     GFP_KERNEL |
-					     shost->cmd_pool->gfp_mask);
-	if (!cmd->sense_buffer)
-		goto fail2;
-
-	list_add(&cmd->list, &shost->free_list);
-	return 0;
-
- fail2:
-	if (cmd)
-		kmem_cache_free(shost->cmd_pool->cmd_slab, cmd);
 	mutex_lock(&host_cmd_pool_mutex);
+	pool = (gfp_mask & __GFP_DMA) ? &scsi_cmd_dma_pool :
+		&scsi_cmd_pool;
+	/*
+	 * This may happen if a driver has a mismatched get and put
+	 * of the command pool; the driver should be implicated in
+	 * the stack trace
+	 */
+	BUG_ON(pool->users == 0);
+
 	if (!--pool->users) {
 		kmem_cache_destroy(pool->cmd_slab);
 		kmem_cache_destroy(pool->sense_slab);
 	}
- fail:
 	mutex_unlock(&host_cmd_pool_mutex);
-	return -ENOMEM;
+}
+
+/**
+ * scsi_allocate_command - get a fully allocated SCSI command
+ * @gfp_mask:	allocation mask
+ *
+ * This function is for use outside of the normal host based pools.
+ * It allocates the relevant command and takes an additional reference
+ * on the pool it used.  This function *must* be paired with
+ * scsi_free_command, called with the identical mask; otherwise the
+ * pool reference counts will eventually go wrong and trigger a bug.
+ *
+ * This function should *only* be used by drivers that need a static
+ * command allocation at start of day for internal functions.
+ */
+struct scsi_cmnd *scsi_allocate_command(gfp_t gfp_mask)
+{
+	struct scsi_host_cmd_pool *pool = scsi_get_host_cmd_pool(gfp_mask);
+
+	if (!pool)
+		return NULL;
+
+	return scsi_pool_alloc_command(pool, gfp_mask);
+}
+EXPORT_SYMBOL(scsi_allocate_command);
+
+/**
+ * scsi_free_command - free a command allocated by scsi_allocate_command
+ * @gfp_mask:	mask used in the original allocation
+ * @cmd:	command to free
+ *
+ * Note: using the original allocation mask is vital because that's
+ * what determines which command pool we use to free the command.  Any
+ * mismatch will cause the system to BUG eventually.
+ */
+void scsi_free_command(gfp_t gfp_mask, struct scsi_cmnd *cmd)
+{
+	struct scsi_host_cmd_pool *pool = scsi_get_host_cmd_pool(gfp_mask);
+
+	/*
+	 * this could trigger if the mask passed to scsi_allocate_command
+	 * doesn't match this mask.  Otherwise we're guaranteed that this
+	 * succeeds because scsi_allocate_command must have taken a reference
+	 * on the pool
+	 */
+	BUG_ON(!pool);
+
+	scsi_pool_free_command(pool, cmd);
+	/*
+	 * scsi_put_host_cmd_pool is called twice; once to release the
+	 * reference we took above, and once to release the reference
+	 * originally taken by scsi_allocate_command
+	 */
+	scsi_put_host_cmd_pool(gfp_mask);
+	scsi_put_host_cmd_pool(gfp_mask);
+}
+EXPORT_SYMBOL(scsi_free_command);
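+
+/*
+ * Typical pairing (illustrative sketch only, not an interface added by
+ * this patch):
+ *
+ *	struct scsi_cmnd *cmd = scsi_allocate_command(GFP_KERNEL);
+ *	if (cmd) {
+ *		... issue and wait for the internal command ...
+ *		scsi_free_command(GFP_KERNEL, cmd);
+ *	}
+ *
+ * The identical gfp mask in both calls selects the same command pool,
+ * keeping its reference count balanced.
+ */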
+
+/**
+ * scsi_setup_command_freelist - Setup the command freelist for a scsi host.
+ * @shost: host to allocate the freelist for.
+ *
+ * Description: The command freelist protects against system-wide out of memory
+ * deadlock by preallocating one SCSI command structure for each host, so the
+ * system can always write to a swap file on a device associated with that host.
+ *
+ * Returns:	Nothing.
+ */
+int scsi_setup_command_freelist(struct Scsi_Host *shost)
+{
+	struct scsi_cmnd *cmd;
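+	/* unchecked_isa_dma hosts must draw their commands from the DMA pool */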
+	const gfp_t gfp_mask = shost->unchecked_isa_dma ? GFP_DMA : GFP_KERNEL;
+
+	spin_lock_init(&shost->free_list_lock);
+	INIT_LIST_HEAD(&shost->free_list);
+
+	shost->cmd_pool = scsi_get_host_cmd_pool(gfp_mask);
+
+	if (!shost->cmd_pool)
+		return -ENOMEM;
+
+	/*
+	 * Get one backup command for this host.
+	 */
+	cmd = scsi_pool_alloc_command(shost->cmd_pool, gfp_mask);
+	if (!cmd) {
+		scsi_put_host_cmd_pool(gfp_mask);
+		return -ENOMEM;
+	}
+	list_add(&cmd->list, &shost->free_list);
+	return 0;
 }
 
 /**
@@ -386,17 +486,10 @@
 
 		cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list);
 		list_del_init(&cmd->list);
-		kmem_cache_free(shost->cmd_pool->sense_slab,
-				cmd->sense_buffer);
-		kmem_cache_free(shost->cmd_pool->cmd_slab, cmd);
+		scsi_pool_free_command(shost->cmd_pool, cmd);
 	}
-
-	mutex_lock(&host_cmd_pool_mutex);
-	if (!--shost->cmd_pool->users) {
-		kmem_cache_destroy(shost->cmd_pool->cmd_slab);
-		kmem_cache_destroy(shost->cmd_pool->sense_slab);
-	}
-	mutex_unlock(&host_cmd_pool_mutex);
+	shost->cmd_pool = NULL;
+	scsi_put_host_cmd_pool(shost->unchecked_isa_dma ? GFP_DMA : GFP_KERNEL);
 }
 
 #ifdef CONFIG_SCSI_LOGGING
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index d1777a9..07103c3 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -39,16 +39,18 @@
 #include <linux/vmalloc.h>
 #include <linux/moduleparam.h>
 #include <linux/scatterlist.h>
-
 #include <linux/blkdev.h>
-#include "scsi.h"
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsicam.h>
+#include <scsi/scsi_eh.h>
 
 #include <linux/stat.h>
 
 #include "scsi_logging.h"
-#include "scsi_debug.h"
 
 #define SCSI_DEBUG_VERSION "1.81"
 static const char * scsi_debug_version_date = "20070104";
@@ -146,7 +148,6 @@
 #define DEV_READONLY(TGT)      (0)
 #define DEV_REMOVEABLE(TGT)    (0)
 
-static unsigned int sdebug_store_size;	/* in bytes */
 static unsigned int sdebug_store_sectors;
 static sector_t sdebug_capacity;	/* in sectors */
 
@@ -165,6 +166,9 @@
 
 #define SDEBUG_SENSE_LEN 32
 
+#define SCSI_DEBUG_CANQUEUE  255
+#define SCSI_DEBUG_MAX_CMD_LEN 16
+
 struct sdebug_dev_info {
 	struct list_head dev_list;
 	unsigned char sense_buff[SDEBUG_SENSE_LEN];	/* weak nexus */
@@ -202,30 +206,6 @@
 };
 static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
 
-static struct scsi_host_template sdebug_driver_template = {
-	.proc_info =		scsi_debug_proc_info,
-	.name =			"SCSI DEBUG",
-	.info =			scsi_debug_info,
-	.slave_alloc =		scsi_debug_slave_alloc,
-	.slave_configure =	scsi_debug_slave_configure,
-	.slave_destroy =	scsi_debug_slave_destroy,
-	.ioctl =		scsi_debug_ioctl,
-	.queuecommand =		scsi_debug_queuecommand,
-	.eh_abort_handler =	scsi_debug_abort,
-	.eh_bus_reset_handler = scsi_debug_bus_reset,
-	.eh_device_reset_handler = scsi_debug_device_reset,
-	.eh_host_reset_handler = scsi_debug_host_reset,
-	.bios_param =		scsi_debug_biosparam,
-	.can_queue =		SCSI_DEBUG_CANQUEUE,
-	.this_id =		7,
-	.sg_tablesize =		256,
-	.cmd_per_lun =		16,
-	.max_sectors =		0xffff,
-	.unchecked_isa_dma = 	0,
-	.use_clustering = 	DISABLE_CLUSTERING,
-	.module =		THIS_MODULE,
-};
-
 static unsigned char * fake_storep;	/* ramdisk storage */
 
 static int num_aborts = 0;
@@ -238,8 +218,6 @@
 
 static char sdebug_proc_name[] = "scsi_debug";
 
-static int sdebug_driver_probe(struct device *);
-static int sdebug_driver_remove(struct device *);
 static struct bus_type pseudo_lld_bus;
 
 static struct device_driver sdebug_driverfs_driver = {
@@ -255,94 +233,77 @@
 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
 			           0, 0, 0x0, 0x0};
 
-/* function declarations */
-static int resp_inquiry(struct scsi_cmnd * SCpnt, int target,
-			struct sdebug_dev_info * devip);
-static int resp_requests(struct scsi_cmnd * SCpnt,
-			 struct sdebug_dev_info * devip);
-static int resp_start_stop(struct scsi_cmnd * scp,
-			   struct sdebug_dev_info * devip);
-static int resp_report_tgtpgs(struct scsi_cmnd * scp,
-			      struct sdebug_dev_info * devip);
-static int resp_readcap(struct scsi_cmnd * SCpnt,
-			struct sdebug_dev_info * devip);
-static int resp_readcap16(struct scsi_cmnd * SCpnt,
-			  struct sdebug_dev_info * devip);
-static int resp_mode_sense(struct scsi_cmnd * scp, int target,
-			   struct sdebug_dev_info * devip);
-static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
-			    struct sdebug_dev_info * devip);
-static int resp_log_sense(struct scsi_cmnd * scp,
-			  struct sdebug_dev_info * devip);
-static int resp_read(struct scsi_cmnd * SCpnt, unsigned long long lba,
-		     unsigned int num, struct sdebug_dev_info * devip);
-static int resp_write(struct scsi_cmnd * SCpnt, unsigned long long lba,
-		      unsigned int num, struct sdebug_dev_info * devip);
-static int resp_report_luns(struct scsi_cmnd * SCpnt,
-			    struct sdebug_dev_info * devip);
-static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
-			    unsigned int num, struct sdebug_dev_info *devip);
-static int fill_from_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
-                                int arr_len);
-static int fetch_to_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
-                               int max_arr_len);
-static void timer_intr_handler(unsigned long);
-static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev);
-static void mk_sense_buffer(struct sdebug_dev_info * devip, int key,
-			    int asc, int asq);
-static int check_readiness(struct scsi_cmnd * SCpnt, int reset_only,
-			   struct sdebug_dev_info * devip);
-static int schedule_resp(struct scsi_cmnd * cmnd,
-			 struct sdebug_dev_info * devip,
-			 done_funct_t done, int scsi_result, int delta_jiff);
-static void __init sdebug_build_parts(unsigned char * ramp);
-static void __init init_all_queued(void);
-static void stop_all_queued(void);
-static int stop_queued_cmnd(struct scsi_cmnd * cmnd);
-static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
-			   int target_dev_id, int dev_id_num,
-			   const char * dev_id_str, int dev_id_str_len);
-static int inquiry_evpd_88(unsigned char * arr, int target_dev_id);
-static int do_create_driverfs_files(void);
-static void do_remove_driverfs_files(void);
-
 static int sdebug_add_adapter(void);
 static void sdebug_remove_adapter(void);
-static void sdebug_max_tgts_luns(void);
 
-static struct device pseudo_primary;
-static struct bus_type pseudo_lld_bus;
+static void sdebug_max_tgts_luns(void)
+{
+	struct sdebug_host_info *sdbg_host;
+	struct Scsi_Host *hpnt;
+
+	spin_lock(&sdebug_host_list_lock);
+	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
+		hpnt = sdbg_host->shost;
+		if ((hpnt->this_id >= 0) &&
+		    (scsi_debug_num_tgts > hpnt->this_id))
+			hpnt->max_id = scsi_debug_num_tgts + 1;
+		else
+			hpnt->max_id = scsi_debug_num_tgts;
+		/* allow the REPORT LUNS wlun, not just scsi_debug_max_luns */
+		hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;
+	}
+	spin_unlock(&sdebug_host_list_lock);
+}
+
+static void mk_sense_buffer(struct sdebug_dev_info *devip, int key,
+			    int asc, int asq)
+{
+	unsigned char *sbuff;
+
+	sbuff = devip->sense_buff;
+	memset(sbuff, 0, SDEBUG_SENSE_LEN);
+
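+	/* build fixed- or descriptor-format sense data, per scsi_debug_dsense */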
+	scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq);
+
+	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
+		printk(KERN_INFO "scsi_debug:    [sense_key,asc,ascq]: "
+		      "[0x%x,0x%x,0x%x]\n", key, asc, asq);
+}
 
 static void get_data_transfer_info(unsigned char *cmd,
 				   unsigned long long *lba, unsigned int *num)
 {
-	int i;
-
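+	/* CDB lba and transfer-length fields are big-endian (MSB first) */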
 	switch (*cmd) {
 	case WRITE_16:
 	case READ_16:
-		for (*lba = 0, i = 0; i < 8; ++i) {
-			if (i > 0)
-				*lba <<= 8;
-			*lba += cmd[2 + i];
-		}
-		*num = cmd[13] + (cmd[12] << 8) +
-			(cmd[11] << 16) + (cmd[10] << 24);
+		*lba = (u64)cmd[9] | (u64)cmd[8] << 8 |
+			(u64)cmd[7] << 16 | (u64)cmd[6] << 24 |
+			(u64)cmd[5] << 32 | (u64)cmd[4] << 40 |
+			(u64)cmd[3] << 48 | (u64)cmd[2] << 56;
+
+		*num = (u32)cmd[13] | (u32)cmd[12] << 8 | (u32)cmd[11] << 16 |
+			(u32)cmd[10] << 24;
 		break;
 	case WRITE_12:
 	case READ_12:
-		*lba = cmd[5] + (cmd[4] << 8) +	(cmd[3] << 16) + (cmd[2] << 24);
-		*num = cmd[9] + (cmd[8] << 8) +	(cmd[7] << 16) + (cmd[6] << 24);
+		*lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
+			(u32)cmd[2] << 24;
+
+		*num = (u32)cmd[9] | (u32)cmd[8] << 8 | (u32)cmd[7] << 16 |
+			(u32)cmd[6] << 24;
 		break;
 	case WRITE_10:
 	case READ_10:
 	case XDWRITEREAD_10:
-		*lba = cmd[5] + (cmd[4] << 8) +	(cmd[3] << 16) + (cmd[2] << 24);
-		*num = cmd[8] + (cmd[7] << 8);
+		*lba = (u32)cmd[5] | (u32)cmd[4] << 8 |	(u32)cmd[3] << 16 |
+			(u32)cmd[2] << 24;
+
+		*num = (u32)cmd[8] | (u32)cmd[7] << 8;
 		break;
 	case WRITE_6:
 	case READ_6:
-		*lba = cmd[3] + (cmd[2] << 8) + ((cmd[1] & 0x1f) << 16);
+		*lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
+			(u32)(cmd[1] & 0x1f) << 16;
 		*num = (0 == cmd[4]) ? 256 : cmd[4];
 		break;
 	default:
@@ -350,237 +311,6 @@
 	}
 }
 
-static
-int scsi_debug_queuecommand(struct scsi_cmnd * SCpnt, done_funct_t done)
-{
-	unsigned char *cmd = (unsigned char *) SCpnt->cmnd;
-	int len, k;
-	unsigned int num;
-	unsigned long long lba;
-	int errsts = 0;
-	int target = SCpnt->device->id;
-	struct sdebug_dev_info * devip = NULL;
-	int inj_recovered = 0;
-	int inj_transport = 0;
-	int delay_override = 0;
-
-	if (done == NULL)
-		return 0;	/* assume mid level reprocessing command */
-
-	scsi_set_resid(SCpnt, 0);
-	if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) {
-		printk(KERN_INFO "scsi_debug: cmd ");
-		for (k = 0, len = SCpnt->cmd_len; k < len; ++k)
-			printk("%02x ", (int)cmd[k]);
-		printk("\n");
-	}
-        if(target == sdebug_driver_template.this_id) {
-		printk(KERN_INFO "scsi_debug: initiator's id used as "
-		       "target!\n");
-		return schedule_resp(SCpnt, NULL, done,
-				     DID_NO_CONNECT << 16, 0);
-        }
-
-	if ((SCpnt->device->lun >= scsi_debug_max_luns) &&
-	    (SCpnt->device->lun != SAM2_WLUN_REPORT_LUNS))
-		return schedule_resp(SCpnt, NULL, done,
-				     DID_NO_CONNECT << 16, 0);
-	devip = devInfoReg(SCpnt->device);
-	if (NULL == devip)
-		return schedule_resp(SCpnt, NULL, done,
-				     DID_NO_CONNECT << 16, 0);
-
-        if ((scsi_debug_every_nth != 0) &&
-            (++scsi_debug_cmnd_count >= abs(scsi_debug_every_nth))) {
-                scsi_debug_cmnd_count = 0;
-		if (scsi_debug_every_nth < -1)
-			scsi_debug_every_nth = -1;
-		if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
-			return 0; /* ignore command causing timeout */
-		else if (SCSI_DEBUG_OPT_RECOVERED_ERR & scsi_debug_opts)
-			inj_recovered = 1; /* to reads and writes below */
-		else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & scsi_debug_opts)
-			inj_transport = 1; /* to reads and writes below */
-        }
-
-	if (devip->wlun) {
-		switch (*cmd) {
-		case INQUIRY:
-		case REQUEST_SENSE:
-		case TEST_UNIT_READY:
-		case REPORT_LUNS:
-			break;  /* only allowable wlun commands */
-		default:
-			if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
-				printk(KERN_INFO "scsi_debug: Opcode: 0x%x "
-				       "not supported for wlun\n", *cmd);
-			mk_sense_buffer(devip, ILLEGAL_REQUEST,
-					INVALID_OPCODE, 0);
-			errsts = check_condition_result;
-			return schedule_resp(SCpnt, devip, done, errsts,
-					     0);
-		}
-	}
-
-	switch (*cmd) {
-	case INQUIRY:     /* mandatory, ignore unit attention */
-		delay_override = 1;
-		errsts = resp_inquiry(SCpnt, target, devip);
-		break;
-	case REQUEST_SENSE:	/* mandatory, ignore unit attention */
-		delay_override = 1;
-		errsts = resp_requests(SCpnt, devip);
-		break;
-	case REZERO_UNIT:	/* actually this is REWIND for SSC */
-	case START_STOP:
-		errsts = resp_start_stop(SCpnt, devip);
-		break;
-	case ALLOW_MEDIUM_REMOVAL:
-		if ((errsts = check_readiness(SCpnt, 1, devip)))
-			break;
-		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
-			printk(KERN_INFO "scsi_debug: Medium removal %s\n",
-			        cmd[4] ? "inhibited" : "enabled");
-		break;
-	case SEND_DIAGNOSTIC:     /* mandatory */
-		errsts = check_readiness(SCpnt, 1, devip);
-		break;
-	case TEST_UNIT_READY:     /* mandatory */
-		delay_override = 1;
-		errsts = check_readiness(SCpnt, 0, devip);
-		break;
-        case RESERVE:
-		errsts = check_readiness(SCpnt, 1, devip);
-                break;
-        case RESERVE_10:
-		errsts = check_readiness(SCpnt, 1, devip);
-                break;
-        case RELEASE:
-		errsts = check_readiness(SCpnt, 1, devip);
-                break;
-        case RELEASE_10:
-		errsts = check_readiness(SCpnt, 1, devip);
-                break;
-	case READ_CAPACITY:
-		errsts = resp_readcap(SCpnt, devip);
-		break;
-	case SERVICE_ACTION_IN:
-		if (SAI_READ_CAPACITY_16 != cmd[1]) {
-			mk_sense_buffer(devip, ILLEGAL_REQUEST,
-					INVALID_OPCODE, 0);
-			errsts = check_condition_result;
-			break;
-		}
-		errsts = resp_readcap16(SCpnt, devip);
-		break;
-	case MAINTENANCE_IN:
-		if (MI_REPORT_TARGET_PGS != cmd[1]) {
-			mk_sense_buffer(devip, ILLEGAL_REQUEST,
-					INVALID_OPCODE, 0);
-			errsts = check_condition_result;
-			break;
-		}
-		errsts = resp_report_tgtpgs(SCpnt, devip);
-		break;
-	case READ_16:
-	case READ_12:
-	case READ_10:
-	case READ_6:
-		if ((errsts = check_readiness(SCpnt, 0, devip)))
-			break;
-		if (scsi_debug_fake_rw)
-			break;
-		get_data_transfer_info(cmd, &lba, &num);
-		errsts = resp_read(SCpnt, lba, num, devip);
-		if (inj_recovered && (0 == errsts)) {
-			mk_sense_buffer(devip, RECOVERED_ERROR,
-					THRESHOLD_EXCEEDED, 0);
-			errsts = check_condition_result;
-		} else if (inj_transport && (0 == errsts)) {
-                        mk_sense_buffer(devip, ABORTED_COMMAND,
-                                        TRANSPORT_PROBLEM, ACK_NAK_TO);
-                        errsts = check_condition_result;
-                }
-		break;
-	case REPORT_LUNS:	/* mandatory, ignore unit attention */
-		delay_override = 1;
-		errsts = resp_report_luns(SCpnt, devip);
-		break;
-	case VERIFY:		/* 10 byte SBC-2 command */
-		errsts = check_readiness(SCpnt, 0, devip);
-		break;
-	case WRITE_16:
-	case WRITE_12:
-	case WRITE_10:
-	case WRITE_6:
-		if ((errsts = check_readiness(SCpnt, 0, devip)))
-			break;
-		if (scsi_debug_fake_rw)
-			break;
-		get_data_transfer_info(cmd, &lba, &num);
-		errsts = resp_write(SCpnt, lba, num, devip);
-		if (inj_recovered && (0 == errsts)) {
-			mk_sense_buffer(devip, RECOVERED_ERROR,
-					THRESHOLD_EXCEEDED, 0);
-			errsts = check_condition_result;
-		}
-		break;
-	case MODE_SENSE:
-	case MODE_SENSE_10:
-		errsts = resp_mode_sense(SCpnt, target, devip);
-		break;
-	case MODE_SELECT:
-		errsts = resp_mode_select(SCpnt, 1, devip);
-		break;
-	case MODE_SELECT_10:
-		errsts = resp_mode_select(SCpnt, 0, devip);
-		break;
-	case LOG_SENSE:
-		errsts = resp_log_sense(SCpnt, devip);
-		break;
-	case SYNCHRONIZE_CACHE:
-		delay_override = 1;
-		errsts = check_readiness(SCpnt, 0, devip);
-		break;
-	case WRITE_BUFFER:
-		errsts = check_readiness(SCpnt, 1, devip);
-		break;
-	case XDWRITEREAD_10:
-		if (!scsi_bidi_cmnd(SCpnt)) {
-			mk_sense_buffer(devip, ILLEGAL_REQUEST,
-					INVALID_FIELD_IN_CDB, 0);
-			errsts = check_condition_result;
-			break;
-		}
-
-		errsts = check_readiness(SCpnt, 0, devip);
-		if (errsts)
-			break;
-		if (scsi_debug_fake_rw)
-			break;
-		get_data_transfer_info(cmd, &lba, &num);
-		errsts = resp_read(SCpnt, lba, num, devip);
-		if (errsts)
-			break;
-		errsts = resp_write(SCpnt, lba, num, devip);
-		if (errsts)
-			break;
-		errsts = resp_xdwriteread(SCpnt, lba, num, devip);
-		break;
-	default:
-		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
-			printk(KERN_INFO "scsi_debug: Opcode: 0x%x not "
-			       "supported\n", *cmd);
-		if ((errsts = check_readiness(SCpnt, 1, devip)))
-			break;	/* Unit attention takes precedence */
-		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
-		errsts = check_condition_result;
-		break;
-	}
-	return schedule_resp(SCpnt, devip, done, errsts,
-			     (delay_override ? 0 : scsi_debug_delay));
-}
-
 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
 {
 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
@@ -613,81 +343,37 @@
 }
 
 /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */
-static int fill_from_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
+static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
 				int arr_len)
 {
-	int k, req_len, act_len, len, active;
-	void * kaddr;
-	void * kaddr_off;
-	struct scatterlist *sg;
+	int act_len;
 	struct scsi_data_buffer *sdb = scsi_in(scp);
 
 	if (!sdb->length)
 		return 0;
-	if (!sdb->table.sgl)
-		return (DID_ERROR << 16);
 	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
 		return (DID_ERROR << 16);
-	active = 1;
-	req_len = act_len = 0;
-	for_each_sg(sdb->table.sgl, sg, sdb->table.nents, k) {
-		if (active) {
-			kaddr = (unsigned char *)
-				kmap_atomic(sg_page(sg), KM_USER0);
-			if (NULL == kaddr)
-				return (DID_ERROR << 16);
-			kaddr_off = (unsigned char *)kaddr + sg->offset;
-			len = sg->length;
-			if ((req_len + len) > arr_len) {
-				active = 0;
-				len = arr_len - req_len;
-			}
-			memcpy(kaddr_off, arr + req_len, len);
-			kunmap_atomic(kaddr, KM_USER0);
-			act_len += len;
-		}
-		req_len += sg->length;
-	}
+
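+	/* copy up to arr_len bytes of arr into the data-in scatterlist */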
+	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
+				      arr, arr_len);
 	if (sdb->resid)
 		sdb->resid -= act_len;
 	else
-		sdb->resid = req_len - act_len;
+		sdb->resid = scsi_bufflen(scp) - act_len;
+
 	return 0;
 }
 
 /* Returns number of bytes fetched into 'arr' or -1 if error. */
-static int fetch_to_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
-			       int max_arr_len)
+static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
+			       int arr_len)
 {
-	int k, req_len, len, fin;
-	void * kaddr;
-	void * kaddr_off;
-	struct scatterlist * sg;
-
-	if (0 == scsi_bufflen(scp))
+	if (!scsi_bufflen(scp))
 		return 0;
-	if (NULL == scsi_sglist(scp))
-		return -1;
 	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
 		return -1;
-	req_len = fin = 0;
-	scsi_for_each_sg(scp, sg, scsi_sg_count(scp), k) {
-		kaddr = (unsigned char *)kmap_atomic(sg_page(sg), KM_USER0);
-		if (NULL == kaddr)
-			return -1;
-		kaddr_off = (unsigned char *)kaddr + sg->offset;
-		len = sg->length;
-		if ((req_len + len) > max_arr_len) {
-			len = max_arr_len - req_len;
-			fin = 1;
-		}
-		memcpy(arr + req_len, kaddr_off, len);
-		kunmap_atomic(kaddr, KM_USER0);
-		if (fin)
-			return req_len + len;
-		req_len += sg->length;
-	}
-	return req_len;
+
+	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
 }
 
 
@@ -1159,6 +845,14 @@
 	return 0;
 }
 
+static sector_t get_sdebug_capacity(void)
+{
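+	/* 2048 * 1024 sectors of 512 bytes = 1 GB per virtual gigabyte */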
+	if (scsi_debug_virtual_gb > 0)
+		return 2048 * 1024 * scsi_debug_virtual_gb;
+	else
+		return sdebug_store_sectors;
+}
+
 #define SDEBUG_READCAP_ARR_SZ 8
 static int resp_readcap(struct scsi_cmnd * scp,
 			struct sdebug_dev_info * devip)
@@ -1170,11 +864,7 @@
 	if ((errsts = check_readiness(scp, 1, devip)))
 		return errsts;
 	/* following just in case virtual_gb changed */
-	if (scsi_debug_virtual_gb > 0) {
-		sdebug_capacity = 2048 * 1024;
-		sdebug_capacity *= scsi_debug_virtual_gb;
-	} else
-		sdebug_capacity = sdebug_store_sectors;
+	sdebug_capacity = get_sdebug_capacity();
 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
 	if (sdebug_capacity < 0xffffffff) {
 		capac = (unsigned int)sdebug_capacity - 1;
@@ -1207,11 +897,7 @@
 	alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
 		     + cmd[13]);
 	/* following just in case virtual_gb changed */
-	if (scsi_debug_virtual_gb > 0) {
-		sdebug_capacity = 2048 * 1024;
-		sdebug_capacity *= scsi_debug_virtual_gb;
-	} else
-		sdebug_capacity = sdebug_store_sectors;
+	sdebug_capacity = get_sdebug_capacity();
 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
 	capac = sdebug_capacity - 1;
 	for (k = 0; k < 8; ++k, capac >>= 8)
@@ -1505,13 +1191,9 @@
 		offset = 8;
 	}
 	ap = arr + offset;
-	if ((bd_len > 0) && (0 == sdebug_capacity)) {
-		if (scsi_debug_virtual_gb > 0) {
-			sdebug_capacity = 2048 * 1024;
-			sdebug_capacity *= scsi_debug_virtual_gb;
-		} else
-			sdebug_capacity = sdebug_store_sectors;
-	}
+	if ((bd_len > 0) && (!sdebug_capacity))
+		sdebug_capacity = get_sdebug_capacity();
+
 	if (8 == bd_len) {
 		if (sdebug_capacity > 0xfffffffe) {
 			ap[0] = 0xff;
@@ -1808,25 +1490,53 @@
 		    min(len, SDEBUG_MAX_INQ_ARR_SZ));
 }
 
-static int resp_read(struct scsi_cmnd * SCpnt, unsigned long long lba,
-		     unsigned int num, struct sdebug_dev_info * devip)
+static int check_device_access_params(struct sdebug_dev_info *devi,
+				      unsigned long long lba, unsigned int num)
 {
-	unsigned long iflags;
-	unsigned int block, from_bottom;
-	unsigned long long u;
-	int ret;
-
 	if (lba + num > sdebug_capacity) {
-		mk_sense_buffer(devip, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE,
-				0);
+		mk_sense_buffer(devi, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE, 0);
 		return check_condition_result;
 	}
 	/* transfer length excessive (tie in to block limits VPD page) */
 	if (num > sdebug_store_sectors) {
-		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
-				0);
+		mk_sense_buffer(devi, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
 		return check_condition_result;
 	}
+	return 0;
+}
+
+static int do_device_access(struct scsi_cmnd *scmd,
+			    struct sdebug_dev_info *devi,
+			    unsigned long long lba, unsigned int num, int write)
+{
+	int ret;
+	unsigned int block, rest = 0;
+	int (*func)(struct scsi_cmnd *, unsigned char *, int);
+
+	func = write ? fetch_to_dev_buffer : fill_from_dev_buffer;
+
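+	/* the fake store may be smaller than the reported capacity,
+	 * so wrap accesses that run past its end */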
+	block = do_div(lba, sdebug_store_sectors);
+	if (block + num > sdebug_store_sectors)
+		rest = block + num - sdebug_store_sectors;
+
+	ret = func(scmd, fake_storep + (block * SECT_SIZE),
+		   (num - rest) * SECT_SIZE);
+	if (!ret && rest)
+		ret = func(scmd, fake_storep, rest * SECT_SIZE);
+
+	return ret;
+}
+
+static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba,
+		     unsigned int num, struct sdebug_dev_info *devip)
+{
+	unsigned long iflags;
+	int ret;
+
+	ret = check_device_access_params(devip, lba, num);
+	if (ret)
+		return ret;
+
 	if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
 	    (lba <= OPT_MEDIUM_ERR_ADDR) &&
 	    ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
@@ -1845,74 +1555,30 @@
 		return check_condition_result;
 	}
 	read_lock_irqsave(&atomic_rw, iflags);
-	if ((lba + num) <= sdebug_store_sectors)
-		ret = fill_from_dev_buffer(SCpnt,
-					   fake_storep + (lba * SECT_SIZE),
-			   		   num * SECT_SIZE);
-	else {
-		/* modulo when one arg is 64 bits needs do_div() */
-		u = lba;
-		block = do_div(u, sdebug_store_sectors);
-		from_bottom = 0;
-		if ((block + num) > sdebug_store_sectors)
-			from_bottom = (block + num) - sdebug_store_sectors;
-		ret = fill_from_dev_buffer(SCpnt,
-					   fake_storep + (block * SECT_SIZE),
-			   		   (num - from_bottom) * SECT_SIZE);
-		if ((0 == ret) && (from_bottom > 0))
-			ret = fill_from_dev_buffer(SCpnt, fake_storep,
-						   from_bottom * SECT_SIZE);
-	}
+	ret = do_device_access(SCpnt, devip, lba, num, 0);
 	read_unlock_irqrestore(&atomic_rw, iflags);
 	return ret;
 }
 
-static int resp_write(struct scsi_cmnd * SCpnt, unsigned long long lba,
-		      unsigned int num, struct sdebug_dev_info * devip)
+static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
+		      unsigned int num, struct sdebug_dev_info *devip)
 {
 	unsigned long iflags;
-	unsigned int block, to_bottom;
-	unsigned long long u;
-	int res;
+	int ret;
 
-	if (lba + num > sdebug_capacity) {
-		mk_sense_buffer(devip, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE,
-			       	0);
-		return check_condition_result;
-	}
-	/* transfer length excessive (tie in to block limits VPD page) */
-	if (num > sdebug_store_sectors) {
-		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
-				0);
-		return check_condition_result;
-	}
+	ret = check_device_access_params(devip, lba, num);
+	if (ret)
+		return ret;
 
 	write_lock_irqsave(&atomic_rw, iflags);
-	if ((lba + num) <= sdebug_store_sectors)
-		res = fetch_to_dev_buffer(SCpnt,
-					  fake_storep + (lba * SECT_SIZE),
-			   		  num * SECT_SIZE);
-	else {
-		/* modulo when one arg is 64 bits needs do_div() */
-		u = lba;
-		block = do_div(u, sdebug_store_sectors);
-		to_bottom = 0;
-		if ((block + num) > sdebug_store_sectors)
-			to_bottom = (block + num) - sdebug_store_sectors;
-		res = fetch_to_dev_buffer(SCpnt,
-					  fake_storep + (block * SECT_SIZE),
-			   		  (num - to_bottom) * SECT_SIZE);
-		if ((0 == res) && (to_bottom > 0))
-			res = fetch_to_dev_buffer(SCpnt, fake_storep,
-						  to_bottom * SECT_SIZE);
-	}
+	ret = do_device_access(SCpnt, devip, lba, num, 1);
 	write_unlock_irqrestore(&atomic_rw, iflags);
-	if (-1 == res)
+	if (-1 == ret)
 		return (DID_ERROR << 16);
-	else if ((res < (num * SECT_SIZE)) &&
+	else if ((ret < (num * SECT_SIZE)) &&
 		 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
 		printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, "
-		       " IO sent=%d bytes\n", num * SECT_SIZE, res);
+		       " IO sent=%d bytes\n", num * SECT_SIZE, ret);
 	return 0;
 }
 
@@ -1987,16 +1653,7 @@
 	if (!buf)
 		return ret;
 
-	offset = 0;
-	scsi_for_each_sg(scp, sg, scsi_sg_count(scp), i) {
-		kaddr = (unsigned char *)kmap_atomic(sg_page(sg), KM_USER0);
-		if (!kaddr)
-			goto out;
-
-		memcpy(buf + offset, kaddr + sg->offset, sg->length);
-		offset += sg->length;
-		kunmap_atomic(kaddr, KM_USER0);
-	}
+	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
 
 	offset = 0;
 	for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) {
@@ -2045,7 +1702,73 @@
 	spin_unlock_irqrestore(&queued_arr_lock, iflags);
 }
 
-static int scsi_debug_slave_alloc(struct scsi_device * sdp)
+
+static struct sdebug_dev_info *
+sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
+{
+	struct sdebug_dev_info *devip;
+
+	devip = kzalloc(sizeof(*devip), flags);
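+	/* allocate a fresh device record and link it on the host's list */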
+	if (devip) {
+		devip->sdbg_host = sdbg_host;
+		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
+	}
+	return devip;
+}
+
+static struct sdebug_dev_info *devInfoReg(struct scsi_device *sdev)
+{
+	struct sdebug_host_info *sdbg_host;
+	struct sdebug_dev_info *open_devip = NULL;
+	struct sdebug_dev_info *devip =
+			(struct sdebug_dev_info *)sdev->hostdata;
+
+	if (devip)
+		return devip;
+	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
+	if (!sdbg_host) {
+		printk(KERN_ERR "Host info NULL\n");
+		return NULL;
+	}
+	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
+		if ((devip->used) && (devip->channel == sdev->channel) &&
+		    (devip->target == sdev->id) &&
+		    (devip->lun == sdev->lun))
+			return devip;
+		else {
+			if ((!devip->used) && (!open_devip))
+				open_devip = devip;
+		}
+	}
+	if (!open_devip) { /* try and make a new one */
+		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
+		if (!open_devip) {
+			printk(KERN_ERR "%s: out of memory at line %d\n",
+				__FUNCTION__, __LINE__);
+			return NULL;
+		}
+	}
+
+	open_devip->channel = sdev->channel;
+	open_devip->target = sdev->id;
+	open_devip->lun = sdev->lun;
+	open_devip->sdbg_host = sdbg_host;
+	open_devip->reset = 1;
+	open_devip->used = 1;
+	memset(open_devip->sense_buff, 0, SDEBUG_SENSE_LEN);
+	if (scsi_debug_dsense)
+		open_devip->sense_buff[0] = 0x72;
+	else {
+		open_devip->sense_buff[0] = 0x70;
+		open_devip->sense_buff[7] = 0xa;
+	}
+	if (sdev->lun == SAM2_WLUN_REPORT_LUNS)
+		open_devip->wlun = SAM2_WLUN_REPORT_LUNS & 0xff;
+
+	return open_devip;
+}
+
+static int scsi_debug_slave_alloc(struct scsi_device *sdp)
 {
 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
 		printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %u>\n",
@@ -2054,9 +1777,9 @@
 	return 0;
 }
 
-static int scsi_debug_slave_configure(struct scsi_device * sdp)
+static int scsi_debug_slave_configure(struct scsi_device *sdp)
 {
-	struct sdebug_dev_info * devip;
+	struct sdebug_dev_info *devip;
 
 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
 		printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %u>\n",
@@ -2074,10 +1797,10 @@
 	return 0;
 }
 
-static void scsi_debug_slave_destroy(struct scsi_device * sdp)
+static void scsi_debug_slave_destroy(struct scsi_device *sdp)
 {
-	struct sdebug_dev_info * devip =
-				(struct sdebug_dev_info *)sdp->hostdata;
+	struct sdebug_dev_info *devip =
+		(struct sdebug_dev_info *)sdp->hostdata;
 
 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
 		printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %u>\n",
@@ -2089,84 +1812,44 @@
 	}
 }
 
-static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
+/* Returns 1 if found 'cmnd' and deleted its timer. else returns 0 */
+static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
 {
-	struct sdebug_host_info * sdbg_host;
-	struct sdebug_dev_info * open_devip = NULL;
-	struct sdebug_dev_info * devip =
-			(struct sdebug_dev_info *)sdev->hostdata;
+	unsigned long iflags;
+	int k;
+	struct sdebug_queued_cmd *sqcp;
 
-	if (devip)
-		return devip;
-	sdbg_host = *(struct sdebug_host_info **) sdev->host->hostdata;
-        if(! sdbg_host) {
-                printk(KERN_ERR "Host info NULL\n");
-		return NULL;
-        }
-	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
-		if ((devip->used) && (devip->channel == sdev->channel) &&
-                    (devip->target == sdev->id) &&
-                    (devip->lun == sdev->lun))
-                        return devip;
-		else {
-			if ((!devip->used) && (!open_devip))
-				open_devip = devip;
+	spin_lock_irqsave(&queued_arr_lock, iflags);
+	for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
+		sqcp = &queued_arr[k];
+		if (sqcp->in_use && (cmnd == sqcp->a_cmnd)) {
+			del_timer_sync(&sqcp->cmnd_timer);
+			sqcp->in_use = 0;
+			sqcp->a_cmnd = NULL;
+			break;
 		}
 	}
-	if (NULL == open_devip) { /* try and make a new one */
-		open_devip = kzalloc(sizeof(*open_devip),GFP_ATOMIC);
-		if (NULL == open_devip) {
-			printk(KERN_ERR "%s: out of memory at line %d\n",
-				__FUNCTION__, __LINE__);
-			return NULL;
-		}
-		open_devip->sdbg_host = sdbg_host;
-		list_add_tail(&open_devip->dev_list,
-		&sdbg_host->dev_info_list);
-	}
-        if (open_devip) {
-		open_devip->channel = sdev->channel;
-		open_devip->target = sdev->id;
-		open_devip->lun = sdev->lun;
-		open_devip->sdbg_host = sdbg_host;
-		open_devip->reset = 1;
-		open_devip->used = 1;
-		memset(open_devip->sense_buff, 0, SDEBUG_SENSE_LEN);
-		if (scsi_debug_dsense)
-			open_devip->sense_buff[0] = 0x72;
-		else {
-			open_devip->sense_buff[0] = 0x70;
-			open_devip->sense_buff[7] = 0xa;
-		}
-		if (sdev->lun == SAM2_WLUN_REPORT_LUNS)
-			open_devip->wlun = SAM2_WLUN_REPORT_LUNS & 0xff;
-		return open_devip;
-        }
-        return NULL;
+	spin_unlock_irqrestore(&queued_arr_lock, iflags);
+	return (k < SCSI_DEBUG_CANQUEUE) ? 1 : 0;
 }
 
-static void mk_sense_buffer(struct sdebug_dev_info * devip, int key,
-			    int asc, int asq)
+/* Deletes (stops) timers of all queued commands */
+static void stop_all_queued(void)
 {
-	unsigned char * sbuff;
+	unsigned long iflags;
+	int k;
+	struct sdebug_queued_cmd *sqcp;
 
-	sbuff = devip->sense_buff;
-	memset(sbuff, 0, SDEBUG_SENSE_LEN);
-	if (scsi_debug_dsense) {
-		sbuff[0] = 0x72;  /* descriptor, current */
-		sbuff[1] = key;
-		sbuff[2] = asc;
-		sbuff[3] = asq;
-	} else {
-		sbuff[0] = 0x70;  /* fixed, current */
-		sbuff[2] = key;
-		sbuff[7] = 0xa;	  /* implies 18 byte sense buffer */
-		sbuff[12] = asc;
-		sbuff[13] = asq;
+	spin_lock_irqsave(&queued_arr_lock, iflags);
+	for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
+		sqcp = &queued_arr[k];
+		if (sqcp->in_use && sqcp->a_cmnd) {
+			del_timer_sync(&sqcp->cmnd_timer);
+			sqcp->in_use = 0;
+			sqcp->a_cmnd = NULL;
+		}
 	}
-	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
-		printk(KERN_INFO "scsi_debug:    [sense_key,asc,ascq]: "
-		      "[0x%x,0x%x,0x%x]\n", key, asc, asq);
+	spin_unlock_irqrestore(&queued_arr_lock, iflags);
 }
 
 static int scsi_debug_abort(struct scsi_cmnd * SCpnt)
@@ -2226,7 +1909,7 @@
 		printk(KERN_INFO "scsi_debug: bus_reset\n");
 	++num_bus_resets;
 	if (SCpnt && ((sdp = SCpnt->device)) && ((hp = sdp->host))) {
-		sdbg_host = *(struct sdebug_host_info **) hp->hostdata;
+		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
 		if (sdbg_host) {
 			list_for_each_entry(dev_info,
                                             &sdbg_host->dev_info_list,
@@ -2256,46 +1939,6 @@
 	return SUCCESS;
 }
 
-/* Returns 1 if found 'cmnd' and deleted its timer. else returns 0 */
-static int stop_queued_cmnd(struct scsi_cmnd * cmnd)
-{
-	unsigned long iflags;
-	int k;
-	struct sdebug_queued_cmd * sqcp;
-
-	spin_lock_irqsave(&queued_arr_lock, iflags);
-	for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
-		sqcp = &queued_arr[k];
-		if (sqcp->in_use && (cmnd == sqcp->a_cmnd)) {
-			del_timer_sync(&sqcp->cmnd_timer);
-			sqcp->in_use = 0;
-			sqcp->a_cmnd = NULL;
-			break;
-		}
-	}
-	spin_unlock_irqrestore(&queued_arr_lock, iflags);
-	return (k < SCSI_DEBUG_CANQUEUE) ? 1 : 0;
-}
-
-/* Deletes (stops) timers of all queued commands */
-static void stop_all_queued(void)
-{
-	unsigned long iflags;
-	int k;
-	struct sdebug_queued_cmd * sqcp;
-
-	spin_lock_irqsave(&queued_arr_lock, iflags);
-	for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
-		sqcp = &queued_arr[k];
-		if (sqcp->in_use && sqcp->a_cmnd) {
-			del_timer_sync(&sqcp->cmnd_timer);
-			sqcp->in_use = 0;
-			sqcp->a_cmnd = NULL;
-		}
-	}
-	spin_unlock_irqrestore(&queued_arr_lock, iflags);
-}
-
 /* Initializes timers in queued array */
 static void __init init_all_queued(void)
 {
@@ -2313,7 +1956,8 @@
 	spin_unlock_irqrestore(&queued_arr_lock, iflags);
 }
 
-static void __init sdebug_build_parts(unsigned char * ramp)
+static void __init sdebug_build_parts(unsigned char *ramp,
+				      unsigned long store_size)
 {
 	struct partition * pp;
 	int starts[SDEBUG_MAX_PARTS + 2];
@@ -2321,7 +1965,7 @@
 	int heads_by_sects, start_sec, end_sec;
 
 	/* assume partition table already zeroed */
-	if ((scsi_debug_num_parts < 1) || (sdebug_store_size < 1048576))
+	if ((scsi_debug_num_parts < 1) || (store_size < 1048576))
 		return;
 	if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
 		scsi_debug_num_parts = SDEBUG_MAX_PARTS;
@@ -2419,7 +2063,6 @@
 		return 0;
 	}
 }
-
 /* Note: The following macros create attribute files in the
    /sys/module/scsi_debug/parameters directory. Unfortunately this
    driver is unaware of a change and cannot trigger auxiliary actions
@@ -2736,11 +2379,9 @@
 
 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
 		scsi_debug_virtual_gb = n;
-		if (scsi_debug_virtual_gb > 0) {
-			sdebug_capacity = 2048 * 1024;
-			sdebug_capacity *= scsi_debug_virtual_gb;
-		} else
-			sdebug_capacity = sdebug_store_sectors;
+
+		sdebug_capacity = get_sdebug_capacity();
+
 		return count;
 	}
 	return -EINVAL;
@@ -2756,21 +2397,10 @@
 static ssize_t sdebug_add_host_store(struct device_driver * ddp,
 				     const char * buf, size_t count)
 {
-        int delta_hosts;
-	char work[20];
+	int delta_hosts;
 
-        if (1 != sscanf(buf, "%10s", work))
+	if (sscanf(buf, "%d", &delta_hosts) != 1)
 		return -EINVAL;
-	{	/* temporary hack around sscanf() problem with -ve nums */
-		int neg = 0;
-
-		if ('-' == *work)
-			neg = 1;
-		if (1 != sscanf(work + neg, "%d", &delta_hosts))
-			return -EINVAL;
-		if (neg)
-			delta_hosts = -delta_hosts;
-	}
 	if (delta_hosts > 0) {
 		do {
 			sdebug_add_adapter();
@@ -2782,7 +2412,7 @@
 	}
 	return count;
 }
-DRIVER_ATTR(add_host, S_IRUGO | S_IWUSR, sdebug_add_host_show, 
+DRIVER_ATTR(add_host, S_IRUGO | S_IWUSR, sdebug_add_host_show,
 	    sdebug_add_host_store);
 
 static ssize_t sdebug_vpd_use_hostno_show(struct device_driver * ddp,
@@ -2851,22 +2481,29 @@
 	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_add_host);
 }
 
+static void pseudo_0_release(struct device *dev)
+{
+	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
+		printk(KERN_INFO "scsi_debug: pseudo_0_release() called\n");
+}
+
+static struct device pseudo_primary = {
+	.bus_id		= "pseudo_0",
+	.release	= pseudo_0_release,
+};
+
 static int __init scsi_debug_init(void)
 {
-	unsigned int sz;
+	unsigned long sz;
 	int host_to_add;
 	int k;
 	int ret;
 
 	if (scsi_debug_dev_size_mb < 1)
 		scsi_debug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
-	sdebug_store_size = (unsigned int)scsi_debug_dev_size_mb * 1048576;
-	sdebug_store_sectors = sdebug_store_size / SECT_SIZE;
-	if (scsi_debug_virtual_gb > 0) {
-		sdebug_capacity = 2048 * 1024;
-		sdebug_capacity *= scsi_debug_virtual_gb;
-	} else
-		sdebug_capacity = sdebug_store_sectors;
+	sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
+	sdebug_store_sectors = sz / SECT_SIZE;
+	sdebug_capacity = get_sdebug_capacity();
 
 	/* play around with geometry, don't waste too much on track 0 */
 	sdebug_heads = 8;
@@ -2885,7 +2522,6 @@
 			       (sdebug_sectors_per * sdebug_heads);
 	}
 
-	sz = sdebug_store_size;
 	fake_storep = vmalloc(sz);
 	if (NULL == fake_storep) {
 		printk(KERN_ERR "scsi_debug_init: out of memory, 1\n");
@@ -2893,7 +2529,7 @@
 	}
 	memset(fake_storep, 0, sz);
 	if (scsi_debug_num_parts > 0)
-		sdebug_build_parts(fake_storep);
+		sdebug_build_parts(fake_storep, sz);
 
 	ret = device_register(&pseudo_primary);
 	if (ret < 0) {
@@ -2922,8 +2558,6 @@
 
 	init_all_queued();
 
-	sdebug_driver_template.proc_name = sdebug_proc_name;
-
 	host_to_add = scsi_debug_add_host;
         scsi_debug_add_host = 0;
 
@@ -2972,30 +2606,6 @@
 device_initcall(scsi_debug_init);
 module_exit(scsi_debug_exit);
 
-static void pseudo_0_release(struct device * dev)
-{
-	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
-		printk(KERN_INFO "scsi_debug: pseudo_0_release() called\n");
-}
-
-static struct device pseudo_primary = {
-	.bus_id		= "pseudo_0",
-	.release	= pseudo_0_release,
-};
-
-static int pseudo_lld_bus_match(struct device *dev,
-                          struct device_driver *dev_driver)
-{
-        return 1;
-}
-
-static struct bus_type pseudo_lld_bus = {
-        .name = "pseudo",
-        .match = pseudo_lld_bus_match,
-	.probe = sdebug_driver_probe,
-	.remove = sdebug_driver_remove,
-};
-
 static void sdebug_release_adapter(struct device * dev)
 {
         struct sdebug_host_info *sdbg_host;
@@ -3009,8 +2619,7 @@
 	int k, devs_per_host;
         int error = 0;
         struct sdebug_host_info *sdbg_host;
-        struct sdebug_dev_info *sdbg_devinfo;
-        struct list_head *lh, *lh_sf;
+	struct sdebug_dev_info *sdbg_devinfo, *tmp;
 
         sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
         if (NULL == sdbg_host) {
@@ -3023,16 +2632,13 @@
 
 	devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns;
         for (k = 0; k < devs_per_host; k++) {
-                sdbg_devinfo = kzalloc(sizeof(*sdbg_devinfo),GFP_KERNEL);
-                if (NULL == sdbg_devinfo) {
+		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
+		if (!sdbg_devinfo) {
                         printk(KERN_ERR "%s: out of memory at line %d\n",
                                __FUNCTION__, __LINE__);
                         error = -ENOMEM;
 			goto clean;
                 }
-                sdbg_devinfo->sdbg_host = sdbg_host;
-                list_add_tail(&sdbg_devinfo->dev_list,
-                              &sdbg_host->dev_info_list);
         }
 
         spin_lock(&sdebug_host_list_lock);
@@ -3053,9 +2659,8 @@
         return error;
 
 clean:
-	list_for_each_safe(lh, lh_sf, &sdbg_host->dev_info_list) {
-		sdbg_devinfo = list_entry(lh, struct sdebug_dev_info,
-					  dev_list);
+	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
+				 dev_list) {
 		list_del(&sdbg_devinfo->dev_list);
 		kfree(sdbg_devinfo);
 	}
@@ -3083,6 +2688,263 @@
         --scsi_debug_add_host;
 }
 
+static
+int scsi_debug_queuecommand(struct scsi_cmnd *SCpnt, done_funct_t done)
+{
+	unsigned char *cmd = (unsigned char *) SCpnt->cmnd;
+	int len, k;
+	unsigned int num;
+	unsigned long long lba;
+	int errsts = 0;
+	int target = SCpnt->device->id;
+	struct sdebug_dev_info *devip = NULL;
+	int inj_recovered = 0;
+	int inj_transport = 0;
+	int delay_override = 0;
+
+	scsi_set_resid(SCpnt, 0);
+	if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) {
+		printk(KERN_INFO "scsi_debug: cmd ");
+		for (k = 0, len = SCpnt->cmd_len; k < len; ++k)
+			printk("%02x ", (int)cmd[k]);
+		printk("\n");
+	}
+
+	if (target == SCpnt->device->host->hostt->this_id) {
+		printk(KERN_INFO "scsi_debug: initiator's id used as "
+		       "target!\n");
+		return schedule_resp(SCpnt, NULL, done,
+				     DID_NO_CONNECT << 16, 0);
+	}
+
+	if ((SCpnt->device->lun >= scsi_debug_max_luns) &&
+	    (SCpnt->device->lun != SAM2_WLUN_REPORT_LUNS))
+		return schedule_resp(SCpnt, NULL, done,
+				     DID_NO_CONNECT << 16, 0);
+	devip = devInfoReg(SCpnt->device);
+	if (NULL == devip)
+		return schedule_resp(SCpnt, NULL, done,
+				     DID_NO_CONNECT << 16, 0);
+
+	if ((scsi_debug_every_nth != 0) &&
+	    (++scsi_debug_cmnd_count >= abs(scsi_debug_every_nth))) {
+		scsi_debug_cmnd_count = 0;
+		if (scsi_debug_every_nth < -1)
+			scsi_debug_every_nth = -1;
+		if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
+			return 0; /* ignore command causing timeout */
+		else if (SCSI_DEBUG_OPT_RECOVERED_ERR & scsi_debug_opts)
+			inj_recovered = 1; /* to reads and writes below */
+		else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & scsi_debug_opts)
+			inj_transport = 1; /* to reads and writes below */
+	}
+
+	if (devip->wlun) {
+		switch (*cmd) {
+		case INQUIRY:
+		case REQUEST_SENSE:
+		case TEST_UNIT_READY:
+		case REPORT_LUNS:
+			break;  /* only allowable wlun commands */
+		default:
+			if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
+				printk(KERN_INFO "scsi_debug: Opcode: 0x%x "
+				       "not supported for wlun\n", *cmd);
+			mk_sense_buffer(devip, ILLEGAL_REQUEST,
+					INVALID_OPCODE, 0);
+			errsts = check_condition_result;
+			return schedule_resp(SCpnt, devip, done, errsts,
+					     0);
+		}
+	}
+
+	switch (*cmd) {
+	case INQUIRY:     /* mandatory, ignore unit attention */
+		delay_override = 1;
+		errsts = resp_inquiry(SCpnt, target, devip);
+		break;
+	case REQUEST_SENSE:	/* mandatory, ignore unit attention */
+		delay_override = 1;
+		errsts = resp_requests(SCpnt, devip);
+		break;
+	case REZERO_UNIT:	/* actually this is REWIND for SSC */
+	case START_STOP:
+		errsts = resp_start_stop(SCpnt, devip);
+		break;
+	case ALLOW_MEDIUM_REMOVAL:
+		errsts = check_readiness(SCpnt, 1, devip);
+		if (errsts)
+			break;
+		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
+			printk(KERN_INFO "scsi_debug: Medium removal %s\n",
+			       cmd[4] ? "inhibited" : "enabled");
+		break;
+	case SEND_DIAGNOSTIC:     /* mandatory */
+		errsts = check_readiness(SCpnt, 1, devip);
+		break;
+	case TEST_UNIT_READY:     /* mandatory */
+		delay_override = 1;
+		errsts = check_readiness(SCpnt, 0, devip);
+		break;
+	case RESERVE:
+		errsts = check_readiness(SCpnt, 1, devip);
+		break;
+	case RESERVE_10:
+		errsts = check_readiness(SCpnt, 1, devip);
+		break;
+	case RELEASE:
+		errsts = check_readiness(SCpnt, 1, devip);
+		break;
+	case RELEASE_10:
+		errsts = check_readiness(SCpnt, 1, devip);
+		break;
+	case READ_CAPACITY:
+		errsts = resp_readcap(SCpnt, devip);
+		break;
+	case SERVICE_ACTION_IN:
+		if (SAI_READ_CAPACITY_16 != cmd[1]) {
+			mk_sense_buffer(devip, ILLEGAL_REQUEST,
+					INVALID_OPCODE, 0);
+			errsts = check_condition_result;
+			break;
+		}
+		errsts = resp_readcap16(SCpnt, devip);
+		break;
+	case MAINTENANCE_IN:
+		if (MI_REPORT_TARGET_PGS != cmd[1]) {
+			mk_sense_buffer(devip, ILLEGAL_REQUEST,
+					INVALID_OPCODE, 0);
+			errsts = check_condition_result;
+			break;
+		}
+		errsts = resp_report_tgtpgs(SCpnt, devip);
+		break;
+	case READ_16:
+	case READ_12:
+	case READ_10:
+	case READ_6:
+		errsts = check_readiness(SCpnt, 0, devip);
+		if (errsts)
+			break;
+		if (scsi_debug_fake_rw)
+			break;
+		get_data_transfer_info(cmd, &lba, &num);
+		errsts = resp_read(SCpnt, lba, num, devip);
+		if (inj_recovered && (0 == errsts)) {
+			mk_sense_buffer(devip, RECOVERED_ERROR,
+					THRESHOLD_EXCEEDED, 0);
+			errsts = check_condition_result;
+		} else if (inj_transport && (0 == errsts)) {
+			mk_sense_buffer(devip, ABORTED_COMMAND,
+					TRANSPORT_PROBLEM, ACK_NAK_TO);
+			errsts = check_condition_result;
+		}
+		break;
+	case REPORT_LUNS:	/* mandatory, ignore unit attention */
+		delay_override = 1;
+		errsts = resp_report_luns(SCpnt, devip);
+		break;
+	case VERIFY:		/* 10 byte SBC-2 command */
+		errsts = check_readiness(SCpnt, 0, devip);
+		break;
+	case WRITE_16:
+	case WRITE_12:
+	case WRITE_10:
+	case WRITE_6:
+		errsts = check_readiness(SCpnt, 0, devip);
+		if (errsts)
+			break;
+		if (scsi_debug_fake_rw)
+			break;
+		get_data_transfer_info(cmd, &lba, &num);
+		errsts = resp_write(SCpnt, lba, num, devip);
+		if (inj_recovered && (0 == errsts)) {
+			mk_sense_buffer(devip, RECOVERED_ERROR,
+					THRESHOLD_EXCEEDED, 0);
+			errsts = check_condition_result;
+		}
+		break;
+	case MODE_SENSE:
+	case MODE_SENSE_10:
+		errsts = resp_mode_sense(SCpnt, target, devip);
+		break;
+	case MODE_SELECT:
+		errsts = resp_mode_select(SCpnt, 1, devip);
+		break;
+	case MODE_SELECT_10:
+		errsts = resp_mode_select(SCpnt, 0, devip);
+		break;
+	case LOG_SENSE:
+		errsts = resp_log_sense(SCpnt, devip);
+		break;
+	case SYNCHRONIZE_CACHE:
+		delay_override = 1;
+		errsts = check_readiness(SCpnt, 0, devip);
+		break;
+	case WRITE_BUFFER:
+		errsts = check_readiness(SCpnt, 1, devip);
+		break;
+	case XDWRITEREAD_10:
+		if (!scsi_bidi_cmnd(SCpnt)) {
+			mk_sense_buffer(devip, ILLEGAL_REQUEST,
+					INVALID_FIELD_IN_CDB, 0);
+			errsts = check_condition_result;
+			break;
+		}
+
+		errsts = check_readiness(SCpnt, 0, devip);
+		if (errsts)
+			break;
+		if (scsi_debug_fake_rw)
+			break;
+		get_data_transfer_info(cmd, &lba, &num);
+		errsts = resp_read(SCpnt, lba, num, devip);
+		if (errsts)
+			break;
+		errsts = resp_write(SCpnt, lba, num, devip);
+		if (errsts)
+			break;
+		errsts = resp_xdwriteread(SCpnt, lba, num, devip);
+		break;
+	default:
+		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
+			printk(KERN_INFO "scsi_debug: Opcode: 0x%x not "
+			       "supported\n", *cmd);
+		errsts = check_readiness(SCpnt, 1, devip);
+		if (errsts)
+			break;	/* Unit attention takes precedence */
+		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
+		errsts = check_condition_result;
+		break;
+	}
+	return schedule_resp(SCpnt, devip, done, errsts,
+			     (delay_override ? 0 : scsi_debug_delay));
+}
+
+static struct scsi_host_template sdebug_driver_template = {
+	.proc_info =		scsi_debug_proc_info,
+	.proc_name =		sdebug_proc_name,
+	.name =			"SCSI DEBUG",
+	.info =			scsi_debug_info,
+	.slave_alloc =		scsi_debug_slave_alloc,
+	.slave_configure =	scsi_debug_slave_configure,
+	.slave_destroy =	scsi_debug_slave_destroy,
+	.ioctl =		scsi_debug_ioctl,
+	.queuecommand =		scsi_debug_queuecommand,
+	.eh_abort_handler =	scsi_debug_abort,
+	.eh_bus_reset_handler = scsi_debug_bus_reset,
+	.eh_device_reset_handler = scsi_debug_device_reset,
+	.eh_host_reset_handler = scsi_debug_host_reset,
+	.bios_param =		scsi_debug_biosparam,
+	.can_queue =		SCSI_DEBUG_CANQUEUE,
+	.this_id =		7,
+	.sg_tablesize =		256,
+	.cmd_per_lun =		16,
+	.max_sectors =		0xffff,
+	.use_clustering = 	DISABLE_CLUSTERING,
+	.module =		THIS_MODULE,
+};
+
 static int sdebug_driver_probe(struct device * dev)
 {
         int error = 0;
@@ -3120,9 +2982,8 @@
 
 static int sdebug_driver_remove(struct device * dev)
 {
-        struct list_head *lh, *lh_sf;
         struct sdebug_host_info *sdbg_host;
-        struct sdebug_dev_info *sdbg_devinfo;
+	struct sdebug_dev_info *sdbg_devinfo, *tmp;
 
 	sdbg_host = to_sdebug_host(dev);
 
@@ -3134,9 +2995,8 @@
 
         scsi_remove_host(sdbg_host->shost);
 
-        list_for_each_safe(lh, lh_sf, &sdbg_host->dev_info_list) {
-                sdbg_devinfo = list_entry(lh, struct sdebug_dev_info,
-                                          dev_list);
+	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
+				 dev_list) {
                 list_del(&sdbg_devinfo->dev_list);
                 kfree(sdbg_devinfo);
         }
@@ -3145,20 +3005,15 @@
         return 0;
 }
 
-static void sdebug_max_tgts_luns(void)
+static int pseudo_lld_bus_match(struct device *dev,
+				struct device_driver *dev_driver)
 {
-	struct sdebug_host_info * sdbg_host;
-	struct Scsi_Host *hpnt;
-
-	spin_lock(&sdebug_host_list_lock);
-	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
-		hpnt = sdbg_host->shost;
-		if ((hpnt->this_id >= 0) &&
-		    (scsi_debug_num_tgts > hpnt->this_id))
-			hpnt->max_id = scsi_debug_num_tgts + 1;
-		else
-			hpnt->max_id = scsi_debug_num_tgts;
-		hpnt->max_lun = SAM2_WLUN_REPORT_LUNS; /* scsi_debug_max_luns; */
-	}
-	spin_unlock(&sdebug_host_list_lock);
+	return 1;
 }
+
+static struct bus_type pseudo_lld_bus = {
+	.name = "pseudo",
+	.match = pseudo_lld_bus_match,
+	.probe = sdebug_driver_probe,
+	.remove = sdebug_driver_remove,
+};
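
list_for_each_entry_safe() caches the next node before the loop body runs,
so the body is free to list_del() and kfree() the current entry, which is
what the open-coded list_for_each()/list_entry() pairs above are converted
to. A minimal sketch of the idiom (names mirror the patch; the surrounding
function is omitted):

	struct sdebug_dev_info *devinfo, *tmp;

	/* 'tmp' holds the lookahead, so freeing 'devinfo' is safe */
	list_for_each_entry_safe(devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&devinfo->dev_list);
		kfree(devinfo);
	}
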
diff --git a/drivers/scsi/scsi_debug.h b/drivers/scsi/scsi_debug.h
deleted file mode 100644
index 965dd5e..0000000
--- a/drivers/scsi/scsi_debug.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef _SCSI_DEBUG_H
-
-#include <linux/types.h>
-
-static int scsi_debug_slave_alloc(struct scsi_device *);
-static int scsi_debug_slave_configure(struct scsi_device *);
-static void scsi_debug_slave_destroy(struct scsi_device *);
-static int scsi_debug_queuecommand(struct scsi_cmnd *,
-				   void (*done) (struct scsi_cmnd *));
-static int scsi_debug_ioctl(struct scsi_device *, int, void __user *);
-static int scsi_debug_biosparam(struct scsi_device *, struct block_device *,
-		sector_t, int[]);
-static int scsi_debug_abort(struct scsi_cmnd *);
-static int scsi_debug_bus_reset(struct scsi_cmnd *);
-static int scsi_debug_device_reset(struct scsi_cmnd *);
-static int scsi_debug_host_reset(struct scsi_cmnd *);
-static int scsi_debug_proc_info(struct Scsi_Host *, char *, char **, off_t, int, int);
-static const char * scsi_debug_info(struct Scsi_Host *);
-
-#define SCSI_DEBUG_CANQUEUE  255 	/* needs to be >= 1 */
-
-#define SCSI_DEBUG_MAX_CMD_LEN 16
-
-#endif
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 045a086..221f31e 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -524,6 +524,41 @@
 	return rtn;
 }
 
+static void __scsi_report_device_reset(struct scsi_device *sdev, void *data)
+{
+	sdev->was_reset = 1;
+	sdev->expecting_cc_ua = 1;
+}
+
+/**
+ * scsi_try_target_reset - Ask host to perform a target reset
+ * @scmd:	SCSI cmd used to send a target reset
+ *
+ * Notes:
+ *    There is no timeout for this operation.  If this operation is
+ *    unreliable for a given host, then the host itself needs to put a
+ *    timer on it, and set the host back to a consistent state prior to
+ *    returning.
+ */
+static int scsi_try_target_reset(struct scsi_cmnd *scmd)
+{
+	unsigned long flags;
+	int rtn;
+
+	if (!scmd->device->host->hostt->eh_target_reset_handler)
+		return FAILED;
+
+	rtn = scmd->device->host->hostt->eh_target_reset_handler(scmd);
+	if (rtn == SUCCESS) {
+		spin_lock_irqsave(scmd->device->host->host_lock, flags);
+		__starget_for_each_device(scsi_target(scmd->device), NULL,
+					  __scsi_report_device_reset);
+		spin_unlock_irqrestore(scmd->device->host->host_lock, flags);
+	}
+
+	return rtn;
+}
+
 /**
  * scsi_try_bus_device_reset - Ask host to perform a BDR on a dev
  * @scmd:	SCSI cmd used to send BDR
@@ -542,11 +577,8 @@
 		return FAILED;
 
 	rtn = scmd->device->host->hostt->eh_device_reset_handler(scmd);
-	if (rtn == SUCCESS) {
-		scmd->device->was_reset = 1;
-		scmd->device->expecting_cc_ua = 1;
-	}
-
+	if (rtn == SUCCESS)
+		__scsi_report_device_reset(scmd->device, NULL);
 	return rtn;
 }
 
@@ -584,8 +616,9 @@
 {
 	if (__scsi_try_to_abort_cmd(scmd) != SUCCESS)
 		if (scsi_try_bus_device_reset(scmd) != SUCCESS)
-			if (scsi_try_bus_reset(scmd) != SUCCESS)
-				scsi_try_host_reset(scmd);
+			if (scsi_try_target_reset(scmd) != SUCCESS)
+				if (scsi_try_bus_reset(scmd) != SUCCESS)
+					scsi_try_host_reset(scmd);
 }
 
 /**
@@ -1060,6 +1093,56 @@
 }
 
 /**
+ * scsi_eh_target_reset - send target reset if needed
+ * @shost:	scsi host being recovered.
+ * @work_q:     &list_head for pending commands.
+ * @done_q:	&list_head for processed commands.
+ *
+ * Notes:
+ *    Try a target reset.
+ */
+static int scsi_eh_target_reset(struct Scsi_Host *shost,
+				struct list_head *work_q,
+				struct list_head *done_q)
+{
+	struct scsi_cmnd *scmd, *tgtr_scmd, *next;
+	unsigned int id;
+	int rtn;
+
+	for (id = 0; id <= shost->max_id; id++) {
+		tgtr_scmd = NULL;
+		list_for_each_entry(scmd, work_q, eh_entry) {
+			if (id == scmd_id(scmd)) {
+				tgtr_scmd = scmd;
+				break;
+			}
+		}
+		if (!tgtr_scmd)
+			continue;
+
+		SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending target reset "
+						  "to target %d\n",
+						  current->comm, id));
+		rtn = scsi_try_target_reset(tgtr_scmd);
+		if (rtn == SUCCESS) {
+			list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
+				if (id == scmd_id(scmd))
+					if (!scsi_device_online(scmd->device) ||
+					    !scsi_eh_tur(tgtr_scmd))
+						scsi_eh_finish_cmd(scmd,
+								   done_q);
+			}
+		} else
+			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Target reset"
+							  " failed target: "
+							  "%d\n",
+							  current->comm, id));
+	}
+
+	return list_empty(work_q);
+}
+
+/**
  * scsi_eh_bus_reset - send a bus reset 
  * @shost:	&scsi host being recovered.
  * @work_q:     &list_head for pending commands.
@@ -1447,9 +1530,11 @@
 {
 	if (!scsi_eh_stu(shost, work_q, done_q))
 		if (!scsi_eh_bus_device_reset(shost, work_q, done_q))
-			if (!scsi_eh_bus_reset(shost, work_q, done_q))
-				if (!scsi_eh_host_reset(work_q, done_q))
-					scsi_eh_offline_sdevs(work_q, done_q);
+			if (!scsi_eh_target_reset(shost, work_q, done_q))
+				if (!scsi_eh_bus_reset(shost, work_q, done_q))
+					if (!scsi_eh_host_reset(work_q, done_q))
+						scsi_eh_offline_sdevs(work_q,
+								      done_q);
 }
 EXPORT_SYMBOL_GPL(scsi_eh_ready_devs);
 
@@ -1619,10 +1704,8 @@
 	struct scsi_device *sdev;
 
 	__shost_for_each_device(sdev, shost) {
-		if (channel == sdev_channel(sdev)) {
-			sdev->was_reset = 1;
-			sdev->expecting_cc_ua = 1;
-		}
+		if (channel == sdev_channel(sdev))
+			__scsi_report_device_reset(sdev, NULL);
 	}
 }
 EXPORT_SYMBOL(scsi_report_bus_reset);
@@ -1655,10 +1738,8 @@
 
 	__shost_for_each_device(sdev, shost) {
 		if (channel == sdev_channel(sdev) &&
-		    target == sdev_id(sdev)) {
-			sdev->was_reset = 1;
-			sdev->expecting_cc_ua = 1;
-		}
+		    target == sdev_id(sdev))
+			__scsi_report_device_reset(sdev, NULL);
 	}
 }
 EXPORT_SYMBOL(scsi_report_device_reset);
@@ -1714,6 +1795,11 @@
 		if (rtn == SUCCESS)
 			break;
 		/* FALLTHROUGH */
+	case SCSI_TRY_RESET_TARGET:
+		rtn = scsi_try_target_reset(scmd);
+		if (rtn == SUCCESS)
+			break;
+		/* FALLTHROUGH */
 	case SCSI_TRY_RESET_BUS:
 		rtn = scsi_try_bus_reset(scmd);
 		if (rtn == SUCCESS)
@@ -1907,3 +1993,31 @@
 	}
 }
 EXPORT_SYMBOL(scsi_get_sense_info_fld);
+
+/**
+ * scsi_build_sense_buffer - build sense data in a buffer
+ * @desc:	Sense format (non-zero == descriptor format,
+ * 		0 == fixed format)
+ * @buf:	Where to build sense data
+ * @key:	Sense key
+ * @asc:	Additional sense code
+ * @ascq:	Additional sense code qualifier
+ *
+ **/
+void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq)
+{
+	if (desc) {
+		buf[0] = 0x72;	/* descriptor, current */
+		buf[1] = key;
+		buf[2] = asc;
+		buf[3] = ascq;
+		buf[7] = 0;
+	} else {
+		buf[0] = 0x70;	/* fixed, current */
+		buf[2] = key;
+		buf[7] = 0xa;
+		buf[12] = asc;
+		buf[13] = ascq;
+	}
+}
+EXPORT_SYMBOL(scsi_build_sense_buffer);
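
scsi_build_sense_buffer() fills only the format-dependent byte positions;
callers still set the command result themselves. A minimal usage sketch,
following the converted stex.c call site later in this patch (the asc/ascq
values are taken from that site):

	/* Fixed format (desc == 0): ILLEGAL REQUEST, "invalid field in
	 * CDB" (asc 0x24); the buffer must hold at least 18 bytes. */
	scsi_build_sense_buffer(0, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x24, 0x0);
	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
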
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index f40898d..67f412b 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -784,7 +784,7 @@
  * in req->data_len and req->next_rq->data_len. The upper-layer driver can
  * decide what to do with this information.
  */
-void scsi_end_bidi_request(struct scsi_cmnd *cmd)
+static void scsi_end_bidi_request(struct scsi_cmnd *cmd)
 {
 	struct request *req = cmd->request;
 	unsigned int dlen = req->data_len;
@@ -839,7 +839,7 @@
 	int this_count = scsi_bufflen(cmd);
 	struct request_queue *q = cmd->device->request_queue;
 	struct request *req = cmd->request;
-	int clear_errors = 1;
+	int error = 0;
 	struct scsi_sense_hdr sshdr;
 	int sense_valid = 0;
 	int sense_deferred = 0;
@@ -853,7 +853,6 @@
 	if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
 		req->errors = result;
 		if (result) {
-			clear_errors = 0;
 			if (sense_valid && req->sense) {
 				/*
 				 * SG_IO wants current and deferred errors
@@ -865,6 +864,8 @@
 				memcpy(req->sense, cmd->sense_buffer,  len);
 				req->sense_len = len;
 			}
+			if (!sense_deferred)
+				error = -EIO;
 		}
 		if (scsi_bidi_cmnd(cmd)) {
 			/* will also release_buffers */
@@ -885,14 +886,11 @@
 				      "%d bytes done.\n",
 				      req->nr_sectors, good_bytes));
 
-	if (clear_errors)
-		req->errors = 0;
-
 	/* A number of bytes were successfully read.  If there
 	 * are leftovers and there is some kind of error
 	 * (result != 0), retry the rest.
 	 */
-	if (scsi_end_request(cmd, 0, good_bytes, result == 0) == NULL)
+	if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
 		return;
 
 	/* good_bytes = 0, or (inclusive) there were leftovers and
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index a0f308b..ee8496a 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -621,9 +621,7 @@
 {
 	int err;
 
-	scsi_tgt_cmd_cache = kmem_cache_create("scsi_tgt_cmd",
-					       sizeof(struct scsi_tgt_cmd),
-					       0, 0, NULL);
+	scsi_tgt_cmd_cache =  KMEM_CACHE(scsi_tgt_cmd, 0);
 	if (!scsi_tgt_cmd_cache)
 		return -ENOMEM;
 
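
KMEM_CACHE() derives the cache name, object size and alignment from the
struct type, which is why the explicit size and alignment arguments
disappear in this conversion. Its definition in <linux/slab.h> is
equivalent to:

	#define KMEM_CACHE(__struct, __flags)				\
		kmem_cache_create(#__struct, sizeof(struct __struct),	\
				  __alignof__(struct __struct),		\
				  (__flags), NULL)
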
diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c
index 26cfc56..03e3596 100644
--- a/drivers/scsi/sgiwd93.c
+++ b/drivers/scsi/sgiwd93.c
@@ -263,10 +263,11 @@
 	regs.SASR = wdregs + 3;
 	regs.SCMD = wdregs + 7;
 
-	wd33c93_init(host, regs, dma_setup, dma_stop, WD33C93_FS_MHZ(20));
+	hdata->wh.no_sync = 0;
+	hdata->wh.fast = 1;
+	hdata->wh.dma_mode = CTRL_BURST;
 
-	if (hdata->wh.no_sync == 0xff)
-		hdata->wh.no_sync = 0;
+	wd33c93_init(host, regs, dma_setup, dma_stop, WD33C93_FS_MHZ(20));
 
 	err = request_irq(irq, sgiwd93_intr, 0, "SGI WD93", host);
 	if (err) {
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 0a52d9d..df83bea 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -17,7 +17,7 @@
    Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
  */
 
-static const char *verstr = "20080221";
+static const char *verstr = "20080224";
 
 #include <linux/module.h>
 
@@ -183,6 +183,7 @@
 
 static struct st_buffer *new_tape_buffer(int, int, int);
 static int enlarge_buffer(struct st_buffer *, int, int);
+static void clear_buffer(struct st_buffer *);
 static void normalize_buffer(struct st_buffer *);
 static int append_to_buffer(const char __user *, struct st_buffer *, int);
 static int from_buffer(struct st_buffer *, char __user *, int);
@@ -442,6 +443,7 @@
 
 	memcpy(SRpnt->sense, sense, SCSI_SENSE_BUFFERSIZE);
 	(STp->buffer)->cmdstat.midlevel_result = SRpnt->result = result;
+	(STp->buffer)->cmdstat.residual = resid;
 	DEB( STp->write_pending = 0; )
 
 	if (SRpnt->waiting)
@@ -626,7 +628,7 @@
 
 
 /* Flush the write buffer (never need to write if variable blocksize). */
-static int flush_write_buffer(struct scsi_tape * STp)
+static int st_flush_write_buffer(struct scsi_tape * STp)
 {
 	int offset, transfer, blks;
 	int result;
@@ -717,7 +719,7 @@
 		return 0;
 	STps = &(STp->ps[STp->partition]);
 	if (STps->rw == ST_WRITING)	/* Writing */
-		return flush_write_buffer(STp);
+		return st_flush_write_buffer(STp);
 
 	if (STp->block_size == 0)
 		return 0;
@@ -1159,6 +1161,7 @@
 		goto err_out;
 	}
 
+	(STp->buffer)->cleared = 0;
 	(STp->buffer)->writing = 0;
 	(STp->buffer)->syscall_result = 0;
 
@@ -1211,7 +1214,7 @@
 		return 0;
 
 	if (STps->rw == ST_WRITING && !STp->pos_unknown) {
-		result = flush_write_buffer(STp);
+		result = st_flush_write_buffer(STp);
 		if (result != 0 && result != (-ENOSPC))
 			goto out;
 	}
@@ -1432,8 +1435,14 @@
 		if (STp->block_size)
 			bufsize = STp->block_size > st_fixed_buffer_size ?
 				STp->block_size : st_fixed_buffer_size;
-		else
+		else {
 			bufsize = count;
+			/* Make sure that data from a previous user is not leaked
+			   even if the HBA does not return a correct residual */
+			if (is_read && STp->sili && !STbp->cleared)
+				clear_buffer(STbp);
+		}
+
 		if (bufsize > STbp->buffer_size &&
 		    !enlarge_buffer(STbp, bufsize, STp->restr_dma)) {
 			printk(KERN_WARNING "%s: Can't allocate %d byte tape buffer.\n",
@@ -1783,6 +1792,8 @@
 	memset(cmd, 0, MAX_COMMAND_SIZE);
 	cmd[0] = READ_6;
 	cmd[1] = (STp->block_size != 0);
+	if (!cmd[1] && STp->sili)
+		cmd[1] |= 2;
 	cmd[2] = blks >> 16;
 	cmd[3] = blks >> 8;
 	cmd[4] = blks;
@@ -1911,8 +1922,11 @@
 
 	}
 	/* End of error handling */ 
-	else			/* Read successful */
+	else {			/* Read successful */
 		STbp->buffer_bytes = bytes;
+		if (STp->sili) /* In fixed block mode residual is always zero here */
+			STbp->buffer_bytes -= STp->buffer->cmdstat.residual;
+	}
 
 	if (STps->drv_block >= 0) {
 		if (STp->block_size == 0)
@@ -2090,7 +2104,8 @@
 		       name, STm->defaults_for_writes, STp->omit_blklims, STp->can_partitions,
 		       STp->scsi2_logical);
 		printk(KERN_INFO
-		       "%s:    sysv: %d nowait: %d\n", name, STm->sysv, STp->immediate);
+		       "%s:    sysv: %d nowait: %d sili: %d\n", name, STm->sysv, STp->immediate,
+			STp->sili);
 		printk(KERN_INFO "%s:    debugging: %d\n",
 		       name, debugging);
 	}
@@ -2133,6 +2148,7 @@
 		STp->scsi2_logical = (options & MT_ST_SCSI2LOGICAL) != 0;
 		STp->immediate = (options & MT_ST_NOWAIT) != 0;
 		STm->sysv = (options & MT_ST_SYSV) != 0;
+		STp->sili = (options & MT_ST_SILI) != 0;
 		DEB( debugging = (options & MT_ST_DEBUGGING) != 0;
 		     st_log_options(STp, STm, name); )
 	} else if (code == MT_ST_SETBOOLEANS || code == MT_ST_CLEARBOOLEANS) {
@@ -2164,6 +2180,8 @@
 			STp->immediate = value;
 		if ((options & MT_ST_SYSV) != 0)
 			STm->sysv = value;
+		if ((options & MT_ST_SILI) != 0)
+			STp->sili = value;
                 DEB(
 		if ((options & MT_ST_DEBUGGING) != 0)
 			debugging = value;
@@ -3655,6 +3673,8 @@
 		STbuffer->frp_segs += 1;
 		got += b_size;
 		STbuffer->buffer_size = got;
+		if (STbuffer->cleared)
+			memset(page_address(STbuffer->frp[segs].page), 0, b_size);
 		segs++;
 	}
 	STbuffer->b_data = page_address(STbuffer->frp[0].page);
@@ -3663,6 +3683,17 @@
 }
 
 
+/* Make sure that no data from a previous user is in the internal buffer */
+static void clear_buffer(struct st_buffer * st_bp)
+{
+	int i;
+
+	for (i=0; i < st_bp->frp_segs; i++)
+		memset(page_address(st_bp->frp[i].page), 0, st_bp->frp[i].length);
+	st_bp->cleared = 1;
+}
+
+
 /* Release the extra buffer */
 static void normalize_buffer(struct st_buffer * STbuffer)
 {
@@ -3987,6 +4018,7 @@
 	tpnt->two_fm = ST_TWO_FM;
 	tpnt->fast_mteom = ST_FAST_MTEOM;
 	tpnt->scsi2_logical = ST_SCSI2LOGICAL;
+	tpnt->sili = ST_SILI;
 	tpnt->immediate = ST_NOWAIT;
 	tpnt->default_drvbuffer = 0xff;		/* No forced buffering */
 	tpnt->partition = 0;
@@ -4333,6 +4365,46 @@
 
 CLASS_DEVICE_ATTR(default_compression, S_IRUGO, st_defcompression_show, NULL);
 
+static ssize_t st_options_show(struct class_device *class_dev, char *buf)
+{
+	struct st_modedef *STm = (struct st_modedef *)class_get_devdata(class_dev);
+	struct scsi_tape *STp;
+	int i, j, options;
+	ssize_t l = 0;
+
+	for (i=0; i < st_dev_max; i++) {
+		for (j=0; j < ST_NBR_MODES; j++)
+			if (&scsi_tapes[i]->modes[j] == STm)
+				break;
+		if (j < ST_NBR_MODES)
+			break;
+	}
+	if (i == st_dev_max)
+		return 0;  /* should never happen */
+
+	STp = scsi_tapes[i];
+
+	options = STm->do_buffer_writes ? MT_ST_BUFFER_WRITES : 0;
+	options |= STm->do_async_writes ? MT_ST_ASYNC_WRITES : 0;
+	options |= STm->do_read_ahead ? MT_ST_READ_AHEAD : 0;
+	DEB( options |= debugging ? MT_ST_DEBUGGING : 0 );
+	options |= STp->two_fm ? MT_ST_TWO_FM : 0;
+	options |= STp->fast_mteom ? MT_ST_FAST_MTEOM : 0;
+	options |= STm->defaults_for_writes ? MT_ST_DEF_WRITES : 0;
+	options |= STp->can_bsr ? MT_ST_CAN_BSR : 0;
+	options |= STp->omit_blklims ? MT_ST_NO_BLKLIMS : 0;
+	options |= STp->can_partitions ? MT_ST_CAN_PARTITIONS : 0;
+	options |= STp->scsi2_logical ? MT_ST_SCSI2LOGICAL : 0;
+	options |= STm->sysv ? MT_ST_SYSV : 0;
+	options |= STp->immediate ? MT_ST_NOWAIT : 0;
+	options |= STp->sili ? MT_ST_SILI : 0;
+
+	l = snprintf(buf, PAGE_SIZE, "0x%08x\n", options);
+	return l;
+}
+
+CLASS_DEVICE_ATTR(options, S_IRUGO, st_options_show, NULL);
+
 static int do_create_class_files(struct scsi_tape *STp, int dev_num, int mode)
 {
 	int i, rew, error;
@@ -4370,6 +4442,9 @@
 		error = class_device_create_file(st_class_member,
 				        &class_device_attr_default_compression);
 		if (error) goto out;
+		error = class_device_create_file(st_class_member,
+				        &class_device_attr_options);
+		if (error) goto out;
 
 		if (mode == 0 && rew == 0) {
 			error = sysfs_create_link(&STp->device->sdev_gendev.kobj,
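
With MT_ST_SILI enabled, variable-block reads set the SILI bit in byte 1 of
the READ(6) CDB so the drive suppresses ILLEGAL LENGTH checking, and the
actual block size is recovered from the residual the HBA reports. A worked
sketch of the arithmetic (the 64 KiB request and 32 KiB block are
illustrative values, not from the patch):

	cmd[0] = READ_6;
	cmd[1] = 0x02;		/* fixed == 0, SILI == 1 */

	/* request bytes == 65536; the drive returns one 32768-byte
	 * block, so the HBA reports residual == 65536 - 32768 */
	STbp->buffer_bytes = bytes - STp->buffer->cmdstat.residual;
	/* buffer_bytes == 32768, the real block size */
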
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
index 5931726..b92712f 100644
--- a/drivers/scsi/st.h
+++ b/drivers/scsi/st.h
@@ -12,6 +12,7 @@
 	int midlevel_result;
 	struct scsi_sense_hdr sense_hdr;
 	int have_sense;
+	int residual;
 	u64 uremainder64;
 	u8 flags;
 	u8 remainder_valid;
@@ -34,6 +35,7 @@
 struct st_buffer {
 	unsigned char dma;	/* DMA-able buffer */
 	unsigned char do_dio;   /* direct i/o set up? */
+	unsigned char cleared;  /* internal buffer cleared after open? */
 	int buffer_size;
 	int buffer_blocks;
 	int buffer_bytes;
@@ -122,6 +124,7 @@
 	unsigned char try_dio_now;		/* try direct i/o before next close? */
 	unsigned char c_algo;			/* compression algorithm */
 	unsigned char pos_unknown;			/* after reset position unknown */
+	unsigned char sili;			/* use SILI when reading in variable block mode */
 	int tape_type;
 	int long_timeout;	/* timeout for commands known to take long time */
 
diff --git a/drivers/scsi/st_options.h b/drivers/scsi/st_options.h
index b6b5c9c..d2f9479 100644
--- a/drivers/scsi/st_options.h
+++ b/drivers/scsi/st_options.h
@@ -3,7 +3,7 @@
 
    Copyright 1995-2003 Kai Makisara.
 
-   Last modified: Mon Apr  7 22:49:18 2003 by makisara
+   Last modified: Thu Feb 21 21:47:07 2008 by kai.makisara
 */
 
 #ifndef _ST_OPTIONS_H
@@ -94,6 +94,10 @@
    The default is BSD semantics. */
 #define ST_SYSV 0
 
+/* If ST_SILI is non-zero, the SILI bit is set when reading in variable block
+   mode and the block size is determined using the residual returned by the HBA. */
+#define ST_SILI 0
+
 /* Time to wait for the drive to become ready if blocking open */
 #define ST_BLOCK_SECONDS     120
 
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 654430e..f308a03 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -33,6 +33,7 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsi_dbg.h>
+#include <scsi/scsi_eh.h>
 
 #define DRV_NAME "stex"
 #define ST_DRIVER_VERSION "3.6.0000.1"
@@ -362,22 +363,14 @@
 	return status;
 }
 
-static void stex_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
-{
-	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
-
-	cmd->sense_buffer[0] = 0x70;    /* fixed format, current */
-	cmd->sense_buffer[2] = sk;
-	cmd->sense_buffer[7] = 18 - 8;  /* additional sense length */
-	cmd->sense_buffer[12] = asc;
-	cmd->sense_buffer[13] = ascq;
-}
-
 static void stex_invalid_field(struct scsi_cmnd *cmd,
 			       void (*done)(struct scsi_cmnd *))
 {
+	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+
 	/* "Invalid field in cbd" */
-	stex_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0);
+	scsi_build_sense_buffer(0, cmd->sense_buffer, ILLEGAL_REQUEST, 0x24,
+				0x0);
 	done(cmd);
 }
 
@@ -426,49 +419,13 @@
 	return 0;
 }
 
-static void stex_internal_copy(struct scsi_cmnd *cmd,
-	const void *src, size_t *count, int sg_count, int direction)
-{
-	size_t lcount;
-	size_t len;
-	void *s, *d, *base = NULL;
-	size_t offset;
-
-	if (*count > scsi_bufflen(cmd))
-		*count = scsi_bufflen(cmd);
-	lcount = *count;
-	while (lcount) {
-		len = lcount;
-		s = (void *)src;
-
-		offset = *count - lcount;
-		s += offset;
-		base = scsi_kmap_atomic_sg(scsi_sglist(cmd),
-					   sg_count, &offset, &len);
-		if (!base) {
-			*count -= lcount;
-			return;
-		}
-		d = base + offset;
-
-		if (direction == ST_TO_CMD)
-			memcpy(d, s, len);
-		else
-			memcpy(s, d, len);
-
-		lcount -= len;
-		scsi_kunmap_atomic_sg(base);
-	}
-}
-
 static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb)
 {
 	struct st_frame *p;
 	size_t count = sizeof(struct st_frame);
 
 	p = hba->copy_buffer;
-	stex_internal_copy(ccb->cmd, p, &count, scsi_sg_count(ccb->cmd),
-			   ST_FROM_CMD);
+	count = scsi_sg_copy_to_buffer(ccb->cmd, p, count);
 	memset(p->base, 0, sizeof(u32)*6);
 	*(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0);
 	p->rom_addr = 0;
@@ -486,8 +443,7 @@
 	p->subid =
 		hba->pdev->subsystem_vendor << 16 | hba->pdev->subsystem_device;
 
-	stex_internal_copy(ccb->cmd, p, &count, scsi_sg_count(ccb->cmd),
-			   ST_TO_CMD);
+	count = scsi_sg_copy_from_buffer(ccb->cmd, p, count);
 }
 
 static void
@@ -554,10 +510,8 @@
 		unsigned char page;
 		page = cmd->cmnd[2] & 0x3f;
 		if (page == 0x8 || page == 0x3f) {
-			size_t cp_len = sizeof(ms10_caching_page);
-			stex_internal_copy(cmd, ms10_caching_page,
-					   &cp_len, scsi_sg_count(cmd),
-					   ST_TO_CMD);
+			scsi_sg_copy_from_buffer(cmd, ms10_caching_page,
+						 sizeof(ms10_caching_page));
 			cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
 			done(cmd);
 		} else
@@ -586,10 +540,8 @@
 		if (id != host->max_id - 1)
 			break;
 		if (lun == 0 && (cmd->cmnd[1] & INQUIRY_EVPD) == 0) {
-			size_t cp_len = sizeof(console_inq_page);
-			stex_internal_copy(cmd, console_inq_page,
-					   &cp_len, scsi_sg_count(cmd),
-					   ST_TO_CMD);
+			scsi_sg_copy_from_buffer(cmd, (void *)console_inq_page,
+						 sizeof(console_inq_page));
 			cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
 			done(cmd);
 		} else
@@ -606,8 +558,7 @@
 			ver.signature[0] = PASSTHRU_SIGNATURE;
 			ver.console_id = host->max_id - 1;
 			ver.host_no = hba->host->host_no;
-			stex_internal_copy(cmd, &ver, &cp_len,
-					   scsi_sg_count(cmd), ST_TO_CMD);
+			cp_len = scsi_sg_copy_from_buffer(cmd, &ver, cp_len);
 			cmd->result = sizeof(ver) == cp_len ?
 				DID_OK << 16 | COMMAND_COMPLETE << 8 :
 				DID_ERROR << 16 | COMMAND_COMPLETE << 8;
@@ -700,15 +651,12 @@
 
 	if (ccb->cmd == NULL)
 		return;
-	stex_internal_copy(ccb->cmd,
-		resp->variable, &count, scsi_sg_count(ccb->cmd), ST_TO_CMD);
+	count = scsi_sg_copy_from_buffer(ccb->cmd, resp->variable, count);
 }
 
 static void stex_ys_commands(struct st_hba *hba,
 	struct st_ccb *ccb, struct status_msg *resp)
 {
-	size_t count;
-
 	if (ccb->cmd->cmnd[0] == MGT_CMD &&
 		resp->scsi_status != SAM_STAT_CHECK_CONDITION) {
 		scsi_set_resid(ccb->cmd, scsi_bufflen(ccb->cmd) -
@@ -724,9 +672,8 @@
 		resp->scsi_status == SAM_STAT_GOOD) {
 		ST_INQ *inq_data;
 
-		count = STEX_EXTRA_SIZE;
-		stex_internal_copy(ccb->cmd, hba->copy_buffer,
-			&count, scsi_sg_count(ccb->cmd), ST_FROM_CMD);
+		scsi_sg_copy_to_buffer(ccb->cmd, hba->copy_buffer,
+				       STEX_EXTRA_SIZE);
 		inq_data = (ST_INQ *)hba->copy_buffer;
 		if (inq_data->DeviceTypeQualifier != 0)
 			ccb->srb_status = SRB_STATUS_SELECTION_TIMEOUT;
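
scsi_sg_copy_to_buffer()/scsi_sg_copy_from_buffer() perform the
kmap-and-walk over the scatterlist that stex_internal_copy() open-coded,
and return the number of bytes actually copied, so a short transfer can be
detected from the return value. A minimal sketch in the style of the
converted call sites:

	/* Copy a response structure into the command's scatterlist and
	 * fail the command if it did not fit completely. */
	cp_len = scsi_sg_copy_from_buffer(cmd, &ver, sizeof(ver));
	cmd->result = (cp_len == sizeof(ver)) ?
		DID_OK << 16 | COMMAND_COMPLETE << 8 :
		DID_ERROR << 16 | COMMAND_COMPLETE << 8;
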
diff --git a/drivers/scsi/sun3_scsi_vme.c b/drivers/scsi/sun3_scsi_vme.c
index 02d9727..aaa4fd0 100644
--- a/drivers/scsi/sun3_scsi_vme.c
+++ b/drivers/scsi/sun3_scsi_vme.c
@@ -582,3 +582,4 @@
 
 #include "scsi_module.c"
 
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index f286c37..5fda881 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -1973,10 +1973,7 @@
 	hostdata->incoming_ptr = 0;
 	hostdata->outgoing_len = 0;
 	hostdata->default_sx_per = DEFAULT_SX_PER;
-	hostdata->no_sync = 0xff;	/* sync defaults to off */
 	hostdata->no_dma = 0;	/* default is DMA enabled */
-	hostdata->fast = 0;	/* default is Fast SCSI transfers disabled */
-	hostdata->dma_mode = CTRL_DMA;	/* default is Single Byte DMA */
 
 #ifdef PROC_INTERFACE
 	hostdata->proc = PR_VERSION | PR_INFO | PR_STATISTICS |
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index 77f7a7f..96a585e 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -1740,6 +1740,60 @@
 	}
 }
 
+#ifdef CONFIG_CONSOLE_POLL
+/*
+ * Console polling routines for writing and reading from the uart while
+ * in an interrupt or debug context.
+ */
+
+static int serial8250_get_poll_char(struct uart_port *port)
+{
+	struct uart_8250_port *up = (struct uart_8250_port *)port;
+	unsigned char lsr = serial_inp(up, UART_LSR);
+
+	while (!(lsr & UART_LSR_DR))
+		lsr = serial_inp(up, UART_LSR);
+
+	return serial_inp(up, UART_RX);
+}
+
+
+static void serial8250_put_poll_char(struct uart_port *port,
+			 unsigned char c)
+{
+	unsigned int ier;
+	struct uart_8250_port *up = (struct uart_8250_port *)port;
+
+	/*
+	 *	First save the IER, then disable the interrupts
+	 */
+	ier = serial_in(up, UART_IER);
+	if (up->capabilities & UART_CAP_UUE)
+		serial_out(up, UART_IER, UART_IER_UUE);
+	else
+		serial_out(up, UART_IER, 0);
+
+	wait_for_xmitr(up, BOTH_EMPTY);
+	/*
+	 *	Send the character out.
+	 *	If a LF, also do CR...
+	 */
+	serial_out(up, UART_TX, c);
+	if (c == 10) {
+		wait_for_xmitr(up, BOTH_EMPTY);
+		serial_out(up, UART_TX, 13);
+	}
+
+	/*
+	 *	Finally, wait for transmitter to become empty
+	 *	and restore the IER
+	 */
+	wait_for_xmitr(up, BOTH_EMPTY);
+	serial_out(up, UART_IER, ier);
+}
+
+#endif /* CONFIG_CONSOLE_POLL */
+
 static int serial8250_startup(struct uart_port *port)
 {
 	struct uart_8250_port *up = (struct uart_8250_port *)port;
@@ -2386,6 +2440,10 @@
 	.request_port	= serial8250_request_port,
 	.config_port	= serial8250_config_port,
 	.verify_port	= serial8250_verify_port,
+#ifdef CONFIG_CONSOLE_POLL
+	.poll_get_char = serial8250_get_poll_char,
+	.poll_put_char = serial8250_put_poll_char,
+#endif
 };
 
 static struct uart_8250_port serial8250_ports[UART_NR];
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index cf627cd..f7cd950 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -961,6 +961,9 @@
 config SERIAL_CORE_CONSOLE
 	bool
 
+config CONSOLE_POLL
+	bool
+
 config SERIAL_68328
 	bool "68328 serial support"
 	depends on M68328 || M68EZ328 || M68VZ328
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index 640cfe4..3cbea54 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -66,4 +66,5 @@
 obj-$(CONFIG_SERIAL_NETX) += netx-serial.o
 obj-$(CONFIG_SERIAL_OF_PLATFORM) += of_serial.o
 obj-$(CONFIG_SERIAL_KS8695) += serial_ks8695.o
+obj-$(CONFIG_KGDB_SERIAL_CONSOLE) += kgdboc.o
 obj-$(CONFIG_SERIAL_QE) += ucc_uart.o
diff --git a/drivers/serial/amba-pl011.c b/drivers/serial/amba-pl011.c
index 40604a0..08adc1d 100644
--- a/drivers/serial/amba-pl011.c
+++ b/drivers/serial/amba-pl011.c
@@ -314,6 +314,32 @@
 	spin_unlock_irqrestore(&uap->port.lock, flags);
 }
 
+#ifdef CONFIG_CONSOLE_POLL
+static int pl010_get_poll_char(struct uart_port *port)
+{
+	struct uart_amba_port *uap = (struct uart_amba_port *)port;
+	unsigned int status;
+
+	do {
+		status = readw(uap->port.membase + UART01x_FR);
+	} while (status & UART01x_FR_RXFE);
+
+	return readw(uap->port.membase + UART01x_DR);
+}
+
+static void pl010_put_poll_char(struct uart_port *port,
+			 unsigned char ch)
+{
+	struct uart_amba_port *uap = (struct uart_amba_port *)port;
+
+	while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
+		barrier();
+
+	writew(ch, uap->port.membase + UART01x_DR);
+}
+
+#endif /* CONFIG_CONSOLE_POLL */
+
 static int pl011_startup(struct uart_port *port)
 {
 	struct uart_amba_port *uap = (struct uart_amba_port *)port;
@@ -572,6 +598,10 @@
 	.request_port	= pl010_request_port,
 	.config_port	= pl010_config_port,
 	.verify_port	= pl010_verify_port,
+#ifdef CONFIG_CONSOLE_POLL
+	.poll_get_char = pl010_get_poll_char,
+	.poll_put_char = pl010_put_poll_char,
+#endif
 };
 
 static struct uart_amba_port *amba_ports[UART_NR];
diff --git a/drivers/serial/kgdboc.c b/drivers/serial/kgdboc.c
new file mode 100644
index 0000000..9cf0332
--- /dev/null
+++ b/drivers/serial/kgdboc.c
@@ -0,0 +1,168 @@
+/*
+ * Based on the same principle as kgdboe using the NETPOLL API, this
+ * driver uses a console polling API to implement a gdb serial interface
+ * which is multiplexed on a console port.
+ *
+ * Maintainer: Jason Wessel <jason.wessel@windriver.com>
+ *
+ * 2007-2008 (c) Jason Wessel - Wind River Systems, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+#include <linux/kernel.h>
+#include <linux/ctype.h>
+#include <linux/kgdb.h>
+#include <linux/tty.h>
+
+#define MAX_CONFIG_LEN		40
+
+static struct kgdb_io		kgdboc_io_ops;
+
+/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
+static int configured		= -1;
+
+static char config[MAX_CONFIG_LEN];
+static struct kparam_string kps = {
+	.string			= config,
+	.maxlen			= MAX_CONFIG_LEN,
+};
+
+static struct tty_driver	*kgdb_tty_driver;
+static int			kgdb_tty_line;
+
+static int kgdboc_option_setup(char *opt)
+{
+	if (strlen(opt) > MAX_CONFIG_LEN) {
+		printk(KERN_ERR "kgdboc: config string too long\n");
+		return -ENOSPC;
+	}
+	strcpy(config, opt);
+
+	return 0;
+}
+
+__setup("kgdboc=", kgdboc_option_setup);
+
+static int configure_kgdboc(void)
+{
+	struct tty_driver *p;
+	int tty_line = 0;
+	int err;
+
+	err = kgdboc_option_setup(config);
+	if (err || !strlen(config) || isspace(config[0]))
+		goto noconfig;
+
+	err = -ENODEV;
+
+	p = tty_find_polling_driver(config, &tty_line);
+	if (!p)
+		goto noconfig;
+
+	kgdb_tty_driver = p;
+	kgdb_tty_line = tty_line;
+
+	err = kgdb_register_io_module(&kgdboc_io_ops);
+	if (err)
+		goto noconfig;
+
+	configured = 1;
+
+	return 0;
+
+noconfig:
+	config[0] = 0;
+	configured = 0;
+
+	return err;
+}
+
+static int __init init_kgdboc(void)
+{
+	/* Already configured? */
+	if (configured == 1)
+		return 0;
+
+	return configure_kgdboc();
+}
+
+static void cleanup_kgdboc(void)
+{
+	if (configured == 1)
+		kgdb_unregister_io_module(&kgdboc_io_ops);
+}
+
+static int kgdboc_get_char(void)
+{
+	return kgdb_tty_driver->poll_get_char(kgdb_tty_driver, kgdb_tty_line);
+}
+
+static void kgdboc_put_char(u8 chr)
+{
+	kgdb_tty_driver->poll_put_char(kgdb_tty_driver, kgdb_tty_line, chr);
+}
+
+static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
+{
+	int len = strlen(kmessage);
+
+	if (len >= MAX_CONFIG_LEN) {
+		printk(KERN_ERR "kgdboc: config string too long\n");
+		return -ENOSPC;
+	}
+
+	/* Only copy in the string if the init function has not run yet */
+	if (configured < 0) {
+		strcpy(config, kmessage);
+		return 0;
+	}
+
+	if (kgdb_connected) {
+		printk(KERN_ERR
+		       "kgdboc: Cannot reconfigure while KGDB is connected.\n");
+
+		return -EBUSY;
+	}
+
+	strcpy(config, kmessage);
+	/* Chop out \n char as a result of echo */
+	if (config[len - 1] == '\n')
+		config[len - 1] = '\0';
+
+	if (configured == 1)
+		cleanup_kgdboc();
+
+	/* Go and configure with the new params. */
+	return configure_kgdboc();
+}
+
+static void kgdboc_pre_exp_handler(void)
+{
+	/* Increment the module count when the debugger is active */
+	if (!kgdb_connected)
+		try_module_get(THIS_MODULE);
+}
+
+static void kgdboc_post_exp_handler(void)
+{
+	/* Decrement the module count when the debugger detaches */
+	if (!kgdb_connected)
+		module_put(THIS_MODULE);
+}
+
+static struct kgdb_io kgdboc_io_ops = {
+	.name			= "kgdboc",
+	.read_char		= kgdboc_get_char,
+	.write_char		= kgdboc_put_char,
+	.pre_exception		= kgdboc_pre_exp_handler,
+	.post_exception		= kgdboc_post_exp_handler,
+};
+
+module_init(init_kgdboc);
+module_exit(cleanup_kgdboc);
+module_param_call(kgdboc, param_set_kgdboc_var, param_get_string, &kps, 0644);
+MODULE_PARM_DESC(kgdboc, "<serial_device>[,baud]");
+MODULE_DESCRIPTION("KGDB Console TTY Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c
index 0f5a179..c32c1ca 100644
--- a/drivers/serial/serial_core.c
+++ b/drivers/serial/serial_core.c
@@ -1771,7 +1771,7 @@
 }
 #endif
 
-#ifdef CONFIG_SERIAL_CORE_CONSOLE
+#if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
 /*
  *	uart_console_write - write a console message to a serial port
  *	@port: the port to write the message
@@ -1827,7 +1827,7 @@
  *	options.  The format of the string is <baud><parity><bits><flow>,
  *	eg: 115200n8r
  */
-void __init
+void
 uart_parse_options(char *options, int *baud, int *parity, int *bits, int *flow)
 {
 	char *s = options;
@@ -1842,6 +1842,7 @@
 	if (*s)
 		*flow = *s;
 }
+EXPORT_SYMBOL_GPL(uart_parse_options);
 
 struct baud_rates {
 	unsigned int rate;
@@ -1872,7 +1873,7 @@
  *	@bits: number of data bits
  *	@flow: flow control character - 'r' (rts)
  */
-int __init
+int
 uart_set_options(struct uart_port *port, struct console *co,
 		 int baud, int parity, int bits, int flow)
 {
@@ -1924,10 +1925,16 @@
 	port->mctrl |= TIOCM_DTR;
 
 	port->ops->set_termios(port, &termios, &dummy);
-	co->cflag = termios.c_cflag;
+	/*
+	 * Allow the setting of the UART parameters with a NULL console
+	 * too:
+	 */
+	if (co)
+		co->cflag = termios.c_cflag;
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(uart_set_options);
 #endif /* CONFIG_SERIAL_CORE_CONSOLE */
 
 static void uart_change_pm(struct uart_state *state, int pm_state)
@@ -2182,6 +2189,60 @@
 	}
 }
 
+#ifdef CONFIG_CONSOLE_POLL
+
+static int uart_poll_init(struct tty_driver *driver, int line, char *options)
+{
+	struct uart_driver *drv = driver->driver_state;
+	struct uart_state *state = drv->state + line;
+	struct uart_port *port;
+	int baud = 9600;
+	int bits = 8;
+	int parity = 'n';
+	int flow = 'n';
+
+	if (!state || !state->port)
+		return -1;
+
+	port = state->port;
+	if (!(port->ops->poll_get_char && port->ops->poll_put_char))
+		return -1;
+
+	if (options) {
+		uart_parse_options(options, &baud, &parity, &bits, &flow);
+		return uart_set_options(port, NULL, baud, parity, bits, flow);
+	}
+
+	return 0;
+}
+
+static int uart_poll_get_char(struct tty_driver *driver, int line)
+{
+	struct uart_driver *drv = driver->driver_state;
+	struct uart_state *state = drv->state + line;
+	struct uart_port *port;
+
+	if (!state || !state->port)
+		return -1;
+
+	port = state->port;
+	return port->ops->poll_get_char(port);
+}
+
+static void uart_poll_put_char(struct tty_driver *driver, int line, char ch)
+{
+	struct uart_driver *drv = driver->driver_state;
+	struct uart_state *state = drv->state + line;
+	struct uart_port *port;
+
+	if (!state || !state->port)
+		return;
+
+	port = state->port;
+	port->ops->poll_put_char(port, ch);
+}
+#endif
+
 static const struct tty_operations uart_ops = {
 	.open		= uart_open,
 	.close		= uart_close,
@@ -2206,6 +2267,11 @@
 #endif
 	.tiocmget	= uart_tiocmget,
 	.tiocmset	= uart_tiocmset,
+#ifdef CONFIG_CONSOLE_POLL
+	.poll_init	= uart_poll_init,
+	.poll_get_char	= uart_poll_get_char,
+	.poll_put_char	= uart_poll_put_char,
+#endif
 };
 
 /**

diff --git a/fs/Kconfig b/fs/Kconfig
index c509123..028ae38 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -444,6 +444,32 @@
 	  For more information on OCFS2, see the file
 	  <file:Documentation/filesystems/ocfs2.txt>.
 
+config OCFS2_FS_O2CB
+	tristate "O2CB Kernelspace Clustering"
+	depends on OCFS2_FS
+	default y
+	help
+	  OCFS2 includes a simple kernelspace clustering package, the OCFS2
+	  Cluster Base.  It only requires a very small userspace component
+	  to configure it. This comes with the standard ocfs2-tools package.
+	  O2CB is limited to maintaining a cluster for OCFS2 file systems.
+	  It cannot manage any other cluster applications.
+
+	  It is always safe to say Y here, as the clustering method is
+	  run-time selectable.
+
+config OCFS2_FS_USERSPACE_CLUSTER
+	tristate "OCFS2 Userspace Clustering"
+	depends on OCFS2_FS && DLM
+	default y
+	help
+	  This option will allow OCFS2 to use userspace clustering services
+	  in conjunction with the DLM in fs/dlm.  If you are using a
+	  userspace cluster manager, say Y here.
+
+	  It is safe to say Y, as the clustering method is run-time
+	  selectable.
+
 config OCFS2_DEBUG_MASKLOG
 	bool "OCFS2 logging support"
 	depends on OCFS2_FS
diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig
index de8e64c..7f7947e 100644
--- a/fs/gfs2/Kconfig
+++ b/fs/gfs2/Kconfig
@@ -1,6 +1,6 @@
 config GFS2_FS
 	tristate "GFS2 file system support"
-	depends on EXPERIMENTAL
+	depends on EXPERIMENTAL && (64BIT || (LSF && LBD))
 	select FS_POSIX_ACL
 	select CRC32
 	help
diff --git a/fs/gfs2/Makefile b/fs/gfs2/Makefile
index 8fff110..e2350df 100644
--- a/fs/gfs2/Makefile
+++ b/fs/gfs2/Makefile
@@ -1,6 +1,6 @@
 obj-$(CONFIG_GFS2_FS) += gfs2.o
 gfs2-y := acl.o bmap.o daemon.o dir.o eaops.o eattr.o glock.o \
-	glops.o inode.o lm.o log.o lops.o locking.o main.o meta_io.o \
+	glops.o inode.o log.o lops.o locking.o main.o meta_io.o \
 	mount.o ops_address.o ops_dentry.o ops_export.o ops_file.o \
 	ops_fstype.o ops_inode.o ops_super.o quota.o \
 	recovery.o rgrp.o super.o sys.o trans.o util.o
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
index 1047a8c..3e9bd46 100644
--- a/fs/gfs2/acl.c
+++ b/fs/gfs2/acl.c
@@ -116,7 +116,7 @@
 		goto out;
 
 	er.er_data_len = GFS2_EA_DATA_LEN(el->el_ea);
-	er.er_data = kmalloc(er.er_data_len, GFP_KERNEL);
+	er.er_data = kmalloc(er.er_data_len, GFP_NOFS);
 	error = -ENOMEM;
 	if (!er.er_data)
 		goto out;
@@ -222,7 +222,7 @@
 		return error;
 	}
 
-	clone = posix_acl_clone(acl, GFP_KERNEL);
+	clone = posix_acl_clone(acl, GFP_NOFS);
 	error = -ENOMEM;
 	if (!clone)
 		goto out;
@@ -272,7 +272,7 @@
 	if (!acl)
 		return gfs2_setattr_simple(ip, attr);
 
-	clone = posix_acl_clone(acl, GFP_KERNEL);
+	clone = posix_acl_clone(acl, GFP_NOFS);
 	error = -ENOMEM;
 	if (!clone)
 		goto out;
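
GFP_NOFS tells the allocator that it may not re-enter filesystem code
(e.g. writeback) from direct reclaim, which is what makes it safe to
allocate while holding filesystem locks; GFP_KERNEL in these paths could
deadlock by recursing back into GFS2. The shape of the change, annotated:

	/* Called with fs locks held: reclaim must not call back into
	 * the filesystem, so GFP_NOFS rather than GFP_KERNEL. */
	er.er_data = kmalloc(er.er_data_len, GFP_NOFS);
	if (!er.er_data)
		return -ENOMEM;
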
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index e9456eb..c19184f 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -33,6 +33,7 @@
  * keep it small.
  */
 struct metapath {
+	struct buffer_head *mp_bh[GFS2_MAX_META_HEIGHT];
 	__u16 mp_list[GFS2_MAX_META_HEIGHT];
 };
 
@@ -135,9 +136,10 @@
 		/* Get a free block, fill it with the stuffed data,
 		   and write it out to disk */
 
+		unsigned int n = 1;
+		block = gfs2_alloc_block(ip, &n);
 		if (isdir) {
-			block = gfs2_alloc_meta(ip);
-
+			gfs2_trans_add_unrevoke(GFS2_SB(&ip->i_inode), block, 1);
 			error = gfs2_dir_get_new_buffer(ip, block, &bh);
 			if (error)
 				goto out_brelse;
@@ -145,8 +147,6 @@
 					      dibh, sizeof(struct gfs2_dinode));
 			brelse(bh);
 		} else {
-			block = gfs2_alloc_data(ip);
-
 			error = gfs2_unstuffer_page(ip, dibh, block, page);
 			if (error)
 				goto out_brelse;
@@ -161,12 +161,11 @@
 
 	if (ip->i_di.di_size) {
 		*(__be64 *)(di + 1) = cpu_to_be64(block);
-		ip->i_di.di_blocks++;
-		gfs2_set_inode_blocks(&ip->i_inode);
-		di->di_blocks = cpu_to_be64(ip->i_di.di_blocks);
+		gfs2_add_inode_blocks(&ip->i_inode, 1);
+		di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
 	}
 
-	ip->i_di.di_height = 1;
+	ip->i_height = 1;
 	di->di_height = cpu_to_be16(1);
 
 out_brelse:
@@ -176,114 +175,13 @@
 	return error;
 }
 
-/**
- * calc_tree_height - Calculate the height of a metadata tree
- * @ip: The GFS2 inode
- * @size: The proposed size of the file
- *
- * Work out how tall a metadata tree needs to be in order to accommodate a
- * file of a particular size. If size is less than the current size of
- * the inode, then the current size of the inode is used instead of the
- * supplied one.
- *
- * Returns: the height the tree should be
- */
-
-static unsigned int calc_tree_height(struct gfs2_inode *ip, u64 size)
-{
-	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
-	u64 *arr;
-	unsigned int max, height;
-
-	if (ip->i_di.di_size > size)
-		size = ip->i_di.di_size;
-
-	if (gfs2_is_dir(ip)) {
-		arr = sdp->sd_jheightsize;
-		max = sdp->sd_max_jheight;
-	} else {
-		arr = sdp->sd_heightsize;
-		max = sdp->sd_max_height;
-	}
-
-	for (height = 0; height < max; height++)
-		if (arr[height] >= size)
-			break;
-
-	return height;
-}
-
-/**
- * build_height - Build a metadata tree of the requested height
- * @ip: The GFS2 inode
- * @height: The height to build to
- *
- *
- * Returns: errno
- */
-
-static int build_height(struct inode *inode, unsigned height)
-{
-	struct gfs2_inode *ip = GFS2_I(inode);
-	unsigned new_height = height - ip->i_di.di_height;
-	struct buffer_head *dibh;
-	struct buffer_head *blocks[GFS2_MAX_META_HEIGHT];
-	struct gfs2_dinode *di;
-	int error;
-	__be64 *bp;
-	u64 bn;
-	unsigned n;
-
-	if (height <= ip->i_di.di_height)
-		return 0;
-
-	error = gfs2_meta_inode_buffer(ip, &dibh);
-	if (error)
-		return error;
-
-	for(n = 0; n < new_height; n++) {
-		bn = gfs2_alloc_meta(ip);
-		blocks[n] = gfs2_meta_new(ip->i_gl, bn);
-		gfs2_trans_add_bh(ip->i_gl, blocks[n], 1);
-	}
-
-	n = 0;
-	bn = blocks[0]->b_blocknr;
-	if (new_height > 1) {
-		for(; n < new_height-1; n++) {
-			gfs2_metatype_set(blocks[n], GFS2_METATYPE_IN,
-					  GFS2_FORMAT_IN);
-			gfs2_buffer_clear_tail(blocks[n],
-					       sizeof(struct gfs2_meta_header));
-			bp = (__be64 *)(blocks[n]->b_data +
-				     sizeof(struct gfs2_meta_header));
-			*bp = cpu_to_be64(blocks[n+1]->b_blocknr);
-			brelse(blocks[n]);
-			blocks[n] = NULL;
-		}
-	}
-	gfs2_metatype_set(blocks[n], GFS2_METATYPE_IN, GFS2_FORMAT_IN);
-	gfs2_buffer_copy_tail(blocks[n], sizeof(struct gfs2_meta_header),
-			      dibh, sizeof(struct gfs2_dinode));
-	brelse(blocks[n]);
-	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-	di = (struct gfs2_dinode *)dibh->b_data;
-	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
-	*(__be64 *)(di + 1) = cpu_to_be64(bn);
-	ip->i_di.di_height += new_height;
-	ip->i_di.di_blocks += new_height;
-	gfs2_set_inode_blocks(&ip->i_inode);
-	di->di_height = cpu_to_be16(ip->i_di.di_height);
-	di->di_blocks = cpu_to_be64(ip->i_di.di_blocks);
-	brelse(dibh);
-	return error;
-}
 
 /**
  * find_metapath - Find path through the metadata tree
- * @ip: The inode pointer
+ * @sdp: The superblock
  * @mp: The metapath to return the result in
  * @block: The disk block to look up
+ * @height: The pre-calculated height of the metadata tree
  *
  *   This routine returns a struct metapath structure that defines a path
  *   through the metadata of inode "ip" to get to block "block".
@@ -338,21 +236,29 @@
  *
  */
 
-static void find_metapath(struct gfs2_inode *ip, u64 block,
-			  struct metapath *mp)
+static void find_metapath(const struct gfs2_sbd *sdp, u64 block,
+			  struct metapath *mp, unsigned int height)
 {
-	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
-	u64 b = block;
 	unsigned int i;
 
-	for (i = ip->i_di.di_height; i--;)
-		mp->mp_list[i] = do_div(b, sdp->sd_inptrs);
+	for (i = height; i--;)
+		mp->mp_list[i] = do_div(block, sdp->sd_inptrs);
 
 }
 
+static inline unsigned int zero_metapath_length(const struct metapath *mp,
+						unsigned height)
+{
+	unsigned int i;
+	for (i = 0; i < height - 1; i++) {
+		if (mp->mp_list[i] != 0)
+			return i;
+	}
+	return height;
+}
+
 /**
  * metapointer - Return pointer to start of metadata in a buffer
- * @bh: The buffer
  * @height: The metadata height (0 = dinode)
  * @mp: The metapath
  *
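
find_metapath() peels the logical block number into one pointer index per
tree level with repeated do_div() by sd_inptrs (pointers per indirect
block). A worked example, assuming 4 KiB blocks so that sd_inptrs ==
(4096 - sizeof(struct gfs2_meta_header)) / sizeof(__be64) == 509:

	/* height == 3, block == 1000000, sd_inptrs == 509 */
	mp->mp_list[i] = do_div(block, sdp->sd_inptrs);
	/* i == 2: 1000000 == 1964 * 509 + 324  ->  mp_list[2] == 324 */
	/* i == 1:    1964 ==    3 * 509 + 437  ->  mp_list[1] == 437 */
	/* i == 0:       3 ==    0 * 509 +   3  ->  mp_list[0] ==   3 */

The path thus reads: slot 3 in the dinode, slot 437 in the first indirect
block, slot 324 in the bottom indirect block.
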
@@ -361,93 +267,302 @@
  * metadata tree.
  */
 
-static inline __be64 *metapointer(struct buffer_head *bh, int *boundary,
-			       unsigned int height, const struct metapath *mp)
+static inline __be64 *metapointer(unsigned int height, const struct metapath *mp)
 {
+	struct buffer_head *bh = mp->mp_bh[height];
 	unsigned int head_size = (height > 0) ?
 		sizeof(struct gfs2_meta_header) : sizeof(struct gfs2_dinode);
-	__be64 *ptr;
-	*boundary = 0;
-	ptr = ((__be64 *)(bh->b_data + head_size)) + mp->mp_list[height];
-	if (ptr + 1 == (__be64 *)(bh->b_data + bh->b_size))
-		*boundary = 1;
-	return ptr;
+	return ((__be64 *)(bh->b_data + head_size)) + mp->mp_list[height];
 }
 
 /**
- * lookup_block - Get the next metadata block in metadata tree
- * @ip: The GFS2 inode
- * @bh: Buffer containing the pointers to metadata blocks
- * @height: The height of the tree (0 = dinode)
+ * lookup_metapath - Walk the metadata tree to a specific point
+ * @ip: The inode
  * @mp: The metapath
- * @create: Non-zero if we may create a new meatdata block
- * @new: Used to indicate if we did create a new metadata block
- * @block: the returned disk block number
  *
- * Given a metatree, complete to a particular height, checks to see if the next
- * height of the tree exists. If not the next height of the tree is created.
- * The block number of the next height of the metadata tree is returned.
+ * Assumes that the inode's buffer has already been looked up and
+ * hooked onto mp->mp_bh[0] and that the metapath has been initialised
+ * by find_metapath().
  *
+ * If this function encounters part of the tree which has not been
+ * allocated, it returns the current height of the tree at the point
+ * at which it found the unallocated block. Blocks which are found are
+ * added to the mp->mp_bh[] list.
+ *
+ * Returns: error or height of metadata tree
  */
 
-static int lookup_block(struct gfs2_inode *ip, struct buffer_head *bh,
-			unsigned int height, struct metapath *mp, int create,
-			int *new, u64 *block)
+static int lookup_metapath(struct gfs2_inode *ip, struct metapath *mp)
 {
-	int boundary;
-	__be64 *ptr = metapointer(bh, &boundary, height, mp);
+	unsigned int end_of_metadata = ip->i_height - 1;
+	unsigned int x;
+	__be64 *ptr;
+	u64 dblock;
+	int ret;
 
-	if (*ptr) {
-		*block = be64_to_cpu(*ptr);
-		return boundary;
+	for (x = 0; x < end_of_metadata; x++) {
+		ptr = metapointer(x, mp);
+		dblock = be64_to_cpu(*ptr);
+		if (!dblock)
+			return x + 1;
+
+		ret = gfs2_meta_indirect_buffer(ip, x+1, dblock, 0, &mp->mp_bh[x+1]);
+		if (ret)
+			return ret;
 	}
 
-	*block = 0;
-
-	if (!create)
-		return 0;
-
-	if (height == ip->i_di.di_height - 1 && !gfs2_is_dir(ip))
-		*block = gfs2_alloc_data(ip);
-	else
-		*block = gfs2_alloc_meta(ip);
-
-	gfs2_trans_add_bh(ip->i_gl, bh, 1);
-
-	*ptr = cpu_to_be64(*block);
-	ip->i_di.di_blocks++;
-	gfs2_set_inode_blocks(&ip->i_inode);
-
-	*new = 1;
-	return 0;
+	return ip->i_height;
 }
 
-static inline void bmap_lock(struct inode *inode, int create)
+static inline void release_metapath(struct metapath *mp)
 {
-	struct gfs2_inode *ip = GFS2_I(inode);
+	int i;
+
+	for (i = 0; i < GFS2_MAX_META_HEIGHT; i++) {
+		if (mp->mp_bh[i] == NULL)
+			break;
+		brelse(mp->mp_bh[i]);
+	}
+}
+
+/**
+ * gfs2_extent_length - Returns length of an extent of blocks
+ * @start: Start of the buffer
+ * @len: Length of the buffer in bytes
+ * @ptr: Current position in the buffer
+ * @limit: Max extent length to return (0 = unlimited)
+ * @eob: Set to 1 if we hit "end of block"
+ *
+ * If the first block is zero (unallocated) it will return the number of
+ * unallocated blocks in the extent, otherwise it will return the number
+ * of contiguous blocks in the extent.
+ *
+ * Returns: The length of the extent (minimum of one block)
+ */
+
+static inline unsigned int gfs2_extent_length(void *start, unsigned int len, __be64 *ptr, unsigned limit, int *eob)
+{
+	const __be64 *end = (start + len);
+	const __be64 *first = ptr;
+	u64 d = be64_to_cpu(*ptr);
+
+	*eob = 0;
+	do {
+		ptr++;
+		if (ptr >= end)
+			break;
+		if (limit && --limit == 0)
+			break;
+		if (d)
+			d++;
+	} while(be64_to_cpu(*ptr) == d);
+	if (ptr >= end)
+		*eob = 1;
+	return (ptr - first);
+}
+
+static inline void bmap_lock(struct gfs2_inode *ip, int create)
+{
 	if (create)
 		down_write(&ip->i_rw_mutex);
 	else
 		down_read(&ip->i_rw_mutex);
 }
 
-static inline void bmap_unlock(struct inode *inode, int create)
+static inline void bmap_unlock(struct gfs2_inode *ip, int create)
 {
-	struct gfs2_inode *ip = GFS2_I(inode);
 	if (create)
 		up_write(&ip->i_rw_mutex);
 	else
 		up_read(&ip->i_rw_mutex);
 }
 
+static inline __be64 *gfs2_indirect_init(struct metapath *mp,
+					 struct gfs2_glock *gl, unsigned int i,
+					 unsigned offset, u64 bn)
+{
+	__be64 *ptr = (__be64 *)(mp->mp_bh[i - 1]->b_data +
+		       ((i > 1) ? sizeof(struct gfs2_meta_header) :
+				 sizeof(struct gfs2_dinode)));
+	BUG_ON(i < 1);
+	BUG_ON(mp->mp_bh[i] != NULL);
+	mp->mp_bh[i] = gfs2_meta_new(gl, bn);
+	gfs2_trans_add_bh(gl, mp->mp_bh[i], 1);
+	gfs2_metatype_set(mp->mp_bh[i], GFS2_METATYPE_IN, GFS2_FORMAT_IN);
+	gfs2_buffer_clear_tail(mp->mp_bh[i], sizeof(struct gfs2_meta_header));
+	ptr += offset;
+	*ptr = cpu_to_be64(bn);
+	return ptr;
+}
+
+enum alloc_state {
+	ALLOC_DATA = 0,
+	ALLOC_GROW_DEPTH = 1,
+	ALLOC_GROW_HEIGHT = 2,
+	/* ALLOC_UNSTUFF = 3,   TBD and rather complicated */
+};
+
+/**
+ * gfs2_bmap_alloc - Build a metadata tree of the requested height
+ * @inode: The GFS2 inode
+ * @lblock: The logical starting block of the extent
+ * @bh_map: This is used to return the mapping details
+ * @mp: The metapath
+ * @sheight: The starting height (i.e. what's already mapped)
+ * @height: The height to build to
+ * @maxlen: The max number of data blocks to alloc
+ *
+ * In this routine we may have to alloc:
+ *   i) Indirect blocks to grow the metadata tree height
+ *  ii) Indirect blocks to fill in the lower part of the metadata tree
+ * iii) Data blocks
+ *
+ * The function is in two parts. The first part works out the total
+ * number of blocks which we need. The second part does the actual
+ * allocation, asking for an extent at a time (if enough contiguous free
+ * blocks are available, there will only be one request per bmap call)
+ * and uses the state machine to initialise the blocks in order.
+ *
+ * Returns: errno on error
+ */
+
+static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
+			   struct buffer_head *bh_map, struct metapath *mp,
+			   const unsigned int sheight,
+			   const unsigned int height,
+			   const unsigned int maxlen)
+{
+	struct gfs2_inode *ip = GFS2_I(inode);
+	struct gfs2_sbd *sdp = GFS2_SB(inode);
+	struct buffer_head *dibh = mp->mp_bh[0];
+	u64 bn, dblock = 0;
+	unsigned n, i, blks, alloced = 0, iblks = 0, zmpl = 0;
+	unsigned dblks = 0;
+	unsigned ptrs_per_blk;
+	const unsigned end_of_metadata = height - 1;
+	int eob = 0;
+	enum alloc_state state;
+	__be64 *ptr;
+	__be64 zero_bn = 0;
+
+	BUG_ON(sheight < 1);
+	BUG_ON(dibh == NULL);
+
+	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
+
+	if (height == sheight) {
+		struct buffer_head *bh;
+		/* Bottom indirect block exists, find unallocated extent size */
+		ptr = metapointer(end_of_metadata, mp);
+		bh = mp->mp_bh[end_of_metadata];
+		dblks = gfs2_extent_length(bh->b_data, bh->b_size, ptr, maxlen,
+					   &eob);
+		BUG_ON(dblks < 1);
+		state = ALLOC_DATA;
+	} else {
+		/* Need to allocate indirect blocks */
+		ptrs_per_blk = height > 1 ? sdp->sd_inptrs : sdp->sd_diptrs;
+		dblks = min(maxlen, ptrs_per_blk - mp->mp_list[end_of_metadata]);
+		if (height == ip->i_height) {
+			/* Writing into existing tree, extend tree down */
+			iblks = height - sheight;
+			state = ALLOC_GROW_DEPTH;
+		} else {
+			/* Building up tree height */
+			state = ALLOC_GROW_HEIGHT;
+			iblks = height - ip->i_height;
+			zmpl = zero_metapath_length(mp, height);
+			iblks -= zmpl;
+			iblks += height;
+		}
+	}
+
+	/* start of the second part of the function (state machine) */
+
+	blks = dblks + iblks;
+	i = sheight;
+	do {
+		n = blks - alloced;
+		bn = gfs2_alloc_block(ip, &n);
+		alloced += n;
+		if (state != ALLOC_DATA || gfs2_is_jdata(ip))
+			gfs2_trans_add_unrevoke(sdp, bn, n);
+		switch (state) {
+		/* Growing height of tree */
+		case ALLOC_GROW_HEIGHT:
+			if (i == 1) {
+				ptr = (__be64 *)(dibh->b_data +
+						 sizeof(struct gfs2_dinode));
+				zero_bn = *ptr;
+			}
+			for (; i - 1 < height - ip->i_height && n > 0; i++, n--)
+				gfs2_indirect_init(mp, ip->i_gl, i, 0, bn++);
+			if (i - 1 == height - ip->i_height) {
+				i--;
+				gfs2_buffer_copy_tail(mp->mp_bh[i],
+						sizeof(struct gfs2_meta_header),
+						dibh, sizeof(struct gfs2_dinode));
+				gfs2_buffer_clear_tail(dibh,
+						sizeof(struct gfs2_dinode) +
+						sizeof(__be64));
+				ptr = (__be64 *)(mp->mp_bh[i]->b_data +
+					sizeof(struct gfs2_meta_header));
+				*ptr = zero_bn;
+				state = ALLOC_GROW_DEPTH;
+				for(i = zmpl; i < height; i++) {
+					if (mp->mp_bh[i] == NULL)
+						break;
+					brelse(mp->mp_bh[i]);
+					mp->mp_bh[i] = NULL;
+				}
+				i = zmpl;
+			}
+			if (n == 0)
+				break;
+		/* Branching from existing tree */
+		case ALLOC_GROW_DEPTH:
+			if (i > 1 && i < height)
+				gfs2_trans_add_bh(ip->i_gl, mp->mp_bh[i-1], 1);
+			for (; i < height && n > 0; i++, n--)
+				gfs2_indirect_init(mp, ip->i_gl, i,
+						   mp->mp_list[i-1], bn++);
+			if (i == height)
+				state = ALLOC_DATA;
+			if (n == 0)
+				break;
+		/* Tree complete, adding data blocks */
+		case ALLOC_DATA:
+			BUG_ON(n > dblks);
+			BUG_ON(mp->mp_bh[end_of_metadata] == NULL);
+			gfs2_trans_add_bh(ip->i_gl, mp->mp_bh[end_of_metadata], 1);
+			dblks = n;
+			ptr = metapointer(end_of_metadata, mp);
+			dblock = bn;
+			while (n-- > 0)
+				*ptr++ = cpu_to_be64(bn++);
+			break;
+		}
+	} while (state != ALLOC_DATA);
+
+	ip->i_height = height;
+	gfs2_add_inode_blocks(&ip->i_inode, alloced);
+	gfs2_dinode_out(ip, mp->mp_bh[0]->b_data);
+	map_bh(bh_map, inode->i_sb, dblock);
+	bh_map->b_size = dblks << inode->i_blkbits;
+	set_buffer_new(bh_map);
+	return 0;
+}
+
 /**
  * gfs2_block_map - Map a block from an inode to a disk block
  * @inode: The inode
  * @lblock: The logical block number
  * @bh_map: The bh to be mapped
+ * @create: True if it's ok to alloc blocks to satisfy the request
  *
- * Find the block number on the current device which corresponds to an
- * inode's block. If the block had to be created, "new" will be set.
+ * Sets buffer_mapped() if successful; sets buffer_boundary() if a
+ * read of metadata will be required before the next block can be
+ * mapped. Sets buffer_new() if new blocks were allocated.
  *
  * Returns: errno
  */
@@ -457,97 +572,78 @@
 {
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
-	struct buffer_head *bh;
-	unsigned int bsize;
-	unsigned int height;
-	unsigned int end_of_metadata;
-	unsigned int x;
-	int error = 0;
-	int new = 0;
-	u64 dblock = 0;
-	int boundary;
-	unsigned int maxlen = bh_map->b_size >> inode->i_blkbits;
-	struct metapath mp;
+	unsigned int bsize = sdp->sd_sb.sb_bsize;
+	const unsigned int maxlen = bh_map->b_size >> inode->i_blkbits;
+	const u64 *arr = sdp->sd_heightsize;
+	__be64 *ptr;
 	u64 size;
-	struct buffer_head *dibh = NULL;
+	struct metapath mp;
+	int ret;
+	int eob;
+	unsigned int len;
+	struct buffer_head *bh;
+	u8 height;
 
 	BUG_ON(maxlen == 0);
 
-	if (gfs2_assert_warn(sdp, !gfs2_is_stuffed(ip)))
-		return 0;
-
-	bmap_lock(inode, create);
+	memset(mp.mp_bh, 0, sizeof(mp.mp_bh));
+	bmap_lock(ip, create);
 	clear_buffer_mapped(bh_map);
 	clear_buffer_new(bh_map);
 	clear_buffer_boundary(bh_map);
-	bsize = gfs2_is_dir(ip) ? sdp->sd_jbsize : sdp->sd_sb.sb_bsize;
+	if (gfs2_is_dir(ip)) {
+		bsize = sdp->sd_jbsize;
+		arr = sdp->sd_jheightsize;
+	}
+
+	ret = gfs2_meta_inode_buffer(ip, &mp.mp_bh[0]);
+	if (ret)
+		goto out;
+
+	height = ip->i_height;
 	size = (lblock + 1) * bsize;
+	while (size > arr[height])
+		height++;
+	find_metapath(sdp, lblock, &mp, height);
+	ret = 1;
+	if (height > ip->i_height || gfs2_is_stuffed(ip))
+		goto do_alloc;
+	ret = lookup_metapath(ip, &mp);
+	if (ret < 0)
+		goto out;
+	if (ret != ip->i_height)
+		goto do_alloc;
+	ptr = metapointer(ip->i_height - 1, &mp);
+	if (*ptr == 0)
+		goto do_alloc;
+	map_bh(bh_map, inode->i_sb, be64_to_cpu(*ptr));
+	bh = mp.mp_bh[ip->i_height - 1];
+	len = gfs2_extent_length(bh->b_data, bh->b_size, ptr, maxlen, &eob);
+	bh_map->b_size = (len << inode->i_blkbits);
+	if (eob)
+		set_buffer_boundary(bh_map);
+	ret = 0;
+out:
+	release_metapath(&mp);
+	bmap_unlock(ip, create);
+	return ret;
 
-	if (size > ip->i_di.di_size) {
-		height = calc_tree_height(ip, size);
-		if (ip->i_di.di_height < height) {
-			if (!create)
-				goto out_ok;
-	
-			error = build_height(inode, height);
-			if (error)
-				goto out_fail;
-		}
+do_alloc:
+	/* All allocations are done here; first check the create flag */
+	if (!create) {
+		BUG_ON(gfs2_is_stuffed(ip));
+		ret = 0;
+		goto out;
 	}
 
-	find_metapath(ip, lblock, &mp);
-	end_of_metadata = ip->i_di.di_height - 1;
-	error = gfs2_meta_inode_buffer(ip, &bh);
-	if (error)
-		goto out_fail;
-	dibh = bh;
-	get_bh(dibh);
-
-	for (x = 0; x < end_of_metadata; x++) {
-		lookup_block(ip, bh, x, &mp, create, &new, &dblock);
-		brelse(bh);
-		if (!dblock)
-			goto out_ok;
-
-		error = gfs2_meta_indirect_buffer(ip, x+1, dblock, new, &bh);
-		if (error)
-			goto out_fail;
-	}
-
-	boundary = lookup_block(ip, bh, end_of_metadata, &mp, create, &new, &dblock);
-	if (dblock) {
-		map_bh(bh_map, inode->i_sb, dblock);
-		if (boundary)
-			set_buffer_boundary(bh_map);
-		if (new) {
-			gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-			gfs2_dinode_out(ip, dibh->b_data);
-			set_buffer_new(bh_map);
-			goto out_brelse;
-		}
-		while(--maxlen && !buffer_boundary(bh_map)) {
-			u64 eblock;
-
-			mp.mp_list[end_of_metadata]++;
-			boundary = lookup_block(ip, bh, end_of_metadata, &mp, 0, &new, &eblock);
-			if (eblock != ++dblock)
-				break;
-			bh_map->b_size += (1 << inode->i_blkbits);
-			if (boundary)
-				set_buffer_boundary(bh_map);
-		}
-	}
-out_brelse:
-	brelse(bh);
-out_ok:
-	error = 0;
-out_fail:
-	if (dibh)
-		brelse(dibh);
-	bmap_unlock(inode, create);
-	return error;
+	/* At this point ret is the tree depth of already allocated blocks */
+	ret = gfs2_bmap_alloc(inode, lblock, bh_map, &mp, ret, height, maxlen);
+	goto out;
 }
 
+/*
+ * Deprecated: do not use in new code
+ */
 int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsigned *extlen)
 {
 	struct buffer_head bh = { .b_state = 0, .b_blocknr = 0 };
@@ -558,7 +654,7 @@
 	BUG_ON(!dblock);
 	BUG_ON(!new);
 
-	bh.b_size = 1 << (inode->i_blkbits + 5);
+	bh.b_size = 1 << (inode->i_blkbits + (create ? 0 : 5));
 	ret = gfs2_block_map(inode, lblock, &bh, create);
 	*extlen = bh.b_size >> inode->i_blkbits;
 	*dblock = bh.b_blocknr;
@@ -621,7 +717,7 @@
 	if (error)
 		goto out;
 
-	if (height < ip->i_di.di_height - 1)
+	if (height < ip->i_height - 1)
 		for (; top < bottom; top++, first = 0) {
 			if (!*top)
 				continue;
@@ -679,7 +775,7 @@
 		sm->sm_first = 0;
 	}
 
-	metadata = (height != ip->i_di.di_height - 1);
+	metadata = (height != ip->i_height - 1);
 	if (metadata)
 		revokes = (height) ? sdp->sd_inptrs : sdp->sd_diptrs;
 
@@ -713,7 +809,7 @@
 	else
 		goto out; /* Nothing to do */
 
-	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE, 0);
+	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
 
 	for (x = 0; x < rlist.rl_rgrps; x++) {
 		struct gfs2_rgrpd *rgd;
@@ -760,10 +856,7 @@
 		}
 
 		*p = 0;
-		if (!ip->i_di.di_blocks)
-			gfs2_consist_inode(ip);
-		ip->i_di.di_blocks--;
-		gfs2_set_inode_blocks(&ip->i_inode);
+		gfs2_add_inode_blocks(&ip->i_inode, -1);
 	}
 	if (bstart) {
 		if (metadata)
@@ -804,19 +897,16 @@
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 	struct gfs2_alloc *al;
 	struct buffer_head *dibh;
-	unsigned int h;
 	int error;
 
 	al = gfs2_alloc_get(ip);
+	if (!al)
+		return -ENOMEM;
 
-	error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
+	error = gfs2_quota_lock_check(ip);
 	if (error)
 		goto out;
 
-	error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
-	if (error)
-		goto out_gunlock_q;
-
 	al->al_requested = sdp->sd_max_height + RES_DATA;
 
 	error = gfs2_inplace_reserve(ip);
@@ -829,34 +919,25 @@
 	if (error)
 		goto out_ipres;
 
+	error = gfs2_meta_inode_buffer(ip, &dibh);
+	if (error)
+		goto out_end_trans;
+
 	if (size > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
 		if (gfs2_is_stuffed(ip)) {
 			error = gfs2_unstuff_dinode(ip, NULL);
 			if (error)
-				goto out_end_trans;
-		}
-
-		h = calc_tree_height(ip, size);
-		if (ip->i_di.di_height < h) {
-			down_write(&ip->i_rw_mutex);
-			error = build_height(&ip->i_inode, h);
-			up_write(&ip->i_rw_mutex);
-			if (error)
-				goto out_end_trans;
+				goto out_brelse;
 		}
 	}
 
 	ip->i_di.di_size = size;
 	ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
-
-	error = gfs2_meta_inode_buffer(ip, &dibh);
-	if (error)
-		goto out_end_trans;
-
 	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
 	gfs2_dinode_out(ip, dibh->b_data);
-	brelse(dibh);
 
+out_brelse:
+	brelse(dibh);
 out_end_trans:
 	gfs2_trans_end(sdp);
 out_ipres:
@@ -986,7 +1067,8 @@
 
 static int trunc_dealloc(struct gfs2_inode *ip, u64 size)
 {
-	unsigned int height = ip->i_di.di_height;
+	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+	unsigned int height = ip->i_height;
 	u64 lblock;
 	struct metapath mp;
 	int error;
@@ -994,10 +1076,11 @@
 	if (!size)
 		lblock = 0;
 	else
-		lblock = (size - 1) >> GFS2_SB(&ip->i_inode)->sd_sb.sb_bsize_shift;
+		lblock = (size - 1) >> sdp->sd_sb.sb_bsize_shift;
 
-	find_metapath(ip, lblock, &mp);
-	gfs2_alloc_get(ip);
+	find_metapath(sdp, lblock, &mp, ip->i_height);
+	if (!gfs2_alloc_get(ip))
+		return -ENOMEM;
 
 	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
 	if (error)
@@ -1037,10 +1120,8 @@
 		goto out;
 
 	if (!ip->i_di.di_size) {
-		ip->i_di.di_height = 0;
-		ip->i_di.di_goal_meta =
-			ip->i_di.di_goal_data =
-			ip->i_no_addr;
+		ip->i_height = 0;
+		ip->i_goal = ip->i_no_addr;
 		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
 	}
 	ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
@@ -1197,10 +1278,9 @@
 			      unsigned int len, int *alloc_required)
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
-	u64 lblock, lblock_stop, dblock;
-	u32 extlen;
-	int new = 0;
-	int error = 0;
+	struct buffer_head bh;
+	unsigned int shift;
+	u64 lblock, lblock_stop, size;
 
 	*alloc_required = 0;
 
@@ -1214,6 +1294,8 @@
 		return 0;
 	}
 
+	*alloc_required = 1;
+	shift = sdp->sd_sb.sb_bsize_shift;
 	if (gfs2_is_dir(ip)) {
 		unsigned int bsize = sdp->sd_jbsize;
 		lblock = offset;
@@ -1221,27 +1303,25 @@
 		lblock_stop = offset + len + bsize - 1;
 		do_div(lblock_stop, bsize);
 	} else {
-		unsigned int shift = sdp->sd_sb.sb_bsize_shift;
 		u64 end_of_file = (ip->i_di.di_size + sdp->sd_sb.sb_bsize - 1) >> shift;
 		lblock = offset >> shift;
 		lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift;
-		if (lblock_stop > end_of_file) {
-			*alloc_required = 1;
+		if (lblock_stop > end_of_file)
 			return 0;
-		}
 	}
 
-	for (; lblock < lblock_stop; lblock += extlen) {
-		error = gfs2_extent_map(&ip->i_inode, lblock, &new, &dblock, &extlen);
-		if (error)
-			return error;
-
-		if (!dblock) {
-			*alloc_required = 1;
+	size = (lblock_stop - lblock) << shift;
+	do {
+		bh.b_state = 0;
+		bh.b_size = size;
+		gfs2_block_map(&ip->i_inode, lblock, &bh, 0);
+		if (!buffer_mapped(&bh))
 			return 0;
-		}
-	}
+		size -= bh.b_size;
+		lblock += (bh.b_size >> ip->i_inode.i_blkbits);
+	} while(size > 0);
 
+	*alloc_required = 0;
 	return 0;
 }
 
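
The bmap.c changes above replace the old one-block-at-a-time lookup_block()
walk with extent-at-a-time mapping built on gfs2_extent_length(). Below is a
minimal user-space sketch of that scan, using host-endian u64 values in place
of the on-disk __be64 pointers; the harness around it is illustrative, not
part of the patch.

#include <stdio.h>
#include <stdint.h>

/* Same scan as gfs2_extent_length(): starting at *ptr, count how far the
 * run continues.  A run of zeros is an unallocated extent; a run of
 * consecutive block numbers is an allocated one. */
static unsigned int extent_length(const uint64_t *start, unsigned int nptrs,
				  const uint64_t *ptr, unsigned int limit,
				  int *eob)
{
	const uint64_t *end = start + nptrs;
	const uint64_t *first = ptr;
	uint64_t d = *ptr;

	*eob = 0;
	do {
		ptr++;
		if (ptr >= end)
			break;
		if (limit && --limit == 0)
			break;
		if (d)
			d++;
	} while (*ptr == d);
	if (ptr >= end)
		*eob = 1;
	return ptr - first;
}

int main(void)
{
	/* blocks 100..102 are contiguous, then a jump, then a hole */
	uint64_t ptrs[] = { 100, 101, 102, 200, 0, 0, 0 };
	unsigned int len;
	int eob;

	len = extent_length(ptrs, 7, &ptrs[0], 0, &eob);
	printf("allocated run: %u blocks, eob=%d\n", len, eob); /* 3, 0 */
	len = extent_length(ptrs, 7, &ptrs[4], 0, &eob);
	printf("hole:          %u blocks, eob=%d\n", len, eob); /* 3, 1 */
	return 0;
}

The limit argument corresponds to maxlen in gfs2_block_map(), so a caller
asking for N blocks never gets a longer extent, and eob is what drives
set_buffer_boundary(): hitting the end of the indirect block means a further
metadata read is needed before the next block can be mapped.
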
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index c347095..eed040d 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -159,6 +159,7 @@
 	unsigned int o;
 	int copied = 0;
 	int error = 0;
+	int new = 0;
 
 	if (!size)
 		return 0;
@@ -183,7 +184,6 @@
 	while (copied < size) {
 		unsigned int amount;
 		struct buffer_head *bh;
-		int new = 0;
 
 		amount = size - copied;
 		if (amount > sdp->sd_sb.sb_bsize - o)
@@ -757,7 +757,7 @@
 
 	if (ip->i_di.di_flags & GFS2_DIF_EXHASH) {
 		struct gfs2_leaf *leaf;
-		unsigned hsize = 1 << ip->i_di.di_depth;
+		unsigned hsize = 1 << ip->i_depth;
 		unsigned index;
 		u64 ln;
 		if (hsize * sizeof(u64) != ip->i_di.di_size) {
@@ -765,7 +765,7 @@
 			return ERR_PTR(-EIO);
 		}
 
-		index = name->hash >> (32 - ip->i_di.di_depth);
+		index = name->hash >> (32 - ip->i_depth);
 		error = get_first_leaf(ip, index, &bh);
 		if (error)
 			return ERR_PTR(error);
@@ -803,14 +803,15 @@
 static struct gfs2_leaf *new_leaf(struct inode *inode, struct buffer_head **pbh, u16 depth)
 {
 	struct gfs2_inode *ip = GFS2_I(inode);
-	u64 bn = gfs2_alloc_meta(ip);
+	unsigned int n = 1;
+	u64 bn = gfs2_alloc_block(ip, &n);
 	struct buffer_head *bh = gfs2_meta_new(ip->i_gl, bn);
 	struct gfs2_leaf *leaf;
 	struct gfs2_dirent *dent;
 	struct qstr name = { .name = "", .len = 0, .hash = 0 };
 	if (!bh)
 		return NULL;
-
+	gfs2_trans_add_unrevoke(GFS2_SB(inode), bn, 1);
 	gfs2_trans_add_bh(ip->i_gl, bh, 1);
 	gfs2_metatype_set(bh, GFS2_METATYPE_LF, GFS2_FORMAT_LF);
 	leaf = (struct gfs2_leaf *)bh->b_data;
@@ -905,12 +906,11 @@
 		*lp = cpu_to_be64(bn);
 
 	dip->i_di.di_size = sdp->sd_sb.sb_bsize / 2;
-	dip->i_di.di_blocks++;
-	gfs2_set_inode_blocks(&dip->i_inode);
+	gfs2_add_inode_blocks(&dip->i_inode, 1);
 	dip->i_di.di_flags |= GFS2_DIF_EXHASH;
 
 	for (x = sdp->sd_hash_ptrs, y = -1; x; x >>= 1, y++) ;
-	dip->i_di.di_depth = y;
+	dip->i_depth = y;
 
 	gfs2_dinode_out(dip, dibh->b_data);
 
@@ -941,7 +941,7 @@
 	int x, moved = 0;
 	int error;
 
-	index = name->hash >> (32 - dip->i_di.di_depth);
+	index = name->hash >> (32 - dip->i_depth);
 	error = get_leaf_nr(dip, index, &leaf_no);
 	if (error)
 		return error;
@@ -952,7 +952,7 @@
 		return error;
 
 	oleaf = (struct gfs2_leaf *)obh->b_data;
-	if (dip->i_di.di_depth == be16_to_cpu(oleaf->lf_depth)) {
+	if (dip->i_depth == be16_to_cpu(oleaf->lf_depth)) {
 		brelse(obh);
 		return 1; /* can't split */
 	}
@@ -967,10 +967,10 @@
 	bn = nbh->b_blocknr;
 
 	/*  Compute the start and len of leaf pointers in the hash table.  */
-	len = 1 << (dip->i_di.di_depth - be16_to_cpu(oleaf->lf_depth));
+	len = 1 << (dip->i_depth - be16_to_cpu(oleaf->lf_depth));
 	half_len = len >> 1;
 	if (!half_len) {
-		printk(KERN_WARNING "di_depth %u lf_depth %u index %u\n", dip->i_di.di_depth, be16_to_cpu(oleaf->lf_depth), index);
+		printk(KERN_WARNING "i_depth %u lf_depth %u index %u\n", dip->i_depth, be16_to_cpu(oleaf->lf_depth), index);
 		gfs2_consist_inode(dip);
 		error = -EIO;
 		goto fail_brelse;
@@ -997,7 +997,7 @@
 	kfree(lp);
 
 	/*  Compute the divider  */
-	divider = (start + half_len) << (32 - dip->i_di.di_depth);
+	divider = (start + half_len) << (32 - dip->i_depth);
 
 	/*  Copy the entries  */
 	dirent_first(dip, obh, &dent);
@@ -1021,13 +1021,13 @@
 
 			new->de_inum = dent->de_inum; /* No endian worries */
 			new->de_type = dent->de_type; /* No endian worries */
-			nleaf->lf_entries = cpu_to_be16(be16_to_cpu(nleaf->lf_entries)+1);
+			be16_add_cpu(&nleaf->lf_entries, 1);
 
 			dirent_del(dip, obh, prev, dent);
 
 			if (!oleaf->lf_entries)
 				gfs2_consist_inode(dip);
-			oleaf->lf_entries = cpu_to_be16(be16_to_cpu(oleaf->lf_entries)-1);
+			be16_add_cpu(&oleaf->lf_entries, -1);
 
 			if (!prev)
 				prev = dent;
@@ -1044,8 +1044,7 @@
 	error = gfs2_meta_inode_buffer(dip, &dibh);
 	if (!gfs2_assert_withdraw(GFS2_SB(&dip->i_inode), !error)) {
 		gfs2_trans_add_bh(dip->i_gl, dibh, 1);
-		dip->i_di.di_blocks++;
-		gfs2_set_inode_blocks(&dip->i_inode);
+		gfs2_add_inode_blocks(&dip->i_inode, 1);
 		gfs2_dinode_out(dip, dibh->b_data);
 		brelse(dibh);
 	}
@@ -1082,7 +1081,7 @@
 	int x;
 	int error = 0;
 
-	hsize = 1 << dip->i_di.di_depth;
+	hsize = 1 << dip->i_depth;
 	if (hsize * sizeof(u64) != dip->i_di.di_size) {
 		gfs2_consist_inode(dip);
 		return -EIO;
@@ -1090,7 +1089,7 @@
 
 	/*  Allocate both the "from" and "to" buffers in one big chunk  */
 
-	buf = kcalloc(3, sdp->sd_hash_bsize, GFP_KERNEL | __GFP_NOFAIL);
+	buf = kcalloc(3, sdp->sd_hash_bsize, GFP_NOFS | __GFP_NOFAIL);
 
 	for (block = dip->i_di.di_size >> sdp->sd_hash_bsize_shift; block--;) {
 		error = gfs2_dir_read_data(dip, (char *)buf,
@@ -1125,7 +1124,7 @@
 
 	error = gfs2_meta_inode_buffer(dip, &dibh);
 	if (!gfs2_assert_withdraw(sdp, !error)) {
-		dip->i_di.di_depth++;
+		dip->i_depth++;
 		gfs2_dinode_out(dip, dibh->b_data);
 		brelse(dibh);
 	}
@@ -1370,16 +1369,16 @@
 	int error = 0;
 	unsigned depth = 0;
 
-	hsize = 1 << dip->i_di.di_depth;
+	hsize = 1 << dip->i_depth;
 	if (hsize * sizeof(u64) != dip->i_di.di_size) {
 		gfs2_consist_inode(dip);
 		return -EIO;
 	}
 
 	hash = gfs2_dir_offset2hash(*offset);
-	index = hash >> (32 - dip->i_di.di_depth);
+	index = hash >> (32 - dip->i_depth);
 
-	lp = kmalloc(sdp->sd_hash_bsize, GFP_KERNEL);
+	lp = kmalloc(sdp->sd_hash_bsize, GFP_NOFS);
 	if (!lp)
 		return -ENOMEM;
 
@@ -1405,7 +1404,7 @@
 		if (error)
 			break;
 
-		len = 1 << (dip->i_di.di_depth - depth);
+		len = 1 << (dip->i_depth - depth);
 		index = (index & ~(len - 1)) + len;
 	}
 
@@ -1444,7 +1443,7 @@
 
 	error = -ENOMEM;
 	/* 96 is max number of dirents which can be stuffed into an inode */
-	darr = kmalloc(96 * sizeof(struct gfs2_dirent *), GFP_KERNEL);
+	darr = kmalloc(96 * sizeof(struct gfs2_dirent *), GFP_NOFS);
 	if (darr) {
 		g.pdent = darr;
 		g.offset = 0;
@@ -1549,7 +1548,7 @@
 	u32 index;
 	u64 bn;
 
-	index = name->hash >> (32 - ip->i_di.di_depth);
+	index = name->hash >> (32 - ip->i_depth);
 	error = get_first_leaf(ip, index, &obh);
 	if (error)
 		return error;
@@ -1579,8 +1578,7 @@
 	if (error)
 		return error;
 	gfs2_trans_add_bh(ip->i_gl, bh, 1);
-	ip->i_di.di_blocks++;
-	gfs2_set_inode_blocks(&ip->i_inode);
+	gfs2_add_inode_blocks(&ip->i_inode, 1);
 	gfs2_dinode_out(ip, bh->b_data);
 	brelse(bh);
 	return 0;
@@ -1616,7 +1614,7 @@
 			dent->de_type = cpu_to_be16(type);
 			if (ip->i_di.di_flags & GFS2_DIF_EXHASH) {
 				leaf = (struct gfs2_leaf *)bh->b_data;
-				leaf->lf_entries = cpu_to_be16(be16_to_cpu(leaf->lf_entries) + 1);
+				be16_add_cpu(&leaf->lf_entries, 1);
 			}
 			brelse(bh);
 			error = gfs2_meta_inode_buffer(ip, &bh);
@@ -1641,7 +1639,7 @@
 			continue;
 		if (error < 0)
 			break;
-		if (ip->i_di.di_depth < GFS2_DIR_MAX_DEPTH) {
+		if (ip->i_depth < GFS2_DIR_MAX_DEPTH) {
 			error = dir_double_exhash(ip);
 			if (error)
 				break;
@@ -1785,13 +1783,13 @@
 	u64 leaf_no;
 	int error = 0;
 
-	hsize = 1 << dip->i_di.di_depth;
+	hsize = 1 << dip->i_depth;
 	if (hsize * sizeof(u64) != dip->i_di.di_size) {
 		gfs2_consist_inode(dip);
 		return -EIO;
 	}
 
-	lp = kmalloc(sdp->sd_hash_bsize, GFP_KERNEL);
+	lp = kmalloc(sdp->sd_hash_bsize, GFP_NOFS);
 	if (!lp)
 		return -ENOMEM;
 
@@ -1817,7 +1815,7 @@
 			if (error)
 				goto out;
 			leaf = (struct gfs2_leaf *)bh->b_data;
-			len = 1 << (dip->i_di.di_depth - be16_to_cpu(leaf->lf_depth));
+			len = 1 << (dip->i_depth - be16_to_cpu(leaf->lf_depth));
 			brelse(bh);
 
 			error = lc(dip, index, len, leaf_no, data);
@@ -1866,15 +1864,18 @@
 
 	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
 
-	ht = kzalloc(size, GFP_KERNEL);
+	ht = kzalloc(size, GFP_NOFS);
 	if (!ht)
 		return -ENOMEM;
 
-	gfs2_alloc_get(dip);
+	if (!gfs2_alloc_get(dip)) {
+		error = -ENOMEM;
+		goto out;
+	}
 
 	error = gfs2_quota_hold(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
 	if (error)
-		goto out;
+		goto out_put;
 
 	error = gfs2_rindex_hold(sdp, &dip->i_alloc->al_ri_gh);
 	if (error)
@@ -1894,7 +1895,7 @@
 		l_blocks++;
 	}
 
-	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE, 0);
+	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
 
 	for (x = 0; x < rlist.rl_rgrps; x++) {
 		struct gfs2_rgrpd *rgd;
@@ -1921,11 +1922,7 @@
 		brelse(bh);
 
 		gfs2_free_meta(dip, blk, 1);
-
-		if (!dip->i_di.di_blocks)
-			gfs2_consist_inode(dip);
-		dip->i_di.di_blocks--;
-		gfs2_set_inode_blocks(&dip->i_inode);
+		gfs2_add_inode_blocks(&dip->i_inode, -1);
 	}
 
 	error = gfs2_dir_write_data(dip, ht, index * sizeof(u64), size);
@@ -1952,8 +1949,9 @@
 	gfs2_glock_dq_uninit(&dip->i_alloc->al_ri_gh);
 out_qs:
 	gfs2_quota_unhold(dip);
-out:
+out_put:
 	gfs2_alloc_put(dip);
+out:
 	kfree(ht);
 	return error;
 }
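
The dir.c hunks above move the exhash directory depth from di_depth to the
new u8 ip->i_depth without changing the arithmetic. Here is a self-contained
illustration of the index, leaf-range and split-divider expressions used in
the leaf-split path; the depth and hash values are hypothetical.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical: table is 2^8 slots deep, one leaf still at depth 6 */
	unsigned int i_depth = 8, lf_depth = 6;
	uint32_t hash = 0xdeadbeef;

	/* which hash-table slot a name falls in: top i_depth bits of hash */
	uint32_t index = hash >> (32 - i_depth);

	/* how many slots point at this leaf, and where its range starts */
	uint32_t len = 1u << (i_depth - lf_depth);
	uint32_t half_len = len >> 1;
	uint32_t start = index & ~(len - 1);

	/* hash value that splits the leaf's slot range in two */
	uint32_t divider = (start + half_len) << (32 - i_depth);

	printf("index=%u len=%u start=%u divider=0x%08x\n",
	       (unsigned)index, (unsigned)len, (unsigned)start,
	       (unsigned)divider);
	return 0;
}

When i_depth equals the leaf's lf_depth the leaf owns a single slot and
half_len would be zero, which is the "can't split" case handled above.
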
diff --git a/fs/gfs2/eattr.c b/fs/gfs2/eattr.c
index bee9970..e3f76f4 100644
--- a/fs/gfs2/eattr.c
+++ b/fs/gfs2/eattr.c
@@ -277,10 +277,7 @@
 		}
 
 		*dataptrs = 0;
-		if (!ip->i_di.di_blocks)
-			gfs2_consist_inode(ip);
-		ip->i_di.di_blocks--;
-		gfs2_set_inode_blocks(&ip->i_inode);
+		gfs2_add_inode_blocks(&ip->i_inode, -1);
 	}
 	if (bstart)
 		gfs2_free_meta(ip, bstart, blen);
@@ -321,6 +318,8 @@
 	int error;
 
 	al = gfs2_alloc_get(ip);
+	if (!al)
+		return -ENOMEM;
 
 	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
 	if (error)
@@ -449,7 +448,7 @@
 	unsigned int x;
 	int error = 0;
 
-	bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_KERNEL);
+	bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_NOFS);
 	if (!bh)
 		return -ENOMEM;
 
@@ -582,10 +581,11 @@
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 	struct gfs2_ea_header *ea;
+	unsigned int n = 1;
 	u64 block;
 
-	block = gfs2_alloc_meta(ip);
-
+	block = gfs2_alloc_block(ip, &n);
+	gfs2_trans_add_unrevoke(sdp, block, 1);
 	*bhp = gfs2_meta_new(ip->i_gl, block);
 	gfs2_trans_add_bh(ip->i_gl, *bhp, 1);
 	gfs2_metatype_set(*bhp, GFS2_METATYPE_EA, GFS2_FORMAT_EA);
@@ -597,8 +597,7 @@
 	ea->ea_flags = GFS2_EAFLAG_LAST;
 	ea->ea_num_ptrs = 0;
 
-	ip->i_di.di_blocks++;
-	gfs2_set_inode_blocks(&ip->i_inode);
+	gfs2_add_inode_blocks(&ip->i_inode, 1);
 
 	return 0;
 }
@@ -642,15 +641,15 @@
 			struct buffer_head *bh;
 			u64 block;
 			int mh_size = sizeof(struct gfs2_meta_header);
+			unsigned int n = 1;
 
-			block = gfs2_alloc_meta(ip);
-
+			block = gfs2_alloc_block(ip, &n);
+			gfs2_trans_add_unrevoke(sdp, block, 1);
 			bh = gfs2_meta_new(ip->i_gl, block);
 			gfs2_trans_add_bh(ip->i_gl, bh, 1);
 			gfs2_metatype_set(bh, GFS2_METATYPE_ED, GFS2_FORMAT_ED);
 
-			ip->i_di.di_blocks++;
-			gfs2_set_inode_blocks(&ip->i_inode);
+			gfs2_add_inode_blocks(&ip->i_inode, 1);
 
 			copy = data_len > sdp->sd_jbsize ? sdp->sd_jbsize :
 							   data_len;
@@ -684,15 +683,13 @@
 	int error;
 
 	al = gfs2_alloc_get(ip);
+	if (!al)
+		return -ENOMEM;
 
-	error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
+	error = gfs2_quota_lock_check(ip);
 	if (error)
 		goto out;
 
-	error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
-	if (error)
-		goto out_gunlock_q;
-
 	al->al_requested = blks;
 
 	error = gfs2_inplace_reserve(ip);
@@ -966,9 +963,9 @@
 		gfs2_trans_add_bh(ip->i_gl, indbh, 1);
 	} else {
 		u64 blk;
-
-		blk = gfs2_alloc_meta(ip);
-
+		unsigned int n = 1;
+		blk = gfs2_alloc_block(ip, &n);
+		gfs2_trans_add_unrevoke(sdp, blk, 1);
 		indbh = gfs2_meta_new(ip->i_gl, blk);
 		gfs2_trans_add_bh(ip->i_gl, indbh, 1);
 		gfs2_metatype_set(indbh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
@@ -978,8 +975,7 @@
 		*eablk = cpu_to_be64(ip->i_di.di_eattr);
 		ip->i_di.di_eattr = blk;
 		ip->i_di.di_flags |= GFS2_DIF_EA_INDIRECT;
-		ip->i_di.di_blocks++;
-		gfs2_set_inode_blocks(&ip->i_inode);
+		gfs2_add_inode_blocks(&ip->i_inode, 1);
 
 		eablk++;
 	}
@@ -1210,7 +1206,7 @@
 	unsigned int x;
 	int error;
 
-	bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_KERNEL);
+	bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_NOFS);
 	if (!bh)
 		return -ENOMEM;
 
@@ -1347,7 +1343,7 @@
 	else
 		goto out;
 
-	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE, 0);
+	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
 
 	for (x = 0; x < rlist.rl_rgrps; x++) {
 		struct gfs2_rgrpd *rgd;
@@ -1387,10 +1383,7 @@
 		}
 
 		*eablk = 0;
-		if (!ip->i_di.di_blocks)
-			gfs2_consist_inode(ip);
-		ip->i_di.di_blocks--;
-		gfs2_set_inode_blocks(&ip->i_inode);
+		gfs2_add_inode_blocks(&ip->i_inode, -1);
 	}
 	if (bstart)
 		gfs2_free_meta(ip, bstart, blen);
@@ -1442,10 +1435,7 @@
 	gfs2_free_meta(ip, ip->i_di.di_eattr, 1);
 
 	ip->i_di.di_eattr = 0;
-	if (!ip->i_di.di_blocks)
-		gfs2_consist_inode(ip);
-	ip->i_di.di_blocks--;
-	gfs2_set_inode_blocks(&ip->i_inode);
+	gfs2_add_inode_blocks(&ip->i_inode, -1);
 
 	error = gfs2_meta_inode_buffer(ip, &dibh);
 	if (!error) {
@@ -1474,6 +1464,8 @@
 	int error;
 
 	al = gfs2_alloc_get(ip);
+	if (!al)
+		return -ENOMEM;
 
 	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
 	if (error)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 7175a4d..d636b3e 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
- * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
+ * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
  *
  * This copyrighted material is made available to anyone wishing to use,
  * modify, copy, or redistribute it subject to the terms and conditions
@@ -35,7 +35,6 @@
 #include "glock.h"
 #include "glops.h"
 #include "inode.h"
-#include "lm.h"
 #include "lops.h"
 #include "meta_io.h"
 #include "quota.h"
@@ -183,7 +182,8 @@
 	struct gfs2_sbd *sdp = gl->gl_sbd;
 	struct inode *aspace = gl->gl_aspace;
 
-	gfs2_lm_put_lock(sdp, gl->gl_lock);
+	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
+		sdp->sd_lockstruct.ls_ops->lm_put_lock(gl->gl_lock);
 
 	if (aspace)
 		gfs2_aspace_put(aspace);
@@ -197,7 +197,7 @@
  *
  */
 
-void gfs2_glock_hold(struct gfs2_glock *gl)
+static void gfs2_glock_hold(struct gfs2_glock *gl)
 {
 	atomic_inc(&gl->gl_ref);
 }
@@ -293,6 +293,16 @@
 	gfs2_glock_put(gl);
 }
 
+static int gfs2_lm_get_lock(struct gfs2_sbd *sdp, struct lm_lockname *name,
+		     void **lockp)
+{
+	int error = -EIO;
+	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
+		error = sdp->sd_lockstruct.ls_ops->lm_get_lock(
+				sdp->sd_lockstruct.ls_lockspace, name, lockp);
+	return error;
+}
+
 /**
  * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
  * @sdp: The GFS2 superblock
@@ -338,8 +348,6 @@
 	gl->gl_ip = 0;
 	gl->gl_ops = glops;
 	gl->gl_req_gh = NULL;
-	gl->gl_req_bh = NULL;
-	gl->gl_vn = 0;
 	gl->gl_stamp = jiffies;
 	gl->gl_tchange = jiffies;
 	gl->gl_object = NULL;
@@ -595,11 +603,12 @@
 			blocked = rq_mutex(gh);
 		} else if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
 			blocked = rq_demote(gl);
-			if (gl->gl_waiters2 && !blocked) {
+			if (test_bit(GLF_WAITERS2, &gl->gl_flags) &&
+				     !blocked) {
 				set_bit(GLF_DEMOTE, &gl->gl_flags);
 				gl->gl_demote_state = LM_ST_UNLOCKED;
 			}
-			gl->gl_waiters2 = 0;
+			clear_bit(GLF_WAITERS2, &gl->gl_flags);
 		} else if (!list_empty(&gl->gl_waiters3)) {
 			gh = list_entry(gl->gl_waiters3.next,
 					struct gfs2_holder, gh_list);
@@ -710,7 +719,7 @@
 	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
 			gl->gl_demote_state != state) {
 		if (test_bit(GLF_DEMOTE_IN_PROGRESS,  &gl->gl_flags)) 
-			gl->gl_waiters2 = 1;
+			set_bit(GLF_WAITERS2, &gl->gl_flags);
 		else 
 			gl->gl_demote_state = LM_ST_UNLOCKED;
 	}
@@ -743,6 +752,43 @@
 }
 
 /**
+ * drop_bh - Called after a lock module unlock completes
+ * @gl: the glock
+ * @ret: the return status
+ *
+ * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
+ * Doesn't drop the reference on the glock the top half took out
+ *
+ */
+
+static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
+{
+	struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct gfs2_holder *gh = gl->gl_req_gh;
+
+	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
+	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
+	gfs2_assert_warn(sdp, !ret);
+
+	state_change(gl, LM_ST_UNLOCKED);
+
+	if (test_and_clear_bit(GLF_CONV_DEADLK, &gl->gl_flags)) {
+		spin_lock(&gl->gl_spin);
+		gh->gh_error = 0;
+		spin_unlock(&gl->gl_spin);
+		gfs2_glock_xmote_th(gl, gl->gl_req_gh);
+		gfs2_glock_put(gl);
+		return;
+	}
+
+	spin_lock(&gl->gl_spin);
+	gfs2_demote_wake(gl);
+	clear_bit(GLF_LOCK, &gl->gl_flags);
+	spin_unlock(&gl->gl_spin);
+	gfs2_glock_put(gl);
+}
+
+/**
  * xmote_bh - Called after the lock module is done acquiring a lock
  * @gl: The glock in question
  * @ret: the int returned from the lock module
@@ -754,25 +800,19 @@
 	struct gfs2_sbd *sdp = gl->gl_sbd;
 	const struct gfs2_glock_operations *glops = gl->gl_ops;
 	struct gfs2_holder *gh = gl->gl_req_gh;
-	int prev_state = gl->gl_state;
 	int op_done = 1;
 
+	if (!gh && (ret & LM_OUT_ST_MASK) == LM_ST_UNLOCKED) {
+		drop_bh(gl, ret);
+		return;
+	}
+
 	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
 	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
 	gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));
 
 	state_change(gl, ret & LM_OUT_ST_MASK);
 
-	if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
-		if (glops->go_inval)
-			glops->go_inval(gl, DIO_METADATA);
-	} else if (gl->gl_state == LM_ST_DEFERRED) {
-		/* We might not want to do this here.
-		   Look at moving to the inode glops. */
-		if (glops->go_inval)
-			glops->go_inval(gl, 0);
-	}
-
 	/*  Deal with each possible exit condition  */
 
 	if (!gh) {
@@ -782,7 +822,6 @@
 		} else {
 			spin_lock(&gl->gl_spin);
 			if (gl->gl_state != gl->gl_demote_state) {
-				gl->gl_req_bh = NULL;
 				spin_unlock(&gl->gl_spin);
 				gfs2_glock_drop_th(gl);
 				gfs2_glock_put(gl);
@@ -793,6 +832,14 @@
 		}
 	} else {
 		spin_lock(&gl->gl_spin);
+		if (ret & LM_OUT_CONV_DEADLK) {
+			gh->gh_error = 0;
+			set_bit(GLF_CONV_DEADLK, &gl->gl_flags);
+			spin_unlock(&gl->gl_spin);
+			gfs2_glock_drop_th(gl);
+			gfs2_glock_put(gl);
+			return;
+		}
 		list_del_init(&gh->gh_list);
 		gh->gh_error = -EIO;
 		if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) 
@@ -824,7 +871,6 @@
 	if (op_done) {
 		spin_lock(&gl->gl_spin);
 		gl->gl_req_gh = NULL;
-		gl->gl_req_bh = NULL;
 		clear_bit(GLF_LOCK, &gl->gl_flags);
 		spin_unlock(&gl->gl_spin);
 	}
@@ -835,6 +881,17 @@
 		gfs2_holder_wake(gh);
 }
 
+static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
+				 unsigned int cur_state, unsigned int req_state,
+				 unsigned int flags)
+{
+	int ret = 0;
+	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
+		ret = sdp->sd_lockstruct.ls_ops->lm_lock(lock, cur_state,
+							 req_state, flags);
+	return ret;
+}
+
 /**
  * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
  * @gl: The glock in question
@@ -856,6 +913,8 @@
 
 	if (glops->go_xmote_th)
 		glops->go_xmote_th(gl);
+	if (state == LM_ST_DEFERRED && glops->go_inval)
+		glops->go_inval(gl, DIO_METADATA);
 
 	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
 	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
@@ -863,7 +922,6 @@
 	gfs2_assert_warn(sdp, state != gl->gl_state);
 
 	gfs2_glock_hold(gl);
-	gl->gl_req_bh = xmote_bh;
 
 	lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);
 
@@ -876,49 +934,13 @@
 		xmote_bh(gl, lck_ret);
 }
 
-/**
- * drop_bh - Called after a lock module unlock completes
- * @gl: the glock
- * @ret: the return status
- *
- * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
- * Doesn't drop the reference on the glock the top half took out
- *
- */
-
-static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
+static unsigned int gfs2_lm_unlock(struct gfs2_sbd *sdp, void *lock,
+				   unsigned int cur_state)
 {
-	struct gfs2_sbd *sdp = gl->gl_sbd;
-	const struct gfs2_glock_operations *glops = gl->gl_ops;
-	struct gfs2_holder *gh = gl->gl_req_gh;
-
-	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
-	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
-	gfs2_assert_warn(sdp, !ret);
-
-	state_change(gl, LM_ST_UNLOCKED);
-
-	if (glops->go_inval)
-		glops->go_inval(gl, DIO_METADATA);
-
-	if (gh) {
-		spin_lock(&gl->gl_spin);
-		list_del_init(&gh->gh_list);
-		gh->gh_error = 0;
-		spin_unlock(&gl->gl_spin);
-	}
-
-	spin_lock(&gl->gl_spin);
-	gfs2_demote_wake(gl);
-	gl->gl_req_gh = NULL;
-	gl->gl_req_bh = NULL;
-	clear_bit(GLF_LOCK, &gl->gl_flags);
-	spin_unlock(&gl->gl_spin);
-
-	gfs2_glock_put(gl);
-
-	if (gh)
-		gfs2_holder_wake(gh);
+	int ret = 0;
+	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
+		ret =  sdp->sd_lockstruct.ls_ops->lm_unlock(lock, cur_state);
+	return ret;
 }
 
 /**
@@ -935,13 +957,14 @@
 
 	if (glops->go_xmote_th)
 		glops->go_xmote_th(gl);
+	if (glops->go_inval)
+		glops->go_inval(gl, DIO_METADATA);
 
 	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
 	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
 	gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);
 
 	gfs2_glock_hold(gl);
-	gl->gl_req_bh = drop_bh;
 
 	ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);
 
@@ -964,16 +987,17 @@
 static void do_cancels(struct gfs2_holder *gh)
 {
 	struct gfs2_glock *gl = gh->gh_gl;
+	struct gfs2_sbd *sdp = gl->gl_sbd;
 
 	spin_lock(&gl->gl_spin);
 
 	while (gl->gl_req_gh != gh &&
 	       !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
 	       !list_empty(&gh->gh_list)) {
-		if (gl->gl_req_bh && !(gl->gl_req_gh &&
-				     (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
+		if (!(gl->gl_req_gh && (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
 			spin_unlock(&gl->gl_spin);
-			gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
+			if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
+				sdp->sd_lockstruct.ls_ops->lm_cancel(gl->gl_lock);
 			msleep(100);
 			spin_lock(&gl->gl_spin);
 		} else {
@@ -1041,7 +1065,6 @@
 
 		spin_lock(&gl->gl_spin);
 		gl->gl_req_gh = NULL;
-		gl->gl_req_bh = NULL;
 		clear_bit(GLF_LOCK, &gl->gl_flags);
 		run_queue(gl);
 		spin_unlock(&gl->gl_spin);
@@ -1428,6 +1451,14 @@
 		gfs2_glock_dq_uninit(&ghs[x]);
 }
 
+static int gfs2_lm_hold_lvb(struct gfs2_sbd *sdp, void *lock, char **lvbp)
+{
+	int error = -EIO;
+	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
+		error = sdp->sd_lockstruct.ls_ops->lm_hold_lvb(lock, lvbp);
+	return error;
+}
+
 /**
  * gfs2_lvb_hold - attach a LVB from a glock
  * @gl: The glock in question
@@ -1463,12 +1494,15 @@
 
 void gfs2_lvb_unhold(struct gfs2_glock *gl)
 {
+	struct gfs2_sbd *sdp = gl->gl_sbd;
+
 	gfs2_glock_hold(gl);
 	gfs2_glmutex_lock(gl);
 
 	gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
 	if (atomic_dec_and_test(&gl->gl_lvb_count)) {
-		gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
+		if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
+			sdp->sd_lockstruct.ls_ops->lm_unhold_lvb(gl->gl_lock, gl->gl_lvb);
 		gl->gl_lvb = NULL;
 		gfs2_glock_put(gl);
 	}
@@ -1534,8 +1568,7 @@
 		gl = gfs2_glock_find(sdp, &async->lc_name);
 		if (gfs2_assert_warn(sdp, gl))
 			return;
-		if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
-			gl->gl_req_bh(gl, async->lc_ret);
+		xmote_bh(gl, async->lc_ret);
 		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
 			gfs2_glock_put(gl);
 		up_read(&gfs2_umount_flush_sem);
@@ -1594,10 +1627,10 @@
 		gfs2_glock_hold(gl);
 		list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
 		atomic_inc(&sdp->sd_reclaim_count);
-	}
-	spin_unlock(&sdp->sd_reclaim_lock);
-
-	wake_up(&sdp->sd_reclaim_wq);
+		spin_unlock(&sdp->sd_reclaim_lock);
+		wake_up(&sdp->sd_reclaim_wq);
+	} else
+		spin_unlock(&sdp->sd_reclaim_lock);
 }
 
 /**
@@ -1897,7 +1930,6 @@
 		print_dbg(gi, "  gl_owner = -1\n");
 	print_dbg(gi, "  gl_ip = %lu\n", gl->gl_ip);
 	print_dbg(gi, "  req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
-	print_dbg(gi, "  req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
 	print_dbg(gi, "  lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
 	print_dbg(gi, "  object = %s\n", (gl->gl_object) ? "yes" : "no");
 	print_dbg(gi, "  reclaim = %s\n",
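
The glock.c diff above inlines the lock-module wrappers that used to live in
lm.c (deleted further below), each guarded so that nothing reaches the lock
module once SDF_SHUTDOWN is set. The following user-space mock shows the
shape of that dispatch idiom; the types and names here are invented for
illustration.

#include <stdio.h>
#include <stdbool.h>

struct lm_ops {
	int (*lm_lock)(void *lock, int req_state);
};

struct sbd {
	bool shutdown;			/* stands in for SDF_SHUTDOWN */
	const struct lm_ops *ops;	/* stands in for sd_lockstruct.ls_ops */
};

static int mock_lock(void *lock, int req_state)
{
	(void)lock;
	return req_state;		/* pretend the module granted it */
}

/* mirrors the shape of gfs2_lm_lock(): short-circuit after shutdown */
static int lm_lock(struct sbd *sdp, void *lock, int req_state)
{
	int ret = 0;
	if (!sdp->shutdown)
		ret = sdp->ops->lm_lock(lock, req_state);
	return ret;
}

int main(void)
{
	const struct lm_ops ops = { .lm_lock = mock_lock };
	struct sbd sdp = { .shutdown = false, .ops = &ops };

	printf("live:     %d\n", lm_lock(&sdp, NULL, 3));  /* dispatches: 3 */
	sdp.shutdown = true;
	printf("shutdown: %d\n", lm_lock(&sdp, NULL, 3));  /* skipped: 0 */
	return 0;
}

Note the defaults in the patch: gfs2_lm_get_lock() and gfs2_lm_hold_lvb()
fall back to -EIO when shut down, while gfs2_lm_lock() and gfs2_lm_unlock()
fall back to 0.
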
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 2f9c6d1..cdad3e6 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -32,24 +32,23 @@
 #define GLR_TRYFAILED		13
 #define GLR_CANCELED		14
 
-static inline int gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
+static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
 {
 	struct gfs2_holder *gh;
-	int locked = 0;
 	struct pid *pid;
 
 	/* Look in glock's list of holders for one with current task as owner */
 	spin_lock(&gl->gl_spin);
 	pid = task_pid(current);
 	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
-		if (gh->gh_owner_pid == pid) {
-			locked = 1;
-			break;
-		}
+		if (gh->gh_owner_pid == pid)
+			goto out;
 	}
+	gh = NULL;
+out:
 	spin_unlock(&gl->gl_spin);
 
-	return locked;
+	return gh;
 }
 
 static inline int gfs2_glock_is_held_excl(struct gfs2_glock *gl)
@@ -79,7 +78,6 @@
 int gfs2_glock_get(struct gfs2_sbd *sdp,
 		   u64 number, const struct gfs2_glock_operations *glops,
 		   int create, struct gfs2_glock **glp);
-void gfs2_glock_hold(struct gfs2_glock *gl);
 int gfs2_glock_put(struct gfs2_glock *gl);
 void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
 		      struct gfs2_holder *gh);
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index c663b7a..d31bada 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
- * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
+ * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
  *
  * This copyrighted material is made available to anyone wishing to use,
  * modify, copy, or redistribute it subject to the terms and conditions
@@ -126,7 +126,13 @@
 		return;
 
 	gfs2_meta_inval(gl);
-	gl->gl_vn++;
+	if (gl->gl_object == GFS2_I(gl->gl_sbd->sd_rindex))
+		gl->gl_sbd->sd_rindex_uptodate = 0;
+	else if (gl->gl_ops == &gfs2_rgrp_glops && gl->gl_object) {
+		struct gfs2_rgrpd *rgd = (struct gfs2_rgrpd *)gl->gl_object;
+
+		rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
+	}
 }
 
 /**
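
The glops.c hunk above drops the gl_vn version counter: rather than bumping a
number for readers to compare, the invalidate callback now clears an explicit
up-to-date flag (sd_rindex_uptodate, or GFS2_RDF_UPTODATE from the incore.h
hunk below). A toy model of the flag-based scheme:

#include <stdio.h>

#define GFS2_RDF_UPTODATE 0x04		/* as in the incore.h hunk below */

struct rgrpd {
	unsigned char rd_flags;
};

/* glock invalidation: just mark the cached copy stale */
static void go_inval(struct rgrpd *rgd)
{
	rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
}

/* reader: re-read from disk only when the flag is clear */
static void rgrp_read(struct rgrpd *rgd)
{
	if (!(rgd->rd_flags & GFS2_RDF_UPTODATE)) {
		/* ...re-read the resource group from disk... */
		rgd->rd_flags |= GFS2_RDF_UPTODATE;
		printf("re-read from disk\n");
	} else {
		printf("cached copy is current\n");
	}
}

int main(void)
{
	struct rgrpd rgd = { .rd_flags = 0 };

	rgrp_read(&rgd);	/* re-read */
	rgrp_read(&rgd);	/* cached */
	go_inval(&rgd);
	rgrp_read(&rgd);	/* re-read */
	return 0;
}
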
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 525dcae..9c2c0b9 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
- * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
+ * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
  *
  * This copyrighted material is made available to anyone wishing to use,
  * modify, copy, or redistribute it subject to the terms and conditions
@@ -44,7 +44,6 @@
 
 struct gfs2_log_operations {
 	void (*lo_add) (struct gfs2_sbd *sdp, struct gfs2_log_element *le);
-	void (*lo_incore_commit) (struct gfs2_sbd *sdp, struct gfs2_trans *tr);
 	void (*lo_before_commit) (struct gfs2_sbd *sdp);
 	void (*lo_after_commit) (struct gfs2_sbd *sdp, struct gfs2_ail *ai);
 	void (*lo_before_scan) (struct gfs2_jdesc *jd,
@@ -70,7 +69,6 @@
 };
 
 struct gfs2_rgrp_host {
-	u32 rg_flags;
 	u32 rg_free;
 	u32 rg_dinodes;
 	u64 rg_igeneration;
@@ -87,17 +85,17 @@
 	u32 rd_data;			/* num of data blocks in rgrp */
 	u32 rd_bitbytes;		/* number of bytes in data bitmaps */
 	struct gfs2_rgrp_host rd_rg;
-	u64 rd_rg_vn;
 	struct gfs2_bitmap *rd_bits;
 	unsigned int rd_bh_count;
 	struct mutex rd_mutex;
 	u32 rd_free_clone;
 	struct gfs2_log_element rd_le;
-	u32 rd_last_alloc_data;
-	u32 rd_last_alloc_meta;
+	u32 rd_last_alloc;
 	struct gfs2_sbd *rd_sbd;
-	unsigned long rd_flags;
-#define GFS2_RDF_CHECK        0x0001          /* Need to check for unlinked inodes */
+	unsigned char rd_flags;
+#define GFS2_RDF_CHECK        0x01      /* Need to check for unlinked inodes */
+#define GFS2_RDF_NOALLOC      0x02      /* rg prohibits allocation */
+#define GFS2_RDF_UPTODATE     0x04      /* rg is up to date */
 };
 
 enum gfs2_state_bits {
@@ -168,6 +166,8 @@
 	GLF_DIRTY		= 5,
 	GLF_DEMOTE_IN_PROGRESS	= 6,
 	GLF_LFLUSH		= 7,
+	GLF_WAITERS2		= 8,
+	GLF_CONV_DEADLK		= 9,
 };
 
 struct gfs2_glock {
@@ -187,18 +187,15 @@
 	struct list_head gl_holders;
 	struct list_head gl_waiters1;	/* HIF_MUTEX */
 	struct list_head gl_waiters3;	/* HIF_PROMOTE */
-	int gl_waiters2;		/* GIF_DEMOTE */
 
 	const struct gfs2_glock_operations *gl_ops;
 
 	struct gfs2_holder *gl_req_gh;
-	gfs2_glop_bh_t gl_req_bh;
 
 	void *gl_lock;
 	char *gl_lvb;
 	atomic_t gl_lvb_count;
 
-	u64 gl_vn;
 	unsigned long gl_stamp;
 	unsigned long gl_tchange;
 	void *gl_object;
@@ -213,6 +210,8 @@
 	struct delayed_work gl_work;
 };
 
+#define GFS2_MIN_LVB_SIZE 32	/* Min size of LVB that gfs2 supports */
+
 struct gfs2_alloc {
 	/* Quota stuff */
 
@@ -241,14 +240,9 @@
 
 struct gfs2_dinode_host {
 	u64 di_size;		/* number of bytes in file */
-	u64 di_blocks;		/* number of blocks in file */
-	u64 di_goal_meta;	/* rgrp to alloc from next */
-	u64 di_goal_data;	/* data block goal */
 	u64 di_generation;	/* generation number for NFS */
 	u32 di_flags;		/* GFS2_DIF_... */
-	u16 di_height;		/* height of metadata */
 	/* These only apply to directories  */
-	u16 di_depth;		/* Number of bits in the table */
 	u32 di_entries;		/* The number of entries in the directory */
 	u64 di_eattr;		/* extended attribute block number */
 };
@@ -265,9 +259,10 @@
 	struct gfs2_holder i_iopen_gh;
 	struct gfs2_holder i_gh; /* for prepare/commit_write only */
 	struct gfs2_alloc *i_alloc;
-	u64 i_last_rg_alloc;
-
+	u64 i_goal;	/* goal block for allocations */
 	struct rw_semaphore i_rw_mutex;
+	u8 i_height;
+	u8 i_depth;
 };
 
 /*
@@ -490,9 +485,9 @@
 	u32 sd_qc_per_block;
 	u32 sd_max_dirres;	/* Max blocks needed to add a directory entry */
 	u32 sd_max_height;	/* Max height of a file's metadata tree */
-	u64 sd_heightsize[GFS2_MAX_META_HEIGHT];
+	u64 sd_heightsize[GFS2_MAX_META_HEIGHT + 1];
 	u32 sd_max_jheight; /* Max height of journaled file's meta tree */
-	u64 sd_jheightsize[GFS2_MAX_META_HEIGHT];
+	u64 sd_jheightsize[GFS2_MAX_META_HEIGHT + 1];
 
 	struct gfs2_args sd_args;	/* Mount arguments */
 	struct gfs2_tune sd_tune;	/* Filesystem tuning structure */
@@ -533,7 +528,7 @@
 
 	/* Resource group stuff */
 
-	u64 sd_rindex_vn;
+	int sd_rindex_uptodate;
 	spinlock_t sd_rindex_spin;
 	struct mutex sd_rindex_mutex;
 	struct list_head sd_rindex_list;
@@ -637,9 +632,6 @@
 
 	/* Counters */
 
-	atomic_t sd_glock_count;
-	atomic_t sd_glock_held_count;
-	atomic_t sd_inode_count;
 	atomic_t sd_reclaimed;
 
 	char sd_fsname[GFS2_FSNAME_LEN];
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 37725ad..3a9ef52 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
- * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
+ * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
  *
  * This copyrighted material is made available to anyone wishing to use,
  * modify, copy, or redistribute it subject to the terms and conditions
@@ -149,7 +149,8 @@
 	} else if (S_ISLNK(mode)) {
 		inode->i_op = &gfs2_symlink_iops;
 	} else {
-		inode->i_op = &gfs2_dev_iops;
+		inode->i_op = &gfs2_file_iops;
+		init_special_inode(inode, inode->i_mode, inode->i_rdev);
 	}
 
 	unlock_new_inode(inode);
@@ -248,12 +249,10 @@
 {
 	struct gfs2_dinode_host *di = &ip->i_di;
 	const struct gfs2_dinode *str = buf;
+	u16 height, depth;
 
-	if (ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)) {
-		if (gfs2_consist_inode(ip))
-			gfs2_dinode_print(ip);
-		return -EIO;
-	}
+	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
+		goto corrupt;
 	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
 	ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
 	ip->i_inode.i_rdev = 0;
@@ -275,8 +274,7 @@
 	ip->i_inode.i_nlink = be32_to_cpu(str->di_nlink);
 	di->di_size = be64_to_cpu(str->di_size);
 	i_size_write(&ip->i_inode, di->di_size);
-	di->di_blocks = be64_to_cpu(str->di_blocks);
-	gfs2_set_inode_blocks(&ip->i_inode);
+	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
 	ip->i_inode.i_atime.tv_sec = be64_to_cpu(str->di_atime);
 	ip->i_inode.i_atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
 	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
@@ -284,15 +282,20 @@
 	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
 	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);
 
-	di->di_goal_meta = be64_to_cpu(str->di_goal_meta);
-	di->di_goal_data = be64_to_cpu(str->di_goal_data);
+	ip->i_goal = be64_to_cpu(str->di_goal_meta);
 	di->di_generation = be64_to_cpu(str->di_generation);
 
 	di->di_flags = be32_to_cpu(str->di_flags);
 	gfs2_set_inode_flags(&ip->i_inode);
-	di->di_height = be16_to_cpu(str->di_height);
+	height = be16_to_cpu(str->di_height);
+	if (unlikely(height > GFS2_MAX_META_HEIGHT))
+		goto corrupt;
+	ip->i_height = (u8)height;
 
-	di->di_depth = be16_to_cpu(str->di_depth);
+	depth = be16_to_cpu(str->di_depth);
+	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
+		goto corrupt;
+	ip->i_depth = (u8)depth;
 	di->di_entries = be32_to_cpu(str->di_entries);
 
 	di->di_eattr = be64_to_cpu(str->di_eattr);
@@ -300,6 +303,10 @@
 		gfs2_set_aops(&ip->i_inode);
 
 	return 0;
+corrupt:
+	if (gfs2_consist_inode(ip))
+		gfs2_dinode_print(ip);
+	return -EIO;
 }
 
 /**
@@ -337,13 +344,15 @@
 	struct gfs2_rgrpd *rgd;
 	int error;
 
-	if (ip->i_di.di_blocks != 1) {
+	if (gfs2_get_inode_blocks(&ip->i_inode) != 1) {
 		if (gfs2_consist_inode(ip))
 			gfs2_dinode_print(ip);
 		return -EIO;
 	}
 
 	al = gfs2_alloc_get(ip);
+	if (!al)
+		return -ENOMEM;
 
 	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
 	if (error)
@@ -487,7 +496,7 @@
 		return dir;
 	}
 
-	if (gfs2_glock_is_locked_by_me(dip->i_gl) == 0) {
+	if (gfs2_glock_is_locked_by_me(dip->i_gl) == NULL) {
 		error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
 		if (error)
 			return ERR_PTR(error);
@@ -818,7 +827,8 @@
 	int error;
 
 	munge_mode_uid_gid(dip, &mode, &uid, &gid);
-	gfs2_alloc_get(dip);
+	if (!gfs2_alloc_get(dip))
+		return -ENOMEM;
 
 	error = gfs2_quota_lock(dip, uid, gid);
 	if (error)
@@ -853,6 +863,8 @@
 	int error;
 
 	al = gfs2_alloc_get(dip);
+	if (!al)
+		return -ENOMEM;
 
 	error = gfs2_quota_lock(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
 	if (error)
@@ -1219,7 +1231,7 @@
 
 	x = ip->i_di.di_size + 1;
 	if (x > *len) {
-		*buf = kmalloc(x, GFP_KERNEL);
+		*buf = kmalloc(x, GFP_NOFS);
 		if (!*buf) {
 			error = -ENOMEM;
 			goto out_brelse;
@@ -1391,21 +1403,21 @@
 	str->di_gid = cpu_to_be32(ip->i_inode.i_gid);
 	str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink);
 	str->di_size = cpu_to_be64(di->di_size);
-	str->di_blocks = cpu_to_be64(di->di_blocks);
+	str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
 	str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
 	str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec);
 	str->di_ctime = cpu_to_be64(ip->i_inode.i_ctime.tv_sec);
 
-	str->di_goal_meta = cpu_to_be64(di->di_goal_meta);
-	str->di_goal_data = cpu_to_be64(di->di_goal_data);
+	str->di_goal_meta = cpu_to_be64(ip->i_goal);
+	str->di_goal_data = cpu_to_be64(ip->i_goal);
 	str->di_generation = cpu_to_be64(di->di_generation);
 
 	str->di_flags = cpu_to_be32(di->di_flags);
-	str->di_height = cpu_to_be16(di->di_height);
+	str->di_height = cpu_to_be16(ip->i_height);
 	str->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) &&
 					     !(ip->i_di.di_flags & GFS2_DIF_EXHASH) ?
 					     GFS2_FORMAT_DE : 0);
-	str->di_depth = cpu_to_be16(di->di_depth);
+	str->di_depth = cpu_to_be16(ip->i_depth);
 	str->di_entries = cpu_to_be32(di->di_entries);
 
 	str->di_eattr = cpu_to_be64(di->di_eattr);
@@ -1423,15 +1435,13 @@
 	printk(KERN_INFO "  no_addr = %llu\n",
 	       (unsigned long long)ip->i_no_addr);
 	printk(KERN_INFO "  di_size = %llu\n", (unsigned long long)di->di_size);
-	printk(KERN_INFO "  di_blocks = %llu\n",
-	       (unsigned long long)di->di_blocks);
-	printk(KERN_INFO "  di_goal_meta = %llu\n",
-	       (unsigned long long)di->di_goal_meta);
-	printk(KERN_INFO "  di_goal_data = %llu\n",
-	       (unsigned long long)di->di_goal_data);
+	printk(KERN_INFO "  blocks = %llu\n",
+	       (unsigned long long)gfs2_get_inode_blocks(&ip->i_inode));
+	printk(KERN_INFO "  i_goal = %llu\n",
+	       (unsigned long long)ip->i_goal);
 	printk(KERN_INFO "  di_flags = 0x%.8X\n", di->di_flags);
-	printk(KERN_INFO "  di_height = %u\n", di->di_height);
-	printk(KERN_INFO "  di_depth = %u\n", di->di_depth);
+	printk(KERN_INFO "  i_height = %u\n", ip->i_height);
+	printk(KERN_INFO "  i_depth = %u\n", ip->i_depth);
 	printk(KERN_INFO "  di_entries = %u\n", di->di_entries);
 	printk(KERN_INFO "  di_eattr = %llu\n",
 	       (unsigned long long)di->di_eattr);
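
gfs2_dinode_in() above now narrows the on-disk u16 height and depth into u8
in-core fields, so it validates them first and returns -EIO on corruption
instead of truncating silently. A sketch of that check follows; the two
limits are the values from the GFS2 headers and the error handling is
simplified.

#include <stdio.h>
#include <stdint.h>

#define GFS2_MAX_META_HEIGHT 10
#define GFS2_DIR_MAX_DEPTH   17

/* mirrors the new "goto corrupt" paths in gfs2_dinode_in() */
static int dinode_in(uint16_t di_height, uint16_t di_depth)
{
	uint8_t i_height, i_depth;

	if (di_height > GFS2_MAX_META_HEIGHT || di_depth > GFS2_DIR_MAX_DEPTH)
		return -1;	/* kernel: gfs2_consist_inode() and -EIO */
	i_height = (uint8_t)di_height;
	i_depth = (uint8_t)di_depth;
	printf("ok: height=%u depth=%u\n", (unsigned)i_height,
	       (unsigned)i_depth);
	return 0;
}

int main(void)
{
	dinode_in(3, 8);		/* accepted */
	if (dinode_in(400, 8) < 0)	/* 400 would truncate to 144 as a u8 */
		printf("corrupt dinode rejected\n");
	return 0;
}
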
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
index d446506..580da45 100644
--- a/fs/gfs2/inode.h
+++ b/fs/gfs2/inode.h
@@ -10,9 +10,11 @@
 #ifndef __INODE_DOT_H__
 #define __INODE_DOT_H__
 
+#include "util.h"
+
 static inline int gfs2_is_stuffed(const struct gfs2_inode *ip)
 {
-	return !ip->i_di.di_height;
+	return !ip->i_height;
 }
 
 static inline int gfs2_is_jdata(const struct gfs2_inode *ip)
@@ -37,13 +39,25 @@
 	return S_ISDIR(ip->i_inode.i_mode);
 }
 
-static inline void gfs2_set_inode_blocks(struct inode *inode)
+static inline void gfs2_set_inode_blocks(struct inode *inode, u64 blocks)
 {
-	struct gfs2_inode *ip = GFS2_I(inode);
-	inode->i_blocks = ip->i_di.di_blocks <<
+	inode->i_blocks = blocks <<
 		(GFS2_SB(inode)->sd_sb.sb_bsize_shift - GFS2_BASIC_BLOCK_SHIFT);
 }
 
+static inline u64 gfs2_get_inode_blocks(const struct inode *inode)
+{
+	return inode->i_blocks >>
+		(GFS2_SB(inode)->sd_sb.sb_bsize_shift - GFS2_BASIC_BLOCK_SHIFT);
+}
+
+static inline void gfs2_add_inode_blocks(struct inode *inode, s64 change)
+{
+	gfs2_assert(GFS2_SB(inode), (change >= 0 || inode->i_blocks > -change));
+	change *= (GFS2_SB(inode)->sd_sb.sb_bsize/GFS2_BASIC_BLOCK);
+	inode->i_blocks += change;
+}
+
 static inline int gfs2_check_inum(const struct gfs2_inode *ip, u64 no_addr,
 				  u64 no_formal_ino)
 {
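
The inode.h helpers above stop carrying di_blocks in gfs2_dinode_host and
derive the count from inode->i_blocks, which the VFS keeps in 512-byte basic
blocks; converting is a shift by (sb_bsize_shift - GFS2_BASIC_BLOCK_SHIFT).
A runnable check of the round trip, with a hypothetical 4096-byte block size:

#include <stdio.h>
#include <stdint.h>

#define GFS2_BASIC_BLOCK_SHIFT 9	/* i_blocks counts 512-byte units */

int main(void)
{
	unsigned int sb_bsize_shift = 12;	/* 4096-byte fs blocks */
	unsigned int shift = sb_bsize_shift - GFS2_BASIC_BLOCK_SHIFT;
	uint64_t i_blocks;
	int64_t change;

	/* gfs2_set_inode_blocks(inode, 10) */
	i_blocks = (uint64_t)10 << shift;		/* 80 sectors */

	/* gfs2_add_inode_blocks(inode, -1): scale, then apply */
	change = -1;
	change *= (1 << shift);				/* bsize / 512 = 8 */
	i_blocks += (uint64_t)change;			/* 72 sectors */

	/* gfs2_get_inode_blocks(inode) */
	printf("fs blocks = %llu\n", (unsigned long long)(i_blocks >> shift));
	return 0;
}

gfs2_add_inode_blocks() also asserts that a negative change never underflows
i_blocks, which is what lets the patch drop the open-coded
"if (!di_blocks) gfs2_consist_inode()" checks throughout bmap.c, dir.c and
eattr.c.
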
diff --git a/fs/gfs2/lm.c b/fs/gfs2/lm.c
deleted file mode 100644
index cfcc39b..0000000
--- a/fs/gfs2/lm.c
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
- * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
- *
- * This copyrighted material is made available to anyone wishing to use,
- * modify, copy, or redistribute it subject to the terms and conditions
- * of the GNU General Public License version 2.
- */
-
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/completion.h>
-#include <linux/buffer_head.h>
-#include <linux/delay.h>
-#include <linux/gfs2_ondisk.h>
-#include <linux/lm_interface.h>
-
-#include "gfs2.h"
-#include "incore.h"
-#include "glock.h"
-#include "lm.h"
-#include "super.h"
-#include "util.h"
-
-/**
- * gfs2_lm_mount - mount a locking protocol
- * @sdp: the filesystem
- * @args: mount arguements
- * @silent: if 1, don't complain if the FS isn't a GFS2 fs
- *
- * Returns: errno
- */
-
-int gfs2_lm_mount(struct gfs2_sbd *sdp, int silent)
-{
-	char *proto = sdp->sd_proto_name;
-	char *table = sdp->sd_table_name;
-	int flags = 0;
-	int error;
-
-	if (sdp->sd_args.ar_spectator)
-		flags |= LM_MFLAG_SPECTATOR;
-
-	fs_info(sdp, "Trying to join cluster \"%s\", \"%s\"\n", proto, table);
-
-	error = gfs2_mount_lockproto(proto, table, sdp->sd_args.ar_hostdata,
-				     gfs2_glock_cb, sdp,
-				     GFS2_MIN_LVB_SIZE, flags,
-				     &sdp->sd_lockstruct, &sdp->sd_kobj);
-	if (error) {
-		fs_info(sdp, "can't mount proto=%s, table=%s, hostdata=%s\n",
-			proto, table, sdp->sd_args.ar_hostdata);
-		goto out;
-	}
-
-	if (gfs2_assert_warn(sdp, sdp->sd_lockstruct.ls_lockspace) ||
-	    gfs2_assert_warn(sdp, sdp->sd_lockstruct.ls_ops) ||
-	    gfs2_assert_warn(sdp, sdp->sd_lockstruct.ls_lvb_size >=
-				  GFS2_MIN_LVB_SIZE)) {
-		gfs2_unmount_lockproto(&sdp->sd_lockstruct);
-		goto out;
-	}
-
-	if (sdp->sd_args.ar_spectator)
-		snprintf(sdp->sd_fsname, GFS2_FSNAME_LEN, "%s.s", table);
-	else
-		snprintf(sdp->sd_fsname, GFS2_FSNAME_LEN, "%s.%u", table,
-			 sdp->sd_lockstruct.ls_jid);
-
-	fs_info(sdp, "Joined cluster. Now mounting FS...\n");
-
-	if ((sdp->sd_lockstruct.ls_flags & LM_LSFLAG_LOCAL) &&
-	    !sdp->sd_args.ar_ignore_local_fs) {
-		sdp->sd_args.ar_localflocks = 1;
-		sdp->sd_args.ar_localcaching = 1;
-	}
-
-out:
-	return error;
-}
-
-void gfs2_lm_others_may_mount(struct gfs2_sbd *sdp)
-{
-	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
-		sdp->sd_lockstruct.ls_ops->lm_others_may_mount(
-					sdp->sd_lockstruct.ls_lockspace);
-}
-
-void gfs2_lm_unmount(struct gfs2_sbd *sdp)
-{
-	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
-		gfs2_unmount_lockproto(&sdp->sd_lockstruct);
-}
-
-int gfs2_lm_withdraw(struct gfs2_sbd *sdp, char *fmt, ...)
-{
-	va_list args;
-
-	if (test_and_set_bit(SDF_SHUTDOWN, &sdp->sd_flags))
-		return 0;
-
-	va_start(args, fmt);
-	vprintk(fmt, args);
-	va_end(args);
-
-	fs_err(sdp, "about to withdraw this file system\n");
-	BUG_ON(sdp->sd_args.ar_debug);
-
-	fs_err(sdp, "telling LM to withdraw\n");
-	gfs2_withdraw_lockproto(&sdp->sd_lockstruct);
-	fs_err(sdp, "withdrawn\n");
-	dump_stack();
-
-	return -1;
-}
-
-int gfs2_lm_get_lock(struct gfs2_sbd *sdp, struct lm_lockname *name,
-		     void **lockp)
-{
-	int error = -EIO;
-	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
-		error = sdp->sd_lockstruct.ls_ops->lm_get_lock(
-				sdp->sd_lockstruct.ls_lockspace, name, lockp);
-	return error;
-}
-
-void gfs2_lm_put_lock(struct gfs2_sbd *sdp, void *lock)
-{
-	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
-		sdp->sd_lockstruct.ls_ops->lm_put_lock(lock);
-}
-
-unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
-			  unsigned int cur_state, unsigned int req_state,
-			  unsigned int flags)
-{
-	int ret = 0;
-	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
-		ret = sdp->sd_lockstruct.ls_ops->lm_lock(lock, cur_state,
-							 req_state, flags);
-	return ret;
-}
-
-unsigned int gfs2_lm_unlock(struct gfs2_sbd *sdp, void *lock,
-			    unsigned int cur_state)
-{
-	int ret = 0;
-	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
-		ret =  sdp->sd_lockstruct.ls_ops->lm_unlock(lock, cur_state);
-	return ret;
-}
-
-void gfs2_lm_cancel(struct gfs2_sbd *sdp, void *lock)
-{
-	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
-		sdp->sd_lockstruct.ls_ops->lm_cancel(lock);
-}
-
-int gfs2_lm_hold_lvb(struct gfs2_sbd *sdp, void *lock, char **lvbp)
-{
-	int error = -EIO;
-	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
-		error = sdp->sd_lockstruct.ls_ops->lm_hold_lvb(lock, lvbp);
-	return error;
-}
-
-void gfs2_lm_unhold_lvb(struct gfs2_sbd *sdp, void *lock, char *lvb)
-{
-	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
-		sdp->sd_lockstruct.ls_ops->lm_unhold_lvb(lock, lvb);
-}
-
-int gfs2_lm_plock_get(struct gfs2_sbd *sdp, struct lm_lockname *name,
-		      struct file *file, struct file_lock *fl)
-{
-	int error = -EIO;
-	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
-		error = sdp->sd_lockstruct.ls_ops->lm_plock_get(
-				sdp->sd_lockstruct.ls_lockspace, name, file, fl);
-	return error;
-}
-
-int gfs2_lm_plock(struct gfs2_sbd *sdp, struct lm_lockname *name,
-		  struct file *file, int cmd, struct file_lock *fl)
-{
-	int error = -EIO;
-	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
-		error = sdp->sd_lockstruct.ls_ops->lm_plock(
-				sdp->sd_lockstruct.ls_lockspace, name, file, cmd, fl);
-	return error;
-}
-
-int gfs2_lm_punlock(struct gfs2_sbd *sdp, struct lm_lockname *name,
-		    struct file *file, struct file_lock *fl)
-{
-	int error = -EIO;
-	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
-		error = sdp->sd_lockstruct.ls_ops->lm_punlock(
-				sdp->sd_lockstruct.ls_lockspace, name, file, fl);
-	return error;
-}
-
-void gfs2_lm_recovery_done(struct gfs2_sbd *sdp, unsigned int jid,
-			   unsigned int message)
-{
-	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
-		sdp->sd_lockstruct.ls_ops->lm_recovery_done(
-			sdp->sd_lockstruct.ls_lockspace, jid, message);
-}
-
diff --git a/fs/gfs2/lm.h b/fs/gfs2/lm.h
deleted file mode 100644
index 21cdc30..0000000
--- a/fs/gfs2/lm.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
- * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
- *
- * This copyrighted material is made available to anyone wishing to use,
- * modify, copy, or redistribute it subject to the terms and conditions
- * of the GNU General Public License version 2.
- */
-
-#ifndef __LM_DOT_H__
-#define __LM_DOT_H__
-
-struct gfs2_sbd;
-
-#define GFS2_MIN_LVB_SIZE 32
-
-int gfs2_lm_mount(struct gfs2_sbd *sdp, int silent);
-void gfs2_lm_others_may_mount(struct gfs2_sbd *sdp);
-void gfs2_lm_unmount(struct gfs2_sbd *sdp);
-int gfs2_lm_withdraw(struct gfs2_sbd *sdp, char *fmt, ...)
-				__attribute__ ((format(printf, 2, 3)));
-int gfs2_lm_get_lock(struct gfs2_sbd *sdp, struct lm_lockname *name,
-		     void **lockp);
-void gfs2_lm_put_lock(struct gfs2_sbd *sdp, void *lock);
-unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
-			 unsigned int cur_state, unsigned int req_state,
-			 unsigned int flags);
-unsigned int gfs2_lm_unlock(struct gfs2_sbd *sdp, void *lock,
-			   unsigned int cur_state);
-void gfs2_lm_cancel(struct gfs2_sbd *sdp, void *lock);
-int gfs2_lm_hold_lvb(struct gfs2_sbd *sdp, void *lock, char **lvbp);
-void gfs2_lm_unhold_lvb(struct gfs2_sbd *sdp, void *lock, char *lvb);
-int gfs2_lm_plock_get(struct gfs2_sbd *sdp, struct lm_lockname *name,
-		      struct file *file, struct file_lock *fl);
-int gfs2_lm_plock(struct gfs2_sbd *sdp, struct lm_lockname *name,
-		  struct file *file, int cmd, struct file_lock *fl);
-int gfs2_lm_punlock(struct gfs2_sbd *sdp, struct lm_lockname *name,
-		    struct file *file, struct file_lock *fl);
-void gfs2_lm_recovery_done(struct gfs2_sbd *sdp, unsigned int jid,
-			   unsigned int message);
-
-#endif /* __LM_DOT_H__ */
diff --git a/fs/gfs2/locking/dlm/lock.c b/fs/gfs2/locking/dlm/lock.c
index 542a797..cf7ea8a 100644
--- a/fs/gfs2/locking/dlm/lock.c
+++ b/fs/gfs2/locking/dlm/lock.c
@@ -137,7 +137,8 @@
 
 		/* Conversion deadlock avoidance by DLM */
 
-		if (!test_bit(LFL_FORCE_PROMOTE, &lp->flags) &&
+		if (!(lp->ls->fsflags & LM_MFLAG_CONV_NODROP) &&
+		    !test_bit(LFL_FORCE_PROMOTE, &lp->flags) &&
 		    !(lkf & DLM_LKF_NOQUEUE) &&
 		    cur > DLM_LOCK_NL && req > DLM_LOCK_NL && cur != req)
 			lkf |= DLM_LKF_CONVDEADLK;
@@ -164,7 +165,7 @@
 {
 	struct gdlm_lock *lp;
 
-	lp = kzalloc(sizeof(struct gdlm_lock), GFP_KERNEL);
+	lp = kzalloc(sizeof(struct gdlm_lock), GFP_NOFS);
 	if (!lp)
 		return -ENOMEM;
 
@@ -382,7 +383,7 @@
 {
 	char *lvb;
 
-	lvb = kzalloc(GDLM_LVB_SIZE, GFP_KERNEL);
+	lvb = kzalloc(GDLM_LVB_SIZE, GFP_NOFS);
 	if (!lvb)
 		return -ENOMEM;
 
diff --git a/fs/gfs2/locking/dlm/lock_dlm.h b/fs/gfs2/locking/dlm/lock_dlm.h
index 9e8265d..58fcf8c 100644
--- a/fs/gfs2/locking/dlm/lock_dlm.h
+++ b/fs/gfs2/locking/dlm/lock_dlm.h
@@ -183,5 +183,10 @@
 		struct file_lock *);
 int gdlm_punlock(void *, struct lm_lockname *, struct file *,
 		struct file_lock *);
+
+/* mount.c */
+
+extern const struct lm_lockops gdlm_ops;
+
 #endif
 
diff --git a/fs/gfs2/locking/dlm/main.c b/fs/gfs2/locking/dlm/main.c
index a0e7eda..36a2258 100644
--- a/fs/gfs2/locking/dlm/main.c
+++ b/fs/gfs2/locking/dlm/main.c
@@ -11,8 +11,6 @@
 
 #include "lock_dlm.h"
 
-extern struct lm_lockops gdlm_ops;
-
 static int __init init_lock_dlm(void)
 {
 	int error;
diff --git a/fs/gfs2/locking/dlm/sysfs.c b/fs/gfs2/locking/dlm/sysfs.c
index a87b098..8479da4 100644
--- a/fs/gfs2/locking/dlm/sysfs.c
+++ b/fs/gfs2/locking/dlm/sysfs.c
@@ -12,8 +12,6 @@
 
 #include "lock_dlm.h"
 
-extern struct lm_lockops gdlm_ops;
-
 static ssize_t proto_name_show(struct gdlm_ls *ls, char *buf)
 {
 	return sprintf(buf, "%s\n", gdlm_ops.lm_proto_name);
diff --git a/fs/gfs2/locking/dlm/thread.c b/fs/gfs2/locking/dlm/thread.c
index 521694f..e53db6f 100644
--- a/fs/gfs2/locking/dlm/thread.c
+++ b/fs/gfs2/locking/dlm/thread.c
@@ -135,7 +135,15 @@
 			 lp->lksb.sb_status, lp->lockname.ln_type,
 			 (unsigned long long)lp->lockname.ln_number,
 			 lp->flags);
-		return;
+		if (lp->lksb.sb_status == -EDEADLOCK &&
+		    lp->ls->fsflags & LM_MFLAG_CONV_NODROP) {
+			lp->req = lp->cur;
+			acb.lc_ret |= LM_OUT_CONV_DEADLK;
+			if (lp->cur == DLM_LOCK_IV)
+				lp->lksb.sb_lkid = 0;
+			goto out;
+		} else
+			return;
 	}
 
 	/*
diff --git a/fs/gfs2/locking/nolock/main.c b/fs/gfs2/locking/nolock/main.c
index d3b8ce6..284a5ec 100644
--- a/fs/gfs2/locking/nolock/main.c
+++ b/fs/gfs2/locking/nolock/main.c
@@ -140,7 +140,7 @@
 	struct nolock_lockspace *nl = lock;
 	int error = 0;
 
-	*lvbp = kzalloc(nl->nl_lvb_size, GFP_KERNEL);
+	*lvbp = kzalloc(nl->nl_lvb_size, GFP_NOFS);
 	if (!*lvbp)
 		error = -ENOMEM;
 
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 161ab6f..548264b 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -769,8 +769,8 @@
 	sdp->sd_log_commited_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
 	gfs2_assert_withdraw(sdp, ((int)sdp->sd_log_commited_revoke) >= 0);
 	reserved = calc_reserved(sdp);
+	gfs2_assert_withdraw(sdp, sdp->sd_log_blks_reserved + tr->tr_reserved >= reserved);
 	unused = sdp->sd_log_blks_reserved - reserved + tr->tr_reserved;
-	gfs2_assert_withdraw(sdp, unused >= 0);
 	atomic_add(unused, &sdp->sd_log_blks_free);
 	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
 			     sdp->sd_jdesc->jd_blocks);
@@ -779,6 +779,21 @@
 	gfs2_log_unlock(sdp);
 }
 
+static void buf_lo_incore_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
+{
+	struct list_head *head = &tr->tr_list_buf;
+	struct gfs2_bufdata *bd;
+
+	gfs2_log_lock(sdp);
+	while (!list_empty(head)) {
+		bd = list_entry(head->next, struct gfs2_bufdata, bd_list_tr);
+		list_del_init(&bd->bd_list_tr);
+		tr->tr_num_buf--;
+	}
+	gfs2_log_unlock(sdp);
+	gfs2_assert_warn(sdp, !tr->tr_num_buf);
+}
+
 /**
  * gfs2_log_commit - Commit a transaction to the log
  * @sdp: the filesystem
@@ -790,7 +805,7 @@
 void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
 {
 	log_refund(sdp, tr);
-	lops_incore_commit(sdp, tr);
+	buf_lo_incore_commit(sdp, tr);
 
 	sdp->sd_vfs->s_dirt = 1;
 	up_read(&sdp->sd_log_flush_lock);
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index fae59d6..4390f6f 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -152,21 +152,6 @@
 	unlock_buffer(bd->bd_bh);
 }
 
-static void buf_lo_incore_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
-{
-	struct list_head *head = &tr->tr_list_buf;
-	struct gfs2_bufdata *bd;
-
-	gfs2_log_lock(sdp);
-	while (!list_empty(head)) {
-		bd = list_entry(head->next, struct gfs2_bufdata, bd_list_tr);
-		list_del_init(&bd->bd_list_tr);
-		tr->tr_num_buf--;
-	}
-	gfs2_log_unlock(sdp);
-	gfs2_assert_warn(sdp, !tr->tr_num_buf);
-}
-
 static void buf_lo_before_commit(struct gfs2_sbd *sdp)
 {
 	struct buffer_head *bh;
@@ -419,8 +404,10 @@
 			blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));
 
 			error = gfs2_revoke_add(sdp, blkno, start);
-			if (error < 0)
+			if (error < 0) {
+				brelse(bh);
 				return error;
+			}
 			else if (error)
 				sdp->sd_found_revokes++;
 
@@ -737,7 +724,6 @@
 
 const struct gfs2_log_operations gfs2_buf_lops = {
 	.lo_add = buf_lo_add,
-	.lo_incore_commit = buf_lo_incore_commit,
 	.lo_before_commit = buf_lo_before_commit,
 	.lo_after_commit = buf_lo_after_commit,
 	.lo_before_scan = buf_lo_before_scan,
@@ -763,7 +749,6 @@
 
 const struct gfs2_log_operations gfs2_databuf_lops = {
 	.lo_add = databuf_lo_add,
-	.lo_incore_commit = buf_lo_incore_commit,
 	.lo_before_commit = databuf_lo_before_commit,
 	.lo_after_commit = databuf_lo_after_commit,
 	.lo_scan_elements = databuf_lo_scan_elements,
diff --git a/fs/gfs2/lops.h b/fs/gfs2/lops.h
index 41a00df..3c0b273 100644
--- a/fs/gfs2/lops.h
+++ b/fs/gfs2/lops.h
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
- * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
+ * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
  *
  * This copyrighted material is made available to anyone wishing to use,
  * modify, copy, or redistribute it subject to the terms and conditions
@@ -57,15 +57,6 @@
 		le->le_ops->lo_add(sdp, le);
 }
 
-static inline void lops_incore_commit(struct gfs2_sbd *sdp,
-				      struct gfs2_trans *tr)
-{
-	int x;
-	for (x = 0; gfs2_log_ops[x]; x++)
-		if (gfs2_log_ops[x]->lo_incore_commit)
-			gfs2_log_ops[x]->lo_incore_commit(sdp, tr);
-}
-
 static inline void lops_before_commit(struct gfs2_sbd *sdp)
 {
 	int x;
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 9c7765c..053e2eb 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -89,6 +89,12 @@
 	if (!gfs2_bufdata_cachep)
 		goto fail;
 
+	gfs2_rgrpd_cachep = kmem_cache_create("gfs2_rgrpd",
+					      sizeof(struct gfs2_rgrpd),
+					      0, 0, NULL);
+	if (!gfs2_rgrpd_cachep)
+		goto fail;
+
 	error = register_filesystem(&gfs2_fs_type);
 	if (error)
 		goto fail;
@@ -108,6 +114,9 @@
 fail:
 	gfs2_glock_exit();
 
+	if (gfs2_rgrpd_cachep)
+		kmem_cache_destroy(gfs2_rgrpd_cachep);
+
 	if (gfs2_bufdata_cachep)
 		kmem_cache_destroy(gfs2_bufdata_cachep);
 
@@ -133,6 +142,7 @@
 	unregister_filesystem(&gfs2_fs_type);
 	unregister_filesystem(&gfs2meta_fs_type);
 
+	kmem_cache_destroy(gfs2_rgrpd_cachep);
 	kmem_cache_destroy(gfs2_bufdata_cachep);
 	kmem_cache_destroy(gfs2_inode_cachep);
 	kmem_cache_destroy(gfs2_glock_cachep);
diff --git a/fs/gfs2/ops_address.c b/fs/gfs2/ops_address.c
index ac772b6..90a04a6 100644
--- a/fs/gfs2/ops_address.c
+++ b/fs/gfs2/ops_address.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
- * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
+ * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
  *
  * This copyrighted material is made available to anyone wishing to use,
  * modify, copy, or redistribute it subject to the terms and conditions
@@ -21,7 +21,6 @@
 #include <linux/gfs2_ondisk.h>
 #include <linux/lm_interface.h>
 #include <linux/backing-dev.h>
-#include <linux/pagevec.h>
 
 #include "gfs2.h"
 #include "incore.h"
@@ -104,11 +103,9 @@
 	loff_t i_size = i_size_read(inode);
 	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
 	unsigned offset;
-	int ret = -EIO;
 
 	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
 		goto out;
-	ret = 0;
 	if (current->journal_info)
 		goto redirty;
 	/* Is the page fully outside i_size? (truncate in progress) */
@@ -280,7 +277,7 @@
 	int i;
 	int ret;
 
-	ret = gfs2_trans_begin(sdp, nrblocks, 0);
+	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
 	if (ret < 0)
 		return ret;
 
@@ -510,23 +507,26 @@
 static int gfs2_readpage(struct file *file, struct page *page)
 {
 	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
-	struct gfs2_holder gh;
+	struct gfs2_holder *gh;
 	int error;
 
-	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME|LM_FLAG_TRY_1CB, &gh);
-	error = gfs2_glock_nq_atime(&gh);
-	if (unlikely(error)) {
+	gh = gfs2_glock_is_locked_by_me(ip->i_gl);
+	if (!gh) {
+		gh = kmalloc(sizeof(struct gfs2_holder), GFP_NOFS);
+		if (!gh)
+			return -ENOBUFS;
+		gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, gh);
 		unlock_page(page);
-		goto out;
-	}
-	error = __gfs2_readpage(file, page);
-	gfs2_glock_dq(&gh);
-out:
-	gfs2_holder_uninit(&gh);
-	if (error == GLR_TRYFAILED) {
-		yield();
+		error = gfs2_glock_nq_atime(gh);
+		if (likely(error != 0))
+			goto out;
 		return AOP_TRUNCATED_PAGE;
 	}
+	error = __gfs2_readpage(file, page);
+	gfs2_glock_dq(gh);
+out:
+	gfs2_holder_uninit(gh);
+	kfree(gh);
 	return error;
 }
 
@@ -648,15 +648,15 @@
 
 	if (alloc_required) {
 		al = gfs2_alloc_get(ip);
+		if (!al) {
+			error = -ENOMEM;
+			goto out_unlock;
+		}
 
-		error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
+		error = gfs2_quota_lock_check(ip);
 		if (error)
 			goto out_alloc_put;
 
-		error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
-		if (error)
-			goto out_qunlock;
-
 		al->al_requested = data_blocks + ind_blocks;
 		error = gfs2_inplace_reserve(ip);
 		if (error)
@@ -828,7 +828,7 @@
 	unsigned int to = from + len;
 	int ret;
 
-	BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == 0);
+	BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == NULL);
 
 	ret = gfs2_meta_inode_buffer(ip, &dibh);
 	if (unlikely(ret)) {
diff --git a/fs/gfs2/ops_dentry.c b/fs/gfs2/ops_dentry.c
index 793e334..4a5e676 100644
--- a/fs/gfs2/ops_dentry.c
+++ b/fs/gfs2/ops_dentry.c
@@ -43,7 +43,7 @@
 	struct gfs2_holder d_gh;
 	struct gfs2_inode *ip = NULL;
 	int error;
-	int had_lock=0;
+	int had_lock = 0;
 
 	if (inode) {
 		if (is_bad_inode(inode))
@@ -54,7 +54,7 @@
 	if (sdp->sd_args.ar_localcaching)
 		goto valid;
 
-	had_lock = gfs2_glock_is_locked_by_me(dip->i_gl);
+	had_lock = (gfs2_glock_is_locked_by_me(dip->i_gl) != NULL);
 	if (!had_lock) {
 		error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
 		if (error)
diff --git a/fs/gfs2/ops_export.c b/fs/gfs2/ops_export.c
index 334c7f8..990d9f4 100644
--- a/fs/gfs2/ops_export.c
+++ b/fs/gfs2/ops_export.c
@@ -204,8 +204,6 @@
 	inode = gfs2_inode_lookup(sb, DT_UNKNOWN,
 					inum->no_addr,
 					0, 0);
-	if (!inode)
-		goto fail;
 	if (IS_ERR(inode)) {
 		error = PTR_ERR(inode);
 		goto fail;
diff --git a/fs/gfs2/ops_file.c b/fs/gfs2/ops_file.c
index f4842f2..e1b7d52 100644
--- a/fs/gfs2/ops_file.c
+++ b/fs/gfs2/ops_file.c
@@ -30,7 +30,6 @@
 #include "glock.h"
 #include "glops.h"
 #include "inode.h"
-#include "lm.h"
 #include "log.h"
 #include "meta_io.h"
 #include "quota.h"
@@ -39,6 +38,7 @@
 #include "util.h"
 #include "eaops.h"
 #include "ops_address.h"
+#include "ops_inode.h"
 
 /**
  * gfs2_llseek - seek to a location in a file
@@ -369,12 +369,9 @@
 	if (al == NULL)
 		goto out_unlock;
 
-	ret = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
+	ret = gfs2_quota_lock_check(ip);
 	if (ret)
 		goto out_alloc_put;
-	ret = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
-	if (ret)
-		goto out_quota_unlock;
 	al->al_requested = data_blocks + ind_blocks;
 	ret = gfs2_inplace_reserve(ip);
 	if (ret)
@@ -596,6 +593,36 @@
 	return generic_setlease(file, arg, fl);
 }
 
+static int gfs2_lm_plock_get(struct gfs2_sbd *sdp, struct lm_lockname *name,
+		      struct file *file, struct file_lock *fl)
+{
+	int error = -EIO;
+	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
+		error = sdp->sd_lockstruct.ls_ops->lm_plock_get(
+				sdp->sd_lockstruct.ls_lockspace, name, file, fl);
+	return error;
+}
+
+static int gfs2_lm_plock(struct gfs2_sbd *sdp, struct lm_lockname *name,
+		  struct file *file, int cmd, struct file_lock *fl)
+{
+	int error = -EIO;
+	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
+		error = sdp->sd_lockstruct.ls_ops->lm_plock(
+				sdp->sd_lockstruct.ls_lockspace, name, file, cmd, fl);
+	return error;
+}
+
+static int gfs2_lm_punlock(struct gfs2_sbd *sdp, struct lm_lockname *name,
+		    struct file *file, struct file_lock *fl)
+{
+	int error = -EIO;
+	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
+		error = sdp->sd_lockstruct.ls_ops->lm_punlock(
+				sdp->sd_lockstruct.ls_lockspace, name, file, fl);
+	return error;
+}
+
 /**
  * gfs2_lock - acquire/release a posix lock on a file
  * @file: the file pointer
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 4bee6aa..ef9c6c4 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
- * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
+ * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
  *
  * This copyrighted material is made available to anyone wishing to use,
  * modify, copy, or redistribute it subject to the terms and conditions
@@ -26,7 +26,6 @@
 #include "glock.h"
 #include "glops.h"
 #include "inode.h"
-#include "lm.h"
 #include "mount.h"
 #include "ops_fstype.h"
 #include "ops_dentry.h"
@@ -363,6 +362,13 @@
 	return rc;
 }
 
+static void gfs2_lm_others_may_mount(struct gfs2_sbd *sdp)
+{
+	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
+		sdp->sd_lockstruct.ls_ops->lm_others_may_mount(
+					sdp->sd_lockstruct.ls_lockspace);
+}
+
 static int init_journal(struct gfs2_sbd *sdp, int undo)
 {
 	struct gfs2_holder ji_gh;
@@ -542,7 +548,7 @@
 	}
 	ip = GFS2_I(sdp->sd_rindex);
 	set_bit(GLF_STICKY, &ip->i_gl->gl_flags);
-	sdp->sd_rindex_vn = ip->i_gl->gl_vn - 1;
+	sdp->sd_rindex_uptodate = 0;
 
 	/* Read in the quota inode */
 	sdp->sd_quota_inode = gfs2_lookup_simple(sdp->sd_master_dir, "quota");
@@ -705,6 +711,69 @@
 }
 
 /**
+ * gfs2_lm_mount - mount a locking protocol
+ * @sdp: the filesystem
+ * @args: mount arguments
+ * @silent: if 1, don't complain if the FS isn't a GFS2 fs
+ *
+ * Returns: errno
+ */
+
+static int gfs2_lm_mount(struct gfs2_sbd *sdp, int silent)
+{
+	char *proto = sdp->sd_proto_name;
+	char *table = sdp->sd_table_name;
+	int flags = LM_MFLAG_CONV_NODROP;
+	int error;
+
+	if (sdp->sd_args.ar_spectator)
+		flags |= LM_MFLAG_SPECTATOR;
+
+	fs_info(sdp, "Trying to join cluster \"%s\", \"%s\"\n", proto, table);
+
+	error = gfs2_mount_lockproto(proto, table, sdp->sd_args.ar_hostdata,
+				     gfs2_glock_cb, sdp,
+				     GFS2_MIN_LVB_SIZE, flags,
+				     &sdp->sd_lockstruct, &sdp->sd_kobj);
+	if (error) {
+		fs_info(sdp, "can't mount proto=%s, table=%s, hostdata=%s\n",
+			proto, table, sdp->sd_args.ar_hostdata);
+		goto out;
+	}
+
+	if (gfs2_assert_warn(sdp, sdp->sd_lockstruct.ls_lockspace) ||
+	    gfs2_assert_warn(sdp, sdp->sd_lockstruct.ls_ops) ||
+	    gfs2_assert_warn(sdp, sdp->sd_lockstruct.ls_lvb_size >=
+				  GFS2_MIN_LVB_SIZE)) {
+		gfs2_unmount_lockproto(&sdp->sd_lockstruct);
+		goto out;
+	}
+
+	if (sdp->sd_args.ar_spectator)
+		snprintf(sdp->sd_fsname, GFS2_FSNAME_LEN, "%s.s", table);
+	else
+		snprintf(sdp->sd_fsname, GFS2_FSNAME_LEN, "%s.%u", table,
+			 sdp->sd_lockstruct.ls_jid);
+
+	fs_info(sdp, "Joined cluster. Now mounting FS...\n");
+
+	if ((sdp->sd_lockstruct.ls_flags & LM_LSFLAG_LOCAL) &&
+	    !sdp->sd_args.ar_ignore_local_fs) {
+		sdp->sd_args.ar_localflocks = 1;
+		sdp->sd_args.ar_localcaching = 1;
+	}
+
+out:
+	return error;
+}
+
+void gfs2_lm_unmount(struct gfs2_sbd *sdp)
+{
+	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
+		gfs2_unmount_lockproto(&sdp->sd_lockstruct);
+}
+
+/**
  * fill_super - Read in superblock
  * @sb: The VFS superblock
  * @data: Mount options
@@ -874,7 +943,6 @@
 {
 	struct kstat stat;
 	struct nameidata nd;
-	struct file_system_type *fstype;
 	struct super_block *sb = NULL, *s;
 	int error;
 
@@ -886,8 +954,7 @@
 	}
 	error = vfs_getattr(nd.path.mnt, nd.path.dentry, &stat);
 
-	fstype = get_fs_type("gfs2");
-	list_for_each_entry(s, &fstype->fs_supers, s_instances) {
+	list_for_each_entry(s, &gfs2_fs_type.fs_supers, s_instances) {
 		if ((S_ISBLK(stat.mode) && s->s_dev == stat.rdev) ||
 		    (S_ISDIR(stat.mode) &&
 		     s == nd.path.dentry->d_inode->i_sb)) {
@@ -931,7 +998,6 @@
 		error = PTR_ERR(new);
 		goto error;
 	}
-	module_put(fs_type->owner);
 	new->s_flags = flags;
 	strlcpy(new->s_id, sb->s_id, sizeof(new->s_id));
 	sb_set_blocksize(new, sb->s_blocksize);
diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
index e874129..2686ad4 100644
--- a/fs/gfs2/ops_inode.c
+++ b/fs/gfs2/ops_inode.c
@@ -200,15 +200,15 @@
 
 	if (alloc_required) {
 		struct gfs2_alloc *al = gfs2_alloc_get(dip);
+		if (!al) {
+			error = -ENOMEM;
+			goto out_gunlock;
+		}
 
-		error = gfs2_quota_lock(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
+		error = gfs2_quota_lock_check(dip);
 		if (error)
 			goto out_alloc;
 
-		error = gfs2_quota_check(dip, dip->i_inode.i_uid, dip->i_inode.i_gid);
-		if (error)
-			goto out_gunlock_q;
-
 		al->al_requested = sdp->sd_max_dirres;
 
 		error = gfs2_inplace_reserve(dip);
@@ -716,15 +716,15 @@
 
 	if (alloc_required) {
 		struct gfs2_alloc *al = gfs2_alloc_get(ndip);
+		if (!al) {
+			error = -ENOMEM;
+			goto out_gunlock;
+		}
 
-		error = gfs2_quota_lock(ndip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
+		error = gfs2_quota_lock_check(ndip);
 		if (error)
 			goto out_alloc;
 
-		error = gfs2_quota_check(ndip, ndip->i_inode.i_uid, ndip->i_inode.i_gid);
-		if (error)
-			goto out_gunlock_q;
-
 		al->al_requested = sdp->sd_max_dirres;
 
 		error = gfs2_inplace_reserve(ndip);
@@ -898,7 +898,7 @@
 	int error;
 	int unlock = 0;
 
-	if (gfs2_glock_is_locked_by_me(ip->i_gl) == 0) {
+	if (gfs2_glock_is_locked_by_me(ip->i_gl) == NULL) {
 		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
 		if (error)
 			return error;
@@ -953,7 +953,8 @@
 	if (!(attr->ia_valid & ATTR_GID) || ogid == ngid)
 		ogid = ngid = NO_QUOTA_CHANGE;
 
-	gfs2_alloc_get(ip);
+	if (!gfs2_alloc_get(ip))
+		return -ENOMEM;
 
 	error = gfs2_quota_lock(ip, nuid, ngid);
 	if (error)
@@ -981,8 +982,9 @@
 	brelse(dibh);
 
 	if (ouid != NO_QUOTA_CHANGE || ogid != NO_QUOTA_CHANGE) {
-		gfs2_quota_change(ip, -ip->i_di.di_blocks, ouid, ogid);
-		gfs2_quota_change(ip, ip->i_di.di_blocks, nuid, ngid);
+		u64 blocks = gfs2_get_inode_blocks(&ip->i_inode);
+		gfs2_quota_change(ip, -blocks, ouid, ogid);
+		gfs2_quota_change(ip, blocks, nuid, ngid);
 	}
 
 out_end_trans:
@@ -1064,7 +1066,7 @@
 	int error;
 	int unlock = 0;
 
-	if (gfs2_glock_is_locked_by_me(ip->i_gl) == 0) {
+	if (gfs2_glock_is_locked_by_me(ip->i_gl) == NULL) {
 		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
 		if (error)
 			return error;
@@ -1148,16 +1150,6 @@
 	.removexattr = gfs2_removexattr,
 };
 
-const struct inode_operations gfs2_dev_iops = {
-	.permission = gfs2_permission,
-	.setattr = gfs2_setattr,
-	.getattr = gfs2_getattr,
-	.setxattr = gfs2_setxattr,
-	.getxattr = gfs2_getxattr,
-	.listxattr = gfs2_listxattr,
-	.removexattr = gfs2_removexattr,
-};
-
 const struct inode_operations gfs2_dir_iops = {
 	.create = gfs2_create,
 	.lookup = gfs2_lookup,
diff --git a/fs/gfs2/ops_inode.h b/fs/gfs2/ops_inode.h
index fd8cee2..14b4b79 100644
--- a/fs/gfs2/ops_inode.h
+++ b/fs/gfs2/ops_inode.h
@@ -15,7 +15,6 @@
 extern const struct inode_operations gfs2_file_iops;
 extern const struct inode_operations gfs2_dir_iops;
 extern const struct inode_operations gfs2_symlink_iops;
-extern const struct inode_operations gfs2_dev_iops;
 extern const struct file_operations gfs2_file_fops;
 extern const struct file_operations gfs2_dir_fops;
 extern const struct file_operations gfs2_file_fops_nolock;
diff --git a/fs/gfs2/ops_super.c b/fs/gfs2/ops_super.c
index 5e52421..2278c68 100644
--- a/fs/gfs2/ops_super.c
+++ b/fs/gfs2/ops_super.c
@@ -25,7 +25,6 @@
 #include "incore.h"
 #include "glock.h"
 #include "inode.h"
-#include "lm.h"
 #include "log.h"
 #include "mount.h"
 #include "ops_super.h"
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index a08dabd..56aaf91 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -94,7 +94,7 @@
 	struct gfs2_quota_data *qd;
 	int error;
 
-	qd = kzalloc(sizeof(struct gfs2_quota_data), GFP_KERNEL);
+	qd = kzalloc(sizeof(struct gfs2_quota_data), GFP_NOFS);
 	if (!qd)
 		return -ENOMEM;
 
@@ -616,16 +616,9 @@
 	s64 value;
 	int err = -EIO;
 
-	if (gfs2_is_stuffed(ip)) {
-		struct gfs2_alloc *al = NULL;
-		al = gfs2_alloc_get(ip);
-		/* just request 1 blk */
-		al->al_requested = 1;
-		gfs2_inplace_reserve(ip);
+	if (gfs2_is_stuffed(ip))
 		gfs2_unstuff_dinode(ip, NULL);
-		gfs2_inplace_release(ip);
-		gfs2_alloc_put(ip);
-	}
+
 	page = grab_cache_page(mapping, index);
 	if (!page)
 		return -ENOMEM;
@@ -690,14 +683,14 @@
 	unsigned int qx, x;
 	struct gfs2_quota_data *qd;
 	loff_t offset;
-	unsigned int nalloc = 0;
+	unsigned int nalloc = 0, blocks;
 	struct gfs2_alloc *al = NULL;
 	int error;
 
 	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
 			      &data_blocks, &ind_blocks);
 
-	ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_KERNEL);
+	ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
 	if (!ghs)
 		return -ENOMEM;
 
@@ -727,30 +720,33 @@
 			nalloc++;
 	}
 
-	if (nalloc) {
-		al = gfs2_alloc_get(ip);
-
-		al->al_requested = nalloc * (data_blocks + ind_blocks);
-
-		error = gfs2_inplace_reserve(ip);
-		if (error)
-			goto out_alloc;
-
-		error = gfs2_trans_begin(sdp,
-					 al->al_rgd->rd_length +
-					 num_qd * data_blocks +
-					 nalloc * ind_blocks +
-					 RES_DINODE + num_qd +
-					 RES_STATFS, 0);
-		if (error)
-			goto out_ipres;
-	} else {
-		error = gfs2_trans_begin(sdp,
-					 num_qd * data_blocks +
-					 RES_DINODE + num_qd, 0);
-		if (error)
-			goto out_gunlock;
+	al = gfs2_alloc_get(ip);
+	if (!al) {
+		error = -ENOMEM;
+		goto out_gunlock;
 	}
+	/*
+	 * 1 blk for unstuffing inode if stuffed. We add this extra
+	 * block to the reservation unconditionally. If the inode
+	 * doesn't need unstuffing, the block will be released to the
+	 * rgrp since it won't be allocated during the transaction
+	 */
+	al->al_requested = 1;
+	/* +1 in the end for block requested above for unstuffing */
+	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 1;
+
+	if (nalloc)
+		al->al_requested += nalloc * (data_blocks + ind_blocks);
+	error = gfs2_inplace_reserve(ip);
+	if (error)
+		goto out_alloc;
+
+	if (nalloc)
+		blocks += al->al_rgd->rd_length + nalloc * ind_blocks + RES_STATFS;
+
+	error = gfs2_trans_begin(sdp, blocks, 0);
+	if (error)
+		goto out_ipres;
 
 	for (x = 0; x < num_qd; x++) {
 		qd = qda[x];
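
The rewritten reservation above always requests one extra block so a stuffed quota inode can be unstuffed inside the same transaction, replacing the separate reserve/unstuff/release sequence removed earlier in this file. A worked instance of the formula as a standalone sketch; the sizes and the RES_DINODE/RES_STATFS values here are made up for illustration, not the kernel's real constants:

#include <stdio.h>

#define RES_DINODE 1	/* stand-in value, not the kernel's */
#define RES_STATFS 1	/* stand-in value, not the kernel's */

int main(void)
{
	unsigned num_qd = 2, data_blocks = 1, ind_blocks = 1;
	unsigned nalloc = 1, rd_length = 2;

	/* +1 is the block reserved unconditionally for unstuffing */
	unsigned blocks = num_qd * data_blocks + RES_DINODE + num_qd + 1;

	if (nalloc)
		blocks += rd_length + nalloc * ind_blocks + RES_STATFS;

	printf("reserve %u log blocks\n", blocks);	/* prints 10 */
	return 0;
}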
@@ -769,11 +765,9 @@
 out_end_trans:
 	gfs2_trans_end(sdp);
 out_ipres:
-	if (nalloc)
-		gfs2_inplace_release(ip);
+	gfs2_inplace_release(ip);
 out_alloc:
-	if (nalloc)
-		gfs2_alloc_put(ip);
+	gfs2_alloc_put(ip);
 out_gunlock:
 	gfs2_glock_dq_uninit(&i_gh);
 out:
@@ -1124,12 +1118,12 @@
 	error = -ENOMEM;
 
 	sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
-				       sizeof(unsigned char *), GFP_KERNEL);
+				       sizeof(unsigned char *), GFP_NOFS);
 	if (!sdp->sd_quota_bitmap)
 		return error;
 
 	for (x = 0; x < sdp->sd_quota_chunks; x++) {
-		sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_KERNEL);
+		sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_NOFS);
 		if (!sdp->sd_quota_bitmap[x])
 			goto fail;
 	}
diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
index a8be141..3b7f4b0 100644
--- a/fs/gfs2/quota.h
+++ b/fs/gfs2/quota.h
@@ -32,4 +32,21 @@
 void gfs2_quota_scan(struct gfs2_sbd *sdp);
 void gfs2_quota_cleanup(struct gfs2_sbd *sdp);
 
+static inline int gfs2_quota_lock_check(struct gfs2_inode *ip)
+{
+	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+	int ret;
+	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
+		return 0;
+	ret = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
+	if (ret)
+		return ret;
+	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
+		return 0;
+	ret = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
+	if (ret)
+		gfs2_quota_unlock(ip);
+	return ret;
+}
+
 #endif /* __QUOTA_DOT_H__ */
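
The gfs2_quota_lock_check() helper added above folds the old gfs2_quota_lock()/gfs2_quota_check() pair into one call and drops the lock itself when the check fails, which is why the callers in ops_address.c, ops_file.c and ops_inode.c lose their separate unlock-on-error labels. A minimal userspace sketch of the same lock-check-unlock shape, with toy stand-ins rather than the real quota API:

#include <stdio.h>

static int quota_lock(void)    { puts("quota locked"); return 0; }
static void quota_unlock(void) { puts("quota unlocked"); }
static int quota_check(void)   { return -1; /* pretend over quota */ }

/* Mirrors the shape of gfs2_quota_lock_check(): take the lock, run the
 * check, and release the lock again if the check fails, so every caller
 * sees a single call with no separate unlock-on-error path. */
static int quota_lock_check(void)
{
	int ret = quota_lock();
	if (ret)
		return ret;
	ret = quota_check();
	if (ret)
		quota_unlock();
	return ret;
}

int main(void)
{
	if (quota_lock_check())
		fprintf(stderr, "allocation refused: over quota\n");
	return 0;
}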
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index 6fb07d6..2888e4b 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -20,7 +20,6 @@
 #include "bmap.h"
 #include "glock.h"
 #include "glops.h"
-#include "lm.h"
 #include "lops.h"
 #include "meta_io.h"
 #include "recovery.h"
@@ -69,7 +68,7 @@
 		return 0;
 	}
 
-	rr = kmalloc(sizeof(struct gfs2_revoke_replay), GFP_KERNEL);
+	rr = kmalloc(sizeof(struct gfs2_revoke_replay), GFP_NOFS);
 	if (!rr)
 		return -ENOMEM;
 
@@ -150,7 +149,7 @@
 			  struct gfs2_log_header_host *head)
 {
 	struct buffer_head *bh;
-	struct gfs2_log_header_host lh;
+	struct gfs2_log_header_host uninitialized_var(lh);
 	const u32 nothing = 0;
 	u32 hash;
 	int error;
@@ -425,6 +424,16 @@
 	return error;
 }
 
+
+static void gfs2_lm_recovery_done(struct gfs2_sbd *sdp, unsigned int jid,
+				  unsigned int message)
+{
+	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
+		sdp->sd_lockstruct.ls_ops->lm_recovery_done(
+			sdp->sd_lockstruct.ls_lockspace, jid, message);
+}
+
+
 /**
 * gfs2_recover_journal - recover a given journal
  * @jd: the struct gfs2_jdesc describing the journal
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 3552110..7e8f0b1 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
- * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
+ * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
  *
  * This copyrighted material is made available to anyone wishing to use,
  * modify, copy, or redistribute it subject to the terms and conditions
@@ -14,6 +14,7 @@
 #include <linux/fs.h>
 #include <linux/gfs2_ondisk.h>
 #include <linux/lm_interface.h>
+#include <linux/prefetch.h>
 
 #include "gfs2.h"
 #include "incore.h"
@@ -33,6 +34,16 @@
 #define BFITNOENT ((u32)~0)
 #define NO_BLOCK ((u64)~0)
 
+#if BITS_PER_LONG == 32
+#define LBITMASK   (0x55555555UL)
+#define LBITSKIP55 (0x55555555UL)
+#define LBITSKIP00 (0x00000000UL)
+#else
+#define LBITMASK   (0x5555555555555555UL)
+#define LBITSKIP55 (0x5555555555555555UL)
+#define LBITSKIP00 (0x0000000000000000UL)
+#endif
+
 /*
  * These routines are used by the resource group routines (rgrp.c)
  * to keep track of block allocation.  Each block is represented by two
@@ -53,7 +64,8 @@
 };
 
 static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal,
-                        unsigned char old_state, unsigned char new_state);
+                        unsigned char old_state, unsigned char new_state,
+			unsigned int *n);
 
 /**
  * gfs2_setbit - Set a bit in the bitmaps
@@ -64,26 +76,32 @@
  *
  */
 
-static void gfs2_setbit(struct gfs2_rgrpd *rgd, unsigned char *buffer,
-			unsigned int buflen, u32 block,
-			unsigned char new_state)
+static inline void gfs2_setbit(struct gfs2_rgrpd *rgd, unsigned char *buf1,
+			       unsigned char *buf2, unsigned int offset,
+			       unsigned int buflen, u32 block,
+			       unsigned char new_state)
 {
-	unsigned char *byte, *end, cur_state;
-	unsigned int bit;
+	unsigned char *byte1, *byte2, *end, cur_state;
+	const unsigned int bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE;
 
-	byte = buffer + (block / GFS2_NBBY);
-	bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE;
-	end = buffer + buflen;
+	byte1 = buf1 + offset + (block / GFS2_NBBY);
+	end = buf1 + offset + buflen;
 
-	gfs2_assert(rgd->rd_sbd, byte < end);
+	BUG_ON(byte1 >= end);
 
-	cur_state = (*byte >> bit) & GFS2_BIT_MASK;
+	cur_state = (*byte1 >> bit) & GFS2_BIT_MASK;
 
-	if (valid_change[new_state * 4 + cur_state]) {
-		*byte ^= cur_state << bit;
-		*byte |= new_state << bit;
-	} else
+	if (unlikely(!valid_change[new_state * 4 + cur_state])) {
 		gfs2_consist_rgrpd(rgd);
+		return;
+	}
+	*byte1 ^= (cur_state ^ new_state) << bit;
+
+	if (buf2) {
+		byte2 = buf2 + offset + (block / GFS2_NBBY);
+		cur_state = (*byte2 >> bit) & GFS2_BIT_MASK;
+		*byte2 ^= (cur_state ^ new_state) << bit;
+	}
 }
 
 /**
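
The rewritten gfs2_setbit() above updates a two-bit block state with a single XOR, *byte1 ^= (cur_state ^ new_state) << bit, instead of clearing and re-setting the field; flipping only the bits that differ leaves the other three states packed into the same byte untouched, and lets the clone bitmap be updated in the same call. A small standalone check of the trick:

#include <assert.h>
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t byte = 0xE4;	/* four 2-bit states: 0, 1, 2, 3 */
	unsigned bit = 2;	/* second slot, currently state 1 */
	unsigned cur = (byte >> bit) & 3;
	unsigned want = 3;	/* new state for that slot */

	byte ^= (cur ^ want) << bit;	/* flip only the differing bits */
	assert(((byte >> bit) & 3) == want);
	printf("byte is now 0x%02X\n", byte);	/* 0xEC, other slots intact */
	return 0;
}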
@@ -94,10 +112,12 @@
  *
  */
 
-static unsigned char gfs2_testbit(struct gfs2_rgrpd *rgd, unsigned char *buffer,
-				  unsigned int buflen, u32 block)
+static inline unsigned char gfs2_testbit(struct gfs2_rgrpd *rgd,
+					 const unsigned char *buffer,
+					 unsigned int buflen, u32 block)
 {
-	unsigned char *byte, *end, cur_state;
+	const unsigned char *byte, *end;
+	unsigned char cur_state;
 	unsigned int bit;
 
 	byte = buffer + (block / GFS2_NBBY);
@@ -126,47 +146,66 @@
  * Return: the block number (bitmap buffer scope) that was found
  */
 
-static u32 gfs2_bitfit(unsigned char *buffer, unsigned int buflen, u32 goal,
-		       unsigned char old_state)
+static u32 gfs2_bitfit(const u8 *buffer, unsigned int buflen, u32 goal,
+		       u8 old_state)
 {
-	unsigned char *byte;
-	u32 blk = goal;
-	unsigned int bit, bitlong;
-	unsigned long *plong, plong55;
+	const u8 *byte, *start, *end;
+	int bit, startbit;
+	u32 g1, g2, misaligned;
+	unsigned long *plong;
+	unsigned long lskipval;
 
-	byte = buffer + (goal / GFS2_NBBY);
-	plong = (unsigned long *)(buffer + (goal / GFS2_NBBY));
-	bit = (goal % GFS2_NBBY) * GFS2_BIT_SIZE;
-	bitlong = bit;
-#if BITS_PER_LONG == 32
-	plong55 = 0x55555555;
-#else
-	plong55 = 0x5555555555555555;
-#endif
-	while (byte < buffer + buflen) {
-
-		if (bitlong == 0 && old_state == 0 && *plong == plong55) {
-			plong++;
-			byte += sizeof(unsigned long);
-			blk += sizeof(unsigned long) * GFS2_NBBY;
-			continue;
+	lskipval = (old_state & GFS2_BLKST_USED) ? LBITSKIP00 : LBITSKIP55;
+	g1 = (goal / GFS2_NBBY);
+	start = buffer + g1;
+	byte = start;
+	end = buffer + buflen;
+	g2 = ALIGN(g1, sizeof(unsigned long));
+	plong = (unsigned long *)(buffer + g2);
+	startbit = bit = (goal % GFS2_NBBY) * GFS2_BIT_SIZE;
+	misaligned = g2 - g1;
+	if (!misaligned)
+		goto ulong_aligned;
+/* parse the bitmap a byte at a time */
+misaligned:
+	while (byte < end) {
+		if (((*byte >> bit) & GFS2_BIT_MASK) == old_state) {
+			return goal +
+				(((byte - start) * GFS2_NBBY) +
+				 ((bit - startbit) >> 1));
 		}
-		if (((*byte >> bit) & GFS2_BIT_MASK) == old_state)
-			return blk;
 		bit += GFS2_BIT_SIZE;
-		if (bit >= 8) {
+		if (bit >= GFS2_NBBY * GFS2_BIT_SIZE) {
 			bit = 0;
 			byte++;
+			misaligned--;
+			if (!misaligned) {
+				plong = (unsigned long *)byte;
+				goto ulong_aligned;
+			}
 		}
-		bitlong += GFS2_BIT_SIZE;
-		if (bitlong >= sizeof(unsigned long) * 8) {
-			bitlong = 0;
-			plong++;
-		}
-
-		blk++;
 	}
+	return BFITNOENT;
 
+/* parse the bitmap an unsigned long at a time */
+ulong_aligned:
+	/* Stop at "end - 1" or else prefetch can go past the end and segfault.
+	   We could "if" it but we'd lose some of the performance gained.
+	   This way will only slow down searching the very last 4/8 bytes
+	   depending on architecture.  I've experimented with several ways
+	   of writing this section such as using an else before the goto
+	   but this one seems to be the fastest. */
+	while ((unsigned char *)plong < end - 1) {
+		prefetch(plong + 1);
+		if (((*plong) & LBITMASK) != lskipval)
+			break;
+		plong++;
+	}
+	if ((unsigned char *)plong < end) {
+		byte = (const u8 *)plong;
+		misaligned += sizeof(unsigned long) - 1;
+		goto misaligned;
+	}
 	return BFITNOENT;
 }
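
Once aligned, the rewritten gfs2_bitfit() above walks the bitmap one unsigned long at a time and rejects a whole word with a single masked compare: with two bits per block, ANDing a word against 0x55...55 keeps the low bit of every state, and a result equal to the mask means every state in the word is odd (used or dinode), so no free block can be in it. A simplified userspace sketch of that skip, not the kernel function itself; it omits the goal offset, the misalignment handling and the prefetch:

#include <stdio.h>
#include <stdint.h>

#define LBITMASK UINT64_C(0x5555555555555555)

static unsigned get_state(const uint64_t *map, unsigned i)
{
	return (map[i / 32] >> ((i % 32) * 2)) & 3;	/* 32 states per u64 */
}

static void set_state(uint64_t *map, unsigned i, unsigned s)
{
	unsigned shift = (i % 32) * 2;

	map[i / 32] = (map[i / 32] & ~(UINT64_C(3) << shift)) |
		      ((uint64_t)s << shift);
}

/* Find the first free (state 0) entry in [0, n). A word whose low bits
 * are all set holds only odd states, so it is skipped with one compare,
 * mirroring the LBITSKIP55 case above. */
static int bitfit_free(const uint64_t *map, unsigned n)
{
	unsigned i = 0;

	while (i + 32 <= n && (map[i / 32] & LBITMASK) == LBITMASK)
		i += 32;
	for (; i < n; i++)
		if (get_state(map, i) == 0)
			return (int)i;
	return -1;
}

int main(void)
{
	uint64_t map[4] = { 0 };
	unsigned i;

	for (i = 0; i < 100; i++)	/* mark the first 100 blocks used */
		set_state(map, i, 1);
	printf("first free block: %d\n", bitfit_free(map, 128));  /* 100 */
	return 0;
}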
 
@@ -179,14 +218,14 @@
  * Returns: The number of bits
  */
 
-static u32 gfs2_bitcount(struct gfs2_rgrpd *rgd, unsigned char *buffer,
-			      unsigned int buflen, unsigned char state)
+static u32 gfs2_bitcount(struct gfs2_rgrpd *rgd, const u8 *buffer,
+			 unsigned int buflen, u8 state)
 {
-	unsigned char *byte = buffer;
-	unsigned char *end = buffer + buflen;
-	unsigned char state1 = state << 2;
-	unsigned char state2 = state << 4;
-	unsigned char state3 = state << 6;
+	const u8 *byte = buffer;
+	const u8 *end = buffer + buflen;
+	const u8 state1 = state << 2;
+	const u8 state2 = state << 4;
+	const u8 state3 = state << 6;
 	u32 count = 0;
 
 	for (; byte < end; byte++) {
@@ -353,7 +392,7 @@
 		}
 
 		kfree(rgd->rd_bits);
-		kfree(rgd);
+		kmem_cache_free(gfs2_rgrpd_cachep, rgd);
 	}
 }
 
@@ -516,7 +555,7 @@
 		return error;
 	}
 
-	rgd = kzalloc(sizeof(struct gfs2_rgrpd), GFP_NOFS);
+	rgd = kmem_cache_zalloc(gfs2_rgrpd_cachep, GFP_NOFS);
 	error = -ENOMEM;
 	if (!rgd)
 		return error;
@@ -539,7 +578,7 @@
 		return error;
 
 	rgd->rd_gl->gl_object = rgd;
-	rgd->rd_rg_vn = rgd->rd_gl->gl_vn - 1;
+	rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
 	rgd->rd_flags |= GFS2_RDF_CHECK;
 	return error;
 }
@@ -575,7 +614,7 @@
 		}
 	}
 
-	sdp->sd_rindex_vn = ip->i_gl->gl_vn;
+	sdp->sd_rindex_uptodate = 1;
 	return 0;
 }
 
@@ -609,7 +648,7 @@
 		}
 	}
 
-	sdp->sd_rindex_vn = ip->i_gl->gl_vn;
+	sdp->sd_rindex_uptodate = 1;
 	return 0;
 }
 
@@ -642,9 +681,9 @@
 		return error;
 
 	/* Read new copy from disk if we don't have the latest */
-	if (sdp->sd_rindex_vn != gl->gl_vn) {
+	if (!sdp->sd_rindex_uptodate) {
 		mutex_lock(&sdp->sd_rindex_mutex);
-		if (sdp->sd_rindex_vn != gl->gl_vn) {
+		if (!sdp->sd_rindex_uptodate) {
 			error = gfs2_ri_update(ip);
 			if (error)
 				gfs2_glock_dq_uninit(ri_gh);
@@ -655,21 +694,31 @@
 	return error;
 }
 
-static void gfs2_rgrp_in(struct gfs2_rgrp_host *rg, const void *buf)
+static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf)
 {
 	const struct gfs2_rgrp *str = buf;
+	struct gfs2_rgrp_host *rg = &rgd->rd_rg;
+	u32 rg_flags;
 
-	rg->rg_flags = be32_to_cpu(str->rg_flags);
+	rg_flags = be32_to_cpu(str->rg_flags);
+	if (rg_flags & GFS2_RGF_NOALLOC)
+		rgd->rd_flags |= GFS2_RDF_NOALLOC;
+	else
+		rgd->rd_flags &= ~GFS2_RDF_NOALLOC;
 	rg->rg_free = be32_to_cpu(str->rg_free);
 	rg->rg_dinodes = be32_to_cpu(str->rg_dinodes);
 	rg->rg_igeneration = be64_to_cpu(str->rg_igeneration);
 }
 
-static void gfs2_rgrp_out(const struct gfs2_rgrp_host *rg, void *buf)
+static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
 {
 	struct gfs2_rgrp *str = buf;
+	struct gfs2_rgrp_host *rg = &rgd->rd_rg;
+	u32 rg_flags = 0;
 
-	str->rg_flags = cpu_to_be32(rg->rg_flags);
+	if (rgd->rd_flags & GFS2_RDF_NOALLOC)
+		rg_flags |= GFS2_RGF_NOALLOC;
+	str->rg_flags = cpu_to_be32(rg_flags);
 	str->rg_free = cpu_to_be32(rg->rg_free);
 	str->rg_dinodes = cpu_to_be32(rg->rg_dinodes);
 	str->__pad = cpu_to_be32(0);
@@ -726,9 +775,9 @@
 		}
 	}
 
-	if (rgd->rd_rg_vn != gl->gl_vn) {
-		gfs2_rgrp_in(&rgd->rd_rg, (rgd->rd_bits[0].bi_bh)->b_data);
-		rgd->rd_rg_vn = gl->gl_vn;
+	if (!(rgd->rd_flags & GFS2_RDF_UPTODATE)) {
+		gfs2_rgrp_in(rgd, (rgd->rd_bits[0].bi_bh)->b_data);
+		rgd->rd_flags |= GFS2_RDF_UPTODATE;
 	}
 
 	spin_lock(&sdp->sd_rindex_spin);
@@ -840,7 +889,7 @@
 	struct gfs2_sbd *sdp = rgd->rd_sbd;
 	int ret = 0;
 
-	if (rgd->rd_rg.rg_flags & GFS2_RGF_NOALLOC)
+	if (rgd->rd_flags & GFS2_RDF_NOALLOC)
 		return 0;
 
 	spin_lock(&sdp->sd_rindex_spin);
@@ -866,13 +915,15 @@
 	u32 goal = 0, block;
 	u64 no_addr;
 	struct gfs2_sbd *sdp = rgd->rd_sbd;
+	unsigned int n;
 
 	for(;;) {
 		if (goal >= rgd->rd_data)
 			break;
 		down_write(&sdp->sd_log_flush_lock);
+		n = 1;
 		block = rgblk_search(rgd, goal, GFS2_BLKST_UNLINKED,
-				     GFS2_BLKST_UNLINKED);
+				     GFS2_BLKST_UNLINKED, &n);
 		up_write(&sdp->sd_log_flush_lock);
 		if (block == BFITNOENT)
 			break;
@@ -904,24 +955,20 @@
 static struct gfs2_rgrpd *recent_rgrp_first(struct gfs2_sbd *sdp,
 					    u64 rglast)
 {
-	struct gfs2_rgrpd *rgd = NULL;
+	struct gfs2_rgrpd *rgd;
 
 	spin_lock(&sdp->sd_rindex_spin);
 
-	if (list_empty(&sdp->sd_rindex_recent_list))
-		goto out;
-
-	if (!rglast)
-		goto first;
-
-	list_for_each_entry(rgd, &sdp->sd_rindex_recent_list, rd_recent) {
-		if (rgd->rd_addr == rglast)
-			goto out;
+	if (rglast) {
+		list_for_each_entry(rgd, &sdp->sd_rindex_recent_list, rd_recent) {
+			if (rgrp_contains_block(rgd, rglast))
+				goto out;
+		}
 	}
-
-first:
-	rgd = list_entry(sdp->sd_rindex_recent_list.next, struct gfs2_rgrpd,
-			 rd_recent);
+	rgd = NULL;
+	if (!list_empty(&sdp->sd_rindex_recent_list))
+		rgd = list_entry(sdp->sd_rindex_recent_list.next,
+				 struct gfs2_rgrpd, rd_recent);
 out:
 	spin_unlock(&sdp->sd_rindex_spin);
 	return rgd;
@@ -1067,7 +1114,7 @@
 
 	/* Try recently successful rgrps */
 
-	rgd = recent_rgrp_first(sdp, ip->i_last_rg_alloc);
+	rgd = recent_rgrp_first(sdp, ip->i_goal);
 
 	while (rgd) {
 		rg_locked = 0;
@@ -1151,8 +1198,6 @@
 	}
 
 out:
-	ip->i_last_rg_alloc = rgd->rd_addr;
-
 	if (begin) {
 		recent_rgrp_add(rgd);
 		rgd = gfs2_rgrpd_get_next(rgd);
@@ -1275,6 +1320,7 @@
  * @goal: the goal block within the RG (start here to search for avail block)
  * @old_state: GFS2_BLKST_XXX the before-allocation state to find
  * @new_state: GFS2_BLKST_XXX the after-allocation block state
+ * @n: The extent length
  *
  * Walk rgrp's bitmap to find bits that represent a block in @old_state.
  * Add the found bitmap buffer to the transaction.
@@ -1290,13 +1336,17 @@
  */
 
 static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal,
-			unsigned char old_state, unsigned char new_state)
+			unsigned char old_state, unsigned char new_state,
+			unsigned int *n)
 {
 	struct gfs2_bitmap *bi = NULL;
-	u32 length = rgd->rd_length;
+	const u32 length = rgd->rd_length;
 	u32 blk = 0;
 	unsigned int buf, x;
+	const unsigned int elen = *n;
+	const u8 *buffer;
 
+	*n = 0;
 	/* Find bitmap block that contains bits for goal block */
 	for (buf = 0; buf < length; buf++) {
 		bi = rgd->rd_bits + buf;
@@ -1317,12 +1367,11 @@
 	for (x = 0; x <= length; x++) {
 		/* The GFS2_BLKST_UNLINKED state doesn't apply to the clone
 		   bitmaps, so we must search the originals for that. */
+		buffer = bi->bi_bh->b_data + bi->bi_offset;
 		if (old_state != GFS2_BLKST_UNLINKED && bi->bi_clone)
-			blk = gfs2_bitfit(bi->bi_clone + bi->bi_offset,
-					  bi->bi_len, goal, old_state);
-		else
-			blk = gfs2_bitfit(bi->bi_bh->b_data + bi->bi_offset,
-					  bi->bi_len, goal, old_state);
+			buffer = bi->bi_clone + bi->bi_offset;
+
+		blk = gfs2_bitfit(buffer, bi->bi_len, goal, old_state);
 		if (blk != BFITNOENT)
 			break;
 
@@ -1333,12 +1382,23 @@
 	}
 
 	if (blk != BFITNOENT && old_state != new_state) {
+		*n = 1;
 		gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
-		gfs2_setbit(rgd, bi->bi_bh->b_data + bi->bi_offset,
+		gfs2_setbit(rgd, bi->bi_bh->b_data, bi->bi_clone, bi->bi_offset,
 			    bi->bi_len, blk, new_state);
-		if (bi->bi_clone)
-			gfs2_setbit(rgd, bi->bi_clone + bi->bi_offset,
-				    bi->bi_len, blk, new_state);
+		goal = blk;
+		while (*n < elen) {
+			goal++;
+			if (goal >= (bi->bi_len * GFS2_NBBY))
+				break;
+			if (gfs2_testbit(rgd, buffer, bi->bi_len, goal) !=
+			    GFS2_BLKST_FREE)
+				break;
+			gfs2_setbit(rgd, bi->bi_bh->b_data, bi->bi_clone,
+				    bi->bi_offset, bi->bi_len, goal,
+				    new_state);
+			(*n)++;
+		}
 	}
 
 	return (blk == BFITNOENT) ? blk : (bi->bi_start * GFS2_NBBY) + blk;
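
rgblk_search() now takes the requested extent length in *n: after claiming the goal block it keeps claiming consecutive free blocks until the run ends or the request is met, and reports the achieved length back through *n, which gfs2_alloc_block() below then uses to adjust rg_free and the statfs and quota counts in one go. A toy version of the extent loop, assuming one byte per block state instead of the real two-bit packing:

#include <stdio.h>

#define FREE 0
#define USED 1

/* Claim the goal block, then extend the extent over consecutive FREE
 * blocks until *n reaches the requested length or the run is broken. */
static unsigned alloc_extent(unsigned char *map, unsigned len,
			     unsigned goal, unsigned *n)
{
	unsigned want = *n;

	map[goal] = USED;
	for (*n = 1; *n < want; (*n)++) {
		if (goal + *n >= len || map[goal + *n] != FREE)
			break;
		map[goal + *n] = USED;
	}
	return goal;
}

int main(void)
{
	unsigned char map[8] = { USED, FREE, FREE, FREE,
				 USED, FREE, FREE, FREE };
	unsigned n = 4;			/* ask for a 4-block extent */
	unsigned blk = alloc_extent(map, 8, 1, &n);

	printf("got %u blocks at %u\n", n, blk);	/* got 3 blocks at 1 */
	return 0;
}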
@@ -1393,7 +1453,7 @@
 			       bi->bi_len);
 		}
 		gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
-		gfs2_setbit(rgd, bi->bi_bh->b_data + bi->bi_offset,
+		gfs2_setbit(rgd, bi->bi_bh->b_data, NULL, bi->bi_offset,
 			    bi->bi_len, buf_blk, new_state);
 	}
 
@@ -1401,13 +1461,13 @@
 }
 
 /**
- * gfs2_alloc_data - Allocate a data block
- * @ip: the inode to allocate the data block for
+ * gfs2_alloc_block - Allocate a block
+ * @ip: the inode to allocate the block for
  *
  * Returns: the allocated block
  */
 
-u64 gfs2_alloc_data(struct gfs2_inode *ip)
+u64 gfs2_alloc_block(struct gfs2_inode *ip, unsigned int *n)
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 	struct gfs2_alloc *al = ip->i_alloc;
@@ -1415,77 +1475,31 @@
 	u32 goal, blk;
 	u64 block;
 
-	if (rgrp_contains_block(rgd, ip->i_di.di_goal_data))
-		goal = ip->i_di.di_goal_data - rgd->rd_data0;
+	if (rgrp_contains_block(rgd, ip->i_goal))
+		goal = ip->i_goal - rgd->rd_data0;
 	else
-		goal = rgd->rd_last_alloc_data;
+		goal = rgd->rd_last_alloc;
 
-	blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, GFS2_BLKST_USED);
+	blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, GFS2_BLKST_USED, n);
 	BUG_ON(blk == BFITNOENT);
-	rgd->rd_last_alloc_data = blk;
 
+	rgd->rd_last_alloc = blk;
 	block = rgd->rd_data0 + blk;
-	ip->i_di.di_goal_data = block;
+	ip->i_goal = block;
 
-	gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free);
-	rgd->rd_rg.rg_free--;
+	gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free >= *n);
+	rgd->rd_rg.rg_free -= *n;
 
 	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
-	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);
+	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
 
-	al->al_alloced++;
+	al->al_alloced += *n;
 
-	gfs2_statfs_change(sdp, 0, -1, 0);
-	gfs2_quota_change(ip, +1, ip->i_inode.i_uid, ip->i_inode.i_gid);
+	gfs2_statfs_change(sdp, 0, -*n, 0);
+	gfs2_quota_change(ip, *n, ip->i_inode.i_uid, ip->i_inode.i_gid);
 
 	spin_lock(&sdp->sd_rindex_spin);
-	rgd->rd_free_clone--;
-	spin_unlock(&sdp->sd_rindex_spin);
-
-	return block;
-}
-
-/**
- * gfs2_alloc_meta - Allocate a metadata block
- * @ip: the inode to allocate the metadata block for
- *
- * Returns: the allocated block
- */
-
-u64 gfs2_alloc_meta(struct gfs2_inode *ip)
-{
-	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
-	struct gfs2_alloc *al = ip->i_alloc;
-	struct gfs2_rgrpd *rgd = al->al_rgd;
-	u32 goal, blk;
-	u64 block;
-
-	if (rgrp_contains_block(rgd, ip->i_di.di_goal_meta))
-		goal = ip->i_di.di_goal_meta - rgd->rd_data0;
-	else
-		goal = rgd->rd_last_alloc_meta;
-
-	blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, GFS2_BLKST_USED);
-	BUG_ON(blk == BFITNOENT);
-	rgd->rd_last_alloc_meta = blk;
-
-	block = rgd->rd_data0 + blk;
-	ip->i_di.di_goal_meta = block;
-
-	gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free);
-	rgd->rd_rg.rg_free--;
-
-	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
-	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);
-
-	al->al_alloced++;
-
-	gfs2_statfs_change(sdp, 0, -1, 0);
-	gfs2_quota_change(ip, +1, ip->i_inode.i_uid, ip->i_inode.i_gid);
-	gfs2_trans_add_unrevoke(sdp, block);
-
-	spin_lock(&sdp->sd_rindex_spin);
-	rgd->rd_free_clone--;
+	rgd->rd_free_clone -= *n;
 	spin_unlock(&sdp->sd_rindex_spin);
 
 	return block;
@@ -1505,12 +1519,13 @@
 	struct gfs2_rgrpd *rgd = al->al_rgd;
 	u32 blk;
 	u64 block;
+	unsigned int n = 1;
 
-	blk = rgblk_search(rgd, rgd->rd_last_alloc_meta,
-			   GFS2_BLKST_FREE, GFS2_BLKST_DINODE);
+	blk = rgblk_search(rgd, rgd->rd_last_alloc,
+			   GFS2_BLKST_FREE, GFS2_BLKST_DINODE, &n);
 	BUG_ON(blk == BFITNOENT);
 
-	rgd->rd_last_alloc_meta = blk;
+	rgd->rd_last_alloc = blk;
 
 	block = rgd->rd_data0 + blk;
 
@@ -1519,12 +1534,12 @@
 	rgd->rd_rg.rg_dinodes++;
 	*generation = rgd->rd_rg.rg_igeneration++;
 	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
-	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);
+	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
 
 	al->al_alloced++;
 
 	gfs2_statfs_change(sdp, 0, -1, +1);
-	gfs2_trans_add_unrevoke(sdp, block);
+	gfs2_trans_add_unrevoke(sdp, block, 1);
 
 	spin_lock(&sdp->sd_rindex_spin);
 	rgd->rd_free_clone--;
@@ -1553,7 +1568,7 @@
 	rgd->rd_rg.rg_free += blen;
 
 	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
-	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);
+	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
 
 	gfs2_trans_add_rg(rgd);
 
@@ -1581,7 +1596,7 @@
 	rgd->rd_rg.rg_free += blen;
 
 	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
-	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);
+	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
 
 	gfs2_trans_add_rg(rgd);
 
@@ -1601,7 +1616,7 @@
 	if (!rgd)
 		return;
 	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
-	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);
+	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
 	gfs2_trans_add_rg(rgd);
 }
 
@@ -1621,7 +1636,7 @@
 	rgd->rd_rg.rg_free++;
 
 	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
-	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);
+	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
 
 	gfs2_statfs_change(sdp, 0, +1, -1);
 	gfs2_trans_add_rg(rgd);
@@ -1699,8 +1714,7 @@
  *
  */
 
-void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state,
-		      int flags)
+void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
 {
 	unsigned int x;
 
@@ -1708,7 +1722,7 @@
 				GFP_NOFS | __GFP_NOFAIL);
 	for (x = 0; x < rlist->rl_rgrps; x++)
 		gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
-				state, flags,
+				state, 0,
 				&rlist->rl_ghs[x]);
 }
 
diff --git a/fs/gfs2/rgrp.h b/fs/gfs2/rgrp.h
index 149bb16..3181c7e 100644
--- a/fs/gfs2/rgrp.h
+++ b/fs/gfs2/rgrp.h
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
- * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
+ * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
  *
  * This copyrighted material is made available to anyone wishing to use,
  * modify, copy, or redistribute it subject to the terms and conditions
@@ -46,8 +46,7 @@
 
 unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block);
 
-u64 gfs2_alloc_data(struct gfs2_inode *ip);
-u64 gfs2_alloc_meta(struct gfs2_inode *ip);
+u64 gfs2_alloc_block(struct gfs2_inode *ip, unsigned int *n);
 u64 gfs2_alloc_di(struct gfs2_inode *ip, u64 *generation);
 
 void gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen);
@@ -64,8 +63,7 @@
 
 void gfs2_rlist_add(struct gfs2_sbd *sdp, struct gfs2_rgrp_list *rlist,
 		    u64 block);
-void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state,
-		      int flags);
+void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state);
 void gfs2_rlist_free(struct gfs2_rgrp_list *rlist);
 u64 gfs2_ri_total(struct gfs2_sbd *sdp);
 
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index ef0562c..7aeacbc 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -210,7 +210,7 @@
 	struct page *page;
 	struct bio *bio;
 
-	page = alloc_page(GFP_KERNEL);
+	page = alloc_page(GFP_NOFS);
 	if (unlikely(!page))
 		return -ENOBUFS;
 
@@ -218,7 +218,7 @@
 	ClearPageDirty(page);
 	lock_page(page);
 
-	bio = bio_alloc(GFP_KERNEL, 1);
+	bio = bio_alloc(GFP_NOFS, 1);
 	if (unlikely(!bio)) {
 		__free_page(page);
 		return -ENOBUFS;
@@ -316,6 +316,7 @@
 		sdp->sd_heightsize[x] = space;
 	}
 	sdp->sd_max_height = x;
+	sdp->sd_heightsize[x] = ~0;
 	gfs2_assert(sdp, sdp->sd_max_height <= GFS2_MAX_META_HEIGHT);
 
 	sdp->sd_jheightsize[0] = sdp->sd_sb.sb_bsize -
@@ -334,6 +335,7 @@
 		sdp->sd_jheightsize[x] = space;
 	}
 	sdp->sd_max_jheight = x;
+	sdp->sd_jheightsize[x] = ~0;
 	gfs2_assert(sdp, sdp->sd_max_jheight <= GFS2_MAX_META_HEIGHT);
 
 	return 0;
diff --git a/fs/gfs2/super.h b/fs/gfs2/super.h
index 60a870e..44361ec 100644
--- a/fs/gfs2/super.h
+++ b/fs/gfs2/super.h
@@ -17,6 +17,7 @@
 int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb_host *sb, int silent);
 int gfs2_read_sb(struct gfs2_sbd *sdp, struct gfs2_glock *gl, int silent);
 int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector);
+void gfs2_lm_unmount(struct gfs2_sbd *sdp);
 
 static inline unsigned int gfs2_jindex_size(struct gfs2_sbd *sdp)
 {
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index eaa3b7b..9ab9fc8 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -20,7 +20,6 @@
 
 #include "gfs2.h"
 #include "incore.h"
-#include "lm.h"
 #include "sys.h"
 #include "super.h"
 #include "glock.h"
@@ -328,15 +327,9 @@
 }                                                                           \
 static struct counters_attr counters_attr_##name = __ATTR_RO(name)
 
-COUNTERS_ATTR(glock_count,      "%u\n");
-COUNTERS_ATTR(glock_held_count, "%u\n");
-COUNTERS_ATTR(inode_count,      "%u\n");
 COUNTERS_ATTR(reclaimed,        "%u\n");
 
 static struct attribute *counters_attrs[] = {
-	&counters_attr_glock_count.attr,
-	&counters_attr_glock_held_count.attr,
-	&counters_attr_inode_count.attr,
 	&counters_attr_reclaimed.attr,
 	NULL,
 };
diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
index 73e5d92..f677b8a 100644
--- a/fs/gfs2/trans.c
+++ b/fs/gfs2/trans.c
@@ -146,30 +146,25 @@
 	lops_add(sdp, &bd->bd_le);
 }
 
-void gfs2_trans_add_unrevoke(struct gfs2_sbd *sdp, u64 blkno)
+void gfs2_trans_add_unrevoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len)
 {
-	struct gfs2_bufdata *bd;
-	int found = 0;
+	struct gfs2_bufdata *bd, *tmp;
+	struct gfs2_trans *tr = current->journal_info;
+	unsigned int n = len;
 
 	gfs2_log_lock(sdp);
-
-	list_for_each_entry(bd, &sdp->sd_log_le_revoke, bd_le.le_list) {
-		if (bd->bd_blkno == blkno) {
+	list_for_each_entry_safe(bd, tmp, &sdp->sd_log_le_revoke, bd_le.le_list) {
+		if ((bd->bd_blkno >= blkno) && (bd->bd_blkno < (blkno + len))) {
 			list_del_init(&bd->bd_le.le_list);
 			gfs2_assert_withdraw(sdp, sdp->sd_log_num_revoke);
 			sdp->sd_log_num_revoke--;
-			found = 1;
-			break;
+			kmem_cache_free(gfs2_bufdata_cachep, bd);
+			tr->tr_num_revoke_rm++;
+			if (--n == 0)
+				break;
 		}
 	}
-
 	gfs2_log_unlock(sdp);
-
-	if (found) {
-		struct gfs2_trans *tr = current->journal_info;
-		kmem_cache_free(gfs2_bufdata_cachep, bd);
-		tr->tr_num_revoke_rm++;
-	}
 }
 
 void gfs2_trans_add_rg(struct gfs2_rgrpd *rgd)
diff --git a/fs/gfs2/trans.h b/fs/gfs2/trans.h
index e826f0d..edf9d4b 100644
--- a/fs/gfs2/trans.h
+++ b/fs/gfs2/trans.h
@@ -32,7 +32,7 @@
 
 void gfs2_trans_add_bh(struct gfs2_glock *gl, struct buffer_head *bh, int meta);
 void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
-void gfs2_trans_add_unrevoke(struct gfs2_sbd *sdp, u64 blkno);
+void gfs2_trans_add_unrevoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len);
 void gfs2_trans_add_rg(struct gfs2_rgrpd *rgd);
 
 #endif /* __TRANS_DOT_H__ */
diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
index 424a077..d31e355 100644
--- a/fs/gfs2/util.c
+++ b/fs/gfs2/util.c
@@ -19,12 +19,12 @@
 #include "gfs2.h"
 #include "incore.h"
 #include "glock.h"
-#include "lm.h"
 #include "util.h"
 
 struct kmem_cache *gfs2_glock_cachep __read_mostly;
 struct kmem_cache *gfs2_inode_cachep __read_mostly;
 struct kmem_cache *gfs2_bufdata_cachep __read_mostly;
+struct kmem_cache *gfs2_rgrpd_cachep __read_mostly;
 
 void gfs2_assert_i(struct gfs2_sbd *sdp)
 {
@@ -32,6 +32,28 @@
 	       sdp->sd_fsname);
 }
 
+int gfs2_lm_withdraw(struct gfs2_sbd *sdp, char *fmt, ...)
+{
+	va_list args;
+
+	if (test_and_set_bit(SDF_SHUTDOWN, &sdp->sd_flags))
+		return 0;
+
+	va_start(args, fmt);
+	vprintk(fmt, args);
+	va_end(args);
+
+	fs_err(sdp, "about to withdraw this file system\n");
+	BUG_ON(sdp->sd_args.ar_debug);
+
+	fs_err(sdp, "telling LM to withdraw\n");
+	gfs2_withdraw_lockproto(&sdp->sd_lockstruct);
+	fs_err(sdp, "withdrawn\n");
+	dump_stack();
+
+	return -1;
+}
+
 /**
  * gfs2_assert_withdraw_i - Cause the machine to withdraw if @assertion is false
  * Returns: -1 if this call withdrew the machine,
diff --git a/fs/gfs2/util.h b/fs/gfs2/util.h
index 28938a4..509c5d6 100644
--- a/fs/gfs2/util.h
+++ b/fs/gfs2/util.h
@@ -147,6 +147,7 @@
 extern struct kmem_cache *gfs2_glock_cachep;
 extern struct kmem_cache *gfs2_inode_cachep;
 extern struct kmem_cache *gfs2_bufdata_cachep;
+extern struct kmem_cache *gfs2_rgrpd_cachep;
 
 static inline unsigned int gfs2_tune_get_i(struct gfs2_tune *gt,
 					   unsigned int *p)
@@ -163,6 +164,7 @@
 
 void gfs2_icbit_munge(struct gfs2_sbd *sdp, unsigned char **bitmap,
 		      unsigned int bit, int new_value);
+int gfs2_lm_withdraw(struct gfs2_sbd *sdp, char *fmt, ...);
 
 #endif /* __UTIL_DOT_H__ */
 
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
index e198506..2bc7d8a 100644
--- a/fs/jfs/jfs_dmap.c
+++ b/fs/jfs/jfs_dmap.c
@@ -2172,7 +2172,7 @@
 	}
 
 	/* update the free count for this dmap */
-	dp->nfree = cpu_to_le32(le32_to_cpu(dp->nfree) - nblocks);
+	le32_add_cpu(&dp->nfree, -nblocks);
 
 	BMAP_LOCK(bmp);
 
@@ -2316,7 +2316,7 @@
 
 	/* update the free count for this dmap.
 	 */
-	dp->nfree = cpu_to_le32(le32_to_cpu(dp->nfree) + nblocks);
+	le32_add_cpu(&dp->nfree, nblocks);
 
 	BMAP_LOCK(bmp);
 
@@ -3226,7 +3226,7 @@
 	}
 
 	/* update the free count for this dmap */
-	dp->nfree = cpu_to_le32(le32_to_cpu(dp->nfree) - nblocks);
+	le32_add_cpu(&dp->nfree, -nblocks);
 
 	/* reconstruct summary tree */
 	dbInitDmapTree(dp);
@@ -3660,9 +3660,8 @@
 			goto initTree;
 		}
 	} else {
-		dp->nblocks =
-		    cpu_to_le32(le32_to_cpu(dp->nblocks) + nblocks);
-		dp->nfree = cpu_to_le32(le32_to_cpu(dp->nfree) + nblocks);
+		le32_add_cpu(&dp->nblocks, nblocks);
+		le32_add_cpu(&dp->nfree, nblocks);
 	}
 
 	/* word number containing start block number */
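
The jfs_dmap.c hunks above are a mechanical conversion: each open-coded cpu_to_le32(le32_to_cpu(x) ± n) read-modify-write becomes a single le32_add_cpu(&x, ±n) call (le16_add_cpu for the 16-bit fields further down). A userspace sketch of what the helper does, with hand-rolled byte-order conversions standing in for the kernel's:

#include <stdio.h>
#include <stdint.h>

/* Interpret the stored bytes as little-endian, as JFS keeps its
 * on-disk counters; works regardless of host byte order. */
static uint32_t le32_to_cpu(uint32_t x)
{
	const uint8_t *b = (const uint8_t *)&x;

	return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
	       (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

static uint32_t cpu_to_le32(uint32_t x)
{
	uint32_t out;
	uint8_t *b = (uint8_t *)&out;

	b[0] = x; b[1] = x >> 8; b[2] = x >> 16; b[3] = x >> 24;
	return out;
}

/* The helper the patch switches to: one call instead of the
 * cpu_to_le32(le32_to_cpu(*v) + d) round trip at every call site. */
static void le32_add_cpu(uint32_t *v, int32_t d)
{
	*v = cpu_to_le32(le32_to_cpu(*v) + (uint32_t)d);
}

int main(void)
{
	uint32_t nfree = cpu_to_le32(100);

	le32_add_cpu(&nfree, -3);		/* free count drops by 3 */
	printf("nfree = %u\n", le32_to_cpu(nfree));	/* prints 97 */
	return 0;
}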
diff --git a/fs/jfs/jfs_dmap.h b/fs/jfs/jfs_dmap.h
index 11e6d47..1a6eb41 100644
--- a/fs/jfs/jfs_dmap.h
+++ b/fs/jfs/jfs_dmap.h
@@ -61,7 +61,7 @@
  * determine the maximum free string for four (lower level) nodes
  * of the tree.
  */
-static __inline signed char TREEMAX(signed char *cp)
+static inline signed char TREEMAX(signed char *cp)
 {
 	signed char tmp1, tmp2;
 
diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
index 9bf29f7..734ec91 100644
--- a/fs/jfs/jfs_imap.c
+++ b/fs/jfs/jfs_imap.c
@@ -1019,8 +1019,7 @@
 		/* update the free inode counts at the iag, ag and
 		 * map level.
 		 */
-		iagp->nfreeinos =
-		    cpu_to_le32(le32_to_cpu(iagp->nfreeinos) + 1);
+		le32_add_cpu(&iagp->nfreeinos, 1);
 		imap->im_agctl[agno].numfree += 1;
 		atomic_inc(&imap->im_numfree);
 
@@ -1219,9 +1218,8 @@
 	/* update the number of free inodes and number of free extents
 	 * for the iag.
 	 */
-	iagp->nfreeinos = cpu_to_le32(le32_to_cpu(iagp->nfreeinos) -
-				      (INOSPEREXT - 1));
-	iagp->nfreeexts = cpu_to_le32(le32_to_cpu(iagp->nfreeexts) + 1);
+	le32_add_cpu(&iagp->nfreeinos, -(INOSPEREXT - 1));
+	le32_add_cpu(&iagp->nfreeexts, 1);
 
 	/* update the number of free inodes and backed inodes
 	 * at the ag and inode map level.
@@ -2124,7 +2122,7 @@
 	/* update the free inode count at the iag, ag, inode
 	 * map levels.
 	 */
-	iagp->nfreeinos = cpu_to_le32(le32_to_cpu(iagp->nfreeinos) - 1);
+	le32_add_cpu(&iagp->nfreeinos, -1);
 	imap->im_agctl[agno].numfree -= 1;
 	atomic_dec(&imap->im_numfree);
 
@@ -2378,9 +2376,8 @@
 	/* update the free inode and free extent counts for the
 	 * iag.
 	 */
-	iagp->nfreeinos = cpu_to_le32(le32_to_cpu(iagp->nfreeinos) +
-				      (INOSPEREXT - 1));
-	iagp->nfreeexts = cpu_to_le32(le32_to_cpu(iagp->nfreeexts) - 1);
+	le32_add_cpu(&iagp->nfreeinos, (INOSPEREXT - 1));
+	le32_add_cpu(&iagp->nfreeexts, -1);
 
 	/* update the free and backed inode counts for the ag.
 	 */
diff --git a/fs/jfs/jfs_xtree.c b/fs/jfs/jfs_xtree.c
index a000aaa..5a61ebf 100644
--- a/fs/jfs/jfs_xtree.c
+++ b/fs/jfs/jfs_xtree.c
@@ -905,8 +905,7 @@
 	XT_PUTENTRY(xad, xflag, xoff, xlen, xaddr);
 
 	/* advance next available entry index */
-	p->header.nextindex =
-	    cpu_to_le16(le16_to_cpu(p->header.nextindex) + 1);
+	le16_add_cpu(&p->header.nextindex, 1);
 
 	/* Don't log it if there are no links to the file */
 	if (!test_cflag(COMMIT_Nolink, ip)) {
@@ -997,8 +996,7 @@
 			    split->addr);
 
 		/* advance next available entry index */
-		sp->header.nextindex =
-		    cpu_to_le16(le16_to_cpu(sp->header.nextindex) + 1);
+		le16_add_cpu(&sp->header.nextindex, 1);
 
 		/* Don't log it if there are no links to the file */
 		if (!test_cflag(COMMIT_Nolink, ip)) {
@@ -1167,9 +1165,7 @@
 				    JFS_SBI(ip->i_sb)->nbperpage, rcbn);
 
 			/* advance next available entry index. */
-			sp->header.nextindex =
-			    cpu_to_le16(le16_to_cpu(sp->header.nextindex) +
-					1);
+			le16_add_cpu(&sp->header.nextindex, 1);
 
 			/* Don't log it if there are no links to the file */
 			if (!test_cflag(COMMIT_Nolink, ip)) {
@@ -1738,8 +1734,7 @@
 		XT_PUTENTRY(xad, XAD_NEW, xoff, len, xaddr);
 
 		/* advance next available entry index */
-		p->header.nextindex =
-		    cpu_to_le16(le16_to_cpu(p->header.nextindex) + 1);
+		le16_add_cpu(&p->header.nextindex, 1);
 	}
 
 	/* get back old entry */
@@ -1905,8 +1900,7 @@
 		XT_PUTENTRY(xad, XAD_NEW, xoff, xlen, xaddr);
 
 		/* advance next available entry index */
-		p->header.nextindex =
-		    cpu_to_le16(le16_to_cpu(p->header.nextindex) + 1);
+		le16_add_cpu(&p->header.nextindex, 1);
 	}
 
 	/* get back old XAD */
@@ -2567,8 +2561,7 @@
 	XT_PUTENTRY(xad, xflag, xoff, xlen, xaddr);
 
 	/* advance next available entry index */
-	p->header.nextindex =
-	    cpu_to_le16(le16_to_cpu(p->header.nextindex) + 1);
+	le16_add_cpu(&p->header.nextindex, 1);
 
 	xtlck->lwm.offset =
 	    (xtlck->lwm.offset) ? min(index,(int) xtlck->lwm.offset) : index;
@@ -2631,8 +2624,7 @@
 	 * delete the entry from the leaf page
 	 */
 	nextindex = le16_to_cpu(p->header.nextindex);
-	p->header.nextindex =
-	    cpu_to_le16(le16_to_cpu(p->header.nextindex) - 1);
+	le16_add_cpu(&p->header.nextindex, -1);
 
 	/*
 	 * if the leaf page becomes empty, free the page
@@ -2795,9 +2787,7 @@
 					(nextindex - index -
 					 1) << L2XTSLOTSIZE);
 
-			p->header.nextindex =
-			    cpu_to_le16(le16_to_cpu(p->header.nextindex) -
-					1);
+			le16_add_cpu(&p->header.nextindex, -1);
 			jfs_info("xtDeleteUp(entry): 0x%lx[%d]",
 				 (ulong) parent->bn, index);
 		}
diff --git a/fs/ocfs2/Makefile b/fs/ocfs2/Makefile
index 4d4ce48..f6956de 100644
--- a/fs/ocfs2/Makefile
+++ b/fs/ocfs2/Makefile
@@ -2,7 +2,12 @@
 
 EXTRA_CFLAGS += -DCATCH_BH_JBD_RACES
 
-obj-$(CONFIG_OCFS2_FS) += ocfs2.o
+obj-$(CONFIG_OCFS2_FS) += 	\
+	ocfs2.o			\
+	ocfs2_stackglue.o
+
+obj-$(CONFIG_OCFS2_FS_O2CB) += ocfs2_stack_o2cb.o
+obj-$(CONFIG_OCFS2_FS_USERSPACE_CLUSTER) += ocfs2_stack_user.o
 
 ocfs2-objs := \
 	alloc.o 		\
@@ -31,5 +36,10 @@
 	uptodate.o		\
 	ver.o
 
+ocfs2_stackglue-objs := stackglue.o
+ocfs2_stack_o2cb-objs := stack_o2cb.o
+ocfs2_stack_user-objs := stack_user.o
+
+# cluster/ is always needed when OCFS2_FS is enabled, for masklog support
 obj-$(CONFIG_OCFS2_FS) += cluster/
-obj-$(CONFIG_OCFS2_FS) += dlm/
+obj-$(CONFIG_OCFS2_FS_O2CB) += dlm/
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 447206e..41f84c9 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -1029,8 +1029,7 @@
 	BUG_ON(!next_free);
 
 	/* The tree code before us didn't allow enough room in the leaf. */
-	if (el->l_next_free_rec == el->l_count && !has_empty)
-		BUG();
+	BUG_ON(el->l_next_free_rec == el->l_count && !has_empty);
 
 	/*
 	 * The easiest way to approach this is to just remove the
@@ -1450,6 +1449,8 @@
  *   - When our insert into the right path leaf is at the leftmost edge
  *     and requires an update of the path immediately to it's left. This
  *     can occur at the end of some types of rotation and appending inserts.
+ *   - When we've adjusted the last extent record in the left path leaf and the
+ *     first extent record in the right path leaf during a cross extent block
+ *     merge.
  */
 static void ocfs2_complete_edge_insert(struct inode *inode, handle_t *handle,
 				       struct ocfs2_path *left_path,
@@ -2712,24 +2713,147 @@
 	}
 }
 
-/*
- * Remove split_rec clusters from the record at index and merge them
- * onto the beginning of the record at index + 1.
- */
-static int ocfs2_merge_rec_right(struct inode *inode, struct buffer_head *bh,
-				handle_t *handle,
-				struct ocfs2_extent_rec *split_rec,
-				struct ocfs2_extent_list *el, int index)
+static int ocfs2_get_right_path(struct inode *inode,
+				struct ocfs2_path *left_path,
+				struct ocfs2_path **ret_right_path)
 {
 	int ret;
+	u32 right_cpos;
+	struct ocfs2_path *right_path = NULL;
+	struct ocfs2_extent_list *left_el;
+
+	*ret_right_path = NULL;
+
+	/* This function shouldn't be called for non-trees. */
+	BUG_ON(left_path->p_tree_depth == 0);
+
+	left_el = path_leaf_el(left_path);
+	BUG_ON(left_el->l_next_free_rec != left_el->l_count);
+
+	ret = ocfs2_find_cpos_for_right_leaf(inode->i_sb, left_path,
+					     &right_cpos);
+	if (ret) {
+		mlog_errno(ret);
+		goto out;
+	}
+
+	/* This function shouldn't be called for the rightmost leaf. */
+	BUG_ON(right_cpos == 0);
+
+	right_path = ocfs2_new_path(path_root_bh(left_path),
+				    path_root_el(left_path));
+	if (!right_path) {
+		ret = -ENOMEM;
+		mlog_errno(ret);
+		goto out;
+	}
+
+	ret = ocfs2_find_path(inode, right_path, right_cpos);
+	if (ret) {
+		mlog_errno(ret);
+		goto out;
+	}
+
+	*ret_right_path = right_path;
+out:
+	if (ret)
+		ocfs2_free_path(right_path);
+	return ret;
+}
+
+/*
+ * Remove split_rec clusters from the record at index and merge them
+ * onto the beginning of the record "next" to it.
+ * For index < l_count - 1, "next" means the extent rec at index + 1.
+ * For index == l_count - 1, "next" means the first extent rec of the
+ * next extent block.
+ */
+static int ocfs2_merge_rec_right(struct inode *inode,
+				 struct ocfs2_path *left_path,
+				 handle_t *handle,
+				 struct ocfs2_extent_rec *split_rec,
+				 int index)
+{
+	int ret, next_free, i;
 	unsigned int split_clusters = le16_to_cpu(split_rec->e_leaf_clusters);
 	struct ocfs2_extent_rec *left_rec;
 	struct ocfs2_extent_rec *right_rec;
+	struct ocfs2_extent_list *right_el;
+	struct ocfs2_path *right_path = NULL;
+	int subtree_index = 0;
+	struct ocfs2_extent_list *el = path_leaf_el(left_path);
+	struct buffer_head *bh = path_leaf_bh(left_path);
+	struct buffer_head *root_bh = NULL;
 
 	BUG_ON(index >= le16_to_cpu(el->l_next_free_rec));
-
 	left_rec = &el->l_recs[index];
-	right_rec = &el->l_recs[index + 1];
+
+	if (index == le16_to_cpu(el->l_next_free_rec) - 1 &&
+	    le16_to_cpu(el->l_next_free_rec) == le16_to_cpu(el->l_count)) {
+		/* We are dealing with a cross extent block merge. */
+		ret = ocfs2_get_right_path(inode, left_path, &right_path);
+		if (ret) {
+			mlog_errno(ret);
+			goto out;
+		}
+
+		right_el = path_leaf_el(right_path);
+		next_free = le16_to_cpu(right_el->l_next_free_rec);
+		BUG_ON(next_free <= 0);
+		right_rec = &right_el->l_recs[0];
+		if (ocfs2_is_empty_extent(right_rec)) {
+			BUG_ON(next_free <= 1);
+			right_rec = &right_el->l_recs[1];
+		}
+
+		BUG_ON(le32_to_cpu(left_rec->e_cpos) +
+		       le16_to_cpu(left_rec->e_leaf_clusters) !=
+		       le32_to_cpu(right_rec->e_cpos));
+
+		subtree_index = ocfs2_find_subtree_root(inode,
+							left_path, right_path);
+
+		ret = ocfs2_extend_rotate_transaction(handle, subtree_index,
+						      handle->h_buffer_credits,
+						      right_path);
+		if (ret) {
+			mlog_errno(ret);
+			goto out;
+		}
+
+		root_bh = left_path->p_node[subtree_index].bh;
+		BUG_ON(root_bh != right_path->p_node[subtree_index].bh);
+
+		ret = ocfs2_journal_access(handle, inode, root_bh,
+					   OCFS2_JOURNAL_ACCESS_WRITE);
+		if (ret) {
+			mlog_errno(ret);
+			goto out;
+		}
+
+		for (i = subtree_index + 1;
+		     i < path_num_items(right_path); i++) {
+			ret = ocfs2_journal_access(handle, inode,
+						   right_path->p_node[i].bh,
+						   OCFS2_JOURNAL_ACCESS_WRITE);
+			if (ret) {
+				mlog_errno(ret);
+				goto out;
+			}
+
+			ret = ocfs2_journal_access(handle, inode,
+						   left_path->p_node[i].bh,
+						   OCFS2_JOURNAL_ACCESS_WRITE);
+			if (ret) {
+				mlog_errno(ret);
+				goto out;
+			}
+		}
+
+	} else {
+		BUG_ON(index == le16_to_cpu(el->l_next_free_rec) - 1);
+		right_rec = &el->l_recs[index + 1];
+	}
 
 	ret = ocfs2_journal_access(handle, inode, bh,
 				   OCFS2_JOURNAL_ACCESS_WRITE);
@@ -2751,30 +2875,156 @@
 	if (ret)
 		mlog_errno(ret);
 
+	if (right_path) {
+		ret = ocfs2_journal_dirty(handle, path_leaf_bh(right_path));
+		if (ret)
+			mlog_errno(ret);
+
+		ocfs2_complete_edge_insert(inode, handle, left_path,
+					   right_path, subtree_index);
+	}
 out:
+	if (right_path)
+		ocfs2_free_path(right_path);
+	return ret;
+}
+
+static int ocfs2_get_left_path(struct inode *inode,
+			       struct ocfs2_path *right_path,
+			       struct ocfs2_path **ret_left_path)
+{
+	int ret;
+	u32 left_cpos;
+	struct ocfs2_path *left_path = NULL;
+
+	*ret_left_path = NULL;
+
+	/* This function shouldn't be called for non-trees. */
+	BUG_ON(right_path->p_tree_depth == 0);
+
+	ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb,
+					    right_path, &left_cpos);
+	if (ret) {
+		mlog_errno(ret);
+		goto out;
+	}
+
+	/* This function shouldn't be called for the leftmost leaf. */
+	BUG_ON(left_cpos == 0);
+
+	left_path = ocfs2_new_path(path_root_bh(right_path),
+				   path_root_el(right_path));
+	if (!left_path) {
+		ret = -ENOMEM;
+		mlog_errno(ret);
+		goto out;
+	}
+
+	ret = ocfs2_find_path(inode, left_path, left_cpos);
+	if (ret) {
+		mlog_errno(ret);
+		goto out;
+	}
+
+	*ret_left_path = left_path;
+out:
+	if (ret)
+		ocfs2_free_path(left_path);
 	return ret;
 }
 
 /*
  * Remove split_rec clusters from the record at index and merge them
- * onto the tail of the record at index - 1.
+ * onto the tail of the record "before" it.
+ * For index > 0, "before" means the extent rec at index - 1.
+ *
+ * For index == 0, "before" means the last record of the previous
+ * extent block. There is also a situation where we may need to
+ * remove the rightmost leaf extent block in the right_path and
+ * change the right path to point at the new rightmost path.
  */
-static int ocfs2_merge_rec_left(struct inode *inode, struct buffer_head *bh,
+static int ocfs2_merge_rec_left(struct inode *inode,
+				struct ocfs2_path *right_path,
 				handle_t *handle,
 				struct ocfs2_extent_rec *split_rec,
-				struct ocfs2_extent_list *el, int index)
+				struct ocfs2_cached_dealloc_ctxt *dealloc,
+				int index)
 {
-	int ret, has_empty_extent = 0;
+	int ret, i, subtree_index = 0, has_empty_extent = 0;
 	unsigned int split_clusters = le16_to_cpu(split_rec->e_leaf_clusters);
 	struct ocfs2_extent_rec *left_rec;
 	struct ocfs2_extent_rec *right_rec;
+	struct ocfs2_extent_list *el = path_leaf_el(right_path);
+	struct buffer_head *bh = path_leaf_bh(right_path);
+	struct buffer_head *root_bh = NULL;
+	struct ocfs2_path *left_path = NULL;
+	struct ocfs2_extent_list *left_el;
 
-	BUG_ON(index <= 0);
+	BUG_ON(index < 0);
 
-	left_rec = &el->l_recs[index - 1];
 	right_rec = &el->l_recs[index];
-	if (ocfs2_is_empty_extent(&el->l_recs[0]))
-		has_empty_extent = 1;
+	if (index == 0) {
+		/* We are dealing with a cross extent block merge. */
+		ret = ocfs2_get_left_path(inode, right_path, &left_path);
+		if (ret) {
+			mlog_errno(ret);
+			goto out;
+		}
+
+		left_el = path_leaf_el(left_path);
+		BUG_ON(le16_to_cpu(left_el->l_next_free_rec) !=
+		       le16_to_cpu(left_el->l_count));
+
+		left_rec = &left_el->l_recs[
+				le16_to_cpu(left_el->l_next_free_rec) - 1];
+		BUG_ON(le32_to_cpu(left_rec->e_cpos) +
+		       le16_to_cpu(left_rec->e_leaf_clusters) !=
+		       le32_to_cpu(split_rec->e_cpos));
+
+		subtree_index = ocfs2_find_subtree_root(inode,
+							left_path, right_path);
+
+		ret = ocfs2_extend_rotate_transaction(handle, subtree_index,
+						      handle->h_buffer_credits,
+						      left_path);
+		if (ret) {
+			mlog_errno(ret);
+			goto out;
+		}
+
+		root_bh = left_path->p_node[subtree_index].bh;
+		BUG_ON(root_bh != right_path->p_node[subtree_index].bh);
+
+		ret = ocfs2_journal_access(handle, inode, root_bh,
+					   OCFS2_JOURNAL_ACCESS_WRITE);
+		if (ret) {
+			mlog_errno(ret);
+			goto out;
+		}
+
+		for (i = subtree_index + 1;
+		     i < path_num_items(right_path); i++) {
+			ret = ocfs2_journal_access(handle, inode,
+						   right_path->p_node[i].bh,
+						   OCFS2_JOURNAL_ACCESS_WRITE);
+			if (ret) {
+				mlog_errno(ret);
+				goto out;
+			}
+
+			ret = ocfs2_journal_access(handle, inode,
+						   left_path->p_node[i].bh,
+						   OCFS2_JOURNAL_ACCESS_WRITE);
+			if (ret) {
+				mlog_errno(ret);
+				goto out;
+			}
+		}
+	} else {
+		left_rec = &el->l_recs[index - 1];
+		if (ocfs2_is_empty_extent(&el->l_recs[0]))
+			has_empty_extent = 1;
+	}
 
 	ret = ocfs2_journal_access(handle, inode, bh,
 				   OCFS2_JOURNAL_ACCESS_WRITE);
@@ -2790,9 +3040,8 @@
 		*left_rec = *split_rec;
 
 		has_empty_extent = 0;
-	} else {
+	} else
 		le16_add_cpu(&left_rec->e_leaf_clusters, split_clusters);
-	}
 
 	le32_add_cpu(&right_rec->e_cpos, split_clusters);
 	le64_add_cpu(&right_rec->e_blkno,
@@ -2805,13 +3054,44 @@
 	if (ret)
 		mlog_errno(ret);
 
+	if (left_path) {
+		ret = ocfs2_journal_dirty(handle, path_leaf_bh(left_path));
+		if (ret)
+			mlog_errno(ret);
+
+		/*
+		 * If right_rec is empty and the extent block is also
+		 * empty, ocfs2_complete_edge_insert() can't handle it
+		 * and we need to delete the right extent block.
+		 */
+		if (le16_to_cpu(right_rec->e_leaf_clusters) == 0 &&
+		    le16_to_cpu(el->l_next_free_rec) == 1) {
+
+			ret = ocfs2_remove_rightmost_path(inode, handle,
+							  right_path, dealloc);
+			if (ret) {
+				mlog_errno(ret);
+				goto out;
+			}
+
+			/* Now the rightmost extent block has been deleted.
+			 * So we use the new rightmost path.
+			 */
+			ocfs2_mv_path(right_path, left_path);
+			left_path = NULL;
+		} else
+			ocfs2_complete_edge_insert(inode, handle, left_path,
+						   right_path, subtree_index);
+	}
 out:
+	if (left_path)
+		ocfs2_free_path(left_path);
 	return ret;
 }
 
 static int ocfs2_try_to_merge_extent(struct inode *inode,
 				     handle_t *handle,
-				     struct ocfs2_path *left_path,
+				     struct ocfs2_path *path,
 				     int split_index,
 				     struct ocfs2_extent_rec *split_rec,
 				     struct ocfs2_cached_dealloc_ctxt *dealloc,
@@ -2819,7 +3099,7 @@
 
 {
 	int ret = 0;
-	struct ocfs2_extent_list *el = path_leaf_el(left_path);
+	struct ocfs2_extent_list *el = path_leaf_el(path);
 	struct ocfs2_extent_rec *rec = &el->l_recs[split_index];
 
 	BUG_ON(ctxt->c_contig_type == CONTIG_NONE);
@@ -2832,7 +3112,7 @@
 		 * extents - having more than one in a leaf is
 		 * illegal.
 		 */
-		ret = ocfs2_rotate_tree_left(inode, handle, left_path,
+		ret = ocfs2_rotate_tree_left(inode, handle, path,
 					     dealloc);
 		if (ret) {
 			mlog_errno(ret);
@@ -2847,7 +3127,6 @@
 		 * Left-right contig implies this.
 		 */
 		BUG_ON(!ctxt->c_split_covers_rec);
-		BUG_ON(split_index == 0);
 
 		/*
 		 * Since the leftright insert always covers the entire
@@ -2858,9 +3137,14 @@
 		 * Since the adding of an empty extent shifts
 		 * everything back to the right, there's no need to
 		 * update split_index here.
+		 *
+		 * When split_index is zero, we need to merge the record
+		 * into the previous extent block. It is more efficient
+		 * and easier if we do merge_right first and merge_left
+		 * later.
 		 */
-		ret = ocfs2_merge_rec_left(inode, path_leaf_bh(left_path),
-					   handle, split_rec, el, split_index);
+		ret = ocfs2_merge_rec_right(inode, path,
+					    handle, split_rec,
+					    split_index);
 		if (ret) {
 			mlog_errno(ret);
 			goto out;
@@ -2871,32 +3155,30 @@
 		 */
 		BUG_ON(!ocfs2_is_empty_extent(&el->l_recs[0]));
 
-		/*
-		 * The left merge left us with an empty extent, remove
-		 * it.
-		 */
-		ret = ocfs2_rotate_tree_left(inode, handle, left_path, dealloc);
+		/* The merge left us with an empty extent, remove it. */
+		ret = ocfs2_rotate_tree_left(inode, handle, path, dealloc);
 		if (ret) {
 			mlog_errno(ret);
 			goto out;
 		}
-		split_index--;
+
 		rec = &el->l_recs[split_index];
 
 		/*
 		 * Note that we don't pass split_rec here on purpose -
-		 * we've merged it into the left side.
+		 * we've merged it into the rec already.
 		 */
-		ret = ocfs2_merge_rec_right(inode, path_leaf_bh(left_path),
-					    handle, rec, el, split_index);
+		ret = ocfs2_merge_rec_left(inode, path,
+					   handle, rec,
+					   dealloc,
+					   split_index);
+
 		if (ret) {
 			mlog_errno(ret);
 			goto out;
 		}
 
-		BUG_ON(!ocfs2_is_empty_extent(&el->l_recs[0]));
-
-		ret = ocfs2_rotate_tree_left(inode, handle, left_path,
+		ret = ocfs2_rotate_tree_left(inode, handle, path,
 					     dealloc);
 		/*
 		 * Error from this last rotate is not critical, so
@@ -2915,8 +3197,9 @@
 		 */
 		if (ctxt->c_contig_type == CONTIG_RIGHT) {
 			ret = ocfs2_merge_rec_left(inode,
-						   path_leaf_bh(left_path),
-						   handle, split_rec, el,
+						   path,
+						   handle, split_rec,
+						   dealloc,
 						   split_index);
 			if (ret) {
 				mlog_errno(ret);
@@ -2924,8 +3207,8 @@
 			}
 		} else {
 			ret = ocfs2_merge_rec_right(inode,
-						    path_leaf_bh(left_path),
-						    handle, split_rec, el,
+						    path,
+						    handle, split_rec,
 						    split_index);
 			if (ret) {
 				mlog_errno(ret);
@@ -2938,7 +3221,7 @@
 			 * The merge may have left an empty extent in
 			 * our leaf. Try to rotate it away.
 			 */
-			ret = ocfs2_rotate_tree_left(inode, handle, left_path,
+			ret = ocfs2_rotate_tree_left(inode, handle, path,
 						     dealloc);
 			if (ret)
 				mlog_errno(ret);
@@ -3498,20 +3781,57 @@
 }
 
 static enum ocfs2_contig_type
-ocfs2_figure_merge_contig_type(struct inode *inode,
+ocfs2_figure_merge_contig_type(struct inode *inode, struct ocfs2_path *path,
 			       struct ocfs2_extent_list *el, int index,
 			       struct ocfs2_extent_rec *split_rec)
 {
-	struct ocfs2_extent_rec *rec;
+	int status;
 	enum ocfs2_contig_type ret = CONTIG_NONE;
+	u32 left_cpos, right_cpos;
+	struct ocfs2_extent_rec *rec = NULL;
+	struct ocfs2_extent_list *new_el;
+	struct ocfs2_path *left_path = NULL, *right_path = NULL;
+	struct buffer_head *bh;
+	struct ocfs2_extent_block *eb;
+
+	if (index > 0) {
+		rec = &el->l_recs[index - 1];
+	} else if (path->p_tree_depth > 0) {
+		status = ocfs2_find_cpos_for_left_leaf(inode->i_sb,
+						       path, &left_cpos);
+		if (status)
+			goto out;
+
+		if (left_cpos != 0) {
+			left_path = ocfs2_new_path(path_root_bh(path),
+						   path_root_el(path));
+			if (!left_path)
+				goto out;
+
+			status = ocfs2_find_path(inode, left_path, left_cpos);
+			if (status)
+				goto out;
+
+			new_el = path_leaf_el(left_path);
+
+			if (le16_to_cpu(new_el->l_next_free_rec) !=
+			    le16_to_cpu(new_el->l_count)) {
+				bh = path_leaf_bh(left_path);
+				eb = (struct ocfs2_extent_block *)bh->b_data;
+				OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb,
+								 eb);
+				goto out;
+			}
+			rec = &new_el->l_recs[
+				le16_to_cpu(new_el->l_next_free_rec) - 1];
+		}
+	}
 
 	/*
 	 * We're careful to check for an empty extent record here -
 	 * the merge code will know what to do if it sees one.
 	 */
-
-	if (index > 0) {
-		rec = &el->l_recs[index - 1];
+	if (rec) {
 		if (index == 1 && ocfs2_is_empty_extent(rec)) {
 			if (split_rec->e_cpos == el->l_recs[index].e_cpos)
 				ret = CONTIG_RIGHT;
@@ -3520,10 +3840,45 @@
 		}
 	}
 
-	if (index < (le16_to_cpu(el->l_next_free_rec) - 1)) {
+	rec = NULL;
+	if (index < (le16_to_cpu(el->l_next_free_rec) - 1))
+		rec = &el->l_recs[index + 1];
+	else if (le16_to_cpu(el->l_next_free_rec) == le16_to_cpu(el->l_count) &&
+		 path->p_tree_depth > 0) {
+		status = ocfs2_find_cpos_for_right_leaf(inode->i_sb,
+							path, &right_cpos);
+		if (status)
+			goto out;
+
+		if (right_cpos == 0)
+			goto out;
+
+		right_path = ocfs2_new_path(path_root_bh(path),
+					    path_root_el(path));
+		if (!right_path)
+			goto out;
+
+		status = ocfs2_find_path(inode, right_path, right_cpos);
+		if (status)
+			goto out;
+
+		new_el = path_leaf_el(right_path);
+		rec = &new_el->l_recs[0];
+		if (ocfs2_is_empty_extent(rec)) {
+			if (le16_to_cpu(new_el->l_next_free_rec) <= 1) {
+				bh = path_leaf_bh(right_path);
+				eb = (struct ocfs2_extent_block *)bh->b_data;
+				OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb,
+								 eb);
+				goto out;
+			}
+			rec = &new_el->l_recs[1];
+		}
+	}
+
+	if (rec) {
 		enum ocfs2_contig_type contig_type;
 
-		rec = &el->l_recs[index + 1];
 		contig_type = ocfs2_extent_contig(inode, rec, split_rec);
 
 		if (contig_type == CONTIG_LEFT && ret == CONTIG_RIGHT)
@@ -3532,6 +3887,12 @@
 			ret = contig_type;
 	}
 
+out:
+	if (left_path)
+		ocfs2_free_path(left_path);
+	if (right_path)
+		ocfs2_free_path(right_path);
+
 	return ret;
 }
 
@@ -3994,7 +4355,7 @@
 		goto out;
 	}
 
-	ctxt.c_contig_type = ocfs2_figure_merge_contig_type(inode, el,
+	ctxt.c_contig_type = ocfs2_figure_merge_contig_type(inode, path, el,
 							    split_index,
 							    split_rec);
 
@@ -4788,6 +5149,8 @@
 	status = ocfs2_flush_truncate_log(osb);
 	if (status < 0)
 		mlog_errno(status);
+	else
+		ocfs2_init_inode_steal_slot(osb);
 
 	mlog_exit(status);
 }
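The cross extent block merge code above keys off whether the record
being split lines up exactly with its neighbors. A toy userspace
restatement of that adjacency test; field names mirror
ocfs2_extent_rec, but the direction naming is simplified and everything
else here is hypothetical:

	#include <stdio.h>
	#include <stdint.h>

	struct rec { uint32_t cpos; uint16_t clusters; };

	enum contig { CONTIG_NONE, CONTIG_LEFT, CONTIG_RIGHT, CONTIG_LEFTRIGHT };

	/* A record can merge with a neighbor only when the cluster
	 * ranges abut: end of one == start (cpos) of the next. */
	static enum contig contig_type(const struct rec *left,
				       const struct rec *split,
				       const struct rec *right)
	{
		int l = left && left->cpos + left->clusters == split->cpos;
		int r = right && split->cpos + split->clusters == right->cpos;

		if (l && r)
			return CONTIG_LEFTRIGHT;
		if (l)
			return CONTIG_LEFT;   /* merge onto the record before */
		if (r)
			return CONTIG_RIGHT;  /* merge onto the record after */
		return CONTIG_NONE;
	}

	int main(void)
	{
		struct rec left = { 0, 8 }, split = { 8, 4 }, right = { 12, 16 };

		/* 8 clusters at 0, then 4 at 8, then 16 at 12: contiguous
		 * on both sides, so this prints 3 (CONTIG_LEFTRIGHT). */
		printf("%d\n", contig_type(&left, &split, &right));
		return 0;
	}

The new wrinkle in the hunks is that when the record sits at either end
of a full leaf, the "neighbor" may live in an adjacent extent block, so
the code walks a second path to fetch it before making this decision.
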
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 90383ed..17964c0 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -467,11 +467,11 @@
 							 unsigned to)
 {
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
-	handle_t *handle = NULL;
+	handle_t *handle;
 	int ret = 0;
 
 	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
-	if (!handle) {
+	if (IS_ERR(handle)) {
+		ret = PTR_ERR(handle);
 		mlog_errno(ret);
 		goto out;
@@ -487,7 +487,7 @@
 	}
 out:
 	if (ret) {
-		if (handle)
+		if (!IS_ERR(handle))
 			ocfs2_commit_trans(osb, handle);
 		handle = ERR_PTR(ret);
 	}
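The aops.c fix matters because ocfs2_start_trans() reports failure
through the returned pointer itself, so a NULL test can never see it. A
generic sketch of the idiom; start_trans() here is a hypothetical
stand-in, not an ocfs2 function:

	#include <linux/err.h>

	struct handle;				/* opaque; hypothetical */
	struct handle *start_trans(void);	/* returns ERR_PTR() on failure */

	static int do_trans(void)
	{
		struct handle *h = start_trans();

		if (IS_ERR(h))			/* a NULL check would miss this */
			return PTR_ERR(h);	/* recover the negative errno */
		/* ... use h, then commit it ... */
		return 0;
	}
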
diff --git a/fs/ocfs2/cluster/Makefile b/fs/ocfs2/cluster/Makefile
index cdd162f..bc8c5e7 100644
--- a/fs/ocfs2/cluster/Makefile
+++ b/fs/ocfs2/cluster/Makefile
@@ -1,4 +1,4 @@
 obj-$(CONFIG_OCFS2_FS) += ocfs2_nodemanager.o
 
 ocfs2_nodemanager-objs := heartbeat.o masklog.o sys.o nodemanager.o \
-	quorum.o tcp.o ver.o
+	quorum.o tcp.o netdebug.o ver.o
diff --git a/fs/ocfs2/cluster/netdebug.c b/fs/ocfs2/cluster/netdebug.c
new file mode 100644
index 0000000..7bf3c0e
--- /dev/null
+++ b/fs/ocfs2/cluster/netdebug.c
@@ -0,0 +1,441 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * netdebug.c
+ *
+ * debug functionality for o2net
+ *
+ * Copyright (C) 2005, 2008 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ *
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/idr.h>
+#include <linux/kref.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+
+#include <linux/uaccess.h>
+
+#include "tcp.h"
+#include "nodemanager.h"
+#define MLOG_MASK_PREFIX ML_TCP
+#include "masklog.h"
+
+#include "tcp_internal.h"
+
+#define O2NET_DEBUG_DIR		"o2net"
+#define SC_DEBUG_NAME		"sock_containers"
+#define NST_DEBUG_NAME		"send_tracking"
+
+static struct dentry *o2net_dentry;
+static struct dentry *sc_dentry;
+static struct dentry *nst_dentry;
+
+static DEFINE_SPINLOCK(o2net_debug_lock);
+
+static LIST_HEAD(sock_containers);
+static LIST_HEAD(send_tracking);
+
+void o2net_debug_add_nst(struct o2net_send_tracking *nst)
+{
+	spin_lock(&o2net_debug_lock);
+	list_add(&nst->st_net_debug_item, &send_tracking);
+	spin_unlock(&o2net_debug_lock);
+}
+
+void o2net_debug_del_nst(struct o2net_send_tracking *nst)
+{
+	spin_lock(&o2net_debug_lock);
+	if (!list_empty(&nst->st_net_debug_item))
+		list_del_init(&nst->st_net_debug_item);
+	spin_unlock(&o2net_debug_lock);
+}
+
+static struct o2net_send_tracking
+			*next_nst(struct o2net_send_tracking *nst_start)
+{
+	struct o2net_send_tracking *nst, *ret = NULL;
+
+	assert_spin_locked(&o2net_debug_lock);
+
+	list_for_each_entry(nst, &nst_start->st_net_debug_item,
+			    st_net_debug_item) {
+		/* discover the head of the list */
+		if (&nst->st_net_debug_item == &send_tracking)
+			break;
+
+		/* use st_task to detect real nsts in the list */
+		if (nst->st_task != NULL) {
+			ret = nst;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static void *nst_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	struct o2net_send_tracking *nst, *dummy_nst = seq->private;
+
+	spin_lock(&o2net_debug_lock);
+	nst = next_nst(dummy_nst);
+	spin_unlock(&o2net_debug_lock);
+
+	return nst;
+}
+
+static void *nst_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	struct o2net_send_tracking *nst, *dummy_nst = seq->private;
+
+	spin_lock(&o2net_debug_lock);
+	nst = next_nst(dummy_nst);
+	list_del_init(&dummy_nst->st_net_debug_item);
+	if (nst)
+		list_add(&dummy_nst->st_net_debug_item,
+			 &nst->st_net_debug_item);
+	spin_unlock(&o2net_debug_lock);
+
+	return nst; /* unused, just needs to be null when done */
+}
+
+static int nst_seq_show(struct seq_file *seq, void *v)
+{
+	struct o2net_send_tracking *nst, *dummy_nst = seq->private;
+
+	spin_lock(&o2net_debug_lock);
+	nst = next_nst(dummy_nst);
+
+	if (nst != NULL) {
+		/* get_task_comm isn't exported.  oh well. */
+		seq_printf(seq, "%p:\n"
+			   "  pid:          %lu\n"
+			   "  tgid:         %lu\n"
+			   "  process name: %s\n"
+			   "  node:         %u\n"
+			   "  sc:           %p\n"
+			   "  message id:   %d\n"
+			   "  message type: %u\n"
+			   "  message key:  0x%08x\n"
+			   "  sock acquiry: %lu.%lu\n"
+			   "  send start:   %lu.%lu\n"
+			   "  wait start:   %lu.%lu\n",
+			   nst, (unsigned long)nst->st_task->pid,
+			   (unsigned long)nst->st_task->tgid,
+			   nst->st_task->comm, nst->st_node,
+			   nst->st_sc, nst->st_id, nst->st_msg_type,
+			   nst->st_msg_key,
+			   nst->st_sock_time.tv_sec, nst->st_sock_time.tv_usec,
+			   nst->st_send_time.tv_sec, nst->st_send_time.tv_usec,
+			   nst->st_status_time.tv_sec,
+			   nst->st_status_time.tv_usec);
+	}
+
+	spin_unlock(&o2net_debug_lock);
+
+	return 0;
+}
+
+static void nst_seq_stop(struct seq_file *seq, void *v)
+{
+}
+
+static struct seq_operations nst_seq_ops = {
+	.start = nst_seq_start,
+	.next = nst_seq_next,
+	.stop = nst_seq_stop,
+	.show = nst_seq_show,
+};
+
+static int nst_fop_open(struct inode *inode, struct file *file)
+{
+	struct o2net_send_tracking *dummy_nst;
+	struct seq_file *seq;
+	int ret;
+
+	dummy_nst = kmalloc(sizeof(struct o2net_send_tracking), GFP_KERNEL);
+	if (dummy_nst == NULL) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	dummy_nst->st_task = NULL;
+
+	ret = seq_open(file, &nst_seq_ops);
+	if (ret)
+		goto out;
+
+	seq = file->private_data;
+	seq->private = dummy_nst;
+	o2net_debug_add_nst(dummy_nst);
+
+	dummy_nst = NULL;
+
+out:
+	kfree(dummy_nst);
+	return ret;
+}
+
+static int nst_fop_release(struct inode *inode, struct file *file)
+{
+	struct seq_file *seq = file->private_data;
+	struct o2net_send_tracking *dummy_nst = seq->private;
+
+	o2net_debug_del_nst(dummy_nst);
+	return seq_release_private(inode, file);
+}
+
+static struct file_operations nst_seq_fops = {
+	.open = nst_fop_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = nst_fop_release,
+};
+
+void o2net_debug_add_sc(struct o2net_sock_container *sc)
+{
+	spin_lock(&o2net_debug_lock);
+	list_add(&sc->sc_net_debug_item, &sock_containers);
+	spin_unlock(&o2net_debug_lock);
+}
+
+void o2net_debug_del_sc(struct o2net_sock_container *sc)
+{
+	spin_lock(&o2net_debug_lock);
+	list_del_init(&sc->sc_net_debug_item);
+	spin_unlock(&o2net_debug_lock);
+}
+
+static struct o2net_sock_container
+			*next_sc(struct o2net_sock_container *sc_start)
+{
+	struct o2net_sock_container *sc, *ret = NULL;
+
+	assert_spin_locked(&o2net_debug_lock);
+
+	list_for_each_entry(sc, &sc_start->sc_net_debug_item,
+			    sc_net_debug_item) {
+		/* discover the head of the list miscast as a sc */
+		if (&sc->sc_net_debug_item == &sock_containers)
+			break;
+
+		/* use sc_page to detect real scs in the list */
+		if (sc->sc_page != NULL) {
+			ret = sc;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static void *sc_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	struct o2net_sock_container *sc, *dummy_sc = seq->private;
+
+	spin_lock(&o2net_debug_lock);
+	sc = next_sc(dummy_sc);
+	spin_unlock(&o2net_debug_lock);
+
+	return sc;
+}
+
+static void *sc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	struct o2net_sock_container *sc, *dummy_sc = seq->private;
+
+	spin_lock(&o2net_debug_lock);
+	sc = next_sc(dummy_sc);
+	list_del_init(&dummy_sc->sc_net_debug_item);
+	if (sc)
+		list_add(&dummy_sc->sc_net_debug_item, &sc->sc_net_debug_item);
+	spin_unlock(&o2net_debug_lock);
+
+	return sc; /* unused, just needs to be null when done */
+}
+
+#define TV_SEC_USEC(TV) TV.tv_sec, TV.tv_usec
+
+static int sc_seq_show(struct seq_file *seq, void *v)
+{
+	struct o2net_sock_container *sc, *dummy_sc = seq->private;
+
+	spin_lock(&o2net_debug_lock);
+	sc = next_sc(dummy_sc);
+
+	if (sc != NULL) {
+		struct inet_sock *inet = NULL;
+
+		__be32 saddr = 0, daddr = 0;
+		__be16 sport = 0, dport = 0;
+
+		if (sc->sc_sock) {
+			inet = inet_sk(sc->sc_sock->sk);
+			/* the stack's structs aren't sparse endian clean */
+			saddr = (__force __be32)inet->saddr;
+			daddr = (__force __be32)inet->daddr;
+			sport = (__force __be16)inet->sport;
+			dport = (__force __be16)inet->dport;
+		}
+
+		/* XXX sigh, inet-> doesn't have sparse annotation so any
+		 * use of it here generates a warning with -Wbitwise */
+		seq_printf(seq, "%p:\n"
+			   "  krefs:           %d\n"
+			   "  sock:            %u.%u.%u.%u:%u -> "
+					      "%u.%u.%u.%u:%u\n"
+			   "  remote node:     %s\n"
+			   "  page off:        %zu\n"
+			   "  handshake ok:    %u\n"
+			   "  timer:           %lu.%lu\n"
+			   "  data ready:      %lu.%lu\n"
+			   "  advance start:   %lu.%lu\n"
+			   "  advance stop:    %lu.%lu\n"
+			   "  func start:      %lu.%lu\n"
+			   "  func stop:       %lu.%lu\n"
+			   "  func key:        %u\n"
+			   "  func type:       %u\n",
+			   sc,
+			   atomic_read(&sc->sc_kref.refcount),
+			   NIPQUAD(saddr), inet ? ntohs(sport) : 0,
+			   NIPQUAD(daddr), inet ? ntohs(dport) : 0,
+			   sc->sc_node->nd_name,
+			   sc->sc_page_off,
+			   sc->sc_handshake_ok,
+			   TV_SEC_USEC(sc->sc_tv_timer),
+			   TV_SEC_USEC(sc->sc_tv_data_ready),
+			   TV_SEC_USEC(sc->sc_tv_advance_start),
+			   TV_SEC_USEC(sc->sc_tv_advance_stop),
+			   TV_SEC_USEC(sc->sc_tv_func_start),
+			   TV_SEC_USEC(sc->sc_tv_func_stop),
+			   sc->sc_msg_key,
+			   sc->sc_msg_type);
+	}
+
+
+	spin_unlock(&o2net_debug_lock);
+
+	return 0;
+}
+
+static void sc_seq_stop(struct seq_file *seq, void *v)
+{
+}
+
+static struct seq_operations sc_seq_ops = {
+	.start = sc_seq_start,
+	.next = sc_seq_next,
+	.stop = sc_seq_stop,
+	.show = sc_seq_show,
+};
+
+static int sc_fop_open(struct inode *inode, struct file *file)
+{
+	struct o2net_sock_container *dummy_sc;
+	struct seq_file *seq;
+	int ret;
+
+	dummy_sc = kmalloc(sizeof(struct o2net_sock_container), GFP_KERNEL);
+	if (dummy_sc == NULL) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	dummy_sc->sc_page = NULL;
+
+	ret = seq_open(file, &sc_seq_ops);
+	if (ret)
+		goto out;
+
+	seq = file->private_data;
+	seq->private = dummy_sc;
+	o2net_debug_add_sc(dummy_sc);
+
+	dummy_sc = NULL;
+
+out:
+	kfree(dummy_sc);
+	return ret;
+}
+
+static int sc_fop_release(struct inode *inode, struct file *file)
+{
+	struct seq_file *seq = file->private_data;
+	struct o2net_sock_container *dummy_sc = seq->private;
+
+	o2net_debug_del_sc(dummy_sc);
+	return seq_release_private(inode, file);
+}
+
+static struct file_operations sc_seq_fops = {
+	.open = sc_fop_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = sc_fop_release,
+};
+
+int o2net_debugfs_init(void)
+{
+	o2net_dentry = debugfs_create_dir(O2NET_DEBUG_DIR, NULL);
+	if (!o2net_dentry) {
+		mlog_errno(-ENOMEM);
+		goto bail;
+	}
+
+	nst_dentry = debugfs_create_file(NST_DEBUG_NAME, S_IFREG|S_IRUSR,
+					 o2net_dentry, NULL,
+					 &nst_seq_fops);
+	if (!nst_dentry) {
+		mlog_errno(-ENOMEM);
+		goto bail;
+	}
+
+	sc_dentry = debugfs_create_file(SC_DEBUG_NAME, S_IFREG|S_IRUSR,
+					o2net_dentry, NULL,
+					&sc_seq_fops);
+	if (!sc_dentry) {
+		mlog_errno(-ENOMEM);
+		goto bail;
+	}
+
+	return 0;
+bail:
+	if (sc_dentry)
+		debugfs_remove(sc_dentry);
+	if (nst_dentry)
+		debugfs_remove(nst_dentry);
+	if (o2net_dentry)
+		debugfs_remove(o2net_dentry);
+	return -ENOMEM;
+}
+
+void o2net_debugfs_exit(void)
+{
+	if (sc_dentry)
+		debugfs_remove(sc_dentry);
+	if (nst_dentry)
+		debugfs_remove(nst_dentry);
+	if (o2net_dentry)
+		debugfs_remove(o2net_dentry);
+}
+
+#endif	/* CONFIG_DEBUG_FS */
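netdebug.c iterates live lists by parking a dummy element in the list
as a cursor; st_task == NULL and sc_page == NULL mark the dummies. A
stripped-down sketch of the pattern with a hypothetical item type and
the spinlock elided:

	#include <linux/list.h>

	struct item {
		struct list_head link;
		void *payload;		/* NULL marks a dummy cursor */
	};

	static LIST_HEAD(items);

	/* Walk forward from the cursor, skipping other cursors, and
	 * stop if we wrap around to the (payload-less) list head. */
	static struct item *next_real(struct item *cursor)
	{
		struct item *it;

		list_for_each_entry(it, &cursor->link, link) {
			if (&it->link == &items)
				break;
			if (it->payload)
				return it;
		}
		return NULL;
	}

	/* Advance the cursor past the entry we just returned, so the
	 * list may change between calls without breaking iteration. */
	static struct item *advance(struct item *cursor)
	{
		struct item *it = next_real(cursor);

		list_del_init(&cursor->link);
		if (it)
			list_add(&cursor->link, &it->link);
		return it;
	}
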
diff --git a/fs/ocfs2/cluster/nodemanager.c b/fs/ocfs2/cluster/nodemanager.c
index 709fba2..cf9401e 100644
--- a/fs/ocfs2/cluster/nodemanager.c
+++ b/fs/ocfs2/cluster/nodemanager.c
@@ -959,7 +959,10 @@
 	cluster_print_version();
 
 	o2hb_init();
-	o2net_init();
+
+	ret = o2net_init();
+	if (ret)
+		goto out;
 
 	ocfs2_table_header = register_sysctl_table(ocfs2_root_table);
 	if (!ocfs2_table_header) {
diff --git a/fs/ocfs2/cluster/sys.c b/fs/ocfs2/cluster/sys.c
index 0c095ce..98429fd 100644
--- a/fs/ocfs2/cluster/sys.c
+++ b/fs/ocfs2/cluster/sys.c
@@ -57,6 +57,7 @@
 void o2cb_sys_shutdown(void)
 {
 	mlog_sys_shutdown();
+	sysfs_remove_link(NULL, "o2cb");
 	kset_unregister(o2cb_kset);
 }
 
@@ -68,6 +69,14 @@
 	if (!o2cb_kset)
 		return -ENOMEM;
 
+	/*
+	 * Create this symlink for backwards compatibility with old
+	 * versions of ocfs2-tools which look for things in /sys/o2cb.
+	 */
+	ret = sysfs_create_link(NULL, &o2cb_kset->kobj, "o2cb");
+	if (ret)
+		goto error;
+
 	ret = sysfs_create_group(&o2cb_kset->kobj, &o2cb_attr_group);
 	if (ret)
 		goto error;
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index b8057c5..1e44ad1 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -142,23 +142,65 @@
 static void o2net_sc_postpone_idle(struct o2net_sock_container *sc);
 static void o2net_sc_reset_idle_timer(struct o2net_sock_container *sc);
 
-/*
- * FIXME: These should use to_o2nm_cluster_from_node(), but we end up
- * losing our parent link to the cluster during shutdown. This can be
- * solved by adding a pre-removal callback to configfs, or passing
- * around the cluster with the node. -jeffm
- */
-static inline int o2net_reconnect_delay(struct o2nm_node *node)
+static void o2net_init_nst(struct o2net_send_tracking *nst, u32 msgtype,
+			   u32 msgkey, struct task_struct *task, u8 node)
+{
+#ifdef CONFIG_DEBUG_FS
+	INIT_LIST_HEAD(&nst->st_net_debug_item);
+	nst->st_task = task;
+	nst->st_msg_type = msgtype;
+	nst->st_msg_key = msgkey;
+	nst->st_node = node;
+#endif
+}
+
+static void o2net_set_nst_sock_time(struct o2net_send_tracking *nst)
+{
+#ifdef CONFIG_DEBUG_FS
+	do_gettimeofday(&nst->st_sock_time);
+#endif
+}
+
+static void o2net_set_nst_send_time(struct o2net_send_tracking *nst)
+{
+#ifdef CONFIG_DEBUG_FS
+	do_gettimeofday(&nst->st_send_time);
+#endif
+}
+
+static void o2net_set_nst_status_time(struct o2net_send_tracking *nst)
+{
+#ifdef CONFIG_DEBUG_FS
+	do_gettimeofday(&nst->st_status_time);
+#endif
+}
+
+static void o2net_set_nst_sock_container(struct o2net_send_tracking *nst,
+					 struct o2net_sock_container *sc)
+{
+#ifdef CONFIG_DEBUG_FS
+	nst->st_sc = sc;
+#endif
+}
+
+static void o2net_set_nst_msg_id(struct o2net_send_tracking *nst, u32 msg_id)
+{
+#ifdef CONFIG_DEBUG_FS
+	nst->st_id = msg_id;
+#endif
+}
+
+static inline int o2net_reconnect_delay(void)
 {
 	return o2nm_single_cluster->cl_reconnect_delay_ms;
 }
 
-static inline int o2net_keepalive_delay(struct o2nm_node *node)
+static inline int o2net_keepalive_delay(void)
 {
 	return o2nm_single_cluster->cl_keepalive_delay_ms;
 }
 
-static inline int o2net_idle_timeout(struct o2nm_node *node)
+static inline int o2net_idle_timeout(void)
 {
 	return o2nm_single_cluster->cl_idle_timeout_ms;
 }
@@ -296,6 +338,7 @@
 	o2nm_node_put(sc->sc_node);
 	sc->sc_node = NULL;
 
+	o2net_debug_del_sc(sc);
 	kfree(sc);
 }
 
@@ -336,6 +379,7 @@
 
 	ret = sc;
 	sc->sc_page = page;
+	o2net_debug_add_sc(sc);
 	sc = NULL;
 	page = NULL;
 
@@ -399,8 +443,6 @@
 	mlog_bug_on_msg(err && valid, "err %d valid %u\n", err, valid);
 	mlog_bug_on_msg(valid && !sc, "valid %u sc %p\n", valid, sc);
 
-	/* we won't reconnect after our valid conn goes away for
-	 * this hb iteration.. here so it shows up in the logs */
 	if (was_valid && !valid && err == 0)
 		err = -ENOTCONN;
 
@@ -430,11 +472,6 @@
 
 	if (!was_valid && valid) {
 		o2quo_conn_up(o2net_num_from_nn(nn));
-		/* this is a bit of a hack.  we only try reconnecting
-		 * when heartbeating starts until we get a connection.
-		 * if that connection then dies we don't try reconnecting.
-		 * the only way to start connecting again is to down
-		 * heartbeat and bring it back up. */
 		cancel_delayed_work(&nn->nn_connect_expired);
 		printk(KERN_INFO "o2net: %s " SC_NODEF_FMT "\n",
 		       o2nm_this_node() > sc->sc_node->nd_num ?
@@ -451,12 +488,24 @@
 		/* delay if we're within a RECONNECT_DELAY of the
 		 * last attempt */
 		delay = (nn->nn_last_connect_attempt +
-			 msecs_to_jiffies(o2net_reconnect_delay(NULL)))
+			 msecs_to_jiffies(o2net_reconnect_delay()))
 			- jiffies;
-		if (delay > msecs_to_jiffies(o2net_reconnect_delay(NULL)))
+		if (delay > msecs_to_jiffies(o2net_reconnect_delay()))
 			delay = 0;
 		mlog(ML_CONN, "queueing conn attempt in %lu jiffies\n", delay);
 		queue_delayed_work(o2net_wq, &nn->nn_connect_work, delay);
+
+		/*
+		 * Delay the connect_expired work until after the idle timeout.
+		 *
+		 * We might have lots of failed connection attempts that run
+		 * through here but we only cancel the connect_expired work when
+		 * a connection attempt succeeds.  So only the first enqueue of
+		 * the connect_expired work will do anything.  The rest will see
+		 * that it's already queued and do nothing.
+		 */
+		delay += msecs_to_jiffies(o2net_idle_timeout());
+		queue_delayed_work(o2net_wq, &nn->nn_connect_expired, delay);
 	}
 
 	/* keep track of the nn's sc ref for the caller */
@@ -914,6 +963,9 @@
 	struct o2net_status_wait nsw = {
 		.ns_node_item = LIST_HEAD_INIT(nsw.ns_node_item),
 	};
+	struct o2net_send_tracking nst;
+
+	o2net_init_nst(&nst, msg_type, key, current, target_node);
 
 	if (o2net_wq == NULL) {
 		mlog(0, "attempt to tx without o2netd running\n");
@@ -939,6 +991,10 @@
 		goto out;
 	}
 
+	o2net_debug_add_nst(&nst);
+
+	o2net_set_nst_sock_time(&nst);
+
 	ret = wait_event_interruptible(nn->nn_sc_wq,
 				       o2net_tx_can_proceed(nn, &sc, &error));
 	if (!ret && error)
@@ -946,6 +1002,8 @@
 	if (ret)
 		goto out;
 
+	o2net_set_nst_sock_container(&nst, sc);
+
 	veclen = caller_veclen + 1;
 	vec = kmalloc(sizeof(struct kvec) * veclen, GFP_ATOMIC);
 	if (vec == NULL) {
@@ -972,6 +1030,9 @@
 		goto out;
 
 	msg->msg_num = cpu_to_be32(nsw.ns_id);
+	o2net_set_nst_msg_id(&nst, nsw.ns_id);
+
+	o2net_set_nst_send_time(&nst);
 
 	/* finally, convert the message header to network byte-order
 	 * and send */
@@ -986,6 +1047,7 @@
 	}
 
 	/* wait on other node's handler */
+	o2net_set_nst_status_time(&nst);
 	wait_event(nsw.ns_wq, o2net_nsw_completed(nn, &nsw));
 
 	/* Note that we avoid overwriting the callers status return
@@ -998,6 +1060,7 @@
 	mlog(0, "woken, returning system status %d, user status %d\n",
 	     ret, nsw.ns_status);
 out:
+	o2net_debug_del_nst(&nst); /* must be before dropping sc and node */
 	if (sc)
 		sc_put(sc);
 	if (vec)
@@ -1154,23 +1217,23 @@
 	 * but isn't. This can ultimately cause corruption.
 	 */
 	if (be32_to_cpu(hand->o2net_idle_timeout_ms) !=
-				o2net_idle_timeout(sc->sc_node)) {
+				o2net_idle_timeout()) {
 		mlog(ML_NOTICE, SC_NODEF_FMT " uses a network idle timeout of "
 		     "%u ms, but we use %u ms locally.  disconnecting\n",
 		     SC_NODEF_ARGS(sc),
 		     be32_to_cpu(hand->o2net_idle_timeout_ms),
-		     o2net_idle_timeout(sc->sc_node));
+		     o2net_idle_timeout());
 		o2net_ensure_shutdown(nn, sc, -ENOTCONN);
 		return -1;
 	}
 
 	if (be32_to_cpu(hand->o2net_keepalive_delay_ms) !=
-			o2net_keepalive_delay(sc->sc_node)) {
+			o2net_keepalive_delay()) {
 		mlog(ML_NOTICE, SC_NODEF_FMT " uses a keepalive delay of "
 		     "%u ms, but we use %u ms locally.  disconnecting\n",
 		     SC_NODEF_ARGS(sc),
 		     be32_to_cpu(hand->o2net_keepalive_delay_ms),
-		     o2net_keepalive_delay(sc->sc_node));
+		     o2net_keepalive_delay());
 		o2net_ensure_shutdown(nn, sc, -ENOTCONN);
 		return -1;
 	}
@@ -1193,6 +1256,7 @@
 	 * shut down already */
 	if (nn->nn_sc == sc) {
 		o2net_sc_reset_idle_timer(sc);
+		atomic_set(&nn->nn_timeout, 0);
 		o2net_set_nn_state(nn, sc, 1, 0);
 	}
 	spin_unlock(&nn->nn_lock);
@@ -1347,12 +1411,11 @@
 {
 	o2net_hand->o2hb_heartbeat_timeout_ms = cpu_to_be32(
 		O2HB_MAX_WRITE_TIMEOUT_MS);
-	o2net_hand->o2net_idle_timeout_ms = cpu_to_be32(
-		o2net_idle_timeout(NULL));
+	o2net_hand->o2net_idle_timeout_ms = cpu_to_be32(o2net_idle_timeout());
 	o2net_hand->o2net_keepalive_delay_ms = cpu_to_be32(
-		o2net_keepalive_delay(NULL));
+		o2net_keepalive_delay());
 	o2net_hand->o2net_reconnect_delay_ms = cpu_to_be32(
-		o2net_reconnect_delay(NULL));
+		o2net_reconnect_delay());
 }
 
 /* ------------------------------------------------------------ */
@@ -1391,14 +1454,15 @@
 static void o2net_idle_timer(unsigned long data)
 {
 	struct o2net_sock_container *sc = (struct o2net_sock_container *)data;
+	struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
 	struct timeval now;
 
 	do_gettimeofday(&now);
 
 	printk(KERN_INFO "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u "
 	     "seconds, shutting it down.\n", SC_NODEF_ARGS(sc),
-		     o2net_idle_timeout(sc->sc_node) / 1000,
-		     o2net_idle_timeout(sc->sc_node) % 1000);
+		     o2net_idle_timeout() / 1000,
+		     o2net_idle_timeout() % 1000);
 	mlog(ML_NOTICE, "here are some times that might help debug the "
 	     "situation: (tmr %ld.%ld now %ld.%ld dr %ld.%ld adv "
 	     "%ld.%ld:%ld.%ld func (%08x:%u) %ld.%ld:%ld.%ld)\n",
@@ -1413,6 +1477,12 @@
 	     sc->sc_tv_func_start.tv_sec, (long) sc->sc_tv_func_start.tv_usec,
 	     sc->sc_tv_func_stop.tv_sec, (long) sc->sc_tv_func_stop.tv_usec);
 
+	/*
+	 * Initialize the nn_timeout so that the next connection attempt
+	 * will continue in o2net_start_connect.
+	 */
+	atomic_set(&nn->nn_timeout, 1);
+
 	o2net_sc_queue_work(sc, &sc->sc_shutdown_work);
 }
 
@@ -1420,10 +1490,10 @@
 {
 	o2net_sc_cancel_delayed_work(sc, &sc->sc_keepalive_work);
 	o2net_sc_queue_delayed_work(sc, &sc->sc_keepalive_work,
-		      msecs_to_jiffies(o2net_keepalive_delay(sc->sc_node)));
+		      msecs_to_jiffies(o2net_keepalive_delay()));
 	do_gettimeofday(&sc->sc_tv_timer);
 	mod_timer(&sc->sc_idle_timeout,
-	       jiffies + msecs_to_jiffies(o2net_idle_timeout(sc->sc_node)));
+	       jiffies + msecs_to_jiffies(o2net_idle_timeout()));
 }
 
 static void o2net_sc_postpone_idle(struct o2net_sock_container *sc)
@@ -1447,6 +1517,7 @@
 	struct socket *sock = NULL;
 	struct sockaddr_in myaddr = {0, }, remoteaddr = {0, };
 	int ret = 0, stop;
+	unsigned int timeout;
 
 	/* if we're greater we initiate tx, otherwise we accept */
 	if (o2nm_this_node() <= o2net_num_from_nn(nn))
@@ -1466,8 +1537,17 @@
 	}
 
 	spin_lock(&nn->nn_lock);
-	/* see if we already have one pending or have given up */
-	stop = (nn->nn_sc || nn->nn_persistent_error);
+	/*
+	 * see if we already have one pending or have given up.
+	 * nn_timeout is set when we close the connection because of
+	 * an idle timeout, which means we have connected to that node
+	 * successfully at least once; in that case, try to connect
+	 * to it again.
+	 */
+	timeout = atomic_read(&nn->nn_timeout);
+	stop = (nn->nn_sc ||
+		(nn->nn_persistent_error &&
+		(nn->nn_persistent_error != -ENOTCONN || timeout == 0)));
 	spin_unlock(&nn->nn_lock);
 	if (stop)
 		goto out;
@@ -1555,8 +1635,8 @@
 		mlog(ML_ERROR, "no connection established with node %u after "
 		     "%u.%u seconds, giving up and returning errors.\n",
 		     o2net_num_from_nn(nn),
-		     o2net_idle_timeout(NULL) / 1000,
-		     o2net_idle_timeout(NULL) % 1000);
+		     o2net_idle_timeout() / 1000,
+		     o2net_idle_timeout() % 1000);
 
 		o2net_set_nn_state(nn, NULL, 0, -ENOTCONN);
 	}
@@ -1579,6 +1659,7 @@
 
 	/* don't reconnect until it's heartbeating again */
 	spin_lock(&nn->nn_lock);
+	atomic_set(&nn->nn_timeout, 0);
 	o2net_set_nn_state(nn, NULL, 0, -ENOTCONN);
 	spin_unlock(&nn->nn_lock);
 
@@ -1610,20 +1691,15 @@
 
 	/* ensure an immediate connect attempt */
 	nn->nn_last_connect_attempt = jiffies -
-		(msecs_to_jiffies(o2net_reconnect_delay(node)) + 1);
+		(msecs_to_jiffies(o2net_reconnect_delay()) + 1);
 
 	if (node_num != o2nm_this_node()) {
-		/* heartbeat doesn't work unless a local node number is
-		 * configured and doing so brings up the o2net_wq, so we can
-		 * use it.. */
-		queue_delayed_work(o2net_wq, &nn->nn_connect_expired,
-		                   msecs_to_jiffies(o2net_idle_timeout(node)));
-
 		/* believe it or not, accept and node heartbeating testing
 		 * can succeed for this node before we get here, so
 		 * only use set_nn_state to clear the persistent error
 		 * if that hasn't already happened */
 		spin_lock(&nn->nn_lock);
+		atomic_set(&nn->nn_timeout, 0);
 		if (nn->nn_persistent_error)
 			o2net_set_nn_state(nn, NULL, 0, 0);
 		spin_unlock(&nn->nn_lock);
@@ -1747,6 +1823,7 @@
 	new_sock = NULL;
 
 	spin_lock(&nn->nn_lock);
+	atomic_set(&nn->nn_timeout, 0);
 	o2net_set_nn_state(nn, sc, 0, 0);
 	spin_unlock(&nn->nn_lock);
 
@@ -1922,6 +1999,9 @@
 
 	o2quo_init();
 
+	if (o2net_debugfs_init())
+		return -ENOMEM;
+
 	o2net_hand = kzalloc(sizeof(struct o2net_handshake), GFP_KERNEL);
 	o2net_keep_req = kzalloc(sizeof(struct o2net_msg), GFP_KERNEL);
 	o2net_keep_resp = kzalloc(sizeof(struct o2net_msg), GFP_KERNEL);
@@ -1941,6 +2021,7 @@
 	for (i = 0; i < ARRAY_SIZE(o2net_nodes); i++) {
 		struct o2net_node *nn = o2net_nn_from_num(i);
 
+		atomic_set(&nn->nn_timeout, 0);
 		spin_lock_init(&nn->nn_lock);
 		INIT_DELAYED_WORK(&nn->nn_connect_work, o2net_start_connect);
 		INIT_DELAYED_WORK(&nn->nn_connect_expired,
@@ -1962,4 +2043,5 @@
 	kfree(o2net_hand);
 	kfree(o2net_keep_req);
 	kfree(o2net_keep_resp);
+	o2net_debugfs_exit();
 }
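The behavioral core of the tcp.c changes is the reconnect gate in
o2net_start_connect(): a stored -ENOTCONN no longer suppresses
reconnects once nn_timeout records an idle-timeout disconnect. The
same predicate restated standalone (plain C, not kernel code):

	#include <errno.h>
	#include <stdbool.h>

	/* Skip connecting if a socket already exists, or if a persistent
	 * error is set, unless that error is -ENOTCONN left behind by an
	 * idle timeout, which is now retried. */
	static bool should_skip_connect(const void *existing_sc,
					int persistent_error, bool timed_out)
	{
		return existing_sc ||
		       (persistent_error &&
			(persistent_error != -ENOTCONN || !timed_out));
	}
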
diff --git a/fs/ocfs2/cluster/tcp.h b/fs/ocfs2/cluster/tcp.h
index f36f66a..a705d5d 100644
--- a/fs/ocfs2/cluster/tcp.h
+++ b/fs/ocfs2/cluster/tcp.h
@@ -117,4 +117,36 @@
 int o2net_init(void);
 void o2net_exit(void);
 
+struct o2net_send_tracking;
+struct o2net_sock_container;
+
+#ifdef CONFIG_DEBUG_FS
+int o2net_debugfs_init(void);
+void o2net_debugfs_exit(void);
+void o2net_debug_add_nst(struct o2net_send_tracking *nst);
+void o2net_debug_del_nst(struct o2net_send_tracking *nst);
+void o2net_debug_add_sc(struct o2net_sock_container *sc);
+void o2net_debug_del_sc(struct o2net_sock_container *sc);
+#else
+static inline int o2net_debugfs_init(void)
+{
+	return 0;
+}
+static inline void o2net_debugfs_exit(void)
+{
+}
+static inline void o2net_debug_add_nst(struct o2net_send_tracking *nst)
+{
+}
+static inline void o2net_debug_del_nst(struct o2net_send_tracking *nst)
+{
+}
+static inline void o2net_debug_add_sc(struct o2net_sock_container *sc)
+{
+}
+static inline void o2net_debug_del_sc(struct o2net_sock_container *sc)
+{
+}
+#endif	/* CONFIG_DEBUG_FS */
+
 #endif /* O2CLUSTER_TCP_H */
diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h
index d25b9af..8d58cfe 100644
--- a/fs/ocfs2/cluster/tcp_internal.h
+++ b/fs/ocfs2/cluster/tcp_internal.h
@@ -95,6 +95,8 @@
 	unsigned			nn_sc_valid:1;
 	/* if this is set tx just returns it */
 	int				nn_persistent_error;
+	/* It is only set to 1 after an idle timeout. */
+	atomic_t			nn_timeout;
 
 	/* threads waiting for an sc to arrive wait on the wq for generation
 	 * to increase.  it is increased when a connecting socket succeeds
@@ -164,7 +166,9 @@
 	/* original handlers for the sockets */
 	void			(*sc_state_change)(struct sock *sk);
 	void			(*sc_data_ready)(struct sock *sk, int bytes);
-
+#ifdef CONFIG_DEBUG_FS
+	struct list_head        sc_net_debug_item;
+#endif
 	struct timeval 		sc_tv_timer;
 	struct timeval 		sc_tv_data_ready;
 	struct timeval 		sc_tv_advance_start;
@@ -206,4 +210,24 @@
 	struct list_head	ns_node_item;
 };
 
+#ifdef CONFIG_DEBUG_FS
+/* just for state dumps */
+struct o2net_send_tracking {
+	struct list_head		st_net_debug_item;
+	struct task_struct		*st_task;
+	struct o2net_sock_container	*st_sc;
+	u32				st_id;
+	u32				st_msg_type;
+	u32				st_msg_key;
+	u8				st_node;
+	struct timeval			st_sock_time;
+	struct timeval			st_send_time;
+	struct timeval			st_status_time;
+};
+#else
+struct o2net_send_tracking {
+	u32	dummy;
+};
+#endif	/* CONFIG_DEBUG_FS */
+
 #endif /* O2CLUSTER_TCP_INTERNAL_H */
diff --git a/fs/ocfs2/dlm/Makefile b/fs/ocfs2/dlm/Makefile
index ce3f7c2..1903613 100644
--- a/fs/ocfs2/dlm/Makefile
+++ b/fs/ocfs2/dlm/Makefile
@@ -1,6 +1,6 @@
 EXTRA_CFLAGS += -Ifs/ocfs2
 
-obj-$(CONFIG_OCFS2_FS) += ocfs2_dlm.o ocfs2_dlmfs.o
+obj-$(CONFIG_OCFS2_FS_O2CB) += ocfs2_dlm.o ocfs2_dlmfs.o
 
 ocfs2_dlm-objs := dlmdomain.o dlmdebug.o dlmthread.o dlmrecovery.o \
 	dlmmaster.o dlmast.o dlmconvert.o dlmlock.o dlmunlock.o dlmver.o
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index dc8ea66..d5a86fb 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -49,6 +49,41 @@
 /* Intended to make it easier for us to switch out hash functions */
 #define dlm_lockid_hash(_n, _l) full_name_hash(_n, _l)
 
+enum dlm_mle_type {
+	DLM_MLE_BLOCK,
+	DLM_MLE_MASTER,
+	DLM_MLE_MIGRATION
+};
+
+struct dlm_lock_name {
+	u8 len;
+	u8 name[DLM_LOCKID_NAME_MAX];
+};
+
+struct dlm_master_list_entry {
+	struct list_head list;
+	struct list_head hb_events;
+	struct dlm_ctxt *dlm;
+	spinlock_t spinlock;
+	wait_queue_head_t wq;
+	atomic_t woken;
+	struct kref mle_refs;
+	int inuse;
+	unsigned long maybe_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+	unsigned long vote_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+	unsigned long response_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+	unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+	u8 master;
+	u8 new_master;
+	enum dlm_mle_type type;
+	struct o2hb_callback_func mle_hb_up;
+	struct o2hb_callback_func mle_hb_down;
+	union {
+		struct dlm_lock_resource *res;
+		struct dlm_lock_name name;
+	} u;
+};
+
 enum dlm_ast_type {
 	DLM_AST = 0,
 	DLM_BAST,
@@ -101,6 +136,7 @@
 	struct list_head purge_list;
 	struct list_head pending_asts;
 	struct list_head pending_basts;
+	struct list_head tracking_list;
 	unsigned int purge_count;
 	spinlock_t spinlock;
 	spinlock_t ast_lock;
@@ -122,6 +158,9 @@
 	atomic_t remote_resources;
 	atomic_t unknown_resources;
 
+	struct dlm_debug_ctxt *dlm_debug_ctxt;
+	struct dentry *dlm_debugfs_subroot;
+
 	/* NOTE: Next three are protected by dlm_domain_lock */
 	struct kref dlm_refs;
 	enum dlm_ctxt_state dlm_state;
@@ -270,6 +309,9 @@
 	struct list_head dirty;
 	struct list_head recovering; // dlm_recovery_ctxt.resources list
 
+	/* Added during init and removed during release */
+	struct list_head tracking;	/* dlm->tracking_list */
+
 	/* unused lock resources have their last_used stamped and are
 	 * put on a list for the dlm thread to run. */
 	unsigned long    last_used;
@@ -963,9 +1005,16 @@
 					  DLM_LOCK_RES_MIGRATING));
 }
 
+/* create/destroy slab caches */
+int dlm_init_master_caches(void);
+void dlm_destroy_master_caches(void);
+
+int dlm_init_lock_cache(void);
+void dlm_destroy_lock_cache(void);
 
 int dlm_init_mle_cache(void);
 void dlm_destroy_mle_cache(void);
+
 void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up);
 int dlm_drop_lockres_ref(struct dlm_ctxt *dlm,
 			 struct dlm_lock_resource *res);
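The new tracking/tracking_list pair in dlmcommon.h gives the debugfs
code a way to enumerate every lock resource in a domain. Per the
comment above it is populated at init and emptied at release,
presumably along these lines (hypothetical helper names; dlm->spinlock
assumed to protect the list):

	/* Sketch only; the real hook points live in the lockres
	 * init and release paths. */
	static void track_lockres(struct dlm_ctxt *dlm,
				  struct dlm_lock_resource *res)
	{
		spin_lock(&dlm->spinlock);
		list_add_tail(&res->tracking, &dlm->tracking_list);
		spin_unlock(&dlm->spinlock);
	}

	static void untrack_lockres(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res)
	{
		spin_lock(&dlm->spinlock);
		if (!list_empty(&res->tracking))
			list_del_init(&res->tracking);
		spin_unlock(&dlm->spinlock);
	}
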
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
index 64239b3..5f6d8587 100644
--- a/fs/ocfs2/dlm/dlmdebug.c
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -5,7 +5,7 @@
  *
  * debug functionality for the dlm
  *
- * Copyright (C) 2004 Oracle.  All rights reserved.
+ * Copyright (C) 2004, 2008 Oracle.  All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public
@@ -30,6 +30,7 @@
 #include <linux/utsname.h>
 #include <linux/sysctl.h>
 #include <linux/spinlock.h>
+#include <linux/debugfs.h>
 
 #include "cluster/heartbeat.h"
 #include "cluster/nodemanager.h"
@@ -37,17 +38,16 @@
 
 #include "dlmapi.h"
 #include "dlmcommon.h"
-
 #include "dlmdomain.h"
+#include "dlmdebug.h"
 
 #define MLOG_MASK_PREFIX ML_DLM
 #include "cluster/masklog.h"
 
+int stringify_lockname(const char *lockname, int locklen, char *buf, int len);
+
 void dlm_print_one_lock_resource(struct dlm_lock_resource *res)
 {
-	mlog(ML_NOTICE, "lockres: %.*s, owner=%u, state=%u\n",
-	       res->lockname.len, res->lockname.name,
-	       res->owner, res->state);
 	spin_lock(&res->spinlock);
 	__dlm_print_one_lock_resource(res);
 	spin_unlock(&res->spinlock);
@@ -58,7 +58,7 @@
 	int bit;
 	assert_spin_locked(&res->spinlock);
 
-	mlog(ML_NOTICE, "  refmap nodes: [ ");
+	printk("  refmap nodes: [ ");
 	bit = 0;
 	while (1) {
 		bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit);
@@ -70,63 +70,66 @@
 	printk("], inflight=%u\n", res->inflight_locks);
 }
 
+static void __dlm_print_lock(struct dlm_lock *lock)
+{
+	spin_lock(&lock->spinlock);
+
+	printk("    type=%d, conv=%d, node=%u, cookie=%u:%llu, "
+	       "ref=%u, ast=(empty=%c,pend=%c), bast=(empty=%c,pend=%c), "
+	       "pending=(conv=%c,lock=%c,cancel=%c,unlock=%c)\n",
+	       lock->ml.type, lock->ml.convert_type, lock->ml.node,
+	       dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+	       dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
+	       atomic_read(&lock->lock_refs.refcount),
+	       (list_empty(&lock->ast_list) ? 'y' : 'n'),
+	       (lock->ast_pending ? 'y' : 'n'),
+	       (list_empty(&lock->bast_list) ? 'y' : 'n'),
+	       (lock->bast_pending ? 'y' : 'n'),
+	       (lock->convert_pending ? 'y' : 'n'),
+	       (lock->lock_pending ? 'y' : 'n'),
+	       (lock->cancel_pending ? 'y' : 'n'),
+	       (lock->unlock_pending ? 'y' : 'n'));
+
+	spin_unlock(&lock->spinlock);
+}
+
 void __dlm_print_one_lock_resource(struct dlm_lock_resource *res)
 {
 	struct list_head *iter2;
 	struct dlm_lock *lock;
+	char buf[DLM_LOCKID_NAME_MAX];
 
 	assert_spin_locked(&res->spinlock);
 
-	mlog(ML_NOTICE, "lockres: %.*s, owner=%u, state=%u\n",
-	       res->lockname.len, res->lockname.name,
-	       res->owner, res->state);
-	mlog(ML_NOTICE, "  last used: %lu, on purge list: %s\n",
-	     res->last_used, list_empty(&res->purge) ? "no" : "yes");
+	stringify_lockname(res->lockname.name, res->lockname.len,
+			   buf, sizeof(buf) - 1);
+	printk("lockres: %s, owner=%u, state=%u\n",
+	       buf, res->owner, res->state);
+	printk("  last used: %lu, refcnt: %u, on purge list: %s\n",
+	       res->last_used, atomic_read(&res->refs.refcount),
+	       list_empty(&res->purge) ? "no" : "yes");
+	printk("  on dirty list: %s, on reco list: %s, "
+	       "migrating pending: %s\n",
+	       list_empty(&res->dirty) ? "no" : "yes",
+	       list_empty(&res->recovering) ? "no" : "yes",
+	       res->migration_pending ? "yes" : "no");
+	printk("  inflight locks: %d, asts reserved: %d\n",
+	       res->inflight_locks, atomic_read(&res->asts_reserved));
 	dlm_print_lockres_refmap(res);
-	mlog(ML_NOTICE, "  granted queue: \n");
+	printk("  granted queue:\n");
 	list_for_each(iter2, &res->granted) {
 		lock = list_entry(iter2, struct dlm_lock, list);
-		spin_lock(&lock->spinlock);
-		mlog(ML_NOTICE, "    type=%d, conv=%d, node=%u, "
-		       "cookie=%u:%llu, ast=(empty=%c,pend=%c), bast=(empty=%c,pend=%c)\n", 
-		       lock->ml.type, lock->ml.convert_type, lock->ml.node, 
-		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
-		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
-		       list_empty(&lock->ast_list) ? 'y' : 'n',
-		       lock->ast_pending ? 'y' : 'n',
-		       list_empty(&lock->bast_list) ? 'y' : 'n',
-		       lock->bast_pending ? 'y' : 'n');
-		spin_unlock(&lock->spinlock);
+		__dlm_print_lock(lock);
 	}
-	mlog(ML_NOTICE, "  converting queue: \n");
+	printk("  converting queue:\n");
 	list_for_each(iter2, &res->converting) {
 		lock = list_entry(iter2, struct dlm_lock, list);
-		spin_lock(&lock->spinlock);
-		mlog(ML_NOTICE, "    type=%d, conv=%d, node=%u, "
-		       "cookie=%u:%llu, ast=(empty=%c,pend=%c), bast=(empty=%c,pend=%c)\n", 
-		       lock->ml.type, lock->ml.convert_type, lock->ml.node, 
-		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
-		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
-		       list_empty(&lock->ast_list) ? 'y' : 'n',
-		       lock->ast_pending ? 'y' : 'n',
-		       list_empty(&lock->bast_list) ? 'y' : 'n',
-		       lock->bast_pending ? 'y' : 'n');
-		spin_unlock(&lock->spinlock);
+		__dlm_print_lock(lock);
 	}
-	mlog(ML_NOTICE, "  blocked queue: \n");
+	printk("  blocked queue:\n");
 	list_for_each(iter2, &res->blocked) {
 		lock = list_entry(iter2, struct dlm_lock, list);
-		spin_lock(&lock->spinlock);
-		mlog(ML_NOTICE, "    type=%d, conv=%d, node=%u, "
-		       "cookie=%u:%llu, ast=(empty=%c,pend=%c), bast=(empty=%c,pend=%c)\n", 
-		       lock->ml.type, lock->ml.convert_type, lock->ml.node, 
-		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
-		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
-		       list_empty(&lock->ast_list) ? 'y' : 'n',
-		       lock->ast_pending ? 'y' : 'n',
-		       list_empty(&lock->bast_list) ? 'y' : 'n',
-		       lock->bast_pending ? 'y' : 'n');
-		spin_unlock(&lock->spinlock);
+		__dlm_print_lock(lock);
 	}
 }
 
@@ -136,31 +139,6 @@
 }
 EXPORT_SYMBOL_GPL(dlm_print_one_lock);
 
-#if 0
-void dlm_dump_lock_resources(struct dlm_ctxt *dlm)
-{
-	struct dlm_lock_resource *res;
-	struct hlist_node *iter;
-	struct hlist_head *bucket;
-	int i;
-
-	mlog(ML_NOTICE, "struct dlm_ctxt: %s, node=%u, key=%u\n",
-		  dlm->name, dlm->node_num, dlm->key);
-	if (!dlm || !dlm->name) {
-		mlog(ML_ERROR, "dlm=%p\n", dlm);
-		return;
-	}
-
-	spin_lock(&dlm->spinlock);
-	for (i=0; i<DLM_HASH_BUCKETS; i++) {
-		bucket = dlm_lockres_hash(dlm, i);
-		hlist_for_each_entry(res, iter, bucket, hash_node)
-			dlm_print_one_lock_resource(res);
-	}
-	spin_unlock(&dlm->spinlock);
-}
-#endif  /*  0  */
-
 static const char *dlm_errnames[] = {
 	[DLM_NORMAL] =			"DLM_NORMAL",
 	[DLM_GRANTED] =			"DLM_GRANTED",
@@ -266,3 +244,792 @@
 	return dlm_errnames[err];
 }
 EXPORT_SYMBOL_GPL(dlm_errname);
+
+/* NOTE: This function converts a lockname into a string. It uses knowledge
+ * of the format of the lockname that should be outside the purview of the dlm.
+ * We are adding it here only to make dlm debugging slightly easier.
+ *
+ * For more on lockname formats, please refer to dlmglue.c and ocfs2_lockid.h.
+ */
+int stringify_lockname(const char *lockname, int locklen, char *buf, int len)
+{
+	int out = 0;
+	__be64 inode_blkno_be;
+
+#define OCFS2_DENTRY_LOCK_INO_START	18
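+	/* Dentry lock names ("N...") embed a big-endian inode block number
+	 * after the name prefix; print the prefix followed by the block
+	 * number in hex. */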
+	if (*lockname == 'N') {
+		memcpy((__be64 *)&inode_blkno_be,
+		       (char *)&lockname[OCFS2_DENTRY_LOCK_INO_START],
+		       sizeof(__be64));
+		out += snprintf(buf + out, len - out, "%.*s%08x",
+				OCFS2_DENTRY_LOCK_INO_START - 1, lockname,
+				(unsigned int)be64_to_cpu(inode_blkno_be));
+	} else
+		out += snprintf(buf + out, len - out, "%.*s",
+				locklen, lockname);
+	return out;
+}
+
+static int stringify_nodemap(unsigned long *nodemap, int maxnodes,
+			     char *buf, int len)
+{
+	int out = 0;
+	int i = -1;
+
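+	/* print each node number whose bit is set, space-separated */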
+	while ((i = find_next_bit(nodemap, maxnodes, i + 1)) < maxnodes)
+		out += snprintf(buf + out, len - out, "%d ", i);
+
+	return out;
+}
+
+static int dump_mle(struct dlm_master_list_entry *mle, char *buf, int len)
+{
+	int out = 0;
+	unsigned int namelen;
+	const char *name;
+	char *mle_type;
+
+	if (mle->type != DLM_MLE_MASTER) {
+		namelen = mle->u.name.len;
+		name = mle->u.name.name;
+	} else {
+		namelen = mle->u.res->lockname.len;
+		name = mle->u.res->lockname.name;
+	}
+
+	if (mle->type == DLM_MLE_BLOCK)
+		mle_type = "BLK";
+	else if (mle->type == DLM_MLE_MASTER)
+		mle_type = "MAS";
+	else
+		mle_type = "MIG";
+
+	out += stringify_lockname(name, namelen, buf + out, len - out);
+	out += snprintf(buf + out, len - out,
+			"\t%3s\tmas=%3u\tnew=%3u\tevt=%1d\tuse=%1d\tref=%3d\n",
+			mle_type, mle->master, mle->new_master,
+			!list_empty(&mle->hb_events),
+			!!mle->inuse,
+			atomic_read(&mle->mle_refs.refcount));
+
+	out += snprintf(buf + out, len - out, "Maybe=");
+	out += stringify_nodemap(mle->maybe_map, O2NM_MAX_NODES,
+				 buf + out, len - out);
+	out += snprintf(buf + out, len - out, "\n");
+
+	out += snprintf(buf + out, len - out, "Vote=");
+	out += stringify_nodemap(mle->vote_map, O2NM_MAX_NODES,
+				 buf + out, len - out);
+	out += snprintf(buf + out, len - out, "\n");
+
+	out += snprintf(buf + out, len - out, "Response=");
+	out += stringify_nodemap(mle->response_map, O2NM_MAX_NODES,
+				 buf + out, len - out);
+	out += snprintf(buf + out, len - out, "\n");
+
+	out += snprintf(buf + out, len - out, "Node=");
+	out += stringify_nodemap(mle->node_map, O2NM_MAX_NODES,
+				 buf + out, len - out);
+	out += snprintf(buf + out, len - out, "\n");
+
+	out += snprintf(buf + out, len - out, "\n");
+
+	return out;
+}
+
+void dlm_print_one_mle(struct dlm_master_list_entry *mle)
+{
+	char *buf;
+
+	buf = (char *) get_zeroed_page(GFP_NOFS);
+	if (buf) {
+		dump_mle(mle, buf, PAGE_SIZE - 1);
+		free_page((unsigned long)buf);
+	}
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+static struct dentry *dlm_debugfs_root = NULL;
+
+#define DLM_DEBUGFS_DIR				"o2dlm"
+#define DLM_DEBUGFS_DLM_STATE			"dlm_state"
+#define DLM_DEBUGFS_LOCKING_STATE		"locking_state"
+#define DLM_DEBUGFS_MLE_STATE			"mle_state"
+#define DLM_DEBUGFS_PURGE_LIST			"purge_list"
+
+/* begin - utils funcs */
+static void dlm_debug_free(struct kref *kref)
+{
+	struct dlm_debug_ctxt *dc;
+
+	dc = container_of(kref, struct dlm_debug_ctxt, debug_refcnt);
+
+	kfree(dc);
+}
+
+void dlm_debug_put(struct dlm_debug_ctxt *dc)
+{
+	if (dc)
+		kref_put(&dc->debug_refcnt, dlm_debug_free);
+}
+
+static void dlm_debug_get(struct dlm_debug_ctxt *dc)
+{
+	kref_get(&dc->debug_refcnt);
+}
+
+static struct debug_buffer *debug_buffer_allocate(void)
+{
+	struct debug_buffer *db = NULL;
+
+	db = kzalloc(sizeof(struct debug_buffer), GFP_KERNEL);
+	if (!db)
+		goto bail;
+
+	db->len = PAGE_SIZE;
+	db->buf = kmalloc(db->len, GFP_KERNEL);
+	if (!db->buf)
+		goto bail;
+
+	return db;
+bail:
+	kfree(db);
+	return NULL;
+}
+
+static ssize_t debug_buffer_read(struct file *file, char __user *buf,
+				 size_t nbytes, loff_t *ppos)
+{
+	struct debug_buffer *db = file->private_data;
+
+	return simple_read_from_buffer(buf, nbytes, ppos, db->buf, db->len);
+}
+
+static loff_t debug_buffer_llseek(struct file *file, loff_t off, int whence)
+{
+	struct debug_buffer *db = file->private_data;
+	loff_t new = -1;
+
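+	/* only SEEK_SET (0) and SEEK_CUR (1) are supported; anything else
+	 * leaves new at -1 and falls through to the -EINVAL check below */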
+	switch (whence) {
+	case 0:
+		new = off;
+		break;
+	case 1:
+		new = file->f_pos + off;
+		break;
+	}
+
+	if (new < 0 || new > db->len)
+		return -EINVAL;
+
+	return (file->f_pos = new);
+}
+
+static int debug_buffer_release(struct inode *inode, struct file *file)
+{
+	struct debug_buffer *db = (struct debug_buffer *)file->private_data;
+
+	if (db)
+		kfree(db->buf);
+	kfree(db);
+
+	return 0;
+}
+/* end - util funcs */
+
+/* begin - purge list funcs */
+static int debug_purgelist_print(struct dlm_ctxt *dlm, struct debug_buffer *db)
+{
+	struct dlm_lock_resource *res;
+	int out = 0;
+	unsigned long total = 0;
+
+	out += snprintf(db->buf + out, db->len - out,
+			"Dumping Purgelist for Domain: %s\n", dlm->name);
+
+	spin_lock(&dlm->spinlock);
+	list_for_each_entry(res, &dlm->purge_list, purge) {
+		++total;
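+		/* once the buffer is nearly full, keep counting entries
+		 * but stop printing them */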
+		if (db->len - out < 100)
+			continue;
+		spin_lock(&res->spinlock);
+		out += stringify_lockname(res->lockname.name,
+					  res->lockname.len,
+					  db->buf + out, db->len - out);
+		out += snprintf(db->buf + out, db->len - out, "\t%ld\n",
+				(jiffies - res->last_used)/HZ);
+		spin_unlock(&res->spinlock);
+	}
+	spin_unlock(&dlm->spinlock);
+
+	out += snprintf(db->buf + out, db->len - out,
+			"Total on list: %ld\n", total);
+
+	return out;
+}
+
+static int debug_purgelist_open(struct inode *inode, struct file *file)
+{
+	struct dlm_ctxt *dlm = inode->i_private;
+	struct debug_buffer *db;
+
+	db = debug_buffer_allocate();
+	if (!db)
+		goto bail;
+
+	db->len = debug_purgelist_print(dlm, db);
+
+	file->private_data = db;
+
+	return 0;
+bail:
+	return -ENOMEM;
+}
+
+static struct file_operations debug_purgelist_fops = {
+	.open =		debug_purgelist_open,
+	.release =	debug_buffer_release,
+	.read =		debug_buffer_read,
+	.llseek =	debug_buffer_llseek,
+};
+/* end - purge list funcs */
+
+/* begin - debug mle funcs */
+static int debug_mle_print(struct dlm_ctxt *dlm, struct debug_buffer *db)
+{
+	struct dlm_master_list_entry *mle;
+	int out = 0;
+	unsigned long total = 0;
+
+	out += snprintf(db->buf + out, db->len - out,
+			"Dumping MLEs for Domain: %s\n", dlm->name);
+
+	spin_lock(&dlm->master_lock);
+	list_for_each_entry(mle, &dlm->master_list, list) {
+		++total;
+		if (db->len - out < 200)
+			continue;
+		out += dump_mle(mle, db->buf + out, db->len - out);
+	}
+	spin_unlock(&dlm->master_lock);
+
+	out += snprintf(db->buf + out, db->len - out,
+			"Total on list: %ld\n", total);
+	return out;
+}
+
+static int debug_mle_open(struct inode *inode, struct file *file)
+{
+	struct dlm_ctxt *dlm = inode->i_private;
+	struct debug_buffer *db;
+
+	db = debug_buffer_allocate();
+	if (!db)
+		goto bail;
+
+	db->len = debug_mle_print(dlm, db);
+
+	file->private_data = db;
+
+	return 0;
+bail:
+	return -ENOMEM;
+}
+
+static struct file_operations debug_mle_fops = {
+	.open =		debug_mle_open,
+	.release =	debug_buffer_release,
+	.read =		debug_buffer_read,
+	.llseek =	debug_buffer_llseek,
+};
+
+/* end - debug mle funcs */
+
+/* begin - debug lockres funcs */
+static int dump_lock(struct dlm_lock *lock, int list_type, char *buf, int len)
+{
+	int out;
+
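+	/* one versioned, comma-separated record per lock; bump the version
+	 * whenever the record layout below changes */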
+#define DEBUG_LOCK_VERSION	1
+	spin_lock(&lock->spinlock);
+	out = snprintf(buf, len, "LOCK:%d,%d,%d,%d,%d,%d:%lld,%d,%d,%d,%d,%d,"
+		       "%d,%d,%d,%d\n",
+		       DEBUG_LOCK_VERSION,
+		       list_type, lock->ml.type, lock->ml.convert_type,
+		       lock->ml.node,
+		       dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+		       dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
+		       !list_empty(&lock->ast_list),
+		       !list_empty(&lock->bast_list),
+		       lock->ast_pending, lock->bast_pending,
+		       lock->convert_pending, lock->lock_pending,
+		       lock->cancel_pending, lock->unlock_pending,
+		       atomic_read(&lock->lock_refs.refcount));
+	spin_unlock(&lock->spinlock);
+
+	return out;
+}
+
+static int dump_lockres(struct dlm_lock_resource *res, char *buf, int len)
+{
+	struct dlm_lock *lock;
+	int i;
+	int out = 0;
+
+	out += snprintf(buf + out, len - out, "NAME:");
+	out += stringify_lockname(res->lockname.name, res->lockname.len,
+				  buf + out, len - out);
+	out += snprintf(buf + out, len - out, "\n");
+
+#define DEBUG_LRES_VERSION	1
+	out += snprintf(buf + out, len - out,
+			"LRES:%d,%d,%d,%ld,%d,%d,%d,%d,%d,%d,%d\n",
+			DEBUG_LRES_VERSION,
+			res->owner, res->state, res->last_used,
+			!list_empty(&res->purge),
+			!list_empty(&res->dirty),
+			!list_empty(&res->recovering),
+			res->inflight_locks, res->migration_pending,
+			atomic_read(&res->asts_reserved),
+			atomic_read(&res->refs.refcount));
+
+	/* refmap */
+	out += snprintf(buf + out, len - out, "RMAP:");
+	out += stringify_nodemap(res->refmap, O2NM_MAX_NODES,
+				 buf + out, len - out);
+	out += snprintf(buf + out, len - out, "\n");
+
+	/* lvb */
+	out += snprintf(buf + out, len - out, "LVBX:");
+	for (i = 0; i < DLM_LVB_LEN; i++)
+		out += snprintf(buf + out, len - out,
+					"%02x", (unsigned char)res->lvb[i]);
+	out += snprintf(buf + out, len - out, "\n");
+
+	/* granted */
+	list_for_each_entry(lock, &res->granted, list)
+		out += dump_lock(lock, 0, buf + out, len - out);
+
+	/* converting */
+	list_for_each_entry(lock, &res->converting, list)
+		out += dump_lock(lock, 1, buf + out, len - out);
+
+	/* blocked */
+	list_for_each_entry(lock, &res->blocked, list)
+		out += dump_lock(lock, 2, buf + out, len - out);
+
+	out += snprintf(buf + out, len - out, "\n");
+
+	return out;
+}
+
+static void *lockres_seq_start(struct seq_file *m, loff_t *pos)
+{
+	struct debug_lockres *dl = m->private;
+	struct dlm_ctxt *dlm = dl->dl_ctxt;
+	struct dlm_lock_resource *res = NULL;
+
+	spin_lock(&dlm->spinlock);
+
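+	/* If a previous read left off at a lockres, drop its reference and
+	 * advance to the next entry on the tracking list. Otherwise, start
+	 * from the head of the list. */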
+	if (dl->dl_res) {
+		list_for_each_entry(res, &dl->dl_res->tracking, tracking) {
+			if (dl->dl_res) {
+				dlm_lockres_put(dl->dl_res);
+				dl->dl_res = NULL;
+			}
+			if (&res->tracking == &dlm->tracking_list) {
+				mlog(0, "End of list found, %p\n", res);
+				dl = NULL;
+				break;
+			}
+			dlm_lockres_get(res);
+			dl->dl_res = res;
+			break;
+		}
+	} else {
+		if (!list_empty(&dlm->tracking_list)) {
+			list_for_each_entry(res, &dlm->tracking_list, tracking)
+				break;
+			dlm_lockres_get(res);
+			dl->dl_res = res;
+		} else
+			dl = NULL;
+	}
+
+	if (dl) {
+		spin_lock(&dl->dl_res->spinlock);
+		dump_lockres(dl->dl_res, dl->dl_buf, dl->dl_len - 1);
+		spin_unlock(&dl->dl_res->spinlock);
+	}
+
+	spin_unlock(&dlm->spinlock);
+
+	return dl;
+}
+
+static void lockres_seq_stop(struct seq_file *m, void *v)
+{
+}
+
+static void *lockres_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	return NULL;
+}
+
+static int lockres_seq_show(struct seq_file *s, void *v)
+{
+	struct debug_lockres *dl = (struct debug_lockres *)v;
+
+	seq_printf(s, "%s", dl->dl_buf);
+
+	return 0;
+}
+
+static struct seq_operations debug_lockres_ops = {
+	.start =	lockres_seq_start,
+	.stop =		lockres_seq_stop,
+	.next =		lockres_seq_next,
+	.show =		lockres_seq_show,
+};
+
+static int debug_lockres_open(struct inode *inode, struct file *file)
+{
+	struct dlm_ctxt *dlm = inode->i_private;
+	int ret = -ENOMEM;
+	struct seq_file *seq;
+	struct debug_lockres *dl = NULL;
+
+	dl = kzalloc(sizeof(struct debug_lockres), GFP_KERNEL);
+	if (!dl) {
+		mlog_errno(ret);
+		goto bail;
+	}
+
+	dl->dl_len = PAGE_SIZE;
+	dl->dl_buf = kmalloc(dl->dl_len, GFP_KERNEL);
+	if (!dl->dl_buf) {
+		mlog_errno(ret);
+		goto bail;
+	}
+
+	ret = seq_open(file, &debug_lockres_ops);
+	if (ret) {
+		mlog_errno(ret);
+		goto bail;
+	}
+
+	seq = (struct seq_file *) file->private_data;
+	seq->private = dl;
+
+	dlm_grab(dlm);
+	dl->dl_ctxt = dlm;
+
+	return 0;
+bail:
+	if (dl)
+		kfree(dl->dl_buf);
+	kfree(dl);
+	return ret;
+}
+
+static int debug_lockres_release(struct inode *inode, struct file *file)
+{
+	struct seq_file *seq = (struct seq_file *)file->private_data;
+	struct debug_lockres *dl = (struct debug_lockres *)seq->private;
+
+	if (dl->dl_res)
+		dlm_lockres_put(dl->dl_res);
+	dlm_put(dl->dl_ctxt);
+	kfree(dl->dl_buf);
+	return seq_release_private(inode, file);
+}
+
+static struct file_operations debug_lockres_fops = {
+	.open =		debug_lockres_open,
+	.release =	debug_lockres_release,
+	.read =		seq_read,
+	.llseek =	seq_lseek,
+};
+/* end - debug lockres funcs */
+
+/* begin - debug state funcs */
+static int debug_state_print(struct dlm_ctxt *dlm, struct debug_buffer *db)
+{
+	int out = 0;
+	struct dlm_reco_node_data *node;
+	char *state;
+	int lres, rres, ures, tres;
+
+	lres = atomic_read(&dlm->local_resources);
+	rres = atomic_read(&dlm->remote_resources);
+	ures = atomic_read(&dlm->unknown_resources);
+	tres = lres + rres + ures;
+
+	spin_lock(&dlm->spinlock);
+
+	switch (dlm->dlm_state) {
+	case DLM_CTXT_NEW:
+		state = "NEW"; break;
+	case DLM_CTXT_JOINED:
+		state = "JOINED"; break;
+	case DLM_CTXT_IN_SHUTDOWN:
+		state = "SHUTDOWN"; break;
+	case DLM_CTXT_LEAVING:
+		state = "LEAVING"; break;
+	default:
+		state = "UNKNOWN"; break;
+	}
+
+	/* Domain: xxxxxxxxxx  Key: 0xdfbac769 */
+	out += snprintf(db->buf + out, db->len - out,
+			"Domain: %s  Key: 0x%08x\n", dlm->name, dlm->key);
+
+	/* Thread Pid: xxx  Node: xxx  State: xxxxx */
+	out += snprintf(db->buf + out, db->len - out,
+			"Thread Pid: %d  Node: %d  State: %s\n",
+			dlm->dlm_thread_task->pid, dlm->node_num, state);
+
+	/* Number of Joins: xxx  Joining Node: xxx */
+	out += snprintf(db->buf + out, db->len - out,
+			"Number of Joins: %d  Joining Node: %d\n",
+			dlm->num_joins, dlm->joining_node);
+
+	/* Domain Map: xx xx xx */
+	out += snprintf(db->buf + out, db->len - out, "Domain Map: ");
+	out += stringify_nodemap(dlm->domain_map, O2NM_MAX_NODES,
+				 db->buf + out, db->len - out);
+	out += snprintf(db->buf + out, db->len - out, "\n");
+
+	/* Live Map: xx xx xx */
+	out += snprintf(db->buf + out, db->len - out, "Live Map: ");
+	out += stringify_nodemap(dlm->live_nodes_map, O2NM_MAX_NODES,
+				 db->buf + out, db->len - out);
+	out += snprintf(db->buf + out, db->len - out, "\n");
+
+	/* Mastered Resources Total: xxx  Locally: xxx  Remotely: ... */
+	out += snprintf(db->buf + out, db->len - out,
+			"Mastered Resources Total: %d  Locally: %d  "
+			"Remotely: %d  Unknown: %d\n",
+			tres, lres, rres, ures);
+
+	/* Lists: Dirty=Empty  Purge=InUse  PendingASTs=Empty  ... */
+	out += snprintf(db->buf + out, db->len - out,
+			"Lists: Dirty=%s  Purge=%s  PendingASTs=%s  "
+			"PendingBASTs=%s  Master=%s\n",
+			(list_empty(&dlm->dirty_list) ? "Empty" : "InUse"),
+			(list_empty(&dlm->purge_list) ? "Empty" : "InUse"),
+			(list_empty(&dlm->pending_asts) ? "Empty" : "InUse"),
+			(list_empty(&dlm->pending_basts) ? "Empty" : "InUse"),
+			(list_empty(&dlm->master_list) ? "Empty" : "InUse"));
+
+	/* Purge Count: xxx  Refs: xxx */
+	out += snprintf(db->buf + out, db->len - out,
+			"Purge Count: %d  Refs: %d\n", dlm->purge_count,
+			atomic_read(&dlm->dlm_refs.refcount));
+
+	/* Dead Node: xxx */
+	out += snprintf(db->buf + out, db->len - out,
+			"Dead Node: %d\n", dlm->reco.dead_node);
+
+	/* What about DLM_RECO_STATE_FINALIZE? */
+	if (dlm->reco.state == DLM_RECO_STATE_ACTIVE)
+		state = "ACTIVE";
+	else
+		state = "INACTIVE";
+
+	/* Recovery Pid: xxxx  Master: xxx  State: xxxx */
+	out += snprintf(db->buf + out, db->len - out,
+			"Recovery Pid: %d  Master: %d  State: %s\n",
+			dlm->dlm_reco_thread_task->pid,
+			dlm->reco.new_master, state);
+
+	/* Recovery Map: xx xx */
+	out += snprintf(db->buf + out, db->len - out, "Recovery Map: ");
+	out += stringify_nodemap(dlm->recovery_map, O2NM_MAX_NODES,
+				 db->buf + out, db->len - out);
+	out += snprintf(db->buf + out, db->len - out, "\n");
+
+	/* Recovery Node State: */
+	out += snprintf(db->buf + out, db->len - out, "Recovery Node State:\n");
+	list_for_each_entry(node, &dlm->reco.node_data, list) {
+		switch (node->state) {
+		case DLM_RECO_NODE_DATA_INIT:
+			state = "INIT";
+			break;
+		case DLM_RECO_NODE_DATA_REQUESTING:
+			state = "REQUESTING";
+			break;
+		case DLM_RECO_NODE_DATA_DEAD:
+			state = "DEAD";
+			break;
+		case DLM_RECO_NODE_DATA_RECEIVING:
+			state = "RECEIVING";
+			break;
+		case DLM_RECO_NODE_DATA_REQUESTED:
+			state = "REQUESTED";
+			break;
+		case DLM_RECO_NODE_DATA_DONE:
+			state = "DONE";
+			break;
+		case DLM_RECO_NODE_DATA_FINALIZE_SENT:
+			state = "FINALIZE-SENT";
+			break;
+		default:
+			state = "BAD";
+			break;
+		}
+		out += snprintf(db->buf + out, db->len - out, "\t%u - %s\n",
+				node->node_num, state);
+	}
+
+	spin_unlock(&dlm->spinlock);
+
+	return out;
+}
+
+static int debug_state_open(struct inode *inode, struct file *file)
+{
+	struct dlm_ctxt *dlm = inode->i_private;
+	struct debug_buffer *db = NULL;
+
+	db = debug_buffer_allocate();
+	if (!db)
+		goto bail;
+
+	db->len = debug_state_print(dlm, db);
+
+	file->private_data = db;
+
+	return 0;
+bail:
+	return -ENOMEM;
+}
+
+static struct file_operations debug_state_fops = {
+	.open =		debug_state_open,
+	.release =	debug_buffer_release,
+	.read =		debug_buffer_read,
+	.llseek =	debug_buffer_llseek,
+};
+/* end - debug state funcs */
+
+/* files in subroot */
+int dlm_debug_init(struct dlm_ctxt *dlm)
+{
+	struct dlm_debug_ctxt *dc = dlm->dlm_debug_ctxt;
+
+	/* for dumping dlm_ctxt */
+	dc->debug_state_dentry = debugfs_create_file(DLM_DEBUGFS_DLM_STATE,
+						     S_IFREG|S_IRUSR,
+						     dlm->dlm_debugfs_subroot,
+						     dlm, &debug_state_fops);
+	if (!dc->debug_state_dentry) {
+		mlog_errno(-ENOMEM);
+		goto bail;
+	}
+
+	/* for dumping lockres */
+	dc->debug_lockres_dentry =
+			debugfs_create_file(DLM_DEBUGFS_LOCKING_STATE,
+					    S_IFREG|S_IRUSR,
+					    dlm->dlm_debugfs_subroot,
+					    dlm, &debug_lockres_fops);
+	if (!dc->debug_lockres_dentry) {
+		mlog_errno(-ENOMEM);
+		goto bail;
+	}
+
+	/* for dumping mles */
+	dc->debug_mle_dentry = debugfs_create_file(DLM_DEBUGFS_MLE_STATE,
+						   S_IFREG|S_IRUSR,
+						   dlm->dlm_debugfs_subroot,
+						   dlm, &debug_mle_fops);
+	if (!dc->debug_mle_dentry) {
+		mlog_errno(-ENOMEM);
+		goto bail;
+	}
+
+	/* for dumping lockres on the purge list */
+	dc->debug_purgelist_dentry =
+			debugfs_create_file(DLM_DEBUGFS_PURGE_LIST,
+					    S_IFREG|S_IRUSR,
+					    dlm->dlm_debugfs_subroot,
+					    dlm, &debug_purgelist_fops);
+	if (!dc->debug_purgelist_dentry) {
+		mlog_errno(-ENOMEM);
+		goto bail;
+	}
+
+	dlm_debug_get(dc);
+	return 0;
+
+bail:
+	dlm_debug_shutdown(dlm);
+	return -ENOMEM;
+}
+
+void dlm_debug_shutdown(struct dlm_ctxt *dlm)
+{
+	struct dlm_debug_ctxt *dc = dlm->dlm_debug_ctxt;
+
+	if (dc) {
+		if (dc->debug_purgelist_dentry)
+			debugfs_remove(dc->debug_purgelist_dentry);
+		if (dc->debug_mle_dentry)
+			debugfs_remove(dc->debug_mle_dentry);
+		if (dc->debug_lockres_dentry)
+			debugfs_remove(dc->debug_lockres_dentry);
+		if (dc->debug_state_dentry)
+			debugfs_remove(dc->debug_state_dentry);
+		dlm_debug_put(dc);
+	}
+}
+
+/* subroot - domain dir */
+int dlm_create_debugfs_subroot(struct dlm_ctxt *dlm)
+{
+	dlm->dlm_debugfs_subroot = debugfs_create_dir(dlm->name,
+						      dlm_debugfs_root);
+	if (!dlm->dlm_debugfs_subroot) {
+		mlog_errno(-ENOMEM);
+		goto bail;
+	}
+
+	dlm->dlm_debug_ctxt = kzalloc(sizeof(struct dlm_debug_ctxt),
+				      GFP_KERNEL);
+	if (!dlm->dlm_debug_ctxt) {
+		mlog_errno(-ENOMEM);
+		goto bail;
+	}
+	kref_init(&dlm->dlm_debug_ctxt->debug_refcnt);
+
+	return 0;
+bail:
+	dlm_destroy_debugfs_subroot(dlm);
+	return -ENOMEM;
+}
+
+void dlm_destroy_debugfs_subroot(struct dlm_ctxt *dlm)
+{
+	if (dlm->dlm_debugfs_subroot)
+		debugfs_remove(dlm->dlm_debugfs_subroot);
+}
+
+/* debugfs root */
+int dlm_create_debugfs_root(void)
+{
+	dlm_debugfs_root = debugfs_create_dir(DLM_DEBUGFS_DIR, NULL);
+	if (!dlm_debugfs_root) {
+		mlog_errno(-ENOMEM);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+void dlm_destroy_debugfs_root(void)
+{
+	if (dlm_debugfs_root)
+		debugfs_remove(dlm_debugfs_root);
+}
+#endif	/* CONFIG_DEBUG_FS */
diff --git a/fs/ocfs2/dlm/dlmdebug.h b/fs/ocfs2/dlm/dlmdebug.h
new file mode 100644
index 0000000..d34a62a
--- /dev/null
+++ b/fs/ocfs2/dlm/dlmdebug.h
@@ -0,0 +1,86 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * dlmdebug.h
+ *
+ * Copyright (C) 2008 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ *
+ */
+
+#ifndef DLMDEBUG_H
+#define DLMDEBUG_H
+
+void dlm_print_one_mle(struct dlm_master_list_entry *mle);
+
+#ifdef CONFIG_DEBUG_FS
+
+struct dlm_debug_ctxt {
+	struct kref debug_refcnt;
+	struct dentry *debug_state_dentry;
+	struct dentry *debug_lockres_dentry;
+	struct dentry *debug_mle_dentry;
+	struct dentry *debug_purgelist_dentry;
+};
+
+struct debug_buffer {
+	int len;
+	char *buf;
+};
+
+struct debug_lockres {
+	int dl_len;
+	char *dl_buf;
+	struct dlm_ctxt *dl_ctxt;
+	struct dlm_lock_resource *dl_res;
+};
+
+int dlm_debug_init(struct dlm_ctxt *dlm);
+void dlm_debug_shutdown(struct dlm_ctxt *dlm);
+
+int dlm_create_debugfs_subroot(struct dlm_ctxt *dlm);
+void dlm_destroy_debugfs_subroot(struct dlm_ctxt *dlm);
+
+int dlm_create_debugfs_root(void);
+void dlm_destroy_debugfs_root(void);
+
+#else
+
+static inline int dlm_debug_init(struct dlm_ctxt *dlm)
+{
+	return 0;
+}
+static inline void dlm_debug_shutdown(struct dlm_ctxt *dlm)
+{
+}
+static inline int dlm_create_debugfs_subroot(struct dlm_ctxt *dlm)
+{
+	return 0;
+}
+static inline void dlm_destroy_debugfs_subroot(struct dlm_ctxt *dlm)
+{
+}
+static inline int dlm_create_debugfs_root(void)
+{
+	return 0;
+}
+static inline void dlm_destroy_debugfs_root(void)
+{
+}
+
+#endif	/* CONFIG_DEBUG_FS */
+#endif	/* DLMDEBUG_H */
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 0879d86..63f8125 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -33,6 +33,7 @@
 #include <linux/spinlock.h>
 #include <linux/delay.h>
 #include <linux/err.h>
+#include <linux/debugfs.h>
 
 #include "cluster/heartbeat.h"
 #include "cluster/nodemanager.h"
@@ -40,8 +41,8 @@
 
 #include "dlmapi.h"
 #include "dlmcommon.h"
-
 #include "dlmdomain.h"
+#include "dlmdebug.h"
 
 #include "dlmver.h"
 
@@ -298,6 +299,8 @@
 
 static void dlm_free_ctxt_mem(struct dlm_ctxt *dlm)
 {
+	dlm_destroy_debugfs_subroot(dlm);
+
 	if (dlm->lockres_hash)
 		dlm_free_pagevec((void **)dlm->lockres_hash, DLM_HASH_PAGES);
 
@@ -395,6 +398,7 @@
 static void dlm_complete_dlm_shutdown(struct dlm_ctxt *dlm)
 {
 	dlm_unregister_domain_handlers(dlm);
+	dlm_debug_shutdown(dlm);
 	dlm_complete_thread(dlm);
 	dlm_complete_recovery_thread(dlm);
 	dlm_destroy_dlm_worker(dlm);
@@ -644,6 +648,7 @@
 void dlm_unregister_domain(struct dlm_ctxt *dlm)
 {
 	int leave = 0;
+	struct dlm_lock_resource *res;
 
 	spin_lock(&dlm_domain_lock);
 	BUG_ON(dlm->dlm_state != DLM_CTXT_JOINED);
@@ -673,6 +678,15 @@
 			msleep(500);
 			mlog(0, "%s: more migration to do\n", dlm->name);
 		}
+
+		/* This list should be empty. If not, print remaining lockres */
+		if (!list_empty(&dlm->tracking_list)) {
+			mlog(ML_ERROR, "Following lockres' are still on the "
+			     "tracking list:\n");
+			list_for_each_entry(res, &dlm->tracking_list, tracking)
+				dlm_print_one_lock_resource(res);
+		}
+
 		dlm_mark_domain_leaving(dlm);
 		dlm_leave_domain(dlm);
 		dlm_complete_dlm_shutdown(dlm);
@@ -1405,6 +1419,12 @@
 		goto bail;
 	}
 
+	status = dlm_debug_init(dlm);
+	if (status < 0) {
+		mlog_errno(status);
+		goto bail;
+	}
+
 	status = dlm_launch_thread(dlm);
 	if (status < 0) {
 		mlog_errno(status);
@@ -1472,6 +1492,7 @@
 
 	if (status) {
 		dlm_unregister_domain_handlers(dlm);
+		dlm_debug_shutdown(dlm);
 		dlm_complete_thread(dlm);
 		dlm_complete_recovery_thread(dlm);
 		dlm_destroy_dlm_worker(dlm);
@@ -1484,6 +1505,7 @@
 				u32 key)
 {
 	int i;
+	int ret;
 	struct dlm_ctxt *dlm = NULL;
 
 	dlm = kzalloc(sizeof(*dlm), GFP_KERNEL);
@@ -1516,6 +1538,15 @@
 	dlm->key = key;
 	dlm->node_num = o2nm_this_node();
 
+	ret = dlm_create_debugfs_subroot(dlm);
+	if (ret < 0) {
+		dlm_free_pagevec((void **)dlm->lockres_hash, DLM_HASH_PAGES);
+		kfree(dlm->name);
+		kfree(dlm);
+		dlm = NULL;
+		goto leave;
+	}
+
 	spin_lock_init(&dlm->spinlock);
 	spin_lock_init(&dlm->master_lock);
 	spin_lock_init(&dlm->ast_lock);
@@ -1526,6 +1557,7 @@
 	INIT_LIST_HEAD(&dlm->reco.node_data);
 	INIT_LIST_HEAD(&dlm->purge_list);
 	INIT_LIST_HEAD(&dlm->dlm_domain_handlers);
+	INIT_LIST_HEAD(&dlm->tracking_list);
 	dlm->reco.state = 0;
 
 	INIT_LIST_HEAD(&dlm->pending_asts);
@@ -1816,21 +1848,49 @@
 	dlm_print_version();
 
 	status = dlm_init_mle_cache();
-	if (status)
-		return -1;
+	if (status) {
+		mlog(ML_ERROR, "Could not create o2dlm_mle slabcache\n");
+		goto error;
+	}
+
+	status = dlm_init_master_caches();
+	if (status) {
+		mlog(ML_ERROR, "Could not create o2dlm_lockres and "
+		     "o2dlm_lockname slabcaches\n");
+		goto error;
+	}
+
+	status = dlm_init_lock_cache();
+	if (status) {
+		mlog(ML_ERROR, "Count not create o2dlm_lock slabcache\n");
+		goto error;
+	}
 
 	status = dlm_register_net_handlers();
 	if (status) {
-		dlm_destroy_mle_cache();
-		return -1;
+		mlog(ML_ERROR, "Unable to register network handlers\n");
+		goto error;
 	}
 
+	status = dlm_create_debugfs_root();
+	if (status)
+		goto error;
+
 	return 0;
+error:
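+	/* safe to call unconditionally: each destroy helper below is a
+	 * no-op for a cache that was never created */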
+	dlm_unregister_net_handlers();
+	dlm_destroy_lock_cache();
+	dlm_destroy_master_caches();
+	dlm_destroy_mle_cache();
+	return -1;
 }
 
 static void __exit dlm_exit (void)
 {
+	dlm_destroy_debugfs_root();
 	dlm_unregister_net_handlers();
+	dlm_destroy_lock_cache();
+	dlm_destroy_master_caches();
 	dlm_destroy_mle_cache();
 }
 
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
index 52578d9..83a9f29 100644
--- a/fs/ocfs2/dlm/dlmlock.c
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -53,6 +53,8 @@
 #define MLOG_MASK_PREFIX ML_DLM
 #include "cluster/masklog.h"
 
+static struct kmem_cache *dlm_lock_cache = NULL;
+
 static DEFINE_SPINLOCK(dlm_cookie_lock);
 static u64 dlm_next_cookie = 1;
 
@@ -64,6 +66,22 @@
 static void dlm_lock_release(struct kref *kref);
 static void dlm_lock_detach_lockres(struct dlm_lock *lock);
 
+int dlm_init_lock_cache(void)
+{
+	dlm_lock_cache = kmem_cache_create("o2dlm_lock",
+					   sizeof(struct dlm_lock),
+					   0, SLAB_HWCACHE_ALIGN, NULL);
+	if (dlm_lock_cache == NULL)
+		return -ENOMEM;
+	return 0;
+}
+
+void dlm_destroy_lock_cache(void)
+{
+	if (dlm_lock_cache)
+		kmem_cache_destroy(dlm_lock_cache);
+}
+
 /* Tell us whether we can grant a new lock request.
  * locking:
  *   caller needs:  res->spinlock
@@ -353,7 +371,7 @@
 		mlog(0, "freeing kernel-allocated lksb\n");
 		kfree(lock->lksb);
 	}
-	kfree(lock);
+	kmem_cache_free(dlm_lock_cache, lock);
 }
 
 /* associate a lock with it's lockres, getting a ref on the lockres */
@@ -412,7 +430,7 @@
 	struct dlm_lock *lock;
 	int kernel_allocated = 0;
 
-	lock = kzalloc(sizeof(*lock), GFP_NOFS);
+	lock = (struct dlm_lock *) kmem_cache_zalloc(dlm_lock_cache, GFP_NOFS);
 	if (!lock)
 		return NULL;
 
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index ea6b895..efc015c 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -48,47 +48,11 @@
 #include "dlmapi.h"
 #include "dlmcommon.h"
 #include "dlmdomain.h"
+#include "dlmdebug.h"
 
 #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER)
 #include "cluster/masklog.h"
 
-enum dlm_mle_type {
-	DLM_MLE_BLOCK,
-	DLM_MLE_MASTER,
-	DLM_MLE_MIGRATION
-};
-
-struct dlm_lock_name
-{
-	u8 len;
-	u8 name[DLM_LOCKID_NAME_MAX];
-};
-
-struct dlm_master_list_entry
-{
-	struct list_head list;
-	struct list_head hb_events;
-	struct dlm_ctxt *dlm;
-	spinlock_t spinlock;
-	wait_queue_head_t wq;
-	atomic_t woken;
-	struct kref mle_refs;
-	int inuse;
-	unsigned long maybe_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
-	unsigned long vote_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
-	unsigned long response_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
-	unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
-	u8 master;
-	u8 new_master;
-	enum dlm_mle_type type;
-	struct o2hb_callback_func mle_hb_up;
-	struct o2hb_callback_func mle_hb_down;
-	union {
-		struct dlm_lock_resource *res;
-		struct dlm_lock_name name;
-	} u;
-};
-
 static void dlm_mle_node_down(struct dlm_ctxt *dlm,
 			      struct dlm_master_list_entry *mle,
 			      struct o2nm_node *node,
@@ -128,98 +92,10 @@
 	return 1;
 }
 
-#define dlm_print_nodemap(m)  _dlm_print_nodemap(m,#m)
-static void _dlm_print_nodemap(unsigned long *map, const char *mapname)
-{
-	int i;
-	printk("%s=[ ", mapname);
-	for (i=0; i<O2NM_MAX_NODES; i++)
-		if (test_bit(i, map))
-			printk("%d ", i);
-	printk("]");
-}
-
-static void dlm_print_one_mle(struct dlm_master_list_entry *mle)
-{
-	int refs;
-	char *type;
-	char attached;
-	u8 master;
-	unsigned int namelen;
-	const char *name;
-	struct kref *k;
-	unsigned long *maybe = mle->maybe_map,
-		      *vote = mle->vote_map,
-		      *resp = mle->response_map,
-		      *node = mle->node_map;
-
-	k = &mle->mle_refs;
-	if (mle->type == DLM_MLE_BLOCK)
-		type = "BLK";
-	else if (mle->type == DLM_MLE_MASTER)
-		type = "MAS";
-	else
-		type = "MIG";
-	refs = atomic_read(&k->refcount);
-	master = mle->master;
-	attached = (list_empty(&mle->hb_events) ? 'N' : 'Y');
-
-	if (mle->type != DLM_MLE_MASTER) {
-		namelen = mle->u.name.len;
-		name = mle->u.name.name;
-	} else {
-		namelen = mle->u.res->lockname.len;
-		name = mle->u.res->lockname.name;
-	}
-
-	mlog(ML_NOTICE, "%.*s: %3s refs=%3d mas=%3u new=%3u evt=%c inuse=%d ",
-		  namelen, name, type, refs, master, mle->new_master, attached,
-		  mle->inuse);
-	dlm_print_nodemap(maybe);
-	printk(", ");
-	dlm_print_nodemap(vote);
-	printk(", ");
-	dlm_print_nodemap(resp);
-	printk(", ");
-	dlm_print_nodemap(node);
-	printk(", ");
-	printk("\n");
-}
-
-#if 0
-/* Code here is included but defined out as it aids debugging */
-
-static void dlm_dump_mles(struct dlm_ctxt *dlm)
-{
-	struct dlm_master_list_entry *mle;
-	
-	mlog(ML_NOTICE, "dumping all mles for domain %s:\n", dlm->name);
-	spin_lock(&dlm->master_lock);
-	list_for_each_entry(mle, &dlm->master_list, list)
-		dlm_print_one_mle(mle);
-	spin_unlock(&dlm->master_lock);
-}
-
-int dlm_dump_all_mles(const char __user *data, unsigned int len)
-{
-	struct dlm_ctxt *dlm;
-
-	spin_lock(&dlm_domain_lock);
-	list_for_each_entry(dlm, &dlm_domains, list) {
-		mlog(ML_NOTICE, "found dlm: %p, name=%s\n", dlm, dlm->name);
-		dlm_dump_mles(dlm);
-	}
-	spin_unlock(&dlm_domain_lock);
-	return len;
-}
-EXPORT_SYMBOL_GPL(dlm_dump_all_mles);
-
-#endif  /*  0  */
-
-
+static struct kmem_cache *dlm_lockres_cache = NULL;
+static struct kmem_cache *dlm_lockname_cache = NULL;
 static struct kmem_cache *dlm_mle_cache = NULL;
 
-
 static void dlm_mle_release(struct kref *kref);
 static void dlm_init_mle(struct dlm_master_list_entry *mle,
 			enum dlm_mle_type type,
@@ -507,7 +383,7 @@
 
 int dlm_init_mle_cache(void)
 {
-	dlm_mle_cache = kmem_cache_create("dlm_mle_cache",
+	dlm_mle_cache = kmem_cache_create("o2dlm_mle",
 					  sizeof(struct dlm_master_list_entry),
 					  0, SLAB_HWCACHE_ALIGN,
 					  NULL);
@@ -560,6 +436,35 @@
  * LOCK RESOURCE FUNCTIONS
  */
 
+int dlm_init_master_caches(void)
+{
+	dlm_lockres_cache = kmem_cache_create("o2dlm_lockres",
+					      sizeof(struct dlm_lock_resource),
+					      0, SLAB_HWCACHE_ALIGN, NULL);
+	if (!dlm_lockres_cache)
+		goto bail;
+
+	dlm_lockname_cache = kmem_cache_create("o2dlm_lockname",
+					       DLM_LOCKID_NAME_MAX, 0,
+					       SLAB_HWCACHE_ALIGN, NULL);
+	if (!dlm_lockname_cache)
+		goto bail;
+
+	return 0;
+bail:
+	dlm_destroy_master_caches();
+	return -ENOMEM;
+}
+
+void dlm_destroy_master_caches(void)
+{
+	if (dlm_lockname_cache)
+		kmem_cache_destroy(dlm_lockname_cache);
+
+	if (dlm_lockres_cache)
+		kmem_cache_destroy(dlm_lockres_cache);
+}
+
 static void dlm_set_lockres_owner(struct dlm_ctxt *dlm,
 				  struct dlm_lock_resource *res,
 				  u8 owner)
@@ -610,6 +515,14 @@
 	mlog(0, "destroying lockres %.*s\n", res->lockname.len,
 	     res->lockname.name);
 
+	if (!list_empty(&res->tracking))
+		list_del_init(&res->tracking);
+	else {
+		mlog(ML_ERROR, "Resource %.*s not on the Tracking list\n",
+		     res->lockname.len, res->lockname.name);
+		dlm_print_one_lock_resource(res);
+	}
+
 	if (!hlist_unhashed(&res->hash_node) ||
 	    !list_empty(&res->granted) ||
 	    !list_empty(&res->converting) ||
@@ -642,9 +555,9 @@
 	BUG_ON(!list_empty(&res->recovering));
 	BUG_ON(!list_empty(&res->purge));
 
-	kfree(res->lockname.name);
+	kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name);
 
-	kfree(res);
+	kmem_cache_free(dlm_lockres_cache, res);
 }
 
 void dlm_lockres_put(struct dlm_lock_resource *res)
@@ -677,6 +590,7 @@
 	INIT_LIST_HEAD(&res->dirty);
 	INIT_LIST_HEAD(&res->recovering);
 	INIT_LIST_HEAD(&res->purge);
+	INIT_LIST_HEAD(&res->tracking);
 	atomic_set(&res->asts_reserved, 0);
 	res->migration_pending = 0;
 	res->inflight_locks = 0;
@@ -692,6 +606,8 @@
 
 	res->last_used = 0;
 
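+	/* Newly created lockres goes on the tracking list, which feeds the
+	 * debugfs lockres dump and the leak check in dlm_unregister_domain(). */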
+	list_add_tail(&res->tracking, &dlm->tracking_list);
+
 	memset(res->lvb, 0, DLM_LVB_LEN);
 	memset(res->refmap, 0, sizeof(res->refmap));
 }
@@ -700,20 +616,28 @@
 				   const char *name,
 				   unsigned int namelen)
 {
-	struct dlm_lock_resource *res;
+	struct dlm_lock_resource *res = NULL;
 
-	res = kmalloc(sizeof(struct dlm_lock_resource), GFP_NOFS);
+	res = (struct dlm_lock_resource *)
+				kmem_cache_zalloc(dlm_lockres_cache, GFP_NOFS);
 	if (!res)
-		return NULL;
+		goto error;
 
-	res->lockname.name = kmalloc(namelen, GFP_NOFS);
-	if (!res->lockname.name) {
-		kfree(res);
-		return NULL;
-	}
+	res->lockname.name = (char *)
+				kmem_cache_zalloc(dlm_lockname_cache, GFP_NOFS);
+	if (!res->lockname.name)
+		goto error;
 
 	dlm_init_lockres(dlm, res, name, namelen);
 	return res;
+
+error:
+	if (res && res->lockname.name)
+		kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name);
+
+	if (res)
+		kmem_cache_free(dlm_lockres_cache, res);
+	return NULL;
 }
 
 void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 1f1873b..394d25a 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -27,18 +27,11 @@
 #include <linux/slab.h>
 #include <linux/highmem.h>
 #include <linux/mm.h>
-#include <linux/crc32.h>
 #include <linux/kthread.h>
 #include <linux/pagemap.h>
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
 
-#include <cluster/heartbeat.h>
-#include <cluster/nodemanager.h>
-#include <cluster/tcp.h>
-
-#include <dlm/dlmapi.h>
-
 #define MLOG_MASK_PREFIX ML_DLM_GLUE
 #include <cluster/masklog.h>
 
@@ -53,6 +46,7 @@
 #include "heartbeat.h"
 #include "inode.h"
 #include "journal.h"
+#include "stackglue.h"
 #include "slot_map.h"
 #include "super.h"
 #include "uptodate.h"
@@ -113,7 +107,8 @@
 				     unsigned int line,
 				     struct ocfs2_lock_res *lockres)
 {
-	struct ocfs2_meta_lvb *lvb = (struct ocfs2_meta_lvb *) lockres->l_lksb.lvb;
+	struct ocfs2_meta_lvb *lvb =
+		(struct ocfs2_meta_lvb *)ocfs2_dlm_lvb(&lockres->l_lksb);
 
 	mlog(level, "LVB information for %s (called from %s:%u):\n",
 	     lockres->l_name, function, line);
@@ -259,31 +254,6 @@
 	.flags		= 0,
 };
 
-/*
- * This is the filesystem locking protocol version.
- *
- * Whenever the filesystem does new things with locks (adds or removes a
- * lock, orders them differently, does different things underneath a lock),
- * the version must be changed.  The protocol is negotiated when joining
- * the dlm domain.  A node may join the domain if its major version is
- * identical to all other nodes and its minor version is greater than
- * or equal to all other nodes.  When its minor version is greater than
- * the other nodes, it will run at the minor version specified by the
- * other nodes.
- *
- * If a locking change is made that will not be compatible with older
- * versions, the major number must be increased and the minor version set
- * to zero.  If a change merely adds a behavior that can be disabled when
- * speaking to older versions, the minor version must be increased.  If a
- * change adds a fully backwards compatible change (eg, LVB changes that
- * are just ignored by older versions), the version does not need to be
- * updated.
- */
-const struct dlm_protocol_version ocfs2_locking_protocol = {
-	.pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
-	.pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
-};
-
 static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres)
 {
 	return lockres->l_type == OCFS2_LOCK_TYPE_META ||
@@ -316,7 +286,7 @@
 static int ocfs2_lock_create(struct ocfs2_super *osb,
 			     struct ocfs2_lock_res *lockres,
 			     int level,
-			     int dlm_flags);
+			     u32 dlm_flags);
 static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
 						     int wanted);
 static void ocfs2_cluster_unlock(struct ocfs2_super *osb,
@@ -330,10 +300,9 @@
 					struct ocfs2_lock_res *lockres);
 static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
 						int convert);
-#define ocfs2_log_dlm_error(_func, _stat, _lockres) do {	\
-	mlog(ML_ERROR, "Dlm error \"%s\" while calling %s on "	\
-		"resource %s: %s\n", dlm_errname(_stat), _func,	\
-		_lockres->l_name, dlm_errmsg(_stat));		\
+#define ocfs2_log_dlm_error(_func, _err, _lockres) do {			\
+	mlog(ML_ERROR, "DLM error %d while calling %s on resource %s\n", \
+	     _err, _func, _lockres->l_name);				\
 } while (0)
 static int ocfs2_downconvert_thread(void *arg);
 static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
@@ -342,12 +311,13 @@
 				  struct buffer_head **bh);
 static void ocfs2_drop_osb_locks(struct ocfs2_super *osb);
 static inline int ocfs2_highest_compat_lock_level(int level);
-static void ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
-				      int new_level);
+static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
+					      int new_level);
 static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
 				  struct ocfs2_lock_res *lockres,
 				  int new_level,
-				  int lvb);
+				  int lvb,
+				  unsigned int generation);
 static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
 				        struct ocfs2_lock_res *lockres);
 static int ocfs2_cancel_convert(struct ocfs2_super *osb,
@@ -406,9 +376,9 @@
 	res->l_ops           = ops;
 	res->l_priv          = priv;
 
-	res->l_level         = LKM_IVMODE;
-	res->l_requested     = LKM_IVMODE;
-	res->l_blocking      = LKM_IVMODE;
+	res->l_level         = DLM_LOCK_IV;
+	res->l_requested     = DLM_LOCK_IV;
+	res->l_blocking      = DLM_LOCK_IV;
 	res->l_action        = OCFS2_AST_INVALID;
 	res->l_unlock_action = OCFS2_UNLOCK_INVALID;
 
@@ -604,10 +574,10 @@
 	BUG_ON(!lockres);
 
 	switch(level) {
-	case LKM_EXMODE:
+	case DLM_LOCK_EX:
 		lockres->l_ex_holders++;
 		break;
-	case LKM_PRMODE:
+	case DLM_LOCK_PR:
 		lockres->l_ro_holders++;
 		break;
 	default:
@@ -625,11 +595,11 @@
 	BUG_ON(!lockres);
 
 	switch(level) {
-	case LKM_EXMODE:
+	case DLM_LOCK_EX:
 		BUG_ON(!lockres->l_ex_holders);
 		lockres->l_ex_holders--;
 		break;
-	case LKM_PRMODE:
+	case DLM_LOCK_PR:
 		BUG_ON(!lockres->l_ro_holders);
 		lockres->l_ro_holders--;
 		break;
@@ -644,12 +614,12 @@
  * lock types are added. */
 static inline int ocfs2_highest_compat_lock_level(int level)
 {
-	int new_level = LKM_EXMODE;
+	int new_level = DLM_LOCK_EX;
 
-	if (level == LKM_EXMODE)
-		new_level = LKM_NLMODE;
-	else if (level == LKM_PRMODE)
-		new_level = LKM_PRMODE;
+	if (level == DLM_LOCK_EX)
+		new_level = DLM_LOCK_NL;
+	else if (level == DLM_LOCK_PR)
+		new_level = DLM_LOCK_PR;
 	return new_level;
 }
 
@@ -688,12 +658,12 @@
 	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
 	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
 	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
-	BUG_ON(lockres->l_blocking <= LKM_NLMODE);
+	BUG_ON(lockres->l_blocking <= DLM_LOCK_NL);
 
 	lockres->l_level = lockres->l_requested;
 	if (lockres->l_level <=
 	    ocfs2_highest_compat_lock_level(lockres->l_blocking)) {
-		lockres->l_blocking = LKM_NLMODE;
+		lockres->l_blocking = DLM_LOCK_NL;
 		lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
 	}
 	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
@@ -712,7 +682,7 @@
 	 * information is already up to date. Convert from NL to
 	 * *anything* however should mark ourselves as needing an
 	 * update */
-	if (lockres->l_level == LKM_NLMODE &&
+	if (lockres->l_level == DLM_LOCK_NL &&
 	    lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
 		lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
 
@@ -729,7 +699,7 @@
 	BUG_ON((!(lockres->l_flags & OCFS2_LOCK_BUSY)));
 	BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
 
-	if (lockres->l_requested > LKM_NLMODE &&
+	if (lockres->l_requested > DLM_LOCK_NL &&
 	    !(lockres->l_flags & OCFS2_LOCK_LOCAL) &&
 	    lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
 		lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
@@ -767,6 +737,113 @@
 	return needs_downconvert;
 }
 
+/*
+ * OCFS2_LOCK_PENDING and l_pending_gen.
+ *
+ * Why does OCFS2_LOCK_PENDING exist?  To close a race between setting
+ * OCFS2_LOCK_BUSY and calling ocfs2_dlm_lock().  See ocfs2_unblock_lock()
+ * for more details on the race.
+ *
+ * OCFS2_LOCK_PENDING closes the race quite nicely.  However, it introduces
+ * a race on itself.  In o2dlm, we can get the ast before ocfs2_dlm_lock()
+ * returns.  The ast clears OCFS2_LOCK_BUSY, and must therefore clear
+ * OCFS2_LOCK_PENDING at the same time.  When ocfs2_dlm_lock() returns,
+ * the caller is going to try to clear PENDING again.  If nothing else is
+ * happening, __lockres_clear_pending() sees PENDING is unset and does
+ * nothing.
+ *
+ * But what if another path (eg downconvert thread) has just started a
+ * new locking action?  The other path has re-set PENDING.  Our path
+ * cannot clear PENDING, because that will re-open the original race
+ * window.
+ *
+ * [Example]
+ *
+ * ocfs2_meta_lock()
+ *  ocfs2_cluster_lock()
+ *   set BUSY
+ *   set PENDING
+ *   drop l_lock
+ *   ocfs2_dlm_lock()
+ *    ocfs2_locking_ast()		ocfs2_downconvert_thread()
+ *     clear PENDING			 ocfs2_unblock_lock()
+ *					  take_l_lock
+ *					  !BUSY
+ *					  ocfs2_prepare_downconvert()
+ *					   set BUSY
+ *					   set PENDING
+ *					  drop l_lock
+ *   take l_lock
+ *   clear PENDING
+ *   drop l_lock
+ *			<window>
+ *					  ocfs2_dlm_lock()
+ *
+ * So as you can see, we now have a window where l_lock is not held,
+ * PENDING is not set, and ocfs2_dlm_lock() has not been called.
+ *
+ * The core problem is that ocfs2_cluster_lock() has cleared the PENDING
+ * set by ocfs2_prepare_downconvert().  That wasn't nice.
+ *
+ * To solve this we introduce l_pending_gen.  A call to
+ * lockres_clear_pending() will only do so when it is passed a generation
+ * number that matches the lockres.  lockres_set_pending() will return the
+ * current generation number.  When ocfs2_cluster_lock() goes to clear
+ * PENDING, it passes the generation it got from set_pending().  In our
+ * example above, the generation numbers will *not* match.  Thus,
+ * ocfs2_cluster_lock() will not clear the PENDING set by
+ * ocfs2_prepare_downconvert().
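+ *
+ * The resulting call-site pattern is sketched below; see
+ * ocfs2_lock_create() and ocfs2_cluster_lock() for the real thing:
+ *
+ *	spin_lock_irqsave(&lockres->l_lock, flags);
+ *	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
+ *	gen = lockres_set_pending(lockres);
+ *	spin_unlock_irqrestore(&lockres->l_lock, flags);
+ *
+ *	ret = ocfs2_dlm_lock(...);
+ *
+ *	lockres_clear_pending(lockres, gen, osb);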
+ */
+
+/* Unlocked version for ocfs2_locking_ast() */
+static void __lockres_clear_pending(struct ocfs2_lock_res *lockres,
+				    unsigned int generation,
+				    struct ocfs2_super *osb)
+{
+	assert_spin_locked(&lockres->l_lock);
+
+	/*
+	 * The ast and locking functions can race us here.  The winner
+	 * will clear pending, the loser will not.
+	 */
+	if (!(lockres->l_flags & OCFS2_LOCK_PENDING) ||
+	    (lockres->l_pending_gen != generation))
+		return;
+
+	lockres_clear_flags(lockres, OCFS2_LOCK_PENDING);
+	lockres->l_pending_gen++;
+
+	/*
+	 * The downconvert thread may have skipped us because we
+	 * were PENDING.  Wake it up.
+	 */
+	if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
+		ocfs2_wake_downconvert_thread(osb);
+}
+
+/* Locked version for callers of ocfs2_dlm_lock() */
+static void lockres_clear_pending(struct ocfs2_lock_res *lockres,
+				  unsigned int generation,
+				  struct ocfs2_super *osb)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&lockres->l_lock, flags);
+	__lockres_clear_pending(lockres, generation, osb);
+	spin_unlock_irqrestore(&lockres->l_lock, flags);
+}
+
+static unsigned int lockres_set_pending(struct ocfs2_lock_res *lockres)
+{
+	assert_spin_locked(&lockres->l_lock);
+	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
+
+	lockres_or_flags(lockres, OCFS2_LOCK_PENDING);
+
+	return lockres->l_pending_gen;
+}
+
+
 static void ocfs2_blocking_ast(void *opaque, int level)
 {
 	struct ocfs2_lock_res *lockres = opaque;
@@ -774,7 +851,7 @@
 	int needs_downconvert;
 	unsigned long flags;
 
-	BUG_ON(level <= LKM_NLMODE);
+	BUG_ON(level <= DLM_LOCK_NL);
 
 	mlog(0, "BAST fired for lockres %s, blocking %d, level %d type %s\n",
 	     lockres->l_name, level, lockres->l_level,
@@ -801,14 +878,22 @@
 static void ocfs2_locking_ast(void *opaque)
 {
 	struct ocfs2_lock_res *lockres = opaque;
-	struct dlm_lockstatus *lksb = &lockres->l_lksb;
+	struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
 	unsigned long flags;
+	int status;
 
 	spin_lock_irqsave(&lockres->l_lock, flags);
 
-	if (lksb->status != DLM_NORMAL) {
-		mlog(ML_ERROR, "lockres %s: lksb status value of %u!\n",
-		     lockres->l_name, lksb->status);
+	status = ocfs2_dlm_lock_status(&lockres->l_lksb);
+
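+	/* an asynchronous trylock (DLM_LKF_NOQUEUE) failure; just clear
+	 * BUSY and let the caller notice the unchanged lock level */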
+	if (status == -EAGAIN) {
+		lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
+		goto out;
+	}
+
+	if (status) {
+		mlog(ML_ERROR, "lockres %s: lksb status value of %d!\n",
+		     lockres->l_name, status);
 		spin_unlock_irqrestore(&lockres->l_lock, flags);
 		return;
 	}
@@ -831,11 +916,23 @@
 		     lockres->l_unlock_action);
 		BUG();
 	}
-
+out:
 	/* set it to something invalid so if we get called again we
 	 * can catch it. */
 	lockres->l_action = OCFS2_AST_INVALID;
 
+	/* Did we try to cancel this lock?  Clear that state */
+	if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT)
+		lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
+
+	/*
+	 * We may have beaten the locking functions here.  We certainly
+	 * know that dlm_lock() has been called :-)
+	 * Because we can't have two lock calls in flight at once, we
+	 * can use lockres->l_pending_gen.
+	 */
+	__lockres_clear_pending(lockres, lockres->l_pending_gen, osb);
+
 	wake_up(&lockres->l_event);
 	spin_unlock_irqrestore(&lockres->l_lock, flags);
 }
@@ -865,15 +962,15 @@
 static int ocfs2_lock_create(struct ocfs2_super *osb,
 			     struct ocfs2_lock_res *lockres,
 			     int level,
-			     int dlm_flags)
+			     u32 dlm_flags)
 {
 	int ret = 0;
-	enum dlm_status status = DLM_NORMAL;
 	unsigned long flags;
+	unsigned int gen;
 
 	mlog_entry_void();
 
-	mlog(0, "lock %s, level = %d, flags = %d\n", lockres->l_name, level,
+	mlog(0, "lock %s, level = %d, flags = %u\n", lockres->l_name, level,
 	     dlm_flags);
 
 	spin_lock_irqsave(&lockres->l_lock, flags);
@@ -886,24 +983,23 @@
 	lockres->l_action = OCFS2_AST_ATTACH;
 	lockres->l_requested = level;
 	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
+	gen = lockres_set_pending(lockres);
 	spin_unlock_irqrestore(&lockres->l_lock, flags);
 
-	status = dlmlock(osb->dlm,
-			 level,
-			 &lockres->l_lksb,
-			 dlm_flags,
-			 lockres->l_name,
-			 OCFS2_LOCK_ID_MAX_LEN - 1,
-			 ocfs2_locking_ast,
-			 lockres,
-			 ocfs2_blocking_ast);
-	if (status != DLM_NORMAL) {
-		ocfs2_log_dlm_error("dlmlock", status, lockres);
-		ret = -EINVAL;
+	ret = ocfs2_dlm_lock(osb->cconn,
+			     level,
+			     &lockres->l_lksb,
+			     dlm_flags,
+			     lockres->l_name,
+			     OCFS2_LOCK_ID_MAX_LEN - 1,
+			     lockres);
+	lockres_clear_pending(lockres, gen, osb);
+	if (ret) {
+		ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
 		ocfs2_recover_from_dlm_error(lockres, 1);
 	}
 
-	mlog(0, "lock %s, successfull return from dlmlock\n", lockres->l_name);
+	mlog(0, "lock %s, return from ocfs2_dlm_lock\n", lockres->l_name);
 
 bail:
 	mlog_exit(ret);
@@ -1016,21 +1112,22 @@
 static int ocfs2_cluster_lock(struct ocfs2_super *osb,
 			      struct ocfs2_lock_res *lockres,
 			      int level,
-			      int lkm_flags,
+			      u32 lkm_flags,
 			      int arg_flags)
 {
 	struct ocfs2_mask_waiter mw;
-	enum dlm_status status;
 	int wait, catch_signals = !(osb->s_mount_opt & OCFS2_MOUNT_NOINTR);
 	int ret = 0; /* gcc doesn't realize wait = 1 guarantees ret is set */
 	unsigned long flags;
+	unsigned int gen;
+	int noqueue_attempted = 0;
 
 	mlog_entry_void();
 
 	ocfs2_init_mask_waiter(&mw);
 
 	if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
-		lkm_flags |= LKM_VALBLK;
+		lkm_flags |= DLM_LKF_VALBLK;
 
 again:
 	wait = 0;
@@ -1068,52 +1165,56 @@
 	}
 
 	if (level > lockres->l_level) {
+		if (noqueue_attempted > 0) {
+			ret = -EAGAIN;
+			goto unlock;
+		}
+		if (lkm_flags & DLM_LKF_NOQUEUE)
+			noqueue_attempted = 1;
+
 		if (lockres->l_action != OCFS2_AST_INVALID)
 			mlog(ML_ERROR, "lockres %s has action %u pending\n",
 			     lockres->l_name, lockres->l_action);
 
 		if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
 			lockres->l_action = OCFS2_AST_ATTACH;
-			lkm_flags &= ~LKM_CONVERT;
+			lkm_flags &= ~DLM_LKF_CONVERT;
 		} else {
 			lockres->l_action = OCFS2_AST_CONVERT;
-			lkm_flags |= LKM_CONVERT;
+			lkm_flags |= DLM_LKF_CONVERT;
 		}
 
 		lockres->l_requested = level;
 		lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
+		gen = lockres_set_pending(lockres);
 		spin_unlock_irqrestore(&lockres->l_lock, flags);
 
-		BUG_ON(level == LKM_IVMODE);
-		BUG_ON(level == LKM_NLMODE);
+		BUG_ON(level == DLM_LOCK_IV);
+		BUG_ON(level == DLM_LOCK_NL);
 
 		mlog(0, "lock %s, convert from %d to level = %d\n",
 		     lockres->l_name, lockres->l_level, level);
 
 		/* call dlm_lock to upgrade lock now */
-		status = dlmlock(osb->dlm,
-				 level,
-				 &lockres->l_lksb,
-				 lkm_flags,
-				 lockres->l_name,
-				 OCFS2_LOCK_ID_MAX_LEN - 1,
-				 ocfs2_locking_ast,
-				 lockres,
-				 ocfs2_blocking_ast);
-		if (status != DLM_NORMAL) {
-			if ((lkm_flags & LKM_NOQUEUE) &&
-			    (status == DLM_NOTQUEUED))
-				ret = -EAGAIN;
-			else {
-				ocfs2_log_dlm_error("dlmlock", status,
-						    lockres);
-				ret = -EINVAL;
+		ret = ocfs2_dlm_lock(osb->cconn,
+				     level,
+				     &lockres->l_lksb,
+				     lkm_flags,
+				     lockres->l_name,
+				     OCFS2_LOCK_ID_MAX_LEN - 1,
+				     lockres);
+		lockres_clear_pending(lockres, gen, osb);
+		if (ret) {
+			if (!(lkm_flags & DLM_LKF_NOQUEUE) ||
+			    (ret != -EAGAIN)) {
+				ocfs2_log_dlm_error("ocfs2_dlm_lock",
+						    ret, lockres);
 			}
 			ocfs2_recover_from_dlm_error(lockres, 1);
 			goto out;
 		}
 
-		mlog(0, "lock %s, successfull return from dlmlock\n",
+		mlog(0, "lock %s, successfull return from ocfs2_dlm_lock\n",
 		     lockres->l_name);
 
 		/* At this point we've gone inside the dlm and need to
@@ -1177,9 +1278,9 @@
 				 int ex,
 				 int local)
 {
-	int level =  ex ? LKM_EXMODE : LKM_PRMODE;
+	int level =  ex ? DLM_LOCK_EX : DLM_LOCK_PR;
 	unsigned long flags;
-	int lkm_flags = local ? LKM_LOCAL : 0;
+	u32 lkm_flags = local ? DLM_LKF_LOCAL : 0;
 
 	spin_lock_irqsave(&lockres->l_lock, flags);
 	BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
@@ -1222,7 +1323,7 @@
 	}
 
 	/*
-	 * We don't want to use LKM_LOCAL on a meta data lock as they
+	 * We don't want to use DLM_LKF_LOCAL on a meta data lock as they
 	 * don't use a generation in their lock names.
 	 */
 	ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_inode_lockres, 1, 0);
@@ -1261,7 +1362,7 @@
 
 	lockres = &OCFS2_I(inode)->ip_rw_lockres;
 
-	level = write ? LKM_EXMODE : LKM_PRMODE;
+	level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
 
 	status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres, level, 0,
 				    0);
@@ -1274,7 +1375,7 @@
 
 void ocfs2_rw_unlock(struct inode *inode, int write)
 {
-	int level = write ? LKM_EXMODE : LKM_PRMODE;
+	int level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
 	struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_rw_lockres;
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 
@@ -1312,7 +1413,7 @@
 	lockres = &OCFS2_I(inode)->ip_open_lockres;
 
 	status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres,
-				    LKM_PRMODE, 0, 0);
+				    DLM_LOCK_PR, 0, 0);
 	if (status < 0)
 		mlog_errno(status);
 
@@ -1340,16 +1441,16 @@
 
 	lockres = &OCFS2_I(inode)->ip_open_lockres;
 
-	level = write ? LKM_EXMODE : LKM_PRMODE;
+	level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
 
 	/*
 	 * The file system may already be holding a PRMODE/EXMODE open lock.
-	 * Since we pass LKM_NOQUEUE, the request won't block waiting on
+	 * Since we pass DLM_LKF_NOQUEUE, the request won't block waiting on
 	 * other nodes and the -EAGAIN will indicate to the caller that
 	 * this inode is still in use.
 	 */
 	status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres,
-				    level, LKM_NOQUEUE, 0);
+				    level, DLM_LKF_NOQUEUE, 0);
 
 out:
 	mlog_exit(status);
@@ -1374,10 +1475,10 @@
 
 	if(lockres->l_ro_holders)
 		ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres,
-				     LKM_PRMODE);
+				     DLM_LOCK_PR);
 	if(lockres->l_ex_holders)
 		ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres,
-				     LKM_EXMODE);
+				     DLM_LOCK_EX);
 
 out:
 	mlog_exit_void();
@@ -1464,7 +1565,7 @@
 	ocfs2_init_mask_waiter(&mw);
 
 	if ((lockres->l_flags & OCFS2_LOCK_BUSY) ||
-	    (lockres->l_level > LKM_NLMODE)) {
+	    (lockres->l_level > DLM_LOCK_NL)) {
 		mlog(ML_ERROR,
 		     "File lock \"%s\" has busy or locked state: flags: 0x%lx, "
 		     "level: %u\n", lockres->l_name, lockres->l_flags,
@@ -1503,14 +1604,12 @@
 	lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
 	spin_unlock_irqrestore(&lockres->l_lock, flags);
 
-	ret = dlmlock(osb->dlm, level, &lockres->l_lksb, lkm_flags,
-		      lockres->l_name, OCFS2_LOCK_ID_MAX_LEN - 1,
-		      ocfs2_locking_ast, lockres, ocfs2_blocking_ast);
-	if (ret != DLM_NORMAL) {
-		if (trylock && ret == DLM_NOTQUEUED)
-			ret = -EAGAIN;
-		else {
-			ocfs2_log_dlm_error("dlmlock", ret, lockres);
+	ret = ocfs2_dlm_lock(osb->cconn, level, &lockres->l_lksb, lkm_flags,
+			     lockres->l_name, OCFS2_LOCK_ID_MAX_LEN - 1,
+			     lockres);
+	if (ret) {
+		if (!trylock || (ret != -EAGAIN)) {
+			ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
 			ret = -EINVAL;
 		}
 
@@ -1537,6 +1636,10 @@
 		 * to just bubble success back up to the user.
 		 */
 		ret = ocfs2_flock_handle_signal(lockres, level);
+	} else if (!ret && (level > lockres->l_level)) {
+		/* Trylock failed asynchronously */
+		BUG_ON(!trylock);
+		ret = -EAGAIN;
 	}
 
 out:
@@ -1549,6 +1652,7 @@
 void ocfs2_file_unlock(struct file *file)
 {
 	int ret;
+	unsigned int gen;
 	unsigned long flags;
 	struct ocfs2_file_private *fp = file->private_data;
 	struct ocfs2_lock_res *lockres = &fp->fp_flock;
@@ -1572,13 +1676,13 @@
 	 * Fake a blocking ast for the downconvert code.
 	 */
 	lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
-	lockres->l_blocking = LKM_EXMODE;
+	lockres->l_blocking = DLM_LOCK_EX;
 
-	ocfs2_prepare_downconvert(lockres, LKM_NLMODE);
+	gen = ocfs2_prepare_downconvert(lockres, DLM_LOCK_NL);
 	lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
 	spin_unlock_irqrestore(&lockres->l_lock, flags);
 
-	ret = ocfs2_downconvert_lock(osb, lockres, LKM_NLMODE, 0);
+	ret = ocfs2_downconvert_lock(osb, lockres, DLM_LOCK_NL, 0, gen);
 	if (ret) {
 		mlog_errno(ret);
 		return;
@@ -1601,11 +1705,11 @@
 	 * condition. */
 	if (lockres->l_flags & OCFS2_LOCK_BLOCKED) {
 		switch(lockres->l_blocking) {
-		case LKM_EXMODE:
+		case DLM_LOCK_EX:
 			if (!lockres->l_ex_holders && !lockres->l_ro_holders)
 				kick = 1;
 			break;
-		case LKM_PRMODE:
+		case DLM_LOCK_PR:
 			if (!lockres->l_ex_holders)
 				kick = 1;
 			break;
@@ -1648,7 +1752,7 @@
 
 	mlog_entry_void();
 
-	lvb = (struct ocfs2_meta_lvb *) lockres->l_lksb.lvb;
+	lvb = (struct ocfs2_meta_lvb *)ocfs2_dlm_lvb(&lockres->l_lksb);
 
 	/*
 	 * Invalidate the LVB of a deleted inode - this way other
@@ -1700,7 +1804,7 @@
 
 	mlog_meta_lvb(0, lockres);
 
-	lvb = (struct ocfs2_meta_lvb *) lockres->l_lksb.lvb;
+	lvb = (struct ocfs2_meta_lvb *)ocfs2_dlm_lvb(&lockres->l_lksb);
 
 	/* We're safe here without the lockres lock... */
 	spin_lock(&oi->ip_lock);
@@ -1735,7 +1839,8 @@
 static inline int ocfs2_meta_lvb_is_trustable(struct inode *inode,
 					      struct ocfs2_lock_res *lockres)
 {
-	struct ocfs2_meta_lvb *lvb = (struct ocfs2_meta_lvb *) lockres->l_lksb.lvb;
+	struct ocfs2_meta_lvb *lvb =
+		(struct ocfs2_meta_lvb *)ocfs2_dlm_lvb(&lockres->l_lksb);
 
 	if (lvb->lvb_version == OCFS2_LVB_VERSION
 	    && be32_to_cpu(lvb->lvb_igeneration) == inode->i_generation)
@@ -1923,7 +2028,8 @@
 			 int ex,
 			 int arg_flags)
 {
-	int status, level, dlm_flags, acquired;
+	int status, level, acquired;
+	u32 dlm_flags;
 	struct ocfs2_lock_res *lockres = NULL;
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 	struct buffer_head *local_bh = NULL;
@@ -1950,14 +2056,13 @@
 		goto local;
 
 	if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
-		wait_event(osb->recovery_event,
-			   ocfs2_node_map_is_empty(osb, &osb->recovery_map));
+		ocfs2_wait_for_recovery(osb);
 
 	lockres = &OCFS2_I(inode)->ip_inode_lockres;
-	level = ex ? LKM_EXMODE : LKM_PRMODE;
+	level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
 	dlm_flags = 0;
 	if (arg_flags & OCFS2_META_LOCK_NOQUEUE)
-		dlm_flags |= LKM_NOQUEUE;
+		dlm_flags |= DLM_LKF_NOQUEUE;
 
 	status = ocfs2_cluster_lock(osb, lockres, level, dlm_flags, arg_flags);
 	if (status < 0) {
@@ -1974,8 +2079,7 @@
 	 * committed to owning this lock so we don't allow signals to
 	 * abort the operation. */
 	if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
-		wait_event(osb->recovery_event,
-			   ocfs2_node_map_is_empty(osb, &osb->recovery_map));
+		ocfs2_wait_for_recovery(osb);
 
 local:
 	/*
@@ -2109,7 +2213,7 @@
 void ocfs2_inode_unlock(struct inode *inode,
 		       int ex)
 {
-	int level = ex ? LKM_EXMODE : LKM_PRMODE;
+	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
 	struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_inode_lockres;
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 
@@ -2130,10 +2234,8 @@
 		     int ex)
 {
 	int status = 0;
-	int level = ex ? LKM_EXMODE : LKM_PRMODE;
+	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
 	struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
-	struct buffer_head *bh;
-	struct ocfs2_slot_info *si = osb->slot_info;
 
 	mlog_entry_void();
 
@@ -2159,11 +2261,7 @@
 		goto bail;
 	}
 	if (status) {
-		bh = si->si_bh;
-		status = ocfs2_read_block(osb, bh->b_blocknr, &bh, 0,
-					  si->si_inode);
-		if (status == 0)
-			ocfs2_update_slot_info(si);
+		status = ocfs2_refresh_slot_info(osb);
 
 		ocfs2_complete_lock_res_refresh(lockres, status);
 
@@ -2178,7 +2276,7 @@
 void ocfs2_super_unlock(struct ocfs2_super *osb,
 			int ex)
 {
-	int level = ex ? LKM_EXMODE : LKM_PRMODE;
+	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
 	struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
 
 	if (!ocfs2_mount_local(osb))
@@ -2196,7 +2294,7 @@
 	if (ocfs2_mount_local(osb))
 		return 0;
 
-	status = ocfs2_cluster_lock(osb, lockres, LKM_EXMODE, 0, 0);
+	status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX, 0, 0);
 	if (status < 0)
 		mlog_errno(status);
 
@@ -2208,13 +2306,13 @@
 	struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
 
 	if (!ocfs2_mount_local(osb))
-		ocfs2_cluster_unlock(osb, lockres, LKM_EXMODE);
+		ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
 }
 
 int ocfs2_dentry_lock(struct dentry *dentry, int ex)
 {
 	int ret;
-	int level = ex ? LKM_EXMODE : LKM_PRMODE;
+	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
 	struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
 	struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
 
@@ -2235,7 +2333,7 @@
 
 void ocfs2_dentry_unlock(struct dentry *dentry, int ex)
 {
-	int level = ex ? LKM_EXMODE : LKM_PRMODE;
+	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
 	struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
 	struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
 
@@ -2400,7 +2498,7 @@
 		   lockres->l_blocking);
 
 	/* Dump the raw LVB */
-	lvb = lockres->l_lksb.lvb;
+	lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
 	for(i = 0; i < DLM_LVB_LEN; i++)
 		seq_printf(m, "0x%x\t", lvb[i]);
 
@@ -2504,13 +2602,14 @@
 int ocfs2_dlm_init(struct ocfs2_super *osb)
 {
 	int status = 0;
-	u32 dlm_key;
-	struct dlm_ctxt *dlm = NULL;
+	struct ocfs2_cluster_connection *conn = NULL;
 
 	mlog_entry_void();
 
-	if (ocfs2_mount_local(osb))
+	if (ocfs2_mount_local(osb)) {
+		osb->node_num = 0;
 		goto local;
+	}
 
 	status = ocfs2_dlm_init_debug(osb);
 	if (status < 0) {
@@ -2527,26 +2626,31 @@
 		goto bail;
 	}
 
-	/* used by the dlm code to make message headers unique, each
-	 * node in this domain must agree on this. */
-	dlm_key = crc32_le(0, osb->uuid_str, strlen(osb->uuid_str));
-
 	/* for now, uuid == domain */
-	dlm = dlm_register_domain(osb->uuid_str, dlm_key,
-				  &osb->osb_locking_proto);
-	if (IS_ERR(dlm)) {
-		status = PTR_ERR(dlm);
+	status = ocfs2_cluster_connect(osb->osb_cluster_stack,
+				       osb->uuid_str,
+				       strlen(osb->uuid_str),
+				       ocfs2_do_node_down, osb,
+				       &conn);
+	if (status) {
 		mlog_errno(status);
 		goto bail;
 	}
 
-	dlm_register_eviction_cb(dlm, &osb->osb_eviction_cb);
+	status = ocfs2_cluster_this_node(&osb->node_num);
+	if (status < 0) {
+		mlog_errno(status);
+		mlog(ML_ERROR,
+		     "could not find this host's node number\n");
+		ocfs2_cluster_disconnect(conn, 0);
+		goto bail;
+	}
 
 local:
 	ocfs2_super_lock_res_init(&osb->osb_super_lockres, osb);
 	ocfs2_rename_lock_res_init(&osb->osb_rename_lockres, osb);
 
-	osb->dlm = dlm;
+	osb->cconn = conn;
 
 	status = 0;
 bail:
@@ -2560,14 +2664,19 @@
 	return status;
 }
 
-void ocfs2_dlm_shutdown(struct ocfs2_super *osb)
+void ocfs2_dlm_shutdown(struct ocfs2_super *osb,
+			int hangup_pending)
 {
 	mlog_entry_void();
 
-	dlm_unregister_eviction_cb(&osb->osb_eviction_cb);
-
 	ocfs2_drop_osb_locks(osb);
 
+	/*
+	 * Now that we have dropped all locks and ocfs2_dismount_volume()
+	 * has disabled recovery, the DLM won't be talking to us.  It's
+	 * safe to tear things down before disconnecting the cluster.
+	 */
+
 	if (osb->dc_task) {
 		kthread_stop(osb->dc_task);
 		osb->dc_task = NULL;
@@ -2576,15 +2685,15 @@
 	ocfs2_lock_res_free(&osb->osb_super_lockres);
 	ocfs2_lock_res_free(&osb->osb_rename_lockres);
 
-	dlm_unregister_domain(osb->dlm);
-	osb->dlm = NULL;
+	ocfs2_cluster_disconnect(osb->cconn, hangup_pending);
+	osb->cconn = NULL;
 
 	ocfs2_dlm_shutdown_debug(osb);
 
 	mlog_exit_void();
 }
 
-static void ocfs2_unlock_ast(void *opaque, enum dlm_status status)
+static void ocfs2_unlock_ast(void *opaque, int error)
 {
 	struct ocfs2_lock_res *lockres = opaque;
 	unsigned long flags;
@@ -2595,24 +2704,9 @@
 	     lockres->l_unlock_action);
 
 	spin_lock_irqsave(&lockres->l_lock, flags);
-	/* We tried to cancel a convert request, but it was already
-	 * granted. All we want to do here is clear our unlock
-	 * state. The wake_up call done at the bottom is redundant
-	 * (ocfs2_prepare_cancel_convert doesn't sleep on this) but doesn't
-	 * hurt anything anyway */
-	if (status == DLM_CANCELGRANT &&
-	    lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
-		mlog(0, "Got cancelgrant for %s\n", lockres->l_name);
-
-		/* We don't clear the busy flag in this case as it
-		 * should have been cleared by the ast which the dlm
-		 * has called. */
-		goto complete_unlock;
-	}
-
-	if (status != DLM_NORMAL) {
-		mlog(ML_ERROR, "Dlm passes status %d for lock %s, "
-		     "unlock_action %d\n", status, lockres->l_name,
+	if (error) {
+		mlog(ML_ERROR, "Dlm passes error %d for lock %s, "
+		     "unlock_action %d\n", error, lockres->l_name,
 		     lockres->l_unlock_action);
 		spin_unlock_irqrestore(&lockres->l_lock, flags);
 		return;
@@ -2624,14 +2718,13 @@
 		lockres->l_action = OCFS2_AST_INVALID;
 		break;
 	case OCFS2_UNLOCK_DROP_LOCK:
-		lockres->l_level = LKM_IVMODE;
+		lockres->l_level = DLM_LOCK_IV;
 		break;
 	default:
 		BUG();
 	}
 
 	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
-complete_unlock:
 	lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
 	spin_unlock_irqrestore(&lockres->l_lock, flags);
 
@@ -2643,16 +2736,16 @@
 static int ocfs2_drop_lock(struct ocfs2_super *osb,
 			   struct ocfs2_lock_res *lockres)
 {
-	enum dlm_status status;
+	int ret;
 	unsigned long flags;
-	int lkm_flags = 0;
+	u32 lkm_flags = 0;
 
 	/* We didn't get anywhere near actually using this lockres. */
 	if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED))
 		goto out;
 
 	if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
-		lkm_flags |= LKM_VALBLK;
+		lkm_flags |= DLM_LKF_VALBLK;
 
 	spin_lock_irqsave(&lockres->l_lock, flags);
 
@@ -2678,7 +2771,7 @@
 
 	if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
 		if (lockres->l_flags & OCFS2_LOCK_ATTACHED &&
-		    lockres->l_level == LKM_EXMODE &&
+		    lockres->l_level == DLM_LOCK_EX &&
 		    !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
 			lockres->l_ops->set_lvb(lockres);
 	}
@@ -2707,15 +2800,15 @@
 
 	mlog(0, "lock %s\n", lockres->l_name);
 
-	status = dlmunlock(osb->dlm, &lockres->l_lksb, lkm_flags,
-			   ocfs2_unlock_ast, lockres);
-	if (status != DLM_NORMAL) {
-		ocfs2_log_dlm_error("dlmunlock", status, lockres);
+	ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb, lkm_flags,
+			       lockres);
+	if (ret) {
+		ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
 		mlog(ML_ERROR, "lockres flags: %lu\n", lockres->l_flags);
-		dlm_print_one_lock(lockres->l_lksb.lockid);
+		ocfs2_dlm_dump_lksb(&lockres->l_lksb);
 		BUG();
 	}
-	mlog(0, "lock %s, successfull return from dlmunlock\n",
+	mlog(0, "lock %s, successfull return from ocfs2_dlm_unlock\n",
 	     lockres->l_name);
 
 	ocfs2_wait_on_busy_lock(lockres);
@@ -2806,15 +2899,15 @@
 	return status;
 }
 
-static void ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
-				      int new_level)
+static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
+					      int new_level)
 {
 	assert_spin_locked(&lockres->l_lock);
 
-	BUG_ON(lockres->l_blocking <= LKM_NLMODE);
+	BUG_ON(lockres->l_blocking <= DLM_LOCK_NL);
 
 	if (lockres->l_level <= new_level) {
-		mlog(ML_ERROR, "lockres->l_level (%u) <= new_level (%u)\n",
+		mlog(ML_ERROR, "lockres->l_level (%d) <= new_level (%d)\n",
 		     lockres->l_level, new_level);
 		BUG();
 	}
@@ -2825,33 +2918,33 @@
 	lockres->l_action = OCFS2_AST_DOWNCONVERT;
 	lockres->l_requested = new_level;
 	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
+	return lockres_set_pending(lockres);
 }
 
 static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
 				  struct ocfs2_lock_res *lockres,
 				  int new_level,
-				  int lvb)
+				  int lvb,
+				  unsigned int generation)
 {
-	int ret, dlm_flags = LKM_CONVERT;
-	enum dlm_status status;
+	int ret;
+	u32 dlm_flags = DLM_LKF_CONVERT;
 
 	mlog_entry_void();
 
 	if (lvb)
-		dlm_flags |= LKM_VALBLK;
+		dlm_flags |= DLM_LKF_VALBLK;
 
-	status = dlmlock(osb->dlm,
-			 new_level,
-			 &lockres->l_lksb,
-			 dlm_flags,
-			 lockres->l_name,
-			 OCFS2_LOCK_ID_MAX_LEN - 1,
-			 ocfs2_locking_ast,
-			 lockres,
-			 ocfs2_blocking_ast);
-	if (status != DLM_NORMAL) {
-		ocfs2_log_dlm_error("dlmlock", status, lockres);
-		ret = -EINVAL;
+	ret = ocfs2_dlm_lock(osb->cconn,
+			     new_level,
+			     &lockres->l_lksb,
+			     dlm_flags,
+			     lockres->l_name,
+			     OCFS2_LOCK_ID_MAX_LEN - 1,
+			     lockres);
+	lockres_clear_pending(lockres, generation, osb);
+	if (ret) {
+		ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
 		ocfs2_recover_from_dlm_error(lockres, 1);
 		goto bail;
 	}
@@ -2862,7 +2955,7 @@
 	return ret;
 }
 
-/* returns 1 when the caller should unlock and call dlmunlock */
+/* returns 1 when the caller should unlock and call ocfs2_dlm_unlock */
 static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
 				        struct ocfs2_lock_res *lockres)
 {
@@ -2898,24 +2991,18 @@
 				struct ocfs2_lock_res *lockres)
 {
 	int ret;
-	enum dlm_status status;
 
 	mlog_entry_void();
 	mlog(0, "lock %s\n", lockres->l_name);
 
-	ret = 0;
-	status = dlmunlock(osb->dlm,
-			   &lockres->l_lksb,
-			   LKM_CANCEL,
-			   ocfs2_unlock_ast,
-			   lockres);
-	if (status != DLM_NORMAL) {
-		ocfs2_log_dlm_error("dlmunlock", status, lockres);
-		ret = -EINVAL;
+	ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb,
+			       DLM_LKF_CANCEL, lockres);
+	if (ret) {
+		ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
 		ocfs2_recover_from_dlm_error(lockres, 0);
 	}
 
-	mlog(0, "lock %s return from dlmunlock\n", lockres->l_name);
+	mlog(0, "lock %s return from ocfs2_dlm_unlock\n", lockres->l_name);
 
 	mlog_exit(ret);
 	return ret;
@@ -2930,6 +3017,7 @@
 	int new_level;
 	int ret = 0;
 	int set_lvb = 0;
+	unsigned int gen;
 
 	mlog_entry_void();
 
@@ -2939,6 +3027,32 @@
 
 recheck:
 	if (lockres->l_flags & OCFS2_LOCK_BUSY) {
+		/* XXX
+		 * This is a *big* race.  The OCFS2_LOCK_PENDING flag
+		 * exists entirely for one reason - another thread has set
+		 * OCFS2_LOCK_BUSY, but has *NOT* yet called dlm_lock().
+		 *
+		 * If we do ocfs2_cancel_convert() before the other thread
+		 * calls dlm_lock(), our cancel will do nothing.  We will
+		 * get no ast, and we will have no way of knowing the
+		 * cancel failed.  Meanwhile, the other thread will call
+		 * into dlm_lock() and wait...forever.
+		 *
+		 * Why forever?  Because another node has asked for the
+		 * lock first; that's why we're here in unblock_lock().
+		 *
+		 * The solution is OCFS2_LOCK_PENDING.  When PENDING is
+		 * set, we just requeue the unblock.  Only when the other
+		 * thread has called dlm_lock() and cleared PENDING will
+		 * we then cancel their request.
+		 *
+		 * All callers of dlm_lock() must set OCFS2_LOCK_PENDING
+		 * at the same time they set OCFS2_LOCK_BUSY.  They must
+		 * clear OCFS2_LOCK_PENDING after dlm_lock() returns.
+		 */
+		if (lockres->l_flags & OCFS2_LOCK_PENDING)
+			goto leave_requeue;
+
 		ctl->requeue = 1;
 		ret = ocfs2_prepare_cancel_convert(osb, lockres);
 		spin_unlock_irqrestore(&lockres->l_lock, flags);
@@ -2952,13 +3066,13 @@
 
 	/* if we're blocking an exclusive and we have *any* holders,
 	 * then requeue. */
-	if ((lockres->l_blocking == LKM_EXMODE)
+	if ((lockres->l_blocking == DLM_LOCK_EX)
 	    && (lockres->l_ex_holders || lockres->l_ro_holders))
 		goto leave_requeue;
 
 	/* If it's a PR we're blocking, then only
 	 * requeue if we've got any EX holders */
-	if (lockres->l_blocking == LKM_PRMODE &&
+	if (lockres->l_blocking == DLM_LOCK_PR &&
 	    lockres->l_ex_holders)
 		goto leave_requeue;
 
@@ -3005,7 +3119,7 @@
 	ctl->requeue = 0;
 
 	if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
-		if (lockres->l_level == LKM_EXMODE)
+		if (lockres->l_level == DLM_LOCK_EX)
 			set_lvb = 1;
 
 		/*
@@ -3018,9 +3132,11 @@
 			lockres->l_ops->set_lvb(lockres);
 	}
 
-	ocfs2_prepare_downconvert(lockres, new_level);
+	gen = ocfs2_prepare_downconvert(lockres, new_level);
 	spin_unlock_irqrestore(&lockres->l_lock, flags);
-	ret = ocfs2_downconvert_lock(osb, lockres, new_level, set_lvb);
+	ret = ocfs2_downconvert_lock(osb, lockres, new_level, set_lvb,
+				     gen);
+
 leave:
 	mlog_exit(ret);
 	return ret;
@@ -3059,7 +3175,7 @@
 		     (unsigned long long)OCFS2_I(inode)->ip_blkno);
 	}
 	sync_mapping_buffers(mapping);
-	if (blocking == LKM_EXMODE) {
+	if (blocking == DLM_LOCK_EX) {
 		truncate_inode_pages(mapping, 0);
 	} else {
 		/* We only need to wait on the I/O if we're not also
@@ -3080,8 +3196,8 @@
 	struct inode *inode = ocfs2_lock_res_inode(lockres);
 	int checkpointed = ocfs2_inode_fully_checkpointed(inode);
 
-	BUG_ON(new_level != LKM_NLMODE && new_level != LKM_PRMODE);
-	BUG_ON(lockres->l_level != LKM_EXMODE && !checkpointed);
+	BUG_ON(new_level != DLM_LOCK_NL && new_level != DLM_LOCK_PR);
+	BUG_ON(lockres->l_level != DLM_LOCK_EX && !checkpointed);
 
 	if (checkpointed)
 		return 1;
@@ -3145,7 +3261,7 @@
 	 * valid. The downconvert code will retain a PR for this node,
 	 * so there's no further work to do.
 	 */
-	if (blocking == LKM_PRMODE)
+	if (blocking == DLM_LOCK_PR)
 		return UNBLOCK_CONTINUE;
 
 	/*
@@ -3219,6 +3335,45 @@
 	return UNBLOCK_CONTINUE_POST;
 }
 
+/*
+ * This is the filesystem locking protocol.  It provides the lock handling
+ * hooks for the underlying DLM.  It has a maximum version number.
+ * The version number allows interoperability with systems running at
+ * the same major number and an equal or smaller minor number.
+ *
+ * Whenever the filesystem does new things with locks (adds or removes a
+ * lock, orders them differently, does different things underneath a lock),
+ * the version must be changed.  The protocol is negotiated when joining
+ * the dlm domain.  A node may join the domain if its major version is
+ * identical to all other nodes and its minor version is greater than
+ * or equal to all other nodes.  When its minor version is greater than
+ * the other nodes, it will run at the minor version specified by the
+ * other nodes.
+ *
+ * If a locking change is made that will not be compatible with older
+ * versions, the major number must be increased and the minor version set
+ * to zero.  If a change merely adds a behavior that can be disabled when
+ * speaking to older versions, the minor version must be increased.  If a
+ * change adds a fully backwards compatible change (eg, LVB changes that
+ * are just ignored by older versions), the version does not need to be
+ * updated.
+ */
+static struct ocfs2_locking_protocol lproto = {
+	.lp_max_version = {
+		.pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
+		.pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
+	},
+	.lp_lock_ast		= ocfs2_locking_ast,
+	.lp_blocking_ast	= ocfs2_blocking_ast,
+	.lp_unlock_ast		= ocfs2_unlock_ast,
+};
+
+void ocfs2_set_locking_protocol(void)
+{
+	ocfs2_stack_glue_set_locking_protocol(&lproto);
+}
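As a worked example of the join rule spelled out in the comment above, here is a small self-contained sketch (a hypothetical helper, not part of the kernel) computing the minor version a joining node would run at:

	#include <stdio.h>

	struct proto_version {
		unsigned char pv_major;
		unsigned char pv_minor;
	};

	/* returns the negotiated minor, or -1 if the node may not join */
	static int negotiate(struct proto_version mine, struct proto_version domain)
	{
		if (mine.pv_major != domain.pv_major)
			return -1;		/* majors must be identical */
		if (mine.pv_minor < domain.pv_minor)
			return -1;		/* node is older than the domain */
		return domain.pv_minor;		/* newer node runs at the domain's minor */
	}

	int main(void)
	{
		struct proto_version domain = { 1, 0 }, node = { 1, 2 };

		/* prints 0: node 1.2 may join, but runs at minor 0 */
		printf("%d\n", negotiate(node, domain));
		return 0;
	}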
+
+
 static void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
 				       struct ocfs2_lock_res *lockres)
 {
diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h
index e3cf902..2bb01f0 100644
--- a/fs/ocfs2/dlmglue.h
+++ b/fs/ocfs2/dlmglue.h
@@ -58,7 +58,7 @@
 #define OCFS2_LOCK_NONBLOCK		(0x04)
 
 int ocfs2_dlm_init(struct ocfs2_super *osb);
-void ocfs2_dlm_shutdown(struct ocfs2_super *osb);
+void ocfs2_dlm_shutdown(struct ocfs2_super *osb, int hangup_pending);
 void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res);
 void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res,
 			       enum ocfs2_lock_type type,
@@ -114,5 +114,6 @@
 struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void);
 void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug);
 
-extern const struct dlm_protocol_version ocfs2_locking_protocol;
+/* To set the locking protocol on module initialization */
+void ocfs2_set_locking_protocol(void);
 #endif	/* DLMGLUE_H */
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index ed5d523..9154c82d3 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -2242,7 +2242,7 @@
 	.open		= ocfs2_file_open,
 	.aio_read	= ocfs2_file_aio_read,
 	.aio_write	= ocfs2_file_aio_write,
-	.ioctl		= ocfs2_ioctl,
+	.unlocked_ioctl	= ocfs2_ioctl,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl   = ocfs2_compat_ioctl,
 #endif
@@ -2258,7 +2258,7 @@
 	.fsync		= ocfs2_sync_file,
 	.release	= ocfs2_dir_release,
 	.open		= ocfs2_dir_open,
-	.ioctl		= ocfs2_ioctl,
+	.unlocked_ioctl	= ocfs2_ioctl,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl   = ocfs2_compat_ioctl,
 #endif
diff --git a/fs/ocfs2/heartbeat.c b/fs/ocfs2/heartbeat.c
index 0758daf..c6e7213 100644
--- a/fs/ocfs2/heartbeat.c
+++ b/fs/ocfs2/heartbeat.c
@@ -28,9 +28,6 @@
 #include <linux/types.h>
 #include <linux/slab.h>
 #include <linux/highmem.h>
-#include <linux/kmod.h>
-
-#include <dlm/dlmapi.h>
 
 #define MLOG_MASK_PREFIX ML_SUPER
 #include <cluster/masklog.h>
@@ -48,7 +45,6 @@
 					    int bit);
 static inline void __ocfs2_node_map_clear_bit(struct ocfs2_node_map *map,
 					      int bit);
-static inline int __ocfs2_node_map_is_empty(struct ocfs2_node_map *map);
 
 /* special case -1 for now
  * TODO: should *really* make sure the calling func never passes -1!!  */
@@ -62,23 +58,23 @@
 void ocfs2_init_node_maps(struct ocfs2_super *osb)
 {
 	spin_lock_init(&osb->node_map_lock);
-	ocfs2_node_map_init(&osb->recovery_map);
 	ocfs2_node_map_init(&osb->osb_recovering_orphan_dirs);
 }
 
-static void ocfs2_do_node_down(int node_num,
-			       struct ocfs2_super *osb)
+void ocfs2_do_node_down(int node_num, void *data)
 {
+	struct ocfs2_super *osb = data;
+
 	BUG_ON(osb->node_num == node_num);
 
 	mlog(0, "ocfs2: node down event for %d\n", node_num);
 
-	if (!osb->dlm) {
+	if (!osb->cconn) {
 		/*
-		 * No DLM means we're not even ready to participate yet.
-		 * We check the slots after the DLM comes up, so we will
-		 * notice the node death then.  We can safely ignore it
-		 * here.
+		 * No cluster connection means we're not even ready to
+		 * participate yet.  We check the slots after the cluster
+		 * comes up, so we will notice the node death then.  We
+		 * can safely ignore it here.
 		 */
 		return;
 	}
@@ -86,61 +82,6 @@
 	ocfs2_recovery_thread(osb, node_num);
 }
 
-/* Called from the dlm when it's about to evict a node. We may also
- * get a heartbeat callback later. */
-static void ocfs2_dlm_eviction_cb(int node_num,
-				  void *data)
-{
-	struct ocfs2_super *osb = (struct ocfs2_super *) data;
-	struct super_block *sb = osb->sb;
-
-	mlog(ML_NOTICE, "device (%u,%u): dlm has evicted node %d\n",
-	     MAJOR(sb->s_dev), MINOR(sb->s_dev), node_num);
-
-	ocfs2_do_node_down(node_num, osb);
-}
-
-void ocfs2_setup_hb_callbacks(struct ocfs2_super *osb)
-{
-	/* Not exactly a heartbeat callback, but leads to essentially
-	 * the same path so we set it up here. */
-	dlm_setup_eviction_cb(&osb->osb_eviction_cb,
-			      ocfs2_dlm_eviction_cb,
-			      osb);
-}
-
-void ocfs2_stop_heartbeat(struct ocfs2_super *osb)
-{
-	int ret;
-	char *argv[5], *envp[3];
-
-	if (ocfs2_mount_local(osb))
-		return;
-
-	if (!osb->uuid_str) {
-		/* This can happen if we don't get far enough in mount... */
-		mlog(0, "No UUID with which to stop heartbeat!\n\n");
-		return;
-	}
-
-	argv[0] = (char *)o2nm_get_hb_ctl_path();
-	argv[1] = "-K";
-	argv[2] = "-u";
-	argv[3] = osb->uuid_str;
-	argv[4] = NULL;
-
-	mlog(0, "Run: %s %s %s %s\n", argv[0], argv[1], argv[2], argv[3]);
-
-	/* minimal command environment taken from cpu_run_sbin_hotplug */
-	envp[0] = "HOME=/";
-	envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
-	envp[2] = NULL;
-
-	ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
-	if (ret < 0)
-		mlog_errno(ret);
-}
-
 static inline void __ocfs2_node_map_set_bit(struct ocfs2_node_map *map,
 					    int bit)
 {
@@ -192,112 +133,3 @@
 	return ret;
 }
 
-static inline int __ocfs2_node_map_is_empty(struct ocfs2_node_map *map)
-{
-	int bit;
-	bit = find_next_bit(map->map, map->num_nodes, 0);
-	if (bit < map->num_nodes)
-		return 0;
-	return 1;
-}
-
-int ocfs2_node_map_is_empty(struct ocfs2_super *osb,
-			    struct ocfs2_node_map *map)
-{
-	int ret;
-	BUG_ON(map->num_nodes == 0);
-	spin_lock(&osb->node_map_lock);
-	ret = __ocfs2_node_map_is_empty(map);
-	spin_unlock(&osb->node_map_lock);
-	return ret;
-}
-
-#if 0
-
-static void __ocfs2_node_map_dup(struct ocfs2_node_map *target,
-				 struct ocfs2_node_map *from)
-{
-	BUG_ON(from->num_nodes == 0);
-	ocfs2_node_map_init(target);
-	__ocfs2_node_map_set(target, from);
-}
-
-/* returns 1 if bit is the only bit set in target, 0 otherwise */
-int ocfs2_node_map_is_only(struct ocfs2_super *osb,
-			   struct ocfs2_node_map *target,
-			   int bit)
-{
-	struct ocfs2_node_map temp;
-	int ret;
-
-	spin_lock(&osb->node_map_lock);
-	__ocfs2_node_map_dup(&temp, target);
-	__ocfs2_node_map_clear_bit(&temp, bit);
-	ret = __ocfs2_node_map_is_empty(&temp);
-	spin_unlock(&osb->node_map_lock);
-
-	return ret;
-}
-
-static void __ocfs2_node_map_set(struct ocfs2_node_map *target,
-				 struct ocfs2_node_map *from)
-{
-	int num_longs, i;
-
-	BUG_ON(target->num_nodes != from->num_nodes);
-	BUG_ON(target->num_nodes == 0);
-
-	num_longs = BITS_TO_LONGS(target->num_nodes);
-	for (i = 0; i < num_longs; i++)
-		target->map[i] = from->map[i];
-}
-
-#endif  /*  0  */
-
-/* Returns whether the recovery bit was actually set - it may not be
- * if a node is still marked as needing recovery */
-int ocfs2_recovery_map_set(struct ocfs2_super *osb,
-			   int num)
-{
-	int set = 0;
-
-	spin_lock(&osb->node_map_lock);
-
-	if (!test_bit(num, osb->recovery_map.map)) {
-	    __ocfs2_node_map_set_bit(&osb->recovery_map, num);
-	    set = 1;
-	}
-
-	spin_unlock(&osb->node_map_lock);
-
-	return set;
-}
-
-void ocfs2_recovery_map_clear(struct ocfs2_super *osb,
-			      int num)
-{
-	ocfs2_node_map_clear_bit(osb, &osb->recovery_map, num);
-}
-
-int ocfs2_node_map_iterate(struct ocfs2_super *osb,
-			   struct ocfs2_node_map *map,
-			   int idx)
-{
-	int i = idx;
-
-	idx = O2NM_INVALID_NODE_NUM;
-	spin_lock(&osb->node_map_lock);
-	if ((i != O2NM_INVALID_NODE_NUM) &&
-	    (i >= 0) &&
-	    (i < map->num_nodes)) {
-		while(i < map->num_nodes) {
-			if (test_bit(i, map->map)) {
-				idx = i;
-				break;
-			}
-			i++;
-		}
-	}
-	spin_unlock(&osb->node_map_lock);
-	return idx;
-}
diff --git a/fs/ocfs2/heartbeat.h b/fs/ocfs2/heartbeat.h
index eac63ae..74b9c5d 100644
--- a/fs/ocfs2/heartbeat.h
+++ b/fs/ocfs2/heartbeat.h
@@ -28,13 +28,10 @@
 
 void ocfs2_init_node_maps(struct ocfs2_super *osb);
 
-void ocfs2_setup_hb_callbacks(struct ocfs2_super *osb);
-void ocfs2_stop_heartbeat(struct ocfs2_super *osb);
+void ocfs2_do_node_down(int node_num, void *data);
 
 /* node map functions - used to keep track of mounted and in-recovery
  * nodes. */
-int ocfs2_node_map_is_empty(struct ocfs2_super *osb,
-			    struct ocfs2_node_map *map);
 void ocfs2_node_map_set_bit(struct ocfs2_super *osb,
 			    struct ocfs2_node_map *map,
 			    int bit);
@@ -44,17 +41,5 @@
 int ocfs2_node_map_test_bit(struct ocfs2_super *osb,
 			    struct ocfs2_node_map *map,
 			    int bit);
-int ocfs2_node_map_iterate(struct ocfs2_super *osb,
-			   struct ocfs2_node_map *map,
-			   int idx);
-static inline int ocfs2_node_map_first_set_bit(struct ocfs2_super *osb,
-					       struct ocfs2_node_map *map)
-{
-	return ocfs2_node_map_iterate(osb, map, 0);
-}
-int ocfs2_recovery_map_set(struct ocfs2_super *osb,
-			   int num);
-void ocfs2_recovery_map_clear(struct ocfs2_super *osb,
-			      int num);
 
 #endif /* OCFS2_HEARTBEAT_H */
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index 5177fba..b413166 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -7,6 +7,7 @@
 
 #include <linux/fs.h>
 #include <linux/mount.h>
+#include <linux/smp_lock.h>
 
 #define MLOG_MASK_PREFIX ML_INODE
 #include <cluster/masklog.h>
@@ -112,9 +113,9 @@
 	return status;
 }
 
-int ocfs2_ioctl(struct inode * inode, struct file * filp,
-	unsigned int cmd, unsigned long arg)
+long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
+	struct inode *inode = filp->f_path.dentry->d_inode;
 	unsigned int flags;
 	int new_clusters;
 	int status;
@@ -168,9 +169,6 @@
 #ifdef CONFIG_COMPAT
 long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 {
-	struct inode *inode = file->f_path.dentry->d_inode;
-	int ret;
-
 	switch (cmd) {
 	case OCFS2_IOC32_GETFLAGS:
 		cmd = OCFS2_IOC_GETFLAGS;
@@ -190,9 +188,6 @@
 		return -ENOIOCTLCMD;
 	}
 
-	lock_kernel();
-	ret = ocfs2_ioctl(inode, file, cmd, arg);
-	unlock_kernel();
-	return ret;
+	return ocfs2_ioctl(file, cmd, arg);
 }
 #endif
diff --git a/fs/ocfs2/ioctl.h b/fs/ocfs2/ioctl.h
index 4d6c4f4..cf9a5ee 100644
--- a/fs/ocfs2/ioctl.h
+++ b/fs/ocfs2/ioctl.h
@@ -10,8 +10,7 @@
 #ifndef OCFS2_IOCTL_H
 #define OCFS2_IOCTL_H
 
-int ocfs2_ioctl(struct inode * inode, struct file * filp,
-	unsigned int cmd, unsigned long arg);
+long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
 long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg);
 
 #endif /* OCFS2_IOCTL_H */
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index f31c7e8..9698338 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -64,6 +64,137 @@
 				 int slot);
 static int ocfs2_commit_thread(void *arg);
 
+
+/*
+ * The recovery map is a simple, densely-packed array of node numbers to
+ * recover.  It is protected by osb->osb_lock.
+ */
+
+struct ocfs2_recovery_map {
+	unsigned int rm_used;
+	unsigned int *rm_entries;
+};
+
+int ocfs2_recovery_init(struct ocfs2_super *osb)
+{
+	struct ocfs2_recovery_map *rm;
+
+	mutex_init(&osb->recovery_lock);
+	osb->disable_recovery = 0;
+	osb->recovery_thread_task = NULL;
+	init_waitqueue_head(&osb->recovery_event);
+
+	rm = kzalloc(sizeof(struct ocfs2_recovery_map) +
+		     osb->max_slots * sizeof(unsigned int),
+		     GFP_KERNEL);
+	if (!rm) {
+		mlog_errno(-ENOMEM);
+		return -ENOMEM;
+	}
+
+	rm->rm_entries = (unsigned int *)((char *)rm +
+					  sizeof(struct ocfs2_recovery_map));
+	osb->recovery_map = rm;
+
+	return 0;
+}
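ocfs2_recovery_init() above carves rm_entries out of the same kzalloc() as the struct, so a single kfree() in ocfs2_recovery_exit() releases both. A standalone sketch of that idiom, with userspace names standing in for the kernel ones:

	#include <stdlib.h>

	struct recovery_map {
		unsigned int used;
		unsigned int *entries;	/* points just past the header */
	};

	static struct recovery_map *recovery_map_alloc(unsigned int max_slots)
	{
		/* one zeroed allocation holds the header plus the array */
		struct recovery_map *rm =
			calloc(1, sizeof(*rm) + max_slots * sizeof(unsigned int));

		if (!rm)
			return NULL;
		rm->entries = (unsigned int *)(rm + 1);
		return rm;
	}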
+
+/* we can't grab the goofy sem lock from inside wait_event, so we use
+ * memory barriers to make sure that we'll see the null task before
+ * being woken up */
+static int ocfs2_recovery_thread_running(struct ocfs2_super *osb)
+{
+	mb();
+	return osb->recovery_thread_task != NULL;
+}
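The barrier comment above depends on the recovery thread publishing a NULL task pointer before waking recovery_event; that exit path lies outside this hunk. A userspace analogue of the same publish-then-wake shape, substituting C11 atomics for the kernel's mb() (an assumed analogue, not the kernel path):

	#include <stdatomic.h>
	#include <stddef.h>
	#include <pthread.h>

	static _Atomic(void *) recovery_task;
	static pthread_mutex_t ev_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t  ev = PTHREAD_COND_INITIALIZER;

	/* waiter side: re-checked in a loop, as wait_event() does */
	static int recovery_thread_running(void)
	{
		return atomic_load(&recovery_task) != NULL;
	}

	/* thread exit: publish NULL, then wake; the seq_cst store makes the
	 * clear visible before any waiter observes the wakeup */
	static void recovery_thread_exit(void)
	{
		atomic_store(&recovery_task, NULL);
		pthread_mutex_lock(&ev_lock);
		pthread_cond_broadcast(&ev);
		pthread_mutex_unlock(&ev_lock);
	}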
+
+void ocfs2_recovery_exit(struct ocfs2_super *osb)
+{
+	struct ocfs2_recovery_map *rm;
+
+	/* disable any new recovery threads and wait for any currently
+	 * running ones to exit. Do this before setting the vol_state. */
+	mutex_lock(&osb->recovery_lock);
+	osb->disable_recovery = 1;
+	mutex_unlock(&osb->recovery_lock);
+	wait_event(osb->recovery_event, !ocfs2_recovery_thread_running(osb));
+
+	/* At this point, we know that no more recovery threads can be
+	 * launched, so wait for any recovery completion work to
+	 * complete. */
+	flush_workqueue(ocfs2_wq);
+
+	/*
+	 * Now that recovery is shut down, and the osb is about to be
+	 * freed,  the osb_lock is not taken here.
+	 */
+	rm = osb->recovery_map;
+	/* XXX: Should we bug if there are dirty entries? */
+
+	kfree(rm);
+}
+
+static int __ocfs2_recovery_map_test(struct ocfs2_super *osb,
+				     unsigned int node_num)
+{
+	int i;
+	struct ocfs2_recovery_map *rm = osb->recovery_map;
+
+	assert_spin_locked(&osb->osb_lock);
+
+	for (i = 0; i < rm->rm_used; i++) {
+		if (rm->rm_entries[i] == node_num)
+			return 1;
+	}
+
+	return 0;
+}
+
+/* Behaves like test-and-set.  Returns the previous value */
+static int ocfs2_recovery_map_set(struct ocfs2_super *osb,
+				  unsigned int node_num)
+{
+	struct ocfs2_recovery_map *rm = osb->recovery_map;
+
+	spin_lock(&osb->osb_lock);
+	if (__ocfs2_recovery_map_test(osb, node_num)) {
+		spin_unlock(&osb->osb_lock);
+		return 1;
+	}
+
+	/* XXX: Can this be exploited? Not from o2dlm... */
+	BUG_ON(rm->rm_used >= osb->max_slots);
+
+	rm->rm_entries[rm->rm_used] = node_num;
+	rm->rm_used++;
+	spin_unlock(&osb->osb_lock);
+
+	return 0;
+}
+
+static void ocfs2_recovery_map_clear(struct ocfs2_super *osb,
+				     unsigned int node_num)
+{
+	int i;
+	struct ocfs2_recovery_map *rm = osb->recovery_map;
+
+	spin_lock(&osb->osb_lock);
+
+	for (i = 0; i < rm->rm_used; i++) {
+		if (rm->rm_entries[i] == node_num)
+			break;
+	}
+
+	if (i < rm->rm_used) {
+		/* XXX: be careful with the pointer math */
+		memmove(&(rm->rm_entries[i]), &(rm->rm_entries[i + 1]),
+			(rm->rm_used - i - 1) * sizeof(unsigned int));
+		rm->rm_used--;
+	}
+
+	spin_unlock(&osb->osb_lock);
+}
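Together, the two helpers above implement set membership over a dense array: set reports prior membership like a test-and-set, and clear compacts the tail with memmove(). A self-contained demo of the same semantics, with sample values (the kernel versions additionally take osb_lock and BUG on overflow):

	#include <stdio.h>
	#include <string.h>

	#define MAX_SLOTS 8

	static unsigned int rm_entries[MAX_SLOTS];
	static unsigned int rm_used;

	/* behaves like a test-and-set: returns the previous membership */
	static int map_set(unsigned int node)
	{
		unsigned int i;

		for (i = 0; i < rm_used; i++)
			if (rm_entries[i] == node)
				return 1;
		rm_entries[rm_used++] = node;
		return 0;
	}

	/* removal keeps the array dense, exactly like the memmove() above */
	static void map_clear(unsigned int node)
	{
		unsigned int i;

		for (i = 0; i < rm_used; i++)
			if (rm_entries[i] == node)
				break;
		if (i < rm_used) {
			memmove(&rm_entries[i], &rm_entries[i + 1],
				(rm_used - i - 1) * sizeof(unsigned int));
			rm_used--;
		}
	}

	int main(void)
	{
		printf("%d\n", map_set(3));	/* 0: newly added */
		printf("%d\n", map_set(3));	/* 1: already present */
		map_clear(3);
		printf("%u\n", rm_used);	/* 0 */
		return 0;
	}

The dense layout is what lets the recovery thread always consume rm_entries[0] until rm_used drops to zero.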
+
 static int ocfs2_commit_cache(struct ocfs2_super *osb)
 {
 	int status = 0;
@@ -586,8 +717,7 @@
 
 	mlog_entry_void();
 
-	if (!journal)
-		BUG();
+	BUG_ON(!journal);
 
 	osb = journal->j_osb;
 
@@ -650,6 +780,23 @@
 	return status;
 }
 
+static int ocfs2_recovery_completed(struct ocfs2_super *osb)
+{
+	int empty;
+	struct ocfs2_recovery_map *rm = osb->recovery_map;
+
+	spin_lock(&osb->osb_lock);
+	empty = (rm->rm_used == 0);
+	spin_unlock(&osb->osb_lock);
+
+	return empty;
+}
+
+void ocfs2_wait_for_recovery(struct ocfs2_super *osb)
+{
+	wait_event(osb->recovery_event, ocfs2_recovery_completed(osb));
+}
+
 /*
  * JBD Might read a cached version of another nodes journal file. We
  * don't want this as this file changes often and we get no
@@ -848,6 +995,7 @@
 {
 	int status, node_num;
 	struct ocfs2_super *osb = arg;
+	struct ocfs2_recovery_map *rm = osb->recovery_map;
 
 	mlog_entry_void();
 
@@ -863,26 +1011,29 @@
 		goto bail;
 	}
 
-	while(!ocfs2_node_map_is_empty(osb, &osb->recovery_map)) {
-		node_num = ocfs2_node_map_first_set_bit(osb,
-							&osb->recovery_map);
-		if (node_num == O2NM_INVALID_NODE_NUM) {
-			mlog(0, "Out of nodes to recover.\n");
-			break;
-		}
+	spin_lock(&osb->osb_lock);
+	while (rm->rm_used) {
+		/* It's always safe to remove entry zero, as we won't
+		 * clear it until ocfs2_recover_node() has succeeded. */
+		node_num = rm->rm_entries[0];
+		spin_unlock(&osb->osb_lock);
 
 		status = ocfs2_recover_node(osb, node_num);
-		if (status < 0) {
+		if (!status) {
+			ocfs2_recovery_map_clear(osb, node_num);
+		} else {
 			mlog(ML_ERROR,
 			     "Error %d recovering node %d on device (%u,%u)!\n",
 			     status, node_num,
 			     MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev));
 			mlog(ML_ERROR, "Volume requires unmount.\n");
-			continue;
 		}
 
-		ocfs2_recovery_map_clear(osb, node_num);
+		spin_lock(&osb->osb_lock);
 	}
+	spin_unlock(&osb->osb_lock);
+	mlog(0, "All nodes recovered\n");
+
 	ocfs2_super_unlock(osb, 1);
 
 	/* We always run recovery on our own orphan dir - the dead
@@ -893,8 +1044,7 @@
 
 bail:
 	mutex_lock(&osb->recovery_lock);
-	if (!status &&
-	    !ocfs2_node_map_is_empty(osb, &osb->recovery_map)) {
+	if (!status && !ocfs2_recovery_completed(osb)) {
 		mutex_unlock(&osb->recovery_lock);
 		goto restart;
 	}
@@ -924,8 +1074,8 @@
 
 	/* People waiting on recovery will wait on
 	 * the recovery map to empty. */
-	if (!ocfs2_recovery_map_set(osb, node_num))
-		mlog(0, "node %d already be in recovery.\n", node_num);
+	if (ocfs2_recovery_map_set(osb, node_num))
+		mlog(0, "node %d already in recovery map.\n", node_num);
 
 	mlog(0, "starting recovery thread...\n");
 
@@ -1079,7 +1229,6 @@
 {
 	int status = 0;
 	int slot_num;
-	struct ocfs2_slot_info *si = osb->slot_info;
 	struct ocfs2_dinode *la_copy = NULL;
 	struct ocfs2_dinode *tl_copy = NULL;
 
@@ -1092,8 +1241,8 @@
 	 * case we should've called ocfs2_journal_load instead. */
 	BUG_ON(osb->node_num == node_num);
 
-	slot_num = ocfs2_node_num_to_slot(si, node_num);
-	if (slot_num == OCFS2_INVALID_SLOT) {
+	slot_num = ocfs2_node_num_to_slot(osb, node_num);
+	if (slot_num == -ENOENT) {
 		status = 0;
 		mlog(0, "no slot for this node, so no recovery required.\n");
 		goto done;
@@ -1123,8 +1272,7 @@
 
 	/* Likewise, this would be a strange but ultimately not so
 	 * harmful place to get an error... */
-	ocfs2_clear_slot(si, slot_num);
-	status = ocfs2_update_disk_slots(osb, si);
+	status = ocfs2_clear_slot(osb, slot_num);
 	if (status < 0)
 		mlog_errno(status);
 
@@ -1184,23 +1332,24 @@
  * slot info struct has been updated from disk. */
 int ocfs2_mark_dead_nodes(struct ocfs2_super *osb)
 {
-	int status, i, node_num;
-	struct ocfs2_slot_info *si = osb->slot_info;
+	unsigned int node_num;
+	int status, i;
 
 	/* This is called with the super block cluster lock, so we
 	 * know that the slot map can't change underneath us. */
 
-	spin_lock(&si->si_lock);
-	for(i = 0; i < si->si_num_slots; i++) {
+	spin_lock(&osb->osb_lock);
+	for (i = 0; i < osb->max_slots; i++) {
 		if (i == osb->slot_num)
 			continue;
-		if (ocfs2_is_empty_slot(si, i))
+
+		status = ocfs2_slot_to_node_num_locked(osb, i, &node_num);
+		if (status == -ENOENT)
 			continue;
 
-		node_num = si->si_global_node_nums[i];
-		if (ocfs2_node_map_test_bit(osb, &osb->recovery_map, node_num))
+		if (__ocfs2_recovery_map_test(osb, node_num))
 			continue;
-		spin_unlock(&si->si_lock);
+		spin_unlock(&osb->osb_lock);
 
 		/* Ok, we have a slot occupied by another node which
 		 * is not in the recovery map. We trylock his journal
@@ -1216,9 +1365,9 @@
 			goto bail;
 		}
 
-		spin_lock(&si->si_lock);
+		spin_lock(&osb->osb_lock);
 	}
-	spin_unlock(&si->si_lock);
+	spin_unlock(&osb->osb_lock);
 
 	status = 0;
 bail:
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 220f3e8..db82be2 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -134,6 +134,10 @@
 
 /* Exported only for the journal struct init code in super.c. Do not call. */
 void ocfs2_complete_recovery(struct work_struct *work);
+void ocfs2_wait_for_recovery(struct ocfs2_super *osb);
+
+int ocfs2_recovery_init(struct ocfs2_super *osb);
+void ocfs2_recovery_exit(struct ocfs2_super *osb);
 
 /*
  *  Journal Control:
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index ab83fd5..ce0dc14 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -447,6 +447,8 @@
 	iput(main_bm_inode);
 
 out:
+	if (!status)
+		ocfs2_init_inode_steal_slot(osb);
 	mlog_exit(status);
 	return status;
 }
@@ -523,6 +525,8 @@
 	}
 
 	ac->ac_inode = local_alloc_inode;
+	/* We should never use localalloc from another slot */
+	ac->ac_alloc_slot = osb->slot_num;
 	ac->ac_which = OCFS2_AC_USE_LOCAL;
 	get_bh(osb->local_alloc_bh);
 	ac->ac_bh = osb->local_alloc_bh;
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index ae9ad95..d5d808f 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -424,7 +424,7 @@
 	fe->i_fs_generation = cpu_to_le32(osb->fs_generation);
 	fe->i_blkno = cpu_to_le64(fe_blkno);
 	fe->i_suballoc_bit = cpu_to_le16(suballoc_bit);
-	fe->i_suballoc_slot = cpu_to_le16(osb->slot_num);
+	fe->i_suballoc_slot = cpu_to_le16(inode_ac->ac_alloc_slot);
 	fe->i_uid = cpu_to_le32(current->fsuid);
 	if (dir->i_mode & S_ISGID) {
 		fe->i_gid = cpu_to_le32(dir->i_gid);
@@ -997,7 +997,7 @@
 	 *
 	 * And that's why, just like the VFS, we need a file system
 	 * rename lock. */
-	if (old_dentry != new_dentry) {
+	if (old_dir != new_dir && S_ISDIR(old_inode->i_mode)) {
 		status = ocfs2_rename_lock(osb);
 		if (status < 0) {
 			mlog_errno(status);
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 6546cef..3169237 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -36,11 +36,8 @@
 #include <linux/mutex.h>
 #include <linux/jbd.h>
 
-#include "cluster/nodemanager.h"
-#include "cluster/heartbeat.h"
-#include "cluster/tcp.h"
-
-#include "dlm/dlmapi.h"
+/* For union ocfs2_dlm_lksb */
+#include "stackglue.h"
 
 #include "ocfs2_fs.h"
 #include "ocfs2_lockid.h"
@@ -101,6 +98,9 @@
 					       * dropped. */
 #define OCFS2_LOCK_QUEUED        (0x00000100) /* queued for downconvert */
 #define OCFS2_LOCK_NOCACHE       (0x00000200) /* don't use a holder count */
+#define OCFS2_LOCK_PENDING       (0x00000400) /* This lockres is pending a
+						 call to dlm_lock.  Only
+						 exists with BUSY set. */
 
 struct ocfs2_lock_res_ops;
 
@@ -120,13 +120,14 @@
 	int                      l_level;
 	unsigned int             l_ro_holders;
 	unsigned int             l_ex_holders;
-	struct dlm_lockstatus    l_lksb;
+	union ocfs2_dlm_lksb     l_lksb;
 
 	/* used from AST/BAST funcs. */
 	enum ocfs2_ast_action    l_action;
 	enum ocfs2_unlock_action l_unlock_action;
 	int                      l_requested;
 	int                      l_blocking;
+	unsigned int             l_pending_gen;
 
 	wait_queue_head_t        l_event;
 
@@ -179,6 +180,8 @@
 #define OCFS2_DEFAULT_ATIME_QUANTUM	60
 
 struct ocfs2_journal;
+struct ocfs2_slot_info;
+struct ocfs2_recovery_map;
 struct ocfs2_super
 {
 	struct task_struct *commit_task;
@@ -190,7 +193,6 @@
 	struct ocfs2_slot_info *slot_info;
 
 	spinlock_t node_map_lock;
-	struct ocfs2_node_map recovery_map;
 
 	u64 root_blkno;
 	u64 system_dir_blkno;
@@ -206,25 +208,29 @@
 	u32 s_feature_incompat;
 	u32 s_feature_ro_compat;
 
-	/* Protects s_next_generaion, osb_flags. Could protect more on
-	 * osb as it's very short lived. */
+	/* Protects s_next_generation, osb_flags and s_inode_steal_slot.
+	 * Could protect more on osb as it's very short lived.
+	 */
 	spinlock_t osb_lock;
 	u32 s_next_generation;
 	unsigned long osb_flags;
+	s16 s_inode_steal_slot;
+	atomic_t s_num_inodes_stolen;
 
 	unsigned long s_mount_opt;
 	unsigned int s_atime_quantum;
 
-	u16 max_slots;
-	s16 node_num;
-	s16 slot_num;
-	s16 preferred_slot;
+	unsigned int max_slots;
+	unsigned int node_num;
+	int slot_num;
+	int preferred_slot;
 	int s_sectsize_bits;
 	int s_clustersize;
 	int s_clustersize_bits;
 
 	atomic_t vol_state;
 	struct mutex recovery_lock;
+	struct ocfs2_recovery_map *recovery_map;
 	struct task_struct *recovery_thread_task;
 	int disable_recovery;
 	wait_queue_head_t checkpoint_event;
@@ -245,12 +251,11 @@
 	struct ocfs2_alloc_stats alloc_stats;
 	char dev_str[20];		/* "major,minor" of the device */
 
-	struct dlm_ctxt *dlm;
+	char osb_cluster_stack[OCFS2_STACK_LABEL_LEN + 1];
+	struct ocfs2_cluster_connection *cconn;
 	struct ocfs2_lock_res osb_super_lockres;
 	struct ocfs2_lock_res osb_rename_lockres;
-	struct dlm_eviction_cb osb_eviction_cb;
 	struct ocfs2_dlm_debug *osb_dlm_debug;
-	struct dlm_protocol_version osb_locking_proto;
 
 	struct dentry *osb_debug_root;
 
@@ -367,11 +372,24 @@
 	return ret;
 }
 
+static inline int ocfs2_userspace_stack(struct ocfs2_super *osb)
+{
+	return (osb->s_feature_incompat &
+		OCFS2_FEATURE_INCOMPAT_USERSPACE_STACK);
+}
+
 static inline int ocfs2_mount_local(struct ocfs2_super *osb)
 {
 	return (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_LOCAL_MOUNT);
 }
 
+static inline int ocfs2_uses_extended_slot_map(struct ocfs2_super *osb)
+{
+	return (osb->s_feature_incompat &
+		OCFS2_FEATURE_INCOMPAT_EXTENDED_SLOT_MAP);
+}
+
+
 #define OCFS2_IS_VALID_DINODE(ptr)					\
 	(!strcmp((ptr)->i_signature, OCFS2_INODE_SIGNATURE))
 
@@ -522,6 +540,33 @@
 	return pages_per_cluster;
 }
 
+static inline void ocfs2_init_inode_steal_slot(struct ocfs2_super *osb)
+{
+	spin_lock(&osb->osb_lock);
+	osb->s_inode_steal_slot = OCFS2_INVALID_SLOT;
+	spin_unlock(&osb->osb_lock);
+	atomic_set(&osb->s_num_inodes_stolen, 0);
+}
+
+static inline void ocfs2_set_inode_steal_slot(struct ocfs2_super *osb,
+					      s16 slot)
+{
+	spin_lock(&osb->osb_lock);
+	osb->s_inode_steal_slot = slot;
+	spin_unlock(&osb->osb_lock);
+}
+
+static inline s16 ocfs2_get_inode_steal_slot(struct ocfs2_super *osb)
+{
+	s16 slot;
+
+	spin_lock(&osb->osb_lock);
+	slot = osb->s_inode_steal_slot;
+	spin_unlock(&osb->osb_lock);
+
+	return slot;
+}
+
 #define ocfs2_set_bit ext2_set_bit
 #define ocfs2_clear_bit ext2_clear_bit
 #define ocfs2_test_bit ext2_test_bit
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
index 3633edd..52c4266 100644
--- a/fs/ocfs2/ocfs2_fs.h
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -88,7 +88,9 @@
 #define OCFS2_FEATURE_COMPAT_SUPP	OCFS2_FEATURE_COMPAT_BACKUP_SB
 #define OCFS2_FEATURE_INCOMPAT_SUPP	(OCFS2_FEATURE_INCOMPAT_LOCAL_MOUNT \
 					 | OCFS2_FEATURE_INCOMPAT_SPARSE_ALLOC \
-					 | OCFS2_FEATURE_INCOMPAT_INLINE_DATA)
+					 | OCFS2_FEATURE_INCOMPAT_INLINE_DATA \
+					 | OCFS2_FEATURE_INCOMPAT_EXTENDED_SLOT_MAP \
+					 | OCFS2_FEATURE_INCOMPAT_USERSPACE_STACK)
 #define OCFS2_FEATURE_RO_COMPAT_SUPP	OCFS2_FEATURE_RO_COMPAT_UNWRITTEN
 
 /*
@@ -125,6 +127,21 @@
 /* Support for data packed into inode blocks */
 #define OCFS2_FEATURE_INCOMPAT_INLINE_DATA	0x0040
 
+/* Support for the extended slot map */
+#define OCFS2_FEATURE_INCOMPAT_EXTENDED_SLOT_MAP 0x0100
+
+
+/*
+ * Support for alternate, userspace cluster stacks.  If set, the superblock
+ * field s_cluster_info contains a tag for the alternate stack in use as
+ * well as the name of the cluster being joined.
+ * mount.ocfs2 must pass in a matching stack name.
+ *
+ * If not set, the classic stack will be used.  This is compatible with
+ * all older versions.
+ */
+#define OCFS2_FEATURE_INCOMPAT_USERSPACE_STACK	0x0080
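Because the on-disk stack tag is a fixed four-byte field with no NUL terminator, any comparison against it must be length-bounded. A runnable sketch; the mirrored struct and the "o2cb" sample are assumptions of this illustration ("o2cb" is the classic stack's label, and with this INCOMPAT bit set the tag would instead name a userspace stack):

	#include <stdio.h>
	#include <string.h>

	#define STACK_LABEL_LEN   4
	#define CLUSTER_NAME_LEN 16

	struct cluster_info {		/* mirrors ocfs2_cluster_info */
		char stack[STACK_LABEL_LEN];
		unsigned int reserved;
		char cluster[CLUSTER_NAME_LEN];
	};

	/* the label is not NUL-terminated on disk; compare all 4 bytes */
	static int stack_matches(const struct cluster_info *ci, const char *wanted)
	{
		return strncmp(ci->stack, wanted, STACK_LABEL_LEN) == 0;
	}

	int main(void)
	{
		struct cluster_info ci = { .stack = "o2cb" };

		printf("%d\n", stack_matches(&ci, "o2cb"));	/* 1 */
		return 0;
	}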
+
 /*
  * backup superblock flag is used to indicate that this volume
  * has backup superblocks.
@@ -267,6 +284,10 @@
 #define OCFS2_VOL_UUID_LEN		16
 #define OCFS2_MAX_VOL_LABEL_LEN		64
 
+/* The alternate, userspace stack fields */
+#define OCFS2_STACK_LABEL_LEN		4
+#define OCFS2_CLUSTER_NAME_LEN		16
+
 /* Journal limits (in bytes) */
 #define OCFS2_MIN_JOURNAL_SIZE		(4 * 1024 * 1024)
 
@@ -475,6 +496,47 @@
 };
 
 /*
+ * On disk slot map for OCFS2.  This defines the contents of the "slot_map"
+ * system file.  A slot is valid if it contains a node number >= 0.  The
+ * value -1 (0xFFFF) is OCFS2_INVALID_SLOT.  This marks a slot empty.
+ */
+struct ocfs2_slot_map {
+/*00*/	__le16 sm_slots[0];
+/*
+ * Actual on-disk size is one block.  OCFS2_MAX_SLOTS is 255, and
+ * 255 * sizeof(__le16) == 510B fits within the 512B minimum blocksize.
+ */
+};
+
+struct ocfs2_extended_slot {
+/*00*/	__u8	es_valid;
+	__u8	es_reserved1[3];
+	__le32	es_node_num;
+/*08*/
+};
+
+/*
+ * The extended slot map, used when OCFS2_FEATURE_INCOMPAT_EXTENDED_SLOT_MAP
+ * is set.  It separates out the valid marker from the node number, and
+ * has room to grow.  Unlike the old slot map, this format is defined by
+ * i_size.
+ */
+struct ocfs2_slot_map_extended {
+/*00*/	struct ocfs2_extended_slot se_slots[0];
+/*
+ * Actual size is i_size of the slot_map system file.  It should
+ * match s_max_slots * sizeof(struct ocfs2_extended_slot)
+ */
+};
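The two formats above differ only in per-slot size, which decides how many blocks the slot_map file must cover. A quick standalone check of the arithmetic; the mirrored struct below is an assumption of this sketch, not the on-disk definition:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	struct extended_slot {		/* mirrors ocfs2_extended_slot, 8 bytes */
		uint8_t  valid;
		uint8_t  reserved[3];
		uint32_t node_num;
	};

	int main(void)
	{
		/* old format: 255 slots of __le16 fit one 512-byte block */
		assert(255 * sizeof(uint16_t) <= 512);

		/* extended format: sized by i_size, 8 bytes per slot */
		printf("%zu bytes for 255 extended slots\n",
		       255 * sizeof(struct extended_slot));	/* 2040 */
		return 0;
	}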
+
+struct ocfs2_cluster_info {
+/*00*/	__u8   ci_stack[OCFS2_STACK_LABEL_LEN];
+	__le32 ci_reserved;
+/*08*/	__u8   ci_cluster[OCFS2_CLUSTER_NAME_LEN];
+/*18*/
+};
+
+/*
  * On disk superblock for OCFS2
  * Note that it is contained inside an ocfs2_dinode, so all offsets
  * are relative to the start of ocfs2_dinode.id2.
@@ -506,7 +568,20 @@
 					 * group header */
 /*50*/	__u8  s_label[OCFS2_MAX_VOL_LABEL_LEN];	/* Label for mounting, etc. */
 /*90*/	__u8  s_uuid[OCFS2_VOL_UUID_LEN];	/* 128-bit uuid */
-/*A0*/
+/*A0*/  struct ocfs2_cluster_info s_cluster_info; /* Selected userspace
+						     stack.  Only valid
+						     with INCOMPAT flag. */
+/*B8*/  __le64 s_reserved2[17];		/* Fill out superblock */
+/*140*/
+
+	/*
+	 * NOTE: As stated above, all offsets are relative to
+	 * ocfs2_dinode.id2, which is at 0xC0 in the inode.
+	 * 0xC0 + 0x140 = 0x200 or 512 bytes.  A superblock must fit within
+	 * our smallest blocksize, which is 512 bytes.  To ensure this,
+	 * we reserve the space in s_reserved2.  Anything past s_reserved2
+	 * will not be available on the smallest blocksize.
+	 */
 };
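The arithmetic in the note above can be checked mechanically. A small standalone program re-deriving the offsets, using only the numbers from the layout comments:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		unsigned cluster_info     = 0xA0;		/* s_cluster_info */
		unsigned cluster_info_len = 4 + 4 + 16;		/* stack + reserved + name */
		unsigned reserved2        = cluster_info + cluster_info_len;
		unsigned sb_end           = reserved2 + 17 * sizeof(uint64_t);

		assert(reserved2 == 0xB8);	/* matches the s_reserved2 offset */
		assert(sb_end == 0x140);	/* superblock body length */
		assert(0xC0 + sb_end == 0x200);	/* fits the 512-byte minimum block */
		return 0;
	}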
 
 /*
diff --git a/fs/ocfs2/ocfs2_lockid.h b/fs/ocfs2/ocfs2_lockid.h
index 86f3e37..82c200f 100644
--- a/fs/ocfs2/ocfs2_lockid.h
+++ b/fs/ocfs2/ocfs2_lockid.h
@@ -100,7 +100,7 @@
 static inline const char *ocfs2_lock_type_string(enum ocfs2_lock_type type)
 {
 #ifdef __KERNEL__
-	mlog_bug_on_msg(type >= OCFS2_NUM_LOCK_TYPES, "%d\n", type);
+	BUG_ON(type >= OCFS2_NUM_LOCK_TYPES);
 #endif
 	return ocfs2_lock_type_strings[type];
 }
diff --git a/fs/ocfs2/slot_map.c b/fs/ocfs2/slot_map.c
index 3a50ce5..bb5ff89 100644
--- a/fs/ocfs2/slot_map.c
+++ b/fs/ocfs2/slot_map.c
@@ -42,81 +42,244 @@
 
 #include "buffer_head_io.h"
 
-static s16 __ocfs2_node_num_to_slot(struct ocfs2_slot_info *si,
-				    s16 global);
-static void __ocfs2_fill_slot(struct ocfs2_slot_info *si,
-			      s16 slot_num,
-			      s16 node_num);
 
-/* post the slot information on disk into our slot_info struct. */
-void ocfs2_update_slot_info(struct ocfs2_slot_info *si)
+struct ocfs2_slot {
+	int sl_valid;
+	unsigned int sl_node_num;
+};
+
+struct ocfs2_slot_info {
+	int si_extended;
+	int si_slots_per_block;
+	struct inode *si_inode;
+	unsigned int si_blocks;
+	struct buffer_head **si_bh;
+	unsigned int si_num_slots;
+	struct ocfs2_slot *si_slots;
+};
+
+
+static int __ocfs2_node_num_to_slot(struct ocfs2_slot_info *si,
+				    unsigned int node_num);
+
+static void ocfs2_invalidate_slot(struct ocfs2_slot_info *si,
+				  int slot_num)
+{
+	BUG_ON((slot_num < 0) || (slot_num >= si->si_num_slots));
+	si->si_slots[slot_num].sl_valid = 0;
+}
+
+static void ocfs2_set_slot(struct ocfs2_slot_info *si,
+			   int slot_num, unsigned int node_num)
+{
+	BUG_ON((slot_num < 0) || (slot_num >= si->si_num_slots));
+
+	si->si_slots[slot_num].sl_valid = 1;
+	si->si_slots[slot_num].sl_node_num = node_num;
+}
+
+/* This version is for the extended slot map */
+static void ocfs2_update_slot_info_extended(struct ocfs2_slot_info *si)
+{
+	int b, i, slotno;
+	struct ocfs2_slot_map_extended *se;
+
+	slotno = 0;
+	for (b = 0; b < si->si_blocks; b++) {
+		se = (struct ocfs2_slot_map_extended *)si->si_bh[b]->b_data;
+		for (i = 0;
+		     (i < si->si_slots_per_block) &&
+		     (slotno < si->si_num_slots);
+		     i++, slotno++) {
+			if (se->se_slots[i].es_valid)
+				ocfs2_set_slot(si, slotno,
+					       le32_to_cpu(se->se_slots[i].es_node_num));
+			else
+				ocfs2_invalidate_slot(si, slotno);
+		}
+	}
+}
+
+/*
+ * Post the slot information on disk into our slot_info struct.
+ * Must be protected by osb_lock.
+ */
+static void ocfs2_update_slot_info_old(struct ocfs2_slot_info *si)
 {
 	int i;
-	__le16 *disk_info;
+	struct ocfs2_slot_map *sm;
 
-	/* we don't read the slot block here as ocfs2_super_lock
-	 * should've made sure we have the most recent copy. */
-	spin_lock(&si->si_lock);
-	disk_info = (__le16 *) si->si_bh->b_data;
+	sm = (struct ocfs2_slot_map *)si->si_bh[0]->b_data;
 
-	for (i = 0; i < si->si_size; i++)
-		si->si_global_node_nums[i] = le16_to_cpu(disk_info[i]);
+	for (i = 0; i < si->si_num_slots; i++) {
+		if (le16_to_cpu(sm->sm_slots[i]) == (u16)OCFS2_INVALID_SLOT)
+			ocfs2_invalidate_slot(si, i);
+		else
+			ocfs2_set_slot(si, i, le16_to_cpu(sm->sm_slots[i]));
+	}
+}
 
-	spin_unlock(&si->si_lock);
+static void ocfs2_update_slot_info(struct ocfs2_slot_info *si)
+{
+	/*
+	 * The slot data will have been refreshed when ocfs2_super_lock
+	 * was taken.
+	 */
+	if (si->si_extended)
+		ocfs2_update_slot_info_extended(si);
+	else
+		ocfs2_update_slot_info_old(si);
+}
+
+int ocfs2_refresh_slot_info(struct ocfs2_super *osb)
+{
+	int ret;
+	struct ocfs2_slot_info *si = osb->slot_info;
+
+	if (si == NULL)
+		return 0;
+
+	BUG_ON(si->si_blocks == 0);
+	BUG_ON(si->si_bh == NULL);
+
+	mlog(0, "Refreshing slot map, reading %u block(s)\n",
+	     si->si_blocks);
+
+	/*
+	 * We pass -1 as blocknr because we expect all of si->si_bh to
+	 * be !NULL.  Thus, ocfs2_read_blocks() will ignore blocknr.  If
+	 * this is not true, the read of -1 (UINT64_MAX) will fail.
+	 */
+	ret = ocfs2_read_blocks(osb, -1, si->si_blocks, si->si_bh, 0,
+				si->si_inode);
+	if (ret == 0) {
+		spin_lock(&osb->osb_lock);
+		ocfs2_update_slot_info(si);
+		spin_unlock(&osb->osb_lock);
+	}
+
+	return ret;
 }
 
 /* post our slot info stuff into its destination bh and write it
  * out. */
-int ocfs2_update_disk_slots(struct ocfs2_super *osb,
-			    struct ocfs2_slot_info *si)
+static void ocfs2_update_disk_slot_extended(struct ocfs2_slot_info *si,
+					    int slot_num,
+					    struct buffer_head **bh)
 {
-	int status, i;
-	__le16 *disk_info = (__le16 *) si->si_bh->b_data;
+	int blkind = slot_num / si->si_slots_per_block;
+	int slotno = slot_num % si->si_slots_per_block;
+	struct ocfs2_slot_map_extended *se;
 
-	spin_lock(&si->si_lock);
-	for (i = 0; i < si->si_size; i++)
-		disk_info[i] = cpu_to_le16(si->si_global_node_nums[i]);
-	spin_unlock(&si->si_lock);
+	BUG_ON(blkind >= si->si_blocks);
 
-	status = ocfs2_write_block(osb, si->si_bh, si->si_inode);
+	se = (struct ocfs2_slot_map_extended *)si->si_bh[blkind]->b_data;
+	se->se_slots[slotno].es_valid = si->si_slots[slot_num].sl_valid;
+	if (si->si_slots[slot_num].sl_valid)
+		se->se_slots[slotno].es_node_num =
+			cpu_to_le32(si->si_slots[slot_num].sl_node_num);
+	*bh = si->si_bh[blkind];
+}
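The blkind/slotno split above is plain integer division. For example, assuming 512-byte blocks and the 8-byte extended slots defined earlier (64 slots per block, an illustrative figure, since si_slots_per_block is computed elsewhere):

	#include <stdio.h>

	int main(void)
	{
		int slots_per_block = 512 / 8;	/* 64 */
		int slot_num = 130;

		printf("block %d, slot %d within it\n",
		       slot_num / slots_per_block,	/* 2 */
		       slot_num % slots_per_block);	/* 2 */
		return 0;
	}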
+
+static void ocfs2_update_disk_slot_old(struct ocfs2_slot_info *si,
+				       int slot_num,
+				       struct buffer_head **bh)
+{
+	int i;
+	struct ocfs2_slot_map *sm;
+
+	sm = (struct ocfs2_slot_map *)si->si_bh[0]->b_data;
+	for (i = 0; i < si->si_num_slots; i++) {
+		if (si->si_slots[i].sl_valid)
+			sm->sm_slots[i] =
+				cpu_to_le16(si->si_slots[i].sl_node_num);
+		else
+			sm->sm_slots[i] = cpu_to_le16(OCFS2_INVALID_SLOT);
+	}
+	*bh = si->si_bh[0];
+}
+
+static int ocfs2_update_disk_slot(struct ocfs2_super *osb,
+				  struct ocfs2_slot_info *si,
+				  int slot_num)
+{
+	int status;
+	struct buffer_head *bh;
+
+	spin_lock(&osb->osb_lock);
+	if (si->si_extended)
+		ocfs2_update_disk_slot_extended(si, slot_num, &bh);
+	else
+		ocfs2_update_disk_slot_old(si, slot_num, &bh);
+	spin_unlock(&osb->osb_lock);
+
+	status = ocfs2_write_block(osb, bh, si->si_inode);
 	if (status < 0)
 		mlog_errno(status);
 
 	return status;
 }
 
-/* try to find global node in the slot info. Returns
- * OCFS2_INVALID_SLOT if nothing is found. */
-static s16 __ocfs2_node_num_to_slot(struct ocfs2_slot_info *si,
-				    s16 global)
+/*
+ * Calculate how many bytes are needed by the slot map.  Returns
+ * an error if the slot map file is too small.
+ */
+static int ocfs2_slot_map_physical_size(struct ocfs2_super *osb,
+					struct inode *inode,
+					unsigned long long *bytes)
 {
-	int i;
-	s16 ret = OCFS2_INVALID_SLOT;
+	unsigned long long bytes_needed;
+
+	if (ocfs2_uses_extended_slot_map(osb)) {
+		bytes_needed = osb->max_slots *
+			sizeof(struct ocfs2_extended_slot);
+	} else {
+		bytes_needed = osb->max_slots * sizeof(__le16);
+	}
+	if (bytes_needed > i_size_read(inode)) {
+		mlog(ML_ERROR,
+		     "Slot map file is too small!  (size %llu, needed %llu)\n",
+		     (unsigned long long)i_size_read(inode), bytes_needed);
+		return -ENOSPC;
+	}
+
+	*bytes = bytes_needed;
+	return 0;
+}
+
+/* Try to find the given node number in the slot map.  Returns -ENOENT
+ * if it is not found. */
+static int __ocfs2_node_num_to_slot(struct ocfs2_slot_info *si,
+				    unsigned int node_num)
+{
+	int i, ret = -ENOENT;
 
 	for(i = 0; i < si->si_num_slots; i++) {
-		if (global == si->si_global_node_nums[i]) {
-			ret = (s16) i;
+		if (si->si_slots[i].sl_valid &&
+		    (node_num == si->si_slots[i].sl_node_num)) {
+			ret = i;
 			break;
 		}
 	}
+
 	return ret;
 }
 
-static s16 __ocfs2_find_empty_slot(struct ocfs2_slot_info *si, s16 preferred)
+static int __ocfs2_find_empty_slot(struct ocfs2_slot_info *si,
+				   int preferred)
 {
-	int i;
-	s16 ret = OCFS2_INVALID_SLOT;
+	int i, ret = -ENOSPC;
 
-	if (preferred >= 0 && preferred < si->si_num_slots) {
-		if (OCFS2_INVALID_SLOT == si->si_global_node_nums[preferred]) {
+	if ((preferred >= 0) && (preferred < si->si_num_slots)) {
+		if (!si->si_slots[preferred].sl_valid) {
 			ret = preferred;
 			goto out;
 		}
 	}
 
 	for(i = 0; i < si->si_num_slots; i++) {
-		if (OCFS2_INVALID_SLOT == si->si_global_node_nums[i]) {
-			ret = (s16) i;
+		if (!si->si_slots[i].sl_valid) {
+			ret = i;
 			break;
 		}
 	}
@@ -124,58 +287,155 @@
 	return ret;
 }
 
-s16 ocfs2_node_num_to_slot(struct ocfs2_slot_info *si,
-			   s16 global)
+int ocfs2_node_num_to_slot(struct ocfs2_super *osb, unsigned int node_num)
 {
-	s16 ret;
+	int slot;
+	struct ocfs2_slot_info *si = osb->slot_info;
 
-	spin_lock(&si->si_lock);
-	ret = __ocfs2_node_num_to_slot(si, global);
-	spin_unlock(&si->si_lock);
-	return ret;
+	spin_lock(&osb->osb_lock);
+	slot = __ocfs2_node_num_to_slot(si, node_num);
+	spin_unlock(&osb->osb_lock);
+
+	return slot;
 }
 
-static void __ocfs2_fill_slot(struct ocfs2_slot_info *si,
-			      s16 slot_num,
-			      s16 node_num)
+int ocfs2_slot_to_node_num_locked(struct ocfs2_super *osb, int slot_num,
+				  unsigned int *node_num)
 {
-	BUG_ON(slot_num == OCFS2_INVALID_SLOT);
-	BUG_ON(slot_num >= si->si_num_slots);
-	BUG_ON((node_num != O2NM_INVALID_NODE_NUM) &&
-	       (node_num >= O2NM_MAX_NODES));
+	struct ocfs2_slot_info *si = osb->slot_info;
 
-	si->si_global_node_nums[slot_num] = node_num;
+	assert_spin_locked(&osb->osb_lock);
+
+	BUG_ON(slot_num < 0);
+	BUG_ON(slot_num >= osb->max_slots);
+
+	if (!si->si_slots[slot_num].sl_valid)
+		return -ENOENT;
+
+	*node_num = si->si_slots[slot_num].sl_node_num;
+	return 0;
 }
 
-void ocfs2_clear_slot(struct ocfs2_slot_info *si,
-		      s16 slot_num)
+static void __ocfs2_free_slot_info(struct ocfs2_slot_info *si)
 {
-	spin_lock(&si->si_lock);
-	__ocfs2_fill_slot(si, slot_num, OCFS2_INVALID_SLOT);
-	spin_unlock(&si->si_lock);
+	unsigned int i;
+
+	if (si == NULL)
+		return;
+
+	if (si->si_inode)
+		iput(si->si_inode);
+	if (si->si_bh) {
+		for (i = 0; i < si->si_blocks; i++) {
+			if (si->si_bh[i]) {
+				brelse(si->si_bh[i]);
+				si->si_bh[i] = NULL;
+			}
+		}
+		kfree(si->si_bh);
+	}
+
+	kfree(si);
+}
+
+int ocfs2_clear_slot(struct ocfs2_super *osb, int slot_num)
+{
+	struct ocfs2_slot_info *si = osb->slot_info;
+
+	if (si == NULL)
+		return 0;
+
+	spin_lock(&osb->osb_lock);
+	ocfs2_invalidate_slot(si, slot_num);
+	spin_unlock(&osb->osb_lock);
+
+	return ocfs2_update_disk_slot(osb, osb->slot_info, slot_num);
+}
+
+static int ocfs2_map_slot_buffers(struct ocfs2_super *osb,
+				  struct ocfs2_slot_info *si)
+{
+	int status = 0;
+	u64 blkno;
+	unsigned long long blocks, bytes;
+	unsigned int i;
+	struct buffer_head *bh;
+
+	status = ocfs2_slot_map_physical_size(osb, si->si_inode, &bytes);
+	if (status)
+		goto bail;
+
+	blocks = ocfs2_blocks_for_bytes(si->si_inode->i_sb, bytes);
+	BUG_ON(blocks > UINT_MAX);
+	si->si_blocks = blocks;
+	if (!si->si_blocks)
+		goto bail;
+
+	if (si->si_extended)
+		si->si_slots_per_block =
+			(osb->sb->s_blocksize /
+			 sizeof(struct ocfs2_extended_slot));
+	else
+		si->si_slots_per_block = osb->sb->s_blocksize / sizeof(__le16);
+
+	/* The size checks above should ensure this */
+	BUG_ON((osb->max_slots / si->si_slots_per_block) > blocks);
+
+	mlog(0, "Slot map needs %u buffers for %llu bytes\n",
+	     si->si_blocks, bytes);
+
+	si->si_bh = kzalloc(sizeof(struct buffer_head *) * si->si_blocks,
+			    GFP_KERNEL);
+	if (!si->si_bh) {
+		status = -ENOMEM;
+		mlog_errno(status);
+		goto bail;
+	}
+
+	for (i = 0; i < si->si_blocks; i++) {
+		status = ocfs2_extent_map_get_blocks(si->si_inode, i,
+						     &blkno, NULL, NULL);
+		if (status < 0) {
+			mlog_errno(status);
+			goto bail;
+		}
+
+		mlog(0, "Reading slot map block %u at %llu\n", i,
+		     (unsigned long long)blkno);
+
+		bh = NULL;  /* Acquire a fresh bh */
+		status = ocfs2_read_block(osb, blkno, &bh, 0, si->si_inode);
+		if (status < 0) {
+			mlog_errno(status);
+			goto bail;
+		}
+
+		si->si_bh[i] = bh;
+	}
+
+bail:
+	return status;
 }
 
 int ocfs2_init_slot_info(struct ocfs2_super *osb)
 {
-	int status, i;
-	u64 blkno;
+	int status;
 	struct inode *inode = NULL;
-	struct buffer_head *bh = NULL;
 	struct ocfs2_slot_info *si;
 
-	si = kzalloc(sizeof(struct ocfs2_slot_info), GFP_KERNEL);
+	si = kzalloc(sizeof(struct ocfs2_slot_info) +
+		     (sizeof(struct ocfs2_slot) * osb->max_slots),
+		     GFP_KERNEL);
 	if (!si) {
 		status = -ENOMEM;
 		mlog_errno(status);
 		goto bail;
 	}
 
-	spin_lock_init(&si->si_lock);
+	si->si_extended = ocfs2_uses_extended_slot_map(osb);
 	si->si_num_slots = osb->max_slots;
-	si->si_size = OCFS2_MAX_SLOTS;
-
-	for(i = 0; i < si->si_num_slots; i++)
-		si->si_global_node_nums[i] = OCFS2_INVALID_SLOT;
+	si->si_slots = (struct ocfs2_slot *)((char *)si +
+					     sizeof(struct ocfs2_slot_info));
 
 	inode = ocfs2_get_system_file_inode(osb, SLOT_MAP_SYSTEM_INODE,
 					    OCFS2_INVALID_SLOT);
@@ -185,61 +445,53 @@
 		goto bail;
 	}
 
-	status = ocfs2_extent_map_get_blocks(inode, 0ULL, &blkno, NULL, NULL);
-	if (status < 0) {
-		mlog_errno(status);
-		goto bail;
-	}
-
-	status = ocfs2_read_block(osb, blkno, &bh, 0, inode);
-	if (status < 0) {
-		mlog_errno(status);
-		goto bail;
-	}
-
 	si->si_inode = inode;
-	si->si_bh = bh;
-	osb->slot_info = si;
+	status = ocfs2_map_slot_buffers(osb, si);
+	if (status < 0) {
+		mlog_errno(status);
+		goto bail;
+	}
+
+	osb->slot_info = si;
 bail:
 	if (status < 0 && si)
-		ocfs2_free_slot_info(si);
+		__ocfs2_free_slot_info(si);
 
 	return status;
 }
 
-void ocfs2_free_slot_info(struct ocfs2_slot_info *si)
+void ocfs2_free_slot_info(struct ocfs2_super *osb)
 {
-	if (si->si_inode)
-		iput(si->si_inode);
-	if (si->si_bh)
-		brelse(si->si_bh);
-	kfree(si);
+	struct ocfs2_slot_info *si = osb->slot_info;
+
+	osb->slot_info = NULL;
+	__ocfs2_free_slot_info(si);
 }
 
 int ocfs2_find_slot(struct ocfs2_super *osb)
 {
 	int status;
-	s16 slot;
+	int slot;
 	struct ocfs2_slot_info *si;
 
 	mlog_entry_void();
 
 	si = osb->slot_info;
 
+	spin_lock(&osb->osb_lock);
 	ocfs2_update_slot_info(si);
 
-	spin_lock(&si->si_lock);
 	/* search for ourselves first and take the slot if it already
 	 * exists. Perhaps we need to mark this in a variable for our
 	 * own journal recovery? Possibly not, though we certainly
 	 * need to warn the user */
 	slot = __ocfs2_node_num_to_slot(si, osb->node_num);
-	if (slot == OCFS2_INVALID_SLOT) {
+	if (slot < 0) {
 		/* if no slot yet, then just take 1st available
 		 * one. */
 		slot = __ocfs2_find_empty_slot(si, osb->preferred_slot);
-		if (slot == OCFS2_INVALID_SLOT) {
-			spin_unlock(&si->si_lock);
+		if (slot < 0) {
+			spin_unlock(&osb->osb_lock);
 			mlog(ML_ERROR, "no free slots available!\n");
 			status = -EINVAL;
 			goto bail;
@@ -248,13 +500,13 @@
 		mlog(ML_NOTICE, "slot %d is already allocated to this node!\n",
 		     slot);
 
-	__ocfs2_fill_slot(si, slot, osb->node_num);
+	ocfs2_set_slot(si, slot, osb->node_num);
 	osb->slot_num = slot;
-	spin_unlock(&si->si_lock);
+	spin_unlock(&osb->osb_lock);
 
 	mlog(0, "taking node slot %d\n", osb->slot_num);
 
-	status = ocfs2_update_disk_slots(osb, si);
+	status = ocfs2_update_disk_slot(osb, si, osb->slot_num);
 	if (status < 0)
 		mlog_errno(status);
 
@@ -265,27 +517,27 @@
 
 void ocfs2_put_slot(struct ocfs2_super *osb)
 {
-	int status;
+	int status, slot_num;
 	struct ocfs2_slot_info *si = osb->slot_info;
 
 	if (!si)
 		return;
 
+	spin_lock(&osb->osb_lock);
 	ocfs2_update_slot_info(si);
 
-	spin_lock(&si->si_lock);
-	__ocfs2_fill_slot(si, osb->slot_num, OCFS2_INVALID_SLOT);
+	slot_num = osb->slot_num;
+	ocfs2_invalidate_slot(si, osb->slot_num);
 	osb->slot_num = OCFS2_INVALID_SLOT;
-	spin_unlock(&si->si_lock);
+	spin_unlock(&osb->osb_lock);
 
-	status = ocfs2_update_disk_slots(osb, si);
+	status = ocfs2_update_disk_slot(osb, si, slot_num);
 	if (status < 0) {
 		mlog_errno(status);
 		goto bail;
 	}
 
 bail:
-	osb->slot_info = NULL;
-	ocfs2_free_slot_info(si);
+	ocfs2_free_slot_info(osb);
 }
 
diff --git a/fs/ocfs2/slot_map.h b/fs/ocfs2/slot_map.h
index 1025872..601c95f 100644
--- a/fs/ocfs2/slot_map.h
+++ b/fs/ocfs2/slot_map.h
@@ -27,38 +27,18 @@
 #ifndef SLOTMAP_H
 #define SLOTMAP_H
 
-struct ocfs2_slot_info {
-	spinlock_t si_lock;
-
-       	struct inode *si_inode;
-	struct buffer_head *si_bh;
-	unsigned int si_num_slots;
-	unsigned int si_size;
-	s16 si_global_node_nums[OCFS2_MAX_SLOTS];
-};
-
 int ocfs2_init_slot_info(struct ocfs2_super *osb);
-void ocfs2_free_slot_info(struct ocfs2_slot_info *si);
+void ocfs2_free_slot_info(struct ocfs2_super *osb);
 
 int ocfs2_find_slot(struct ocfs2_super *osb);
 void ocfs2_put_slot(struct ocfs2_super *osb);
 
-void ocfs2_update_slot_info(struct ocfs2_slot_info *si);
-int ocfs2_update_disk_slots(struct ocfs2_super *osb,
-			    struct ocfs2_slot_info *si);
+int ocfs2_refresh_slot_info(struct ocfs2_super *osb);
 
-s16 ocfs2_node_num_to_slot(struct ocfs2_slot_info *si,
-			   s16 global);
-void ocfs2_clear_slot(struct ocfs2_slot_info *si,
-		      s16 slot_num);
+int ocfs2_node_num_to_slot(struct ocfs2_super *osb, unsigned int node_num);
+int ocfs2_slot_to_node_num_locked(struct ocfs2_super *osb, int slot_num,
+				  unsigned int *node_num);
 
-static inline int ocfs2_is_empty_slot(struct ocfs2_slot_info *si,
-				      int slot_num)
-{
-	BUG_ON(slot_num == OCFS2_INVALID_SLOT);
-	assert_spin_locked(&si->si_lock);
-
-	return si->si_global_node_nums[slot_num] == OCFS2_INVALID_SLOT;
-}
+int ocfs2_clear_slot(struct ocfs2_super *osb, int slot_num);
 
 #endif
diff --git a/fs/ocfs2/stack_o2cb.c b/fs/ocfs2/stack_o2cb.c
new file mode 100644
index 0000000..ac1d74c
--- /dev/null
+++ b/fs/ocfs2/stack_o2cb.c
@@ -0,0 +1,420 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * stack_o2cb.c
+ *
+ * Code which interfaces ocfs2 with the o2cb stack.
+ *
+ * Copyright (C) 2007 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/crc32.h>
+#include <linux/module.h>
+
+/* Needed for AOP_TRUNCATED_PAGE in mlog_errno() */
+#include <linux/fs.h>
+
+#include "cluster/masklog.h"
+#include "cluster/nodemanager.h"
+#include "cluster/heartbeat.h"
+
+#include "stackglue.h"
+
+struct o2dlm_private {
+	struct dlm_eviction_cb op_eviction_cb;
+};
+
+static struct ocfs2_stack_plugin o2cb_stack;
+
+/* These should be identical */
+#if (DLM_LOCK_IV != LKM_IVMODE)
+# error Lock modes do not match
+#endif
+#if (DLM_LOCK_NL != LKM_NLMODE)
+# error Lock modes do not match
+#endif
+#if (DLM_LOCK_CR != LKM_CRMODE)
+# error Lock modes do not match
+#endif
+#if (DLM_LOCK_CW != LKM_CWMODE)
+# error Lock modes do not match
+#endif
+#if (DLM_LOCK_PR != LKM_PRMODE)
+# error Lock modes do not match
+#endif
+#if (DLM_LOCK_PW != LKM_PWMODE)
+# error Lock modes do not match
+#endif
+#if (DLM_LOCK_EX != LKM_EXMODE)
+# error Lock modes do not match
+#endif
+static inline int mode_to_o2dlm(int mode)
+{
+	BUG_ON(mode > LKM_MAXMODE);
+
+	return mode;
+}
+
+#define map_flag(_generic, _o2dlm)		\
+	if (flags & (_generic)) {		\
+		flags &= ~(_generic);		\
+		o2dlm_flags |= (_o2dlm);	\
+	}
+static int flags_to_o2dlm(u32 flags)
+{
+	int o2dlm_flags = 0;
+
+	map_flag(DLM_LKF_NOQUEUE, LKM_NOQUEUE);
+	map_flag(DLM_LKF_CANCEL, LKM_CANCEL);
+	map_flag(DLM_LKF_CONVERT, LKM_CONVERT);
+	map_flag(DLM_LKF_VALBLK, LKM_VALBLK);
+	map_flag(DLM_LKF_IVVALBLK, LKM_INVVALBLK);
+	map_flag(DLM_LKF_ORPHAN, LKM_ORPHAN);
+	map_flag(DLM_LKF_FORCEUNLOCK, LKM_FORCE);
+	map_flag(DLM_LKF_TIMEOUT, LKM_TIMEOUT);
+	map_flag(DLM_LKF_LOCAL, LKM_LOCAL);
+
+	/* map_flag() should have cleared every flag passed in */
+	BUG_ON(flags != 0);
+
+	return o2dlm_flags;
+}
+#undef map_flag
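+
+/*
+ * A hypothetical translation, for illustration only:
+ * flags_to_o2dlm(DLM_LKF_CONVERT | DLM_LKF_VALBLK) returns
+ * LKM_CONVERT | LKM_VALBLK; any flag without a mapping in
+ * flags_to_o2dlm() trips the BUG_ON() there.
+ */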
+
+/*
+ * Map an o2dlm status to standard errno values.
+ *
+ * o2dlm only uses a handful of these, and returns even fewer to the
+ * caller. Still, we try to assign sane values to each error.
+ *
+ * The following value pairs have special meanings to dlmglue, thus
+ * the right hand side needs to stay unique - never duplicate the
+ * mapping elsewhere in the table!
+ *
+ * DLM_NORMAL:		0
+ * DLM_NOTQUEUED:	-EAGAIN
+ * DLM_CANCELGRANT:	-EBUSY
+ * DLM_CANCEL:		-DLM_ECANCEL
+ */
+/* Keep in sync with dlmapi.h */
+static int status_map[] = {
+	[DLM_NORMAL]			= 0,		/* Success */
+	[DLM_GRANTED]			= -EINVAL,
+	[DLM_DENIED]			= -EACCES,
+	[DLM_DENIED_NOLOCKS]		= -EACCES,
+	[DLM_WORKING]			= -EACCES,
+	[DLM_BLOCKED]			= -EINVAL,
+	[DLM_BLOCKED_ORPHAN]		= -EINVAL,
+	[DLM_DENIED_GRACE_PERIOD]	= -EACCES,
+	[DLM_SYSERR]			= -ENOMEM,	/* It is what it is */
+	[DLM_NOSUPPORT]			= -EPROTO,
+	[DLM_CANCELGRANT]		= -EBUSY,	/* Cancel after grant */
+	[DLM_IVLOCKID]			= -EINVAL,
+	[DLM_SYNC]			= -EINVAL,
+	[DLM_BADTYPE]			= -EINVAL,
+	[DLM_BADRESOURCE]		= -EINVAL,
+	[DLM_MAXHANDLES]		= -ENOMEM,
+	[DLM_NOCLINFO]			= -EINVAL,
+	[DLM_NOLOCKMGR]			= -EINVAL,
+	[DLM_NOPURGED]			= -EINVAL,
+	[DLM_BADARGS]			= -EINVAL,
+	[DLM_VOID]			= -EINVAL,
+	[DLM_NOTQUEUED]			= -EAGAIN,	/* Trylock failed */
+	[DLM_IVBUFLEN]			= -EINVAL,
+	[DLM_CVTUNGRANT]		= -EPERM,
+	[DLM_BADPARAM]			= -EINVAL,
+	[DLM_VALNOTVALID]		= -EINVAL,
+	[DLM_REJECTED]			= -EPERM,
+	[DLM_ABORT]			= -EINVAL,
+	[DLM_CANCEL]			= -DLM_ECANCEL,	/* Successful cancel */
+	[DLM_IVRESHANDLE]		= -EINVAL,
+	[DLM_DEADLOCK]			= -EDEADLK,
+	[DLM_DENIED_NOASTS]		= -EINVAL,
+	[DLM_FORWARD]			= -EINVAL,
+	[DLM_TIMEOUT]			= -ETIMEDOUT,
+	[DLM_IVGROUPID]			= -EINVAL,
+	[DLM_VERS_CONFLICT]		= -EOPNOTSUPP,
+	[DLM_BAD_DEVICE_PATH]		= -ENOENT,
+	[DLM_NO_DEVICE_PERMISSION]	= -EPERM,
+	[DLM_NO_CONTROL_DEVICE]		= -ENOENT,
+	[DLM_RECOVERING]		= -ENOTCONN,
+	[DLM_MIGRATING]			= -ERESTART,
+	[DLM_MAXSTATS]			= -EINVAL,
+};
+
+static int dlm_status_to_errno(enum dlm_status status)
+{
+	BUG_ON(status >= (sizeof(status_map) / sizeof(status_map[0])));
+
+	return status_map[status];
+}
+
+static void o2dlm_lock_ast_wrapper(void *astarg)
+{
+	BUG_ON(o2cb_stack.sp_proto == NULL);
+
+	o2cb_stack.sp_proto->lp_lock_ast(astarg);
+}
+
+static void o2dlm_blocking_ast_wrapper(void *astarg, int level)
+{
+	BUG_ON(o2cb_stack.sp_proto == NULL);
+
+	o2cb_stack.sp_proto->lp_blocking_ast(astarg, level);
+}
+
+static void o2dlm_unlock_ast_wrapper(void *astarg, enum dlm_status status)
+{
+	int error = dlm_status_to_errno(status);
+
+	BUG_ON(o2cb_stack.sp_proto == NULL);
+
+	/*
+	 * In o2dlm, you can get both the lock_ast() for the lock being
+	 * granted and the unlock_ast() for the CANCEL failing.  A
+	 * successful cancel sends DLM_NORMAL here.  If the
+	 * lock grant happened before the cancel arrived, you get
+	 * DLM_CANCELGRANT.
+	 *
+	 * There's no need for the double-ast.  If we see DLM_CANCELGRANT,
+	 * we just ignore it.  We expect the lock_ast() to handle the
+	 * granted lock.
+	 */
+	if (status == DLM_CANCELGRANT)
+		return;
+
+	o2cb_stack.sp_proto->lp_unlock_ast(astarg, error);
+}
+
+static int o2cb_dlm_lock(struct ocfs2_cluster_connection *conn,
+			 int mode,
+			 union ocfs2_dlm_lksb *lksb,
+			 u32 flags,
+			 void *name,
+			 unsigned int namelen,
+			 void *astarg)
+{
+	enum dlm_status status;
+	int o2dlm_mode = mode_to_o2dlm(mode);
+	int o2dlm_flags = flags_to_o2dlm(flags);
+	int ret;
+
+	status = dlmlock(conn->cc_lockspace, o2dlm_mode, &lksb->lksb_o2dlm,
+			 o2dlm_flags, name, namelen,
+			 o2dlm_lock_ast_wrapper, astarg,
+			 o2dlm_blocking_ast_wrapper);
+	ret = dlm_status_to_errno(status);
+	return ret;
+}
+
+static int o2cb_dlm_unlock(struct ocfs2_cluster_connection *conn,
+			   union ocfs2_dlm_lksb *lksb,
+			   u32 flags,
+			   void *astarg)
+{
+	enum dlm_status status;
+	int o2dlm_flags = flags_to_o2dlm(flags);
+	int ret;
+
+	status = dlmunlock(conn->cc_lockspace, &lksb->lksb_o2dlm,
+			   o2dlm_flags, o2dlm_unlock_ast_wrapper, astarg);
+	ret = dlm_status_to_errno(status);
+	return ret;
+}
+
+static int o2cb_dlm_lock_status(union ocfs2_dlm_lksb *lksb)
+{
+	return dlm_status_to_errno(lksb->lksb_o2dlm.status);
+}
+
+static void *o2cb_dlm_lvb(union ocfs2_dlm_lksb *lksb)
+{
+	return (void *)(lksb->lksb_o2dlm.lvb);
+}
+
+static void o2cb_dump_lksb(union ocfs2_dlm_lksb *lksb)
+{
+	dlm_print_one_lock(lksb->lksb_o2dlm.lockid);
+}
+
+/*
+ * Called from the dlm when it's about to evict a node. This is how the
+ * classic stack signals node death.
+ */
+static void o2dlm_eviction_cb(int node_num, void *data)
+{
+	struct ocfs2_cluster_connection *conn = data;
+
+	mlog(ML_NOTICE, "o2dlm has evicted node %d from group %.*s\n",
+	     node_num, conn->cc_namelen, conn->cc_name);
+
+	conn->cc_recovery_handler(node_num, conn->cc_recovery_data);
+}
+
+static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn)
+{
+	int rc = 0;
+	u32 dlm_key;
+	struct dlm_ctxt *dlm;
+	struct o2dlm_private *priv;
+	struct dlm_protocol_version dlm_version;
+
+	BUG_ON(conn == NULL);
+	BUG_ON(o2cb_stack.sp_proto == NULL);
+
+	/* for now we only have one cluster/node, make sure we see it
+	 * in the heartbeat universe */
+	if (!o2hb_check_local_node_heartbeating()) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	priv = kzalloc(sizeof(struct o2dlm_private), GFP_KERNEL);
+	if (!priv) {
+		rc = -ENOMEM;
+		goto out_free;
+	}
+
+	/* This just fills the structure in.  It is safe to pass conn. */
+	dlm_setup_eviction_cb(&priv->op_eviction_cb, o2dlm_eviction_cb,
+			      conn);
+
+	conn->cc_private = priv;
+
+	/* Used by the dlm code to make message headers unique; each
+	 * node in this domain must agree on this. */
+	dlm_key = crc32_le(0, conn->cc_name, conn->cc_namelen);
+	dlm_version.pv_major = conn->cc_version.pv_major;
+	dlm_version.pv_minor = conn->cc_version.pv_minor;
+
+	dlm = dlm_register_domain(conn->cc_name, dlm_key, &dlm_version);
+	if (IS_ERR(dlm)) {
+		rc = PTR_ERR(dlm);
+		mlog_errno(rc);
+		goto out_free;
+	}
+
+	conn->cc_version.pv_major = dlm_version.pv_major;
+	conn->cc_version.pv_minor = dlm_version.pv_minor;
+	conn->cc_lockspace = dlm;
+
+	dlm_register_eviction_cb(dlm, &priv->op_eviction_cb);
+
+out_free:
+	if (rc && conn->cc_private)
+		kfree(conn->cc_private);
+
+out:
+	return rc;
+}
+
+static int o2cb_cluster_disconnect(struct ocfs2_cluster_connection *conn,
+				   int hangup_pending)
+{
+	struct dlm_ctxt *dlm = conn->cc_lockspace;
+	struct o2dlm_private *priv = conn->cc_private;
+
+	dlm_unregister_eviction_cb(&priv->op_eviction_cb);
+	conn->cc_private = NULL;
+	kfree(priv);
+
+	dlm_unregister_domain(dlm);
+	conn->cc_lockspace = NULL;
+
+	return 0;
+}
+
+static void o2hb_stop(const char *group)
+{
+	int ret;
+	char *argv[5], *envp[3];
+
+	argv[0] = (char *)o2nm_get_hb_ctl_path();
+	argv[1] = "-K";
+	argv[2] = "-u";
+	argv[3] = (char *)group;
+	argv[4] = NULL;
+
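+	/*
+	 * A typical invocation (the binary path comes from
+	 * o2nm_get_hb_ctl_path() and is configurable, so this is only
+	 * an example):
+	 *
+	 *   /sbin/ocfs2_hb_ctl -K -u <heartbeat-region-uuid>
+	 */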
+	mlog(0, "Run: %s %s %s %s\n", argv[0], argv[1], argv[2], argv[3]);
+
+	/* minimal command environment taken from cpu_run_sbin_hotplug */
+	envp[0] = "HOME=/";
+	envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
+	envp[2] = NULL;
+
+	ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
+	if (ret < 0)
+		mlog_errno(ret);
+}
+
+/*
+ * Hangup is a hack for tools compatibility.  Older ocfs2-tools software
+ * expects the filesystem to call "ocfs2_hb_ctl" during unmount.  This
+ * happens regardless of whether the DLM got started, so we can't do it
+ * in ocfs2_cluster_disconnect().  We bring the o2hb_stop() function into
+ * the glue and provide a "hangup" API for super.c to call.
+ *
+ * Other stacks will eventually provide a NULL ->hangup() pointer.
+ */
+static void o2cb_cluster_hangup(const char *group, int grouplen)
+{
+	o2hb_stop(group);
+}
+
+static int o2cb_cluster_this_node(unsigned int *node)
+{
+	int node_num;
+
+	node_num = o2nm_this_node();
+	if (node_num == O2NM_INVALID_NODE_NUM)
+		return -ENOENT;
+
+	if (node_num >= O2NM_MAX_NODES)
+		return -EOVERFLOW;
+
+	*node = node_num;
+	return 0;
+}
+
+static struct ocfs2_stack_operations o2cb_stack_ops = {
+	.connect	= o2cb_cluster_connect,
+	.disconnect	= o2cb_cluster_disconnect,
+	.hangup		= o2cb_cluster_hangup,
+	.this_node	= o2cb_cluster_this_node,
+	.dlm_lock	= o2cb_dlm_lock,
+	.dlm_unlock	= o2cb_dlm_unlock,
+	.lock_status	= o2cb_dlm_lock_status,
+	.lock_lvb	= o2cb_dlm_lvb,
+	.dump_lksb	= o2cb_dump_lksb,
+};
+
+static struct ocfs2_stack_plugin o2cb_stack = {
+	.sp_name	= "o2cb",
+	.sp_ops		= &o2cb_stack_ops,
+	.sp_owner	= THIS_MODULE,
+};
+
+static int __init o2cb_stack_init(void)
+{
+	return ocfs2_stack_glue_register(&o2cb_stack);
+}
+
+static void __exit o2cb_stack_exit(void)
+{
+	ocfs2_stack_glue_unregister(&o2cb_stack);
+}
+
+MODULE_AUTHOR("Oracle");
+MODULE_DESCRIPTION("ocfs2 driver for the classic o2cb stack");
+MODULE_LICENSE("GPL");
+module_init(o2cb_stack_init);
+module_exit(o2cb_stack_exit);
diff --git a/fs/ocfs2/stack_user.c b/fs/ocfs2/stack_user.c
new file mode 100644
index 0000000..7428663
--- /dev/null
+++ b/fs/ocfs2/stack_user.c
@@ -0,0 +1,883 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * stack_user.c
+ *
+ * Code which interfaces ocfs2 with fs/dlm and a userspace stack.
+ *
+ * Copyright (C) 2007 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/mutex.h>
+#include <linux/reboot.h>
+#include <asm/uaccess.h>
+
+#include "ocfs2.h"  /* For struct ocfs2_lock_res */
+#include "stackglue.h"
+
+
+/*
+ * The control protocol starts with a handshake.  Until the handshake
+ * is complete, the control device will fail all write(2)s.
+ *
+ * The handshake is simple.  First, the client reads until EOF.  Each line
+ * of output is a supported protocol tag.  All protocol tags are a single
+ * character followed by a two hex digit version number.  Currently the
+ * only thing supported is T01, for "Text-based version 0x01".  Next, the
+ * client writes the version it would like to use, including the newline.
+ * Thus, the protocol tag is 'T01\n'.  If the version tag written is
+ * unknown, -EINVAL is returned.  Once the negotiation is complete, the
+ * client can start sending messages.
+ *
+ * The T01 protocol has three messages.  First is the "SETN" message.
+ * It has the following syntax:
+ *
+ *  SETN<space><8-char-hex-nodenum><newline>
+ *
+ * This is 14 characters.
+ *
+ * The "SETN" message must be the first message following the protocol.
+ * It tells ocfs2_control the local node number.
+ *
+ * Next comes the "SETV" message.  It has the following syntax:
+ *
+ *  SETV<space><2-char-hex-major><space><2-char-hex-minor><newline>
+ *
+ * This is 11 characters.
+ *
+ * The "SETV" message sets the filesystem locking protocol version as
+ * negotiated by the client.  The client negotiates based on the maximum
+ * version advertised in /sys/fs/ocfs2/max_locking_protocol.  The major
+ * number from the "SETV" message must match
+ * user_stack.sp_proto->lp_max_version.pv_major, and the minor number
+ * must be less than or equal to ...->lp_max_version.pv_minor.
+ *
+ * Once this information has been set, mounts will be allowed.  From this
+ * point on, the "DOWN" message can be sent for node down notification.
+ * It has the following syntax:
+ *
+ *  DOWN<space><32-char-cap-hex-uuid><space><8-char-hex-nodenum><newline>
+ *
+ * e.g.:
+ *
+ *  DOWN 632A924FDD844190BDA93C0DF6B94899 00000001\n
+ *
+ * This is 47 characters.
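+ *
+ * Putting it together, a complete exchange with a hypothetical daemon
+ * (node 1, locking protocol 1.0, then a down event for node 2) could
+ * look like:
+ *
+ *  (read)  "T01\n"
+ *  (write) "T01\n"
+ *  (write) "SETN 00000001\n"
+ *  (write) "SETV 01 00\n"
+ *  (write) "DOWN 632A924FDD844190BDA93C0DF6B94899 00000002\n"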
+ */
+
+/*
+ * Whether or not the client has done the handshake.
+ * For now, we have just one protocol version.
+ */
+#define OCFS2_CONTROL_PROTO			"T01\n"
+#define OCFS2_CONTROL_PROTO_LEN			4
+
+/* Handshake states */
+#define OCFS2_CONTROL_HANDSHAKE_INVALID		(0)
+#define OCFS2_CONTROL_HANDSHAKE_READ		(1)
+#define OCFS2_CONTROL_HANDSHAKE_PROTOCOL	(2)
+#define OCFS2_CONTROL_HANDSHAKE_VALID		(3)
+
+/* Messages */
+#define OCFS2_CONTROL_MESSAGE_OP_LEN		4
+#define OCFS2_CONTROL_MESSAGE_SETNODE_OP	"SETN"
+#define OCFS2_CONTROL_MESSAGE_SETNODE_TOTAL_LEN	14
+#define OCFS2_CONTROL_MESSAGE_SETVERSION_OP	"SETV"
+#define OCFS2_CONTROL_MESSAGE_SETVERSION_TOTAL_LEN	11
+#define OCFS2_CONTROL_MESSAGE_DOWN_OP		"DOWN"
+#define OCFS2_CONTROL_MESSAGE_DOWN_TOTAL_LEN	47
+#define OCFS2_TEXT_UUID_LEN			32
+#define OCFS2_CONTROL_MESSAGE_VERNUM_LEN	2
+#define OCFS2_CONTROL_MESSAGE_NODENUM_LEN	8
+
+/*
+ * ocfs2_live_connection is refcounted because the filesystem and
+ * miscdevice sides can detach in a different order.  Let's just be safe.
+ */
+struct ocfs2_live_connection {
+	struct list_head		oc_list;
+	struct ocfs2_cluster_connection	*oc_conn;
+};
+
+struct ocfs2_control_private {
+	struct list_head op_list;
+	int op_state;
+	int op_this_node;
+	struct ocfs2_protocol_version op_proto;
+};
+
+/* SETN<space><8-char-hex-nodenum><newline> */
+struct ocfs2_control_message_setn {
+	char	tag[OCFS2_CONTROL_MESSAGE_OP_LEN];
+	char	space;
+	char	nodestr[OCFS2_CONTROL_MESSAGE_NODENUM_LEN];
+	char	newline;
+};
+
+/* SETV<space><2-char-hex-major><space><2-char-hex-minor><newline> */
+struct ocfs2_control_message_setv {
+	char	tag[OCFS2_CONTROL_MESSAGE_OP_LEN];
+	char	space1;
+	char	major[OCFS2_CONTROL_MESSAGE_VERNUM_LEN];
+	char	space2;
+	char	minor[OCFS2_CONTROL_MESSAGE_VERNUM_LEN];
+	char	newline;
+};
+
+/* DOWN<space><32-char-cap-hex-uuid><space><8-char-hex-nodenum><newline> */
+struct ocfs2_control_message_down {
+	char	tag[OCFS2_CONTROL_MESSAGE_OP_LEN];
+	char	space1;
+	char	uuid[OCFS2_TEXT_UUID_LEN];
+	char	space2;
+	char	nodestr[OCFS2_CONTROL_MESSAGE_NODENUM_LEN];
+	char	newline;
+};
+
+union ocfs2_control_message {
+	char					tag[OCFS2_CONTROL_MESSAGE_OP_LEN];
+	struct ocfs2_control_message_setn	u_setn;
+	struct ocfs2_control_message_setv	u_setv;
+	struct ocfs2_control_message_down	u_down;
+};
+
+static struct ocfs2_stack_plugin user_stack;
+
+static atomic_t ocfs2_control_opened;
+static int ocfs2_control_this_node = -1;
+static struct ocfs2_protocol_version running_proto;
+
+static LIST_HEAD(ocfs2_live_connection_list);
+static LIST_HEAD(ocfs2_control_private_list);
+static DEFINE_MUTEX(ocfs2_control_lock);
+
+static inline void ocfs2_control_set_handshake_state(struct file *file,
+						     int state)
+{
+	struct ocfs2_control_private *p = file->private_data;
+	p->op_state = state;
+}
+
+static inline int ocfs2_control_get_handshake_state(struct file *file)
+{
+	struct ocfs2_control_private *p = file->private_data;
+	return p->op_state;
+}
+
+static struct ocfs2_live_connection *ocfs2_connection_find(const char *name)
+{
+	size_t len = strlen(name);
+	struct ocfs2_live_connection *c;
+
+	BUG_ON(!mutex_is_locked(&ocfs2_control_lock));
+
+	list_for_each_entry(c, &ocfs2_live_connection_list, oc_list) {
+		if ((c->oc_conn->cc_namelen == len) &&
+		    !strncmp(c->oc_conn->cc_name, name, len))
+			return c;
+	}
+
+	return NULL;
+}
+
+/*
+ * ocfs2_live_connection structures are created underneath the ocfs2
+ * mount path.  Since the VFS prevents multiple calls to
+ * fill_super(), we can't get dupes here.
+ */
+static int ocfs2_live_connection_new(struct ocfs2_cluster_connection *conn,
+				     struct ocfs2_live_connection **c_ret)
+{
+	int rc = 0;
+	struct ocfs2_live_connection *c;
+
+	c = kzalloc(sizeof(struct ocfs2_live_connection), GFP_KERNEL);
+	if (!c)
+		return -ENOMEM;
+
+	mutex_lock(&ocfs2_control_lock);
+	c->oc_conn = conn;
+
+	if (atomic_read(&ocfs2_control_opened))
+		list_add(&c->oc_list, &ocfs2_live_connection_list);
+	else {
+		printk(KERN_ERR
+		       "ocfs2: Userspace control daemon is not present\n");
+		rc = -ESRCH;
+	}
+
+	mutex_unlock(&ocfs2_control_lock);
+
+	if (!rc)
+		*c_ret = c;
+	else
+		kfree(c);
+
+	return rc;
+}
+
+/*
+ * This function disconnects the cluster connection from ocfs2_control.
+ * Afterwards, userspace can't affect the cluster connection.
+ */
+static void ocfs2_live_connection_drop(struct ocfs2_live_connection *c)
+{
+	mutex_lock(&ocfs2_control_lock);
+	list_del_init(&c->oc_list);
+	c->oc_conn = NULL;
+	mutex_unlock(&ocfs2_control_lock);
+
+	kfree(c);
+}
+
+static int ocfs2_control_cfu(void *target, size_t target_len,
+			     const char __user *buf, size_t count)
+{
+	/* The T01 protocol expects write(2) calls to carry exactly one command */
+	if ((count != target_len) ||
+	    (count > sizeof(union ocfs2_control_message)))
+		return -EINVAL;
+
+	if (copy_from_user(target, buf, target_len))
+		return -EFAULT;
+
+	return 0;
+}
+
+static ssize_t ocfs2_control_validate_protocol(struct file *file,
+					       const char __user *buf,
+					       size_t count)
+{
+	ssize_t ret;
+	char kbuf[OCFS2_CONTROL_PROTO_LEN];
+
+	ret = ocfs2_control_cfu(kbuf, OCFS2_CONTROL_PROTO_LEN,
+				buf, count);
+	if (ret)
+		return ret;
+
+	if (strncmp(kbuf, OCFS2_CONTROL_PROTO, OCFS2_CONTROL_PROTO_LEN))
+		return -EINVAL;
+
+	ocfs2_control_set_handshake_state(file,
+					  OCFS2_CONTROL_HANDSHAKE_PROTOCOL);
+
+	return count;
+}
+
+static void ocfs2_control_send_down(const char *uuid,
+				    int nodenum)
+{
+	struct ocfs2_live_connection *c;
+
+	mutex_lock(&ocfs2_control_lock);
+
+	c = ocfs2_connection_find(uuid);
+	if (c) {
+		BUG_ON(c->oc_conn == NULL);
+		c->oc_conn->cc_recovery_handler(nodenum,
+						c->oc_conn->cc_recovery_data);
+	}
+
+	mutex_unlock(&ocfs2_control_lock);
+}
+
+/*
+ * Called whenever configuration elements are sent to /dev/ocfs2_control.
+ * If all configuration elements are present, try to set the global
+ * values.  If there is a problem, return an error.  Skip any missing
+ * elements, and only bump ocfs2_control_opened when we have all elements
+ * and are successful.
+ */
+static int ocfs2_control_install_private(struct file *file)
+{
+	int rc = 0;
+	int set_p = 1;
+	struct ocfs2_control_private *p = file->private_data;
+
+	BUG_ON(p->op_state != OCFS2_CONTROL_HANDSHAKE_PROTOCOL);
+
+	mutex_lock(&ocfs2_control_lock);
+
+	if (p->op_this_node < 0) {
+		set_p = 0;
+	} else if ((ocfs2_control_this_node >= 0) &&
+		   (ocfs2_control_this_node != p->op_this_node)) {
+		rc = -EINVAL;
+		goto out_unlock;
+	}
+
+	if (!p->op_proto.pv_major) {
+		set_p = 0;
+	} else if (!list_empty(&ocfs2_live_connection_list) &&
+		   ((running_proto.pv_major != p->op_proto.pv_major) ||
+		    (running_proto.pv_minor != p->op_proto.pv_minor))) {
+		rc = -EINVAL;
+		goto out_unlock;
+	}
+
+	if (set_p) {
+		ocfs2_control_this_node = p->op_this_node;
+		running_proto.pv_major = p->op_proto.pv_major;
+		running_proto.pv_minor = p->op_proto.pv_minor;
+	}
+
+out_unlock:
+	mutex_unlock(&ocfs2_control_lock);
+
+	if (!rc && set_p) {
+		/* We set the global values successfully */
+		atomic_inc(&ocfs2_control_opened);
+		ocfs2_control_set_handshake_state(file,
+					OCFS2_CONTROL_HANDSHAKE_VALID);
+	}
+
+	return rc;
+}
+
+static int ocfs2_control_get_this_node(void)
+{
+	int rc;
+
+	mutex_lock(&ocfs2_control_lock);
+	if (ocfs2_control_this_node < 0)
+		rc = -EINVAL;
+	else
+		rc = ocfs2_control_this_node;
+	mutex_unlock(&ocfs2_control_lock);
+
+	return rc;
+}
+
+static int ocfs2_control_do_setnode_msg(struct file *file,
+					struct ocfs2_control_message_setn *msg)
+{
+	long nodenum;
+	char *ptr = NULL;
+	struct ocfs2_control_private *p = file->private_data;
+
+	if (ocfs2_control_get_handshake_state(file) !=
+	    OCFS2_CONTROL_HANDSHAKE_PROTOCOL)
+		return -EINVAL;
+
+	if (strncmp(msg->tag, OCFS2_CONTROL_MESSAGE_SETNODE_OP,
+		    OCFS2_CONTROL_MESSAGE_OP_LEN))
+		return -EINVAL;
+
+	if ((msg->space != ' ') || (msg->newline != '\n'))
+		return -EINVAL;
+	msg->space = msg->newline = '\0';
+
+	nodenum = simple_strtol(msg->nodestr, &ptr, 16);
+	if (!ptr || *ptr)
+		return -EINVAL;
+
+	if ((nodenum == LONG_MIN) || (nodenum == LONG_MAX) ||
+	    (nodenum > INT_MAX) || (nodenum < 0))
+		return -ERANGE;
+	p->op_this_node = nodenum;
+
+	return ocfs2_control_install_private(file);
+}
+
+static int ocfs2_control_do_setversion_msg(struct file *file,
+					   struct ocfs2_control_message_setv *msg)
+{
+	long major, minor;
+	char *ptr = NULL;
+	struct ocfs2_control_private *p = file->private_data;
+	struct ocfs2_protocol_version *max =
+		&user_stack.sp_proto->lp_max_version;
+
+	if (ocfs2_control_get_handshake_state(file) !=
+	    OCFS2_CONTROL_HANDSHAKE_PROTOCOL)
+		return -EINVAL;
+
+	if (strncmp(msg->tag, OCFS2_CONTROL_MESSAGE_SETVERSION_OP,
+		    OCFS2_CONTROL_MESSAGE_OP_LEN))
+		return -EINVAL;
+
+	if ((msg->space1 != ' ') || (msg->space2 != ' ') ||
+	    (msg->newline != '\n'))
+		return -EINVAL;
+	msg->space1 = msg->space2 = msg->newline = '\0';
+
+	major = simple_strtol(msg->major, &ptr, 16);
+	if (!ptr || *ptr)
+		return -EINVAL;
+	minor = simple_strtol(msg->minor, &ptr, 16);
+	if (!ptr || *ptr)
+		return -EINVAL;
+
+	/*
+	 * The major must be between 1 and 255, inclusive.  The minor
+	 * must be between 0 and 255, inclusive.  The version passed in
+	 * must be within the maximum version supported by the filesystem.
+	 */
+	if ((major == LONG_MIN) || (major == LONG_MAX) ||
+	    (major > (u8)-1) || (major < 1))
+		return -ERANGE;
+	if ((minor == LONG_MIN) || (minor == LONG_MAX) ||
+	    (minor > (u8)-1) || (minor < 0))
+		return -ERANGE;
+	if ((major != max->pv_major) ||
+	    (minor > max->pv_minor))
+		return -EINVAL;
+
+	p->op_proto.pv_major = major;
+	p->op_proto.pv_minor = minor;
+
+	return ocfs2_control_install_private(file);
+}
+
+static int ocfs2_control_do_down_msg(struct file *file,
+				     struct ocfs2_control_message_down *msg)
+{
+	long nodenum;
+	char *p = NULL;
+
+	if (ocfs2_control_get_handshake_state(file) !=
+	    OCFS2_CONTROL_HANDSHAKE_VALID)
+		return -EINVAL;
+
+	if (strncmp(msg->tag, OCFS2_CONTROL_MESSAGE_DOWN_OP,
+		    OCFS2_CONTROL_MESSAGE_OP_LEN))
+		return -EINVAL;
+
+	if ((msg->space1 != ' ') || (msg->space2 != ' ') ||
+	    (msg->newline != '\n'))
+		return -EINVAL;
+	msg->space1 = msg->space2 = msg->newline = '\0';
+
+	nodenum = simple_strtol(msg->nodestr, &p, 16);
+	if (!p || *p)
+		return -EINVAL;
+
+	if ((nodenum == LONG_MIN) || (nodenum == LONG_MAX) ||
+	    (nodenum > INT_MAX) || (nodenum < 0))
+		return -ERANGE;
+
+	ocfs2_control_send_down(msg->uuid, nodenum);
+
+	return 0;
+}
+
+static ssize_t ocfs2_control_message(struct file *file,
+				     const char __user *buf,
+				     size_t count)
+{
+	ssize_t ret;
+	union ocfs2_control_message msg;
+
+	/* Try to catch padding issues */
+	WARN_ON(offsetof(struct ocfs2_control_message_down, uuid) !=
+		(sizeof(msg.u_down.tag) + sizeof(msg.u_down.space1)));
+
+	memset(&msg, 0, sizeof(union ocfs2_control_message));
+	ret = ocfs2_control_cfu(&msg, count, buf, count);
+	if (ret)
+		goto out;
+
+	if ((count == OCFS2_CONTROL_MESSAGE_SETNODE_TOTAL_LEN) &&
+	    !strncmp(msg.tag, OCFS2_CONTROL_MESSAGE_SETNODE_OP,
+		     OCFS2_CONTROL_MESSAGE_OP_LEN))
+		ret = ocfs2_control_do_setnode_msg(file, &msg.u_setn);
+	else if ((count == OCFS2_CONTROL_MESSAGE_SETVERSION_TOTAL_LEN) &&
+		 !strncmp(msg.tag, OCFS2_CONTROL_MESSAGE_SETVERSION_OP,
+			  OCFS2_CONTROL_MESSAGE_OP_LEN))
+		ret = ocfs2_control_do_setversion_msg(file, &msg.u_setv);
+	else if ((count == OCFS2_CONTROL_MESSAGE_DOWN_TOTAL_LEN) &&
+		 !strncmp(msg.tag, OCFS2_CONTROL_MESSAGE_DOWN_OP,
+			  OCFS2_CONTROL_MESSAGE_OP_LEN))
+		ret = ocfs2_control_do_down_msg(file, &msg.u_down);
+	else
+		ret = -EINVAL;
+
+out:
+	return ret ? ret : count;
+}
+
+static ssize_t ocfs2_control_write(struct file *file,
+				   const char __user *buf,
+				   size_t count,
+				   loff_t *ppos)
+{
+	ssize_t ret;
+
+	switch (ocfs2_control_get_handshake_state(file)) {
+		case OCFS2_CONTROL_HANDSHAKE_INVALID:
+			ret = -EINVAL;
+			break;
+
+		case OCFS2_CONTROL_HANDSHAKE_READ:
+			ret = ocfs2_control_validate_protocol(file, buf,
+							      count);
+			break;
+
+		case OCFS2_CONTROL_HANDSHAKE_PROTOCOL:
+		case OCFS2_CONTROL_HANDSHAKE_VALID:
+			ret = ocfs2_control_message(file, buf, count);
+			break;
+
+		default:
+			BUG();
+			ret = -EIO;
+			break;
+	}
+
+	return ret;
+}
+
+/*
+ * This is a naive version.  If we ever have a new protocol, we'll expand
+ * it.  Probably using seq_file.
+ */
+static ssize_t ocfs2_control_read(struct file *file,
+				  char __user *buf,
+				  size_t count,
+				  loff_t *ppos)
+{
+	char *proto_string = OCFS2_CONTROL_PROTO;
+	size_t to_write = 0;
+
+	if (*ppos >= OCFS2_CONTROL_PROTO_LEN)
+		return 0;
+
+	to_write = OCFS2_CONTROL_PROTO_LEN - *ppos;
+	if (to_write > count)
+		to_write = count;
+	if (copy_to_user(buf, proto_string + *ppos, to_write))
+		return -EFAULT;
+
+	*ppos += to_write;
+
+	/* Have we read the whole protocol list? */
+	if (*ppos >= OCFS2_CONTROL_PROTO_LEN)
+		ocfs2_control_set_handshake_state(file,
+						  OCFS2_CONTROL_HANDSHAKE_READ);
+
+	return to_write;
+}
+
+static int ocfs2_control_release(struct inode *inode, struct file *file)
+{
+	struct ocfs2_control_private *p = file->private_data;
+
+	mutex_lock(&ocfs2_control_lock);
+
+	if (ocfs2_control_get_handshake_state(file) !=
+	    OCFS2_CONTROL_HANDSHAKE_VALID)
+		goto out;
+
+	if (atomic_dec_and_test(&ocfs2_control_opened)) {
+		if (!list_empty(&ocfs2_live_connection_list)) {
+			/* XXX: Do bad things! */
+			printk(KERN_ERR
+			       "ocfs2: Unexpected release of ocfs2_control!\n"
+			       "       Loss of cluster connection requires "
+			       "an emergency restart!\n");
+			emergency_restart();
+		}
+		/*
+		 * Last valid close clears the node number and resets
+		 * the locking protocol version
+		 */
+		ocfs2_control_this_node = -1;
+		running_proto.pv_major = 0;
+		running_proto.pv_minor = 0;
+	}
+
+out:
+	list_del_init(&p->op_list);
+	file->private_data = NULL;
+
+	mutex_unlock(&ocfs2_control_lock);
+
+	kfree(p);
+
+	return 0;
+}
+
+static int ocfs2_control_open(struct inode *inode, struct file *file)
+{
+	struct ocfs2_control_private *p;
+
+	p = kzalloc(sizeof(struct ocfs2_control_private), GFP_KERNEL);
+	if (!p)
+		return -ENOMEM;
+	p->op_this_node = -1;
+
+	mutex_lock(&ocfs2_control_lock);
+	file->private_data = p;
+	list_add(&p->op_list, &ocfs2_control_private_list);
+	mutex_unlock(&ocfs2_control_lock);
+
+	return 0;
+}
+
+static const struct file_operations ocfs2_control_fops = {
+	.open    = ocfs2_control_open,
+	.release = ocfs2_control_release,
+	.read    = ocfs2_control_read,
+	.write   = ocfs2_control_write,
+	.owner   = THIS_MODULE,
+};
+
+struct miscdevice ocfs2_control_device = {
+	.minor		= MISC_DYNAMIC_MINOR,
+	.name		= "ocfs2_control",
+	.fops		= &ocfs2_control_fops,
+};
+
+static int ocfs2_control_init(void)
+{
+	int rc;
+
+	atomic_set(&ocfs2_control_opened, 0);
+
+	rc = misc_register(&ocfs2_control_device);
+	if (rc)
+		printk(KERN_ERR
+		       "ocfs2: Unable to register ocfs2_control device "
+		       "(errno %d)\n",
+		       -rc);
+
+	return rc;
+}
+
+static void ocfs2_control_exit(void)
+{
+	int rc;
+
+	rc = misc_deregister(&ocfs2_control_device);
+	if (rc)
+		printk(KERN_ERR
+		       "ocfs2: Unable to deregister ocfs2_control device "
+		       "(errno %d)\n",
+		       -rc);
+}
+
+static struct dlm_lksb *fsdlm_astarg_to_lksb(void *astarg)
+{
+	struct ocfs2_lock_res *res = astarg;
+	return &res->l_lksb.lksb_fsdlm;
+}
+
+static void fsdlm_lock_ast_wrapper(void *astarg)
+{
+	struct dlm_lksb *lksb = fsdlm_astarg_to_lksb(astarg);
+	int status = lksb->sb_status;
+
+	BUG_ON(user_stack.sp_proto == NULL);
+
+	/*
+	 * For now we're punting on the issue of other non-standard errors
+	 * where we can't tell if the unlock_ast or lock_ast should be called.
+	 * The main "other error" that's possible is EINVAL which means the
+	 * function was called with invalid args, which shouldn't be possible
+	 * since the caller here is under our control.  Other non-standard
+	 * errors probably fall into the same category, or otherwise are fatal
+	 * which means we can't carry on anyway.
+	 */
+
+	if (status == -DLM_EUNLOCK || status == -DLM_ECANCEL)
+		user_stack.sp_proto->lp_unlock_ast(astarg, 0);
+	else
+		user_stack.sp_proto->lp_lock_ast(astarg);
+}
+
+static void fsdlm_blocking_ast_wrapper(void *astarg, int level)
+{
+	BUG_ON(user_stack.sp_proto == NULL);
+
+	user_stack.sp_proto->lp_blocking_ast(astarg, level);
+}
+
+static int user_dlm_lock(struct ocfs2_cluster_connection *conn,
+			 int mode,
+			 union ocfs2_dlm_lksb *lksb,
+			 u32 flags,
+			 void *name,
+			 unsigned int namelen,
+			 void *astarg)
+{
+	int ret;
+
+	if (!lksb->lksb_fsdlm.sb_lvbptr)
+		lksb->lksb_fsdlm.sb_lvbptr = (char *)lksb +
+					     sizeof(struct dlm_lksb);
+
+	ret = dlm_lock(conn->cc_lockspace, mode, &lksb->lksb_fsdlm,
+		       flags|DLM_LKF_NODLCKWT, name, namelen, 0,
+		       fsdlm_lock_ast_wrapper, astarg,
+		       fsdlm_blocking_ast_wrapper);
+	return ret;
+}
+
+static int user_dlm_unlock(struct ocfs2_cluster_connection *conn,
+			   union ocfs2_dlm_lksb *lksb,
+			   u32 flags,
+			   void *astarg)
+{
+	int ret;
+
+	ret = dlm_unlock(conn->cc_lockspace, lksb->lksb_fsdlm.sb_lkid,
+			 flags, &lksb->lksb_fsdlm, astarg);
+	return ret;
+}
+
+static int user_dlm_lock_status(union ocfs2_dlm_lksb *lksb)
+{
+	return lksb->lksb_fsdlm.sb_status;
+}
+
+static void *user_dlm_lvb(union ocfs2_dlm_lksb *lksb)
+{
+	return (void *)(lksb->lksb_fsdlm.sb_lvbptr);
+}
+
+static void user_dlm_dump_lksb(union ocfs2_dlm_lksb *lksb)
+{
+}
+
+/*
+ * Compare a requested locking protocol version against the current one.
+ *
+ * If the major numbers are different, they are incompatible.
+ * If the current minor is greater than the request, they are incompatible.
+ * If the current minor is less than or equal to the request, they are
+ * compatible, and the requester should run at the current minor version.
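+ *
+ * For example (hypothetical numbers): existing 1.4 against request 1.6
+ * is compatible and the request is lowered to 1.4; existing 1.4 against
+ * request 1.2 is incompatible, as are any two differing majors.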
+ */
+static int fs_protocol_compare(struct ocfs2_protocol_version *existing,
+			       struct ocfs2_protocol_version *request)
+{
+	if (existing->pv_major != request->pv_major)
+		return 1;
+
+	if (existing->pv_minor > request->pv_minor)
+		return 1;
+
+	if (existing->pv_minor < request->pv_minor)
+		request->pv_minor = existing->pv_minor;
+
+	return 0;
+}
+
+static int user_cluster_connect(struct ocfs2_cluster_connection *conn)
+{
+	dlm_lockspace_t *fsdlm;
+	struct ocfs2_live_connection *control;
+	int rc = 0;
+
+	BUG_ON(conn == NULL);
+
+	rc = ocfs2_live_connection_new(conn, &control);
+	if (rc)
+		goto out;
+
+	/*
+	 * running_proto must have been set before we allowed any mounts
+	 * to proceed.
+	 */
+	if (fs_protocol_compare(&running_proto, &conn->cc_version)) {
+		printk(KERN_ERR
+		       "Unable to mount with fs locking protocol version "
+		       "%u.%u because the userspace control daemon has "
+		       "negotiated %u.%u\n",
+		       conn->cc_version.pv_major, conn->cc_version.pv_minor,
+		       running_proto.pv_major, running_proto.pv_minor);
+		rc = -EPROTO;
+		ocfs2_live_connection_drop(control);
+		goto out;
+	}
+
+	rc = dlm_new_lockspace(conn->cc_name, strlen(conn->cc_name),
+			       &fsdlm, DLM_LSFL_FS, DLM_LVB_LEN);
+	if (rc) {
+		ocfs2_live_connection_drop(control);
+		goto out;
+	}
+
+	conn->cc_private = control;
+	conn->cc_lockspace = fsdlm;
+out:
+	return rc;
+}
+
+static int user_cluster_disconnect(struct ocfs2_cluster_connection *conn,
+				   int hangup_pending)
+{
+	dlm_release_lockspace(conn->cc_lockspace, 2);
+	conn->cc_lockspace = NULL;
+	ocfs2_live_connection_drop(conn->cc_private);
+	conn->cc_private = NULL;
+	return 0;
+}
+
+static int user_cluster_this_node(unsigned int *this_node)
+{
+	int rc;
+
+	rc = ocfs2_control_get_this_node();
+	if (rc < 0)
+		return rc;
+
+	*this_node = rc;
+	return 0;
+}
+
+static struct ocfs2_stack_operations user_stack_ops = {
+	.connect	= user_cluster_connect,
+	.disconnect	= user_cluster_disconnect,
+	.this_node	= user_cluster_this_node,
+	.dlm_lock	= user_dlm_lock,
+	.dlm_unlock	= user_dlm_unlock,
+	.lock_status	= user_dlm_lock_status,
+	.lock_lvb	= user_dlm_lvb,
+	.dump_lksb	= user_dlm_dump_lksb,
+};
+
+static struct ocfs2_stack_plugin user_stack = {
+	.sp_name	= "user",
+	.sp_ops		= &user_stack_ops,
+	.sp_owner	= THIS_MODULE,
+};
+
+
+static int __init user_stack_init(void)
+{
+	int rc;
+
+	rc = ocfs2_control_init();
+	if (!rc) {
+		rc = ocfs2_stack_glue_register(&user_stack);
+		if (rc)
+			ocfs2_control_exit();
+	}
+
+	return rc;
+}
+
+static void __exit user_stack_exit(void)
+{
+	ocfs2_stack_glue_unregister(&user_stack);
+	ocfs2_control_exit();
+}
+
+MODULE_AUTHOR("Oracle");
+MODULE_DESCRIPTION("ocfs2 driver for userspace cluster stacks");
+MODULE_LICENSE("GPL");
+module_init(user_stack_init);
+module_exit(user_stack_exit);
diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
new file mode 100644
index 0000000..119f60c
--- /dev/null
+++ b/fs/ocfs2/stackglue.c
@@ -0,0 +1,568 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * stackglue.c
+ *
+ * Code which implements an OCFS2 specific interface to underlying
+ * cluster stacks.
+ *
+ * Copyright (C) 2007 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/kmod.h>
+#include <linux/fs.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+
+#include "ocfs2_fs.h"
+
+#include "stackglue.h"
+
+#define OCFS2_STACK_PLUGIN_O2CB		"o2cb"
+#define OCFS2_STACK_PLUGIN_USER		"user"
+
+static struct ocfs2_locking_protocol *lproto;
+static DEFINE_SPINLOCK(ocfs2_stack_lock);
+static LIST_HEAD(ocfs2_stack_list);
+static char cluster_stack_name[OCFS2_STACK_LABEL_LEN + 1];
+
+/*
+ * The stack currently in use.  If not null, active_stack->sp_count > 0,
+ * the module is pinned, and the locking protocol cannot be changed.
+ */
+static struct ocfs2_stack_plugin *active_stack;
+
+static struct ocfs2_stack_plugin *ocfs2_stack_lookup(const char *name)
+{
+	struct ocfs2_stack_plugin *p;
+
+	assert_spin_locked(&ocfs2_stack_lock);
+
+	list_for_each_entry(p, &ocfs2_stack_list, sp_list) {
+		if (!strcmp(p->sp_name, name))
+			return p;
+	}
+
+	return NULL;
+}
+
+static int ocfs2_stack_driver_request(const char *stack_name,
+				      const char *plugin_name)
+{
+	int rc;
+	struct ocfs2_stack_plugin *p;
+
+	spin_lock(&ocfs2_stack_lock);
+
+	/*
+	 * If the stack passed by the filesystem isn't the selected one,
+	 * we can't continue.
+	 */
+	if (strcmp(stack_name, cluster_stack_name)) {
+		rc = -EBUSY;
+		goto out;
+	}
+
+	if (active_stack) {
+		/*
+		 * If the active stack isn't the one we want, it cannot
+		 * be selected right now.
+		 */
+		if (!strcmp(active_stack->sp_name, plugin_name))
+			rc = 0;
+		else
+			rc = -EBUSY;
+		goto out;
+	}
+
+	p = ocfs2_stack_lookup(plugin_name);
+	if (!p || !try_module_get(p->sp_owner)) {
+		rc = -ENOENT;
+		goto out;
+	}
+
+	/* Ok, the stack is pinned */
+	p->sp_count++;
+	active_stack = p;
+
+	rc = 0;
+
+out:
+	spin_unlock(&ocfs2_stack_lock);
+	return rc;
+}
+
+/*
+ * This function looks up the appropriate stack and makes it active.  If
+ * there is no stack, it tries to load it.  It will fail if the stack still
+ * cannot be found.  It will also fail if a different stack is in use.
+ */
+static int ocfs2_stack_driver_get(const char *stack_name)
+{
+	int rc;
+	char *plugin_name = OCFS2_STACK_PLUGIN_O2CB;
+
+	/*
+	 * Classic stack does not pass in a stack name.  This is
+	 * compatible with older tools as well.
+	 */
+	if (!stack_name || !*stack_name)
+		stack_name = OCFS2_STACK_PLUGIN_O2CB;
+
+	if (strlen(stack_name) != OCFS2_STACK_LABEL_LEN) {
+		printk(KERN_ERR
+		       "ocfs2 passed an invalid cluster stack label: \"%s\"\n",
+		       stack_name);
+		return -EINVAL;
+	}
+
+	/* Anything that isn't the classic stack is a user stack */
+	if (strcmp(stack_name, OCFS2_STACK_PLUGIN_O2CB))
+		plugin_name = OCFS2_STACK_PLUGIN_USER;
+
+	rc = ocfs2_stack_driver_request(stack_name, plugin_name);
+	if (rc == -ENOENT) {
+		request_module("ocfs2_stack_%s", plugin_name);
+		rc = ocfs2_stack_driver_request(stack_name, plugin_name);
+	}
+
+	if (rc == -ENOENT) {
+		printk(KERN_ERR
+		       "ocfs2: Cluster stack driver \"%s\" cannot be found\n",
+		       plugin_name);
+	} else if (rc == -EBUSY) {
+		printk(KERN_ERR
+		       "ocfs2: A different cluster stack is in use\n");
+	}
+
+	return rc;
+}
+
+static void ocfs2_stack_driver_put(void)
+{
+	spin_lock(&ocfs2_stack_lock);
+	BUG_ON(active_stack == NULL);
+	BUG_ON(active_stack->sp_count == 0);
+
+	active_stack->sp_count--;
+	if (!active_stack->sp_count) {
+		module_put(active_stack->sp_owner);
+		active_stack = NULL;
+	}
+	spin_unlock(&ocfs2_stack_lock);
+}
+
+int ocfs2_stack_glue_register(struct ocfs2_stack_plugin *plugin)
+{
+	int rc;
+
+	spin_lock(&ocfs2_stack_lock);
+	if (!ocfs2_stack_lookup(plugin->sp_name)) {
+		plugin->sp_count = 0;
+		plugin->sp_proto = lproto;
+		list_add(&plugin->sp_list, &ocfs2_stack_list);
+		printk(KERN_INFO "ocfs2: Registered cluster interface %s\n",
+		       plugin->sp_name);
+		rc = 0;
+	} else {
+		printk(KERN_ERR "ocfs2: Stack \"%s\" already registered\n",
+		       plugin->sp_name);
+		rc = -EEXIST;
+	}
+	spin_unlock(&ocfs2_stack_lock);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(ocfs2_stack_glue_register);
+
+void ocfs2_stack_glue_unregister(struct ocfs2_stack_plugin *plugin)
+{
+	struct ocfs2_stack_plugin *p;
+
+	spin_lock(&ocfs2_stack_lock);
+	p = ocfs2_stack_lookup(plugin->sp_name);
+	if (p) {
+		BUG_ON(p != plugin);
+		BUG_ON(plugin == active_stack);
+		BUG_ON(plugin->sp_count != 0);
+		list_del_init(&plugin->sp_list);
+		printk(KERN_INFO "ocfs2: Unregistered cluster interface %s\n",
+		       plugin->sp_name);
+	} else {
+		printk(KERN_ERR "Stack \"%s\" is not registered\n",
+		       plugin->sp_name);
+	}
+	spin_unlock(&ocfs2_stack_lock);
+}
+EXPORT_SYMBOL_GPL(ocfs2_stack_glue_unregister);
+
+void ocfs2_stack_glue_set_locking_protocol(struct ocfs2_locking_protocol *proto)
+{
+	struct ocfs2_stack_plugin *p;
+
+	BUG_ON(proto == NULL);
+
+	spin_lock(&ocfs2_stack_lock);
+	BUG_ON(active_stack != NULL);
+
+	lproto = proto;
+	list_for_each_entry(p, &ocfs2_stack_list, sp_list) {
+		p->sp_proto = lproto;
+	}
+
+	spin_unlock(&ocfs2_stack_lock);
+}
+EXPORT_SYMBOL_GPL(ocfs2_stack_glue_set_locking_protocol);
+
+
+/*
+ * The ocfs2_dlm_lock() and ocfs2_dlm_unlock() functions take
+ * "struct ocfs2_lock_res *astarg" instead of "void *astarg" because the
+ * underlying stack plugins need to pilfer the lksb off of the lock_res.
+ * If some other structure needs to be passed as an astarg, the plugins
+ * will need to be given a different avenue to the lksb.
+ */
+int ocfs2_dlm_lock(struct ocfs2_cluster_connection *conn,
+		   int mode,
+		   union ocfs2_dlm_lksb *lksb,
+		   u32 flags,
+		   void *name,
+		   unsigned int namelen,
+		   struct ocfs2_lock_res *astarg)
+{
+	BUG_ON(lproto == NULL);
+
+	return active_stack->sp_ops->dlm_lock(conn, mode, lksb, flags,
+					      name, namelen, astarg);
+}
+EXPORT_SYMBOL_GPL(ocfs2_dlm_lock);
+
+int ocfs2_dlm_unlock(struct ocfs2_cluster_connection *conn,
+		     union ocfs2_dlm_lksb *lksb,
+		     u32 flags,
+		     struct ocfs2_lock_res *astarg)
+{
+	BUG_ON(lproto == NULL);
+
+	return active_stack->sp_ops->dlm_unlock(conn, lksb, flags, astarg);
+}
+EXPORT_SYMBOL_GPL(ocfs2_dlm_unlock);
+
+int ocfs2_dlm_lock_status(union ocfs2_dlm_lksb *lksb)
+{
+	return active_stack->sp_ops->lock_status(lksb);
+}
+EXPORT_SYMBOL_GPL(ocfs2_dlm_lock_status);
+
+/*
+ * Why don't we cast to ocfs2_meta_lvb?  The "clean" answer is that we
+ * don't cast at the glue level.  The real answer is that the header
+ * ordering is nigh impossible.
+ */
+void *ocfs2_dlm_lvb(union ocfs2_dlm_lksb *lksb)
+{
+	return active_stack->sp_ops->lock_lvb(lksb);
+}
+EXPORT_SYMBOL_GPL(ocfs2_dlm_lvb);
+
+void ocfs2_dlm_dump_lksb(union ocfs2_dlm_lksb *lksb)
+{
+	active_stack->sp_ops->dump_lksb(lksb);
+}
+EXPORT_SYMBOL_GPL(ocfs2_dlm_dump_lksb);
+
+int ocfs2_cluster_connect(const char *stack_name,
+			  const char *group,
+			  int grouplen,
+			  void (*recovery_handler)(int node_num,
+						   void *recovery_data),
+			  void *recovery_data,
+			  struct ocfs2_cluster_connection **conn)
+{
+	int rc = 0;
+	struct ocfs2_cluster_connection *new_conn;
+
+	BUG_ON(group == NULL);
+	BUG_ON(conn == NULL);
+	BUG_ON(recovery_handler == NULL);
+
+	if (grouplen > GROUP_NAME_MAX) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	new_conn = kzalloc(sizeof(struct ocfs2_cluster_connection),
+			   GFP_KERNEL);
+	if (!new_conn) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	memcpy(new_conn->cc_name, group, grouplen);
+	new_conn->cc_namelen = grouplen;
+	new_conn->cc_recovery_handler = recovery_handler;
+	new_conn->cc_recovery_data = recovery_data;
+
+	/* Start the new connection at our maximum compatibility level */
+	new_conn->cc_version = lproto->lp_max_version;
+
+	/* This will pin the stack driver if successful */
+	rc = ocfs2_stack_driver_get(stack_name);
+	if (rc)
+		goto out_free;
+
+	rc = active_stack->sp_ops->connect(new_conn);
+	if (rc) {
+		ocfs2_stack_driver_put();
+		goto out_free;
+	}
+
+	*conn = new_conn;
+
+out_free:
+	if (rc)
+		kfree(new_conn);
+
+out:
+	return rc;
+}
+EXPORT_SYMBOL_GPL(ocfs2_cluster_connect);
+
+/* If hangup_pending is 0, the stack driver will be dropped */
+int ocfs2_cluster_disconnect(struct ocfs2_cluster_connection *conn,
+			     int hangup_pending)
+{
+	int ret;
+
+	BUG_ON(conn == NULL);
+
+	ret = active_stack->sp_ops->disconnect(conn, hangup_pending);
+
+	/* XXX Should we free it anyway? */
+	if (!ret) {
+		kfree(conn);
+		if (!hangup_pending)
+			ocfs2_stack_driver_put();
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ocfs2_cluster_disconnect);
+
+void ocfs2_cluster_hangup(const char *group, int grouplen)
+{
+	BUG_ON(group == NULL);
+	BUG_ON(group[grouplen] != '\0');
+
+	if (active_stack->sp_ops->hangup)
+		active_stack->sp_ops->hangup(group, grouplen);
+
+	/* cluster_disconnect() was called with hangup_pending==1 */
+	ocfs2_stack_driver_put();
+}
+EXPORT_SYMBOL_GPL(ocfs2_cluster_hangup);
+
+int ocfs2_cluster_this_node(unsigned int *node)
+{
+	return active_stack->sp_ops->this_node(node);
+}
+EXPORT_SYMBOL_GPL(ocfs2_cluster_this_node);
+
+
+/*
+ * Sysfs bits
+ */
+
+static ssize_t ocfs2_max_locking_protocol_show(struct kobject *kobj,
+					       struct kobj_attribute *attr,
+					       char *buf)
+{
+	ssize_t ret = 0;
+
+	spin_lock(&ocfs2_stack_lock);
+	if (lproto)
+		ret = snprintf(buf, PAGE_SIZE, "%u.%u\n",
+			       lproto->lp_max_version.pv_major,
+			       lproto->lp_max_version.pv_minor);
+	spin_unlock(&ocfs2_stack_lock);
+
+	return ret;
+}
+
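+/*
+ * The attribute prints the version as "%u.%u", so a hypothetical
+ * session might see:
+ *
+ *   # cat /sys/fs/ocfs2/max_locking_protocol
+ *   1.0
+ */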
+static struct kobj_attribute ocfs2_attr_max_locking_protocol =
+	__ATTR(max_locking_protocol, S_IFREG | S_IRUGO,
+	       ocfs2_max_locking_protocol_show, NULL);
+
+static ssize_t ocfs2_loaded_cluster_plugins_show(struct kobject *kobj,
+						 struct kobj_attribute *attr,
+						 char *buf)
+{
+	ssize_t ret = 0, total = 0, remain = PAGE_SIZE;
+	struct ocfs2_stack_plugin *p;
+
+	spin_lock(&ocfs2_stack_lock);
+	list_for_each_entry(p, &ocfs2_stack_list, sp_list) {
+		ret = snprintf(buf + total, remain, "%s\n",
+			       p->sp_name);
+		if (ret < 0) {
+			total = ret;
+			break;
+		}
+		if (ret >= remain) {
+			/* snprintf() didn't fit */
+			total = -E2BIG;
+			break;
+		}
+		total += ret;
+		remain -= ret;
+	}
+	spin_unlock(&ocfs2_stack_lock);
+
+	return total;
+}
+
+static struct kobj_attribute ocfs2_attr_loaded_cluster_plugins =
+	__ATTR(loaded_cluster_plugins, S_IFREG | S_IRUGO,
+	       ocfs2_loaded_cluster_plugins_show, NULL);
+
+static ssize_t ocfs2_active_cluster_plugin_show(struct kobject *kobj,
+						struct kobj_attribute *attr,
+						char *buf)
+{
+	ssize_t ret = 0;
+
+	spin_lock(&ocfs2_stack_lock);
+	if (active_stack) {
+		ret = snprintf(buf, PAGE_SIZE, "%s\n",
+			       active_stack->sp_name);
+		if (ret >= PAGE_SIZE)
+			ret = -E2BIG;
+	}
+	spin_unlock(&ocfs2_stack_lock);
+
+	return ret;
+}
+
+static struct kobj_attribute ocfs2_attr_active_cluster_plugin =
+	__ATTR(active_cluster_plugin, S_IFREG | S_IRUGO,
+	       ocfs2_active_cluster_plugin_show, NULL);
+
+static ssize_t ocfs2_cluster_stack_show(struct kobject *kobj,
+					struct kobj_attribute *attr,
+					char *buf)
+{
+	ssize_t ret;
+
+	spin_lock(&ocfs2_stack_lock);
+	ret = snprintf(buf, PAGE_SIZE, "%s\n", cluster_stack_name);
+	spin_unlock(&ocfs2_stack_lock);
+
+	return ret;
+}
+
+static ssize_t ocfs2_cluster_stack_store(struct kobject *kobj,
+					 struct kobj_attribute *attr,
+					 const char *buf, size_t count)
+{
+	size_t len = count;
+	ssize_t ret;
+
+	if (len == 0)
+		return len;
+
+	if (buf[len - 1] == '\n')
+		len--;
+
+	if ((len != OCFS2_STACK_LABEL_LEN) ||
+	    (strnlen(buf, len) != len))
+		return -EINVAL;
+
+	spin_lock(&ocfs2_stack_lock);
+	if (active_stack) {
+		if (!strncmp(buf, cluster_stack_name, len))
+			ret = count;
+		else
+			ret = -EBUSY;
+	} else {
+		memcpy(cluster_stack_name, buf, len);
+		ret = count;
+	}
+	spin_unlock(&ocfs2_stack_lock);
+
+	return ret;
+}
+
+static struct kobj_attribute ocfs2_attr_cluster_stack =
+	__ATTR(cluster_stack, S_IFREG | S_IRUGO | S_IWUSR,
+	       ocfs2_cluster_stack_show,
+	       ocfs2_cluster_stack_store);
+
+static struct attribute *ocfs2_attrs[] = {
+	&ocfs2_attr_max_locking_protocol.attr,
+	&ocfs2_attr_loaded_cluster_plugins.attr,
+	&ocfs2_attr_active_cluster_plugin.attr,
+	&ocfs2_attr_cluster_stack.attr,
+	NULL,
+};
+
+static struct attribute_group ocfs2_attr_group = {
+	.attrs = ocfs2_attrs,
+};
+
+static struct kset *ocfs2_kset;
+
+static void ocfs2_sysfs_exit(void)
+{
+	kset_unregister(ocfs2_kset);
+}
+
+static int ocfs2_sysfs_init(void)
+{
+	int ret;
+
+	ocfs2_kset = kset_create_and_add("ocfs2", NULL, fs_kobj);
+	if (!ocfs2_kset)
+		return -ENOMEM;
+
+	ret = sysfs_create_group(&ocfs2_kset->kobj, &ocfs2_attr_group);
+	if (ret)
+		goto error;
+
+	return 0;
+
+error:
+	kset_unregister(ocfs2_kset);
+	return ret;
+}
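
Because the kset is created under fs_kobj, the four attributes above appear
as /sys/fs/ocfs2/max_locking_protocol, loaded_cluster_plugins,
active_cluster_plugin, and cluster_stack.  A hedged userspace sketch of
selecting a stack before mounting (assuming the "user" plugin label and that
OCFS2_STACK_LABEL_LEN is 4; the store returns -EBUSY once a stack is active):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[16];
		ssize_t n;
		int fd = open("/sys/fs/ocfs2/cluster_stack", O_RDWR);

		if (fd < 0)
			return 1;
		/* exactly OCFS2_STACK_LABEL_LEN chars; trailing '\n' is ok */
		if (write(fd, "user\n", 5) < 0)
			perror("cluster_stack");
		n = pread(fd, buf, sizeof(buf) - 1, 0);
		if (n > 0) {
			buf[n] = '\0';
			printf("selected stack: %s", buf);
		}
		close(fd);
		return 0;
	}
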
+
+static int __init ocfs2_stack_glue_init(void)
+{
+	strcpy(cluster_stack_name, OCFS2_STACK_PLUGIN_O2CB);
+
+	return ocfs2_sysfs_init();
+}
+
+static void __exit ocfs2_stack_glue_exit(void)
+{
+	lproto = NULL;
+	ocfs2_sysfs_exit();
+}
+
+MODULE_AUTHOR("Oracle");
+MODULE_DESCRIPTION("ocfs2 cluter stack glue layer");
+MODULE_LICENSE("GPL");
+module_init(ocfs2_stack_glue_init);
+module_exit(ocfs2_stack_glue_exit);
diff --git a/fs/ocfs2/stackglue.h b/fs/ocfs2/stackglue.h
new file mode 100644
index 0000000..005e4f1
--- /dev/null
+++ b/fs/ocfs2/stackglue.h
@@ -0,0 +1,261 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * stackglue.h
+ *
+ * Glue to the underlying cluster stack.
+ *
+ * Copyright (C) 2007 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+
+#ifndef STACKGLUE_H
+#define STACKGLUE_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/dlmconstants.h>
+
+#include "dlm/dlmapi.h"
+#include <linux/dlm.h>
+
+/*
+ * dlmconstants.h does not have a LOCAL flag.  We hope to remove it
+ * some day, but right now we need it.  Let's fake it.  This value is larger
+ * than any flag in dlmconstants.h.
+ */
+#define DLM_LKF_LOCAL		0x00100000
+
+/*
+ * This shadows DLM_LOCKSPACE_LEN in fs/dlm/dlm_internal.h.  That probably
+ * wants to be in a public header.
+ */
+#define GROUP_NAME_MAX		64
+
+
+/*
+ * ocfs2_protocol_version changes when ocfs2 does something different in
+ * its inter-node behavior.  See dlmglue.c for more information.
+ */
+struct ocfs2_protocol_version {
+	u8 pv_major;
+	u8 pv_minor;
+};
+
+/*
+ * The ocfs2_locking_protocol defines the handlers called on ocfs2's behalf.
+ */
+struct ocfs2_locking_protocol {
+	struct ocfs2_protocol_version lp_max_version;
+	void (*lp_lock_ast)(void *astarg);
+	void (*lp_blocking_ast)(void *astarg, int level);
+	void (*lp_unlock_ast)(void *astarg, int error);
+};
+
+
+/*
+ * The dlm_lockstatus struct includes lvb space, but the dlm_lksb struct only
+ * has a pointer to separately allocated lvb space.  This struct exists only
+ * to be embedded in the lksb union so that the union reserves space for a
+ * combined dlm_lksb and lvb.
+ */
+struct fsdlm_lksb_plus_lvb {
+	struct dlm_lksb lksb;
+	char lvb[DLM_LVB_LEN];
+};
+
+/*
+ * A union of all lock status structures.  We define it here so that the
+ * size of the union is known.  Lock status structures are embedded in
+ * ocfs2 inodes.
+ */
+union ocfs2_dlm_lksb {
+	struct dlm_lockstatus lksb_o2dlm;
+	struct dlm_lksb lksb_fsdlm;
+	struct fsdlm_lksb_plus_lvb padding;
+};
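
The padding trick is easy to verify: the union must be at least as large as
a dlm_lksb plus a full lvb, so embedding it in an inode reserves lvb space
whichever stack is active.  A hedged sketch (the helper name is invented;
BUILD_BUG_ON comes from linux/kernel.h):

	static inline void ocfs2_lksb_size_check(void)
	{
		BUILD_BUG_ON(sizeof(union ocfs2_dlm_lksb) <
			     sizeof(struct dlm_lksb) + DLM_LVB_LEN);
	}
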
+
+/*
+ * A cluster connection.  Mostly opaque to ocfs2, the connection holds
+ * state for the underlying stack.  ocfs2 does use cc_version to determine
+ * locking compatibility.
+ */
+struct ocfs2_cluster_connection {
+	char cc_name[GROUP_NAME_MAX];
+	int cc_namelen;
+	struct ocfs2_protocol_version cc_version;
+	void (*cc_recovery_handler)(int node_num, void *recovery_data);
+	void *cc_recovery_data;
+	void *cc_lockspace;
+	void *cc_private;
+};
+
+/*
+ * Each cluster stack implements the stack operations structure.  It is not
+ * used directly by the ocfs2 code; instead, the stackglue code translates
+ * generic cluster calls into stack operations.
+ */
+struct ocfs2_stack_operations {
+	/*
+	 * The fs code calls ocfs2_cluster_connect() to attach a new
+	 * filesystem to the cluster stack.  The ->connect() op is passed
+	 * an ocfs2_cluster_connection with the name and recovery field
+	 * filled in.
+	 *
+	 * The stack must set up any notification mechanisms and create
+	 * the filesystem lockspace in the DLM.  The lockspace should be
+	 * stored on cc_lockspace.  Any other information can be stored on
+	 * cc_private.
+	 *
+	 * ->connect() must not return until it is guaranteed that
+	 *
+	 *  - Node down notifications for the filesystem will be received
+	 *    and passed to conn->cc_recovery_handler().
+	 *  - Locking requests for the filesystem will be processed.
+	 */
+	int (*connect)(struct ocfs2_cluster_connection *conn);
+
+	/*
+	 * The fs code calls ocfs2_cluster_disconnect() when a filesystem
+	 * no longer needs cluster services.  All DLM locks have been
+	 * dropped, and recovery notification is being ignored by the
+	 * fs code.  The stack must disengage from the DLM and discontinue
+	 * recovery notification.
+	 *
+	 * Once ->disconnect() has returned, the connection structure will
+	 * be freed.  Thus, a stack must not return from ->disconnect()
+	 * until it will no longer reference the conn pointer.
+	 *
+	 * If hangup_pending is zero, ocfs2_cluster_disconnect() will also
+	 * be dropping the reference on the module.
+	 */
+	int (*disconnect)(struct ocfs2_cluster_connection *conn,
+			  int hangup_pending);
+
+	/*
+	 * ocfs2_cluster_hangup() exists for compatibility with older
+	 * ocfs2 tools.  Only the classic stack really needs it.  As such,
+	 * ->hangup() is not required of all stacks.  See the comment by
+	 * ocfs2_cluster_hangup() for more details.
+	 *
+	 * Note that ocfs2_cluster_hangup() can only be called if
+	 * hangup_pending was passed to ocfs2_cluster_disconnect().
+	 */
+	void (*hangup)(const char *group, int grouplen);
+
+	/*
+	 * ->this_node() returns the cluster's unique identifier for the
+	 * local node.
+	 */
+	int (*this_node)(unsigned int *node);
+
+	/*
+	 * Call the underlying dlm lock function.  The ->dlm_lock()
+	 * callback should convert the flags and mode as appropriate.
+	 *
+	 * ast and bast functions are not part of the call because the
+	 * stack will likely want to wrap ast and bast calls before passing
+	 * them to stack->sp_proto.
+	 */
+	int (*dlm_lock)(struct ocfs2_cluster_connection *conn,
+			int mode,
+			union ocfs2_dlm_lksb *lksb,
+			u32 flags,
+			void *name,
+			unsigned int namelen,
+			void *astarg);
+
+	/*
+	 * Call the underlying dlm unlock function.  The ->dlm_unlock()
+	 * function should convert the flags as appropriate.
+	 *
+	 * The unlock ast is not passed, as the stack will want to wrap
+	 * it before calling stack->sp_proto->lp_unlock_ast().
+	 */
+	int (*dlm_unlock)(struct ocfs2_cluster_connection *conn,
+			  union ocfs2_dlm_lksb *lksb,
+			  u32 flags,
+			  void *astarg);
+
+	/*
+	 * Return the status of the current lock status block.  The fs
+	 * code should never dereference the union.  The ->lock_status()
+	 * callback pulls out the stack-specific lksb, converts the status
+	 * to a proper errno, and returns it.
+	 */
+	int (*lock_status)(union ocfs2_dlm_lksb *lksb);
+
+	/*
+	 * Pull the lvb pointer off of the stack-specific lksb.
+	 */
+	void *(*lock_lvb)(union ocfs2_dlm_lksb *lksb);
+
+	/*
+	 * This is an optional debugging hook.  If provided, the
+	 * stack can dump debugging information about this lock.
+	 */
+	void (*dump_lksb)(union ocfs2_dlm_lksb *lksb);
+};
+
+/*
+ * Each stack plugin must describe itself by registering an
+ * ocfs2_stack_plugin structure.  This is only seen by stackglue and the
+ * stack driver.
+ */
+struct ocfs2_stack_plugin {
+	char *sp_name;
+	struct ocfs2_stack_operations *sp_ops;
+	struct module *sp_owner;
+
+	/* These are managed by the stackglue code. */
+	struct list_head sp_list;
+	unsigned int sp_count;
+	struct ocfs2_locking_protocol *sp_proto;
+};
+
+
+/* Used by the filesystem */
+int ocfs2_cluster_connect(const char *stack_name,
+			  const char *group,
+			  int grouplen,
+			  void (*recovery_handler)(int node_num,
+						   void *recovery_data),
+			  void *recovery_data,
+			  struct ocfs2_cluster_connection **conn);
+int ocfs2_cluster_disconnect(struct ocfs2_cluster_connection *conn,
+			     int hangup_pending);
+void ocfs2_cluster_hangup(const char *group, int grouplen);
+int ocfs2_cluster_this_node(unsigned int *node);
+
+struct ocfs2_lock_res;
+int ocfs2_dlm_lock(struct ocfs2_cluster_connection *conn,
+		   int mode,
+		   union ocfs2_dlm_lksb *lksb,
+		   u32 flags,
+		   void *name,
+		   unsigned int namelen,
+		   struct ocfs2_lock_res *astarg);
+int ocfs2_dlm_unlock(struct ocfs2_cluster_connection *conn,
+		     union ocfs2_dlm_lksb *lksb,
+		     u32 flags,
+		     struct ocfs2_lock_res *astarg);
+
+int ocfs2_dlm_lock_status(union ocfs2_dlm_lksb *lksb);
+void *ocfs2_dlm_lvb(union ocfs2_dlm_lksb *lksb);
+void ocfs2_dlm_dump_lksb(union ocfs2_dlm_lksb *lksb);
+
+void ocfs2_stack_glue_set_locking_protocol(struct ocfs2_locking_protocol *proto);
+
+
+/* Used by stack plugins */
+int ocfs2_stack_glue_register(struct ocfs2_stack_plugin *plugin);
+void ocfs2_stack_glue_unregister(struct ocfs2_stack_plugin *plugin);
+#endif  /* STACKGLUE_H */
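
For the plugin side of this header, a hedged sketch of a hypothetical stack
driver registering with the glue; every mystack_* symbol and the "myst"
label are invented, and the stubs only mark where a real driver would talk
to its DLM:

	#include <linux/module.h>
	#include "stackglue.h"

	static int mystack_connect(struct ocfs2_cluster_connection *conn)
	{
		conn->cc_lockspace = NULL;	/* would create the lockspace */
		return 0;
	}

	static int mystack_disconnect(struct ocfs2_cluster_connection *conn,
				      int hangup_pending)
	{
		return 0;			/* would tear it down */
	}

	static int mystack_this_node(unsigned int *node)
	{
		*node = 0;			/* would query the stack */
		return 0;
	}

	static int mystack_dlm_lock(struct ocfs2_cluster_connection *conn,
				    int mode, union ocfs2_dlm_lksb *lksb,
				    u32 flags, void *name,
				    unsigned int namelen, void *astarg)
	{
		return -EOPNOTSUPP;		/* would call the real DLM */
	}

	static int mystack_dlm_unlock(struct ocfs2_cluster_connection *conn,
				      union ocfs2_dlm_lksb *lksb, u32 flags,
				      void *astarg)
	{
		return -EOPNOTSUPP;
	}

	static int mystack_lock_status(union ocfs2_dlm_lksb *lksb)
	{
		return -EOPNOTSUPP;
	}

	static void *mystack_lock_lvb(union ocfs2_dlm_lksb *lksb)
	{
		return NULL;
	}

	static struct ocfs2_stack_operations mystack_ops = {
		.connect	= mystack_connect,
		.disconnect	= mystack_disconnect,
		.this_node	= mystack_this_node,
		.dlm_lock	= mystack_dlm_lock,
		.dlm_unlock	= mystack_dlm_unlock,
		.lock_status	= mystack_lock_status,
		.lock_lvb	= mystack_lock_lvb,
		/* ->hangup and ->dump_lksb are the optional hooks */
	};

	static struct ocfs2_stack_plugin mystack_plugin = {
		.sp_name	= "myst",	/* a 4-char stack label */
		.sp_ops		= &mystack_ops,
		.sp_owner	= THIS_MODULE,
	};

	static int __init mystack_init(void)
	{
		return ocfs2_stack_glue_register(&mystack_plugin);
	}

	static void __exit mystack_exit(void)
	{
		ocfs2_stack_glue_unregister(&mystack_plugin);
	}

	module_init(mystack_init);
	module_exit(mystack_exit);
	MODULE_LICENSE("GPL");
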
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index 72c198a..d2d278f 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -46,6 +46,11 @@
 
 #include "buffer_head_io.h"
 
+#define NOT_ALLOC_NEW_GROUP		0
+#define ALLOC_NEW_GROUP			1
+
+#define OCFS2_MAX_INODES_TO_STEAL	1024
+
 static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg);
 static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe);
 static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl);
@@ -106,7 +111,7 @@
 						u64 *bg_blkno,
 						u16 *bg_bit_off);
 
-void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac)
+static void ocfs2_free_ac_resource(struct ocfs2_alloc_context *ac)
 {
 	struct inode *inode = ac->ac_inode;
 
@@ -117,9 +122,17 @@
 		mutex_unlock(&inode->i_mutex);
 
 		iput(inode);
+		ac->ac_inode = NULL;
 	}
-	if (ac->ac_bh)
+	if (ac->ac_bh) {
 		brelse(ac->ac_bh);
+		ac->ac_bh = NULL;
+	}
+}
+
+void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac)
+{
+	ocfs2_free_ac_resource(ac);
 	kfree(ac);
 }
 
@@ -391,7 +404,8 @@
 static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
 				       struct ocfs2_alloc_context *ac,
 				       int type,
-				       u32 slot)
+				       u32 slot,
+				       int alloc_new_group)
 {
 	int status;
 	u32 bits_wanted = ac->ac_bits_wanted;
@@ -420,6 +434,7 @@
 	}
 
 	ac->ac_inode = alloc_inode;
+	ac->ac_alloc_slot = slot;
 
 	fe = (struct ocfs2_dinode *) bh->b_data;
 	if (!OCFS2_IS_VALID_DINODE(fe)) {
@@ -446,6 +461,14 @@
 			goto bail;
 		}
 
+		if (alloc_new_group != ALLOC_NEW_GROUP) {
+			mlog(0, "Alloc File %u Full: wanted=%u, free_bits=%u, "
+			     "and we don't alloc a new group for it.\n",
+			     slot, bits_wanted, free_bits);
+			status = -ENOSPC;
+			goto bail;
+		}
+
 		status = ocfs2_block_group_alloc(osb, alloc_inode, bh);
 		if (status < 0) {
 			if (status != -ENOSPC)
@@ -490,7 +513,8 @@
 	(*ac)->ac_group_search = ocfs2_block_group_search;
 
 	status = ocfs2_reserve_suballoc_bits(osb, (*ac),
-					     EXTENT_ALLOC_SYSTEM_INODE, slot);
+					     EXTENT_ALLOC_SYSTEM_INODE,
+					     slot, ALLOC_NEW_GROUP);
 	if (status < 0) {
 		if (status != -ENOSPC)
 			mlog_errno(status);
@@ -508,10 +532,42 @@
 	return status;
 }
 
+static int ocfs2_steal_inode_from_other_nodes(struct ocfs2_super *osb,
+					      struct ocfs2_alloc_context *ac)
+{
+	int i, status = -ENOSPC;
+	s16 slot = ocfs2_get_inode_steal_slot(osb);
+
+	/* Start to steal inodes from the first slot after ours. */
+	if (slot == OCFS2_INVALID_SLOT)
+		slot = osb->slot_num + 1;
+
+	for (i = 0; i < osb->max_slots; i++, slot++) {
+		if (slot == osb->max_slots)
+			slot = 0;
+
+		if (slot == osb->slot_num)
+			continue;
+
+		status = ocfs2_reserve_suballoc_bits(osb, ac,
+						     INODE_ALLOC_SYSTEM_INODE,
+						     slot, NOT_ALLOC_NEW_GROUP);
+		if (status >= 0) {
+			ocfs2_set_inode_steal_slot(osb, slot);
+			break;
+		}
+
+		ocfs2_free_ac_resource(ac);
+	}
+
+	return status;
+}
+
 int ocfs2_reserve_new_inode(struct ocfs2_super *osb,
 			    struct ocfs2_alloc_context **ac)
 {
 	int status;
+	s16 slot = ocfs2_get_inode_steal_slot(osb);
 
 	*ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
 	if (!(*ac)) {
@@ -525,9 +581,43 @@
 
 	(*ac)->ac_group_search = ocfs2_block_group_search;
 
+	/*
+	 * slot is set when we successfully steal an inode from other nodes.
+	 * It is reset in 3 places:
+	 * 1. when we flush the truncate log
+	 * 2. when we complete local alloc recovery.
+	 * 3. when we successfully allocate from our own slot.
+	 * After it is set, we keep stealing inodes until we have stolen
+	 * OCFS2_MAX_INODES_TO_STEAL of them, and then check our own slot
+	 * again to see whether it has space for us.
+	 */
+	if (slot != OCFS2_INVALID_SLOT &&
+	    atomic_read(&osb->s_num_inodes_stolen) < OCFS2_MAX_INODES_TO_STEAL)
+		goto inode_steal;
+
+	atomic_set(&osb->s_num_inodes_stolen, 0);
 	status = ocfs2_reserve_suballoc_bits(osb, *ac,
 					     INODE_ALLOC_SYSTEM_INODE,
-					     osb->slot_num);
+					     osb->slot_num, ALLOC_NEW_GROUP);
+	if (status >= 0) {
+		status = 0;
+
+		/*
+		 * Some inodes must be freed by us, so try to allocate
+		 * from our own next time.
+		 */
+		if (slot != OCFS2_INVALID_SLOT)
+			ocfs2_init_inode_steal_slot(osb);
+		goto bail;
+	} else if (status < 0 && status != -ENOSPC) {
+		mlog_errno(status);
+		goto bail;
+	}
+
+	ocfs2_free_ac_resource(*ac);
+
+inode_steal:
+	status = ocfs2_steal_inode_from_other_nodes(osb, *ac);
+	atomic_inc(&osb->s_num_inodes_stolen);
 	if (status < 0) {
 		if (status != -ENOSPC)
 			mlog_errno(status);
@@ -557,7 +647,8 @@
 
 	status = ocfs2_reserve_suballoc_bits(osb, ac,
 					     GLOBAL_BITMAP_SYSTEM_INODE,
-					     OCFS2_INVALID_SLOT);
+					     OCFS2_INVALID_SLOT,
+					     ALLOC_NEW_GROUP);
 	if (status < 0 && status != -ENOSPC) {
 		mlog_errno(status);
 		goto bail;
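
To make the round-robin walk in ocfs2_steal_inode_from_other_nodes()
concrete: with max_slots = 4 and our slot_num = 1, a fresh walk starts at
slot 2 and probes slots 2, 3, 0, always skipping our own.  A standalone
userspace sketch of just the probe order:

	#include <stdio.h>

	int main(void)
	{
		int max_slots = 4, slot_num = 1;	/* example values */
		int i, slot = slot_num + 1;	/* no previously saved slot */

		for (i = 0; i < max_slots; i++, slot++) {
			if (slot == max_slots)
				slot = 0;
			if (slot == slot_num)
				continue;
			printf("probe slot %d\n", slot);	/* 2, 3, 0 */
		}
		return 0;
	}
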
diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h
index 8799033..544c600 100644
--- a/fs/ocfs2/suballoc.h
+++ b/fs/ocfs2/suballoc.h
@@ -36,6 +36,7 @@
 struct ocfs2_alloc_context {
 	struct inode *ac_inode;    /* which bitmap are we allocating from? */
 	struct buffer_head *ac_bh; /* file entry bh */
+	u32    ac_alloc_slot;   /* which slot are we allocating from? */
 	u32    ac_bits_wanted;
 	u32    ac_bits_given;
 #define OCFS2_AC_USE_LOCAL 1
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index bec75af..df63ba2 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -40,8 +40,7 @@
 #include <linux/crc32.h>
 #include <linux/debugfs.h>
 #include <linux/mount.h>
-
-#include <cluster/nodemanager.h>
+#include <linux/seq_file.h>
 
 #define MLOG_MASK_PREFIX ML_SUPER
 #include <cluster/masklog.h>
@@ -88,6 +87,7 @@
 	unsigned int	atime_quantum;
 	signed short	slot;
 	unsigned int	localalloc_opt;
+	char		cluster_stack[OCFS2_STACK_LABEL_LEN + 1];
 };
 
 static int ocfs2_parse_options(struct super_block *sb, char *options,
@@ -109,7 +109,6 @@
 static int ocfs2_init_global_system_inodes(struct ocfs2_super *osb);
 static int ocfs2_init_local_system_inodes(struct ocfs2_super *osb);
 static void ocfs2_release_system_inodes(struct ocfs2_super *osb);
-static int ocfs2_fill_local_node_info(struct ocfs2_super *osb);
 static int ocfs2_check_volume(struct ocfs2_super *osb);
 static int ocfs2_verify_volume(struct ocfs2_dinode *di,
 			       struct buffer_head *bh,
@@ -154,6 +153,7 @@
 	Opt_commit,
 	Opt_localalloc,
 	Opt_localflocks,
+	Opt_stack,
 	Opt_err,
 };
 
@@ -172,6 +172,7 @@
 	{Opt_commit, "commit=%u"},
 	{Opt_localalloc, "localalloc=%d"},
 	{Opt_localflocks, "localflocks"},
+	{Opt_stack, "cluster_stack=%s"},
 	{Opt_err, NULL}
 };
 
@@ -551,8 +552,17 @@
 		}
 	}
 
+	if (ocfs2_userspace_stack(osb)) {
+		if (osb->s_mount_opt & OCFS2_MOUNT_HB_LOCAL) {
+			mlog(ML_ERROR, "Userspace stack expected, but "
+			     "o2cb heartbeat arguments passed to mount\n");
+			return -EINVAL;
+		}
+	}
+
 	if (!(osb->s_mount_opt & OCFS2_MOUNT_HB_LOCAL)) {
-		if (!ocfs2_mount_local(osb) && !ocfs2_is_hard_readonly(osb)) {
+		if (!ocfs2_mount_local(osb) && !ocfs2_is_hard_readonly(osb) &&
+		    !ocfs2_userspace_stack(osb)) {
 			mlog(ML_ERROR, "Heartbeat has to be started to mount "
 			     "a read-write clustered device.\n");
 			return -EINVAL;
@@ -562,6 +572,35 @@
 	return 0;
 }
 
+/*
+ * If we're using a userspace stack, mount should have passed
+ * a name that matches the disk.  If not, mount should not
+ * have passed a stack.
+ */
+static int ocfs2_verify_userspace_stack(struct ocfs2_super *osb,
+					struct mount_options *mopt)
+{
+	if (!ocfs2_userspace_stack(osb) && mopt->cluster_stack[0]) {
+		mlog(ML_ERROR,
+		     "cluster stack passed to mount, but this filesystem "
+		     "does not support it\n");
+		return -EINVAL;
+	}
+
+	if (ocfs2_userspace_stack(osb) &&
+	    strncmp(osb->osb_cluster_stack, mopt->cluster_stack,
+		    OCFS2_STACK_LABEL_LEN)) {
+		mlog(ML_ERROR,
+		     "cluster stack passed to mount (\"%s\") does not "
+		     "match the filesystem (\"%s\")\n",
+		     mopt->cluster_stack,
+		     osb->osb_cluster_stack);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
 {
 	struct dentry *root;
@@ -579,15 +618,6 @@
 		goto read_super_error;
 	}
 
-	/* for now we only have one cluster/node, make sure we see it
-	 * in the heartbeat universe */
-	if (parsed_options.mount_opt & OCFS2_MOUNT_HB_LOCAL) {
-		if (!o2hb_check_local_node_heartbeating()) {
-			status = -EINVAL;
-			goto read_super_error;
-		}
-	}
-
 	/* probe for superblock */
 	status = ocfs2_sb_probe(sb, &bh, &sector_size);
 	if (status < 0) {
@@ -609,6 +639,10 @@
 	osb->osb_commit_interval = parsed_options.commit_interval;
 	osb->local_alloc_size = parsed_options.localalloc_opt;
 
+	status = ocfs2_verify_userspace_stack(osb, &parsed_options);
+	if (status)
+		goto read_super_error;
+
 	sb->s_magic = OCFS2_SUPER_MAGIC;
 
 	/* Hard readonly mode only if: bdev_read_only, MS_RDONLY,
@@ -694,7 +728,7 @@
 	if (ocfs2_mount_local(osb))
 		snprintf(nodestr, sizeof(nodestr), "local");
 	else
-		snprintf(nodestr, sizeof(nodestr), "%d", osb->node_num);
+		snprintf(nodestr, sizeof(nodestr), "%u", osb->node_num);
 
 	printk(KERN_INFO "ocfs2: Mounting device (%s) on (node %s, slot %d) "
 	       "with %s data mode.\n",
@@ -763,6 +797,7 @@
 	mopt->atime_quantum = OCFS2_DEFAULT_ATIME_QUANTUM;
 	mopt->slot = OCFS2_INVALID_SLOT;
 	mopt->localalloc_opt = OCFS2_DEFAULT_LOCAL_ALLOC_SIZE;
+	mopt->cluster_stack[0] = '\0';
 
 	if (!options) {
 		status = 1;
@@ -864,6 +899,25 @@
 			if (!is_remount)
 				mopt->mount_opt |= OCFS2_MOUNT_LOCALFLOCKS;
 			break;
+		case Opt_stack:
+			/* Check both that the option we were passed
+			 * is of the right length and that it is a
+			 * NUL-free string of that length.
+			 */
+			if (((args[0].to - args[0].from) !=
+			     OCFS2_STACK_LABEL_LEN) ||
+			    (strnlen(args[0].from,
+				     OCFS2_STACK_LABEL_LEN) !=
+			     OCFS2_STACK_LABEL_LEN)) {
+				mlog(ML_ERROR,
+				     "Invalid cluster_stack option\n");
+				status = 0;
+				goto bail;
+			}
+			memcpy(mopt->cluster_stack, args[0].from,
+			       OCFS2_STACK_LABEL_LEN);
+			mopt->cluster_stack[OCFS2_STACK_LABEL_LEN] = '\0';
+			break;
 		default:
 			mlog(ML_ERROR,
 			     "Unrecognized mount option \"%s\" "
@@ -922,6 +976,10 @@
 	if (opts & OCFS2_MOUNT_LOCALFLOCKS)
 		seq_printf(s, ",localflocks,");
 
+	if (osb->osb_cluster_stack[0])
+		seq_printf(s, ",cluster_stack=%.*s", OCFS2_STACK_LABEL_LEN,
+			   osb->osb_cluster_stack);
+
 	return 0;
 }
 
@@ -957,6 +1015,8 @@
 		mlog(ML_ERROR, "Unable to create ocfs2 debugfs root.\n");
 	}
 
+	ocfs2_set_locking_protocol();
+
 leave:
 	if (status < 0) {
 		ocfs2_free_mem_caches();
@@ -1132,31 +1192,6 @@
 	return 0;
 }
 
-/* ocfs2 1.0 only allows one cluster and node identity per kernel image. */
-static int ocfs2_fill_local_node_info(struct ocfs2_super *osb)
-{
-	int status;
-
-	/* XXX hold a ref on the node while mounte?  easy enough, if
-	 * desirable. */
-	if (ocfs2_mount_local(osb))
-		osb->node_num = 0;
-	else
-		osb->node_num = o2nm_this_node();
-
-	if (osb->node_num == O2NM_MAX_NODES) {
-		mlog(ML_ERROR, "could not find this host's node number\n");
-		status = -ENOENT;
-		goto bail;
-	}
-
-	mlog(0, "I am node %d\n", osb->node_num);
-
-	status = 0;
-bail:
-	return status;
-}
-
 static int ocfs2_mount_volume(struct super_block *sb)
 {
 	int status = 0;
@@ -1168,12 +1203,6 @@
 	if (ocfs2_is_hard_readonly(osb))
 		goto leave;
 
-	status = ocfs2_fill_local_node_info(osb);
-	if (status < 0) {
-		mlog_errno(status);
-		goto leave;
-	}
-
 	status = ocfs2_dlm_init(osb);
 	if (status < 0) {
 		mlog_errno(status);
@@ -1224,18 +1253,9 @@
 	return status;
 }
 
-/* we can't grab the goofy sem lock from inside wait_event, so we use
- * memory barriers to make sure that we'll see the null task before
- * being woken up */
-static int ocfs2_recovery_thread_running(struct ocfs2_super *osb)
-{
-	mb();
-	return osb->recovery_thread_task != NULL;
-}
-
 static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
 {
-	int tmp;
+	int tmp, hangup_needed = 0;
 	struct ocfs2_super *osb = NULL;
 	char nodestr[8];
 
@@ -1249,25 +1269,16 @@
 
 	ocfs2_truncate_log_shutdown(osb);
 
-	/* disable any new recovery threads and wait for any currently
-	 * running ones to exit. Do this before setting the vol_state. */
-	mutex_lock(&osb->recovery_lock);
-	osb->disable_recovery = 1;
-	mutex_unlock(&osb->recovery_lock);
-	wait_event(osb->recovery_event, !ocfs2_recovery_thread_running(osb));
-
-	/* At this point, we know that no more recovery threads can be
-	 * launched, so wait for any recovery completion work to
-	 * complete. */
-	flush_workqueue(ocfs2_wq);
+	/* This will disable recovery and flush any recovery work. */
+	ocfs2_recovery_exit(osb);
 
 	ocfs2_journal_shutdown(osb);
 
 	ocfs2_sync_blockdev(sb);
 
-	/* No dlm means we've failed during mount, so skip all the
-	 * steps which depended on that to complete. */
-	if (osb->dlm) {
+	/* No cluster connection means we've failed during mount, so skip
+	 * all the steps which depended on that to complete. */
+	if (osb->cconn) {
 		tmp = ocfs2_super_lock(osb, 1);
 		if (tmp < 0) {
 			mlog_errno(tmp);
@@ -1278,25 +1289,34 @@
 	if (osb->slot_num != OCFS2_INVALID_SLOT)
 		ocfs2_put_slot(osb);
 
-	if (osb->dlm)
+	if (osb->cconn)
 		ocfs2_super_unlock(osb, 1);
 
 	ocfs2_release_system_inodes(osb);
 
-	if (osb->dlm)
-		ocfs2_dlm_shutdown(osb);
+	/*
+	 * If we're dismounting due to mount error, mount.ocfs2 will clean
+	 * up heartbeat.  If we're a local mount, there is no heartbeat.
+	 * If we failed before we even got a uuid_str, we can't stop
+	 * heartbeat.  Otherwise, do it.
+	 */
+	if (!mnt_err && !ocfs2_mount_local(osb) && osb->uuid_str)
+		hangup_needed = 1;
+
+	if (osb->cconn)
+		ocfs2_dlm_shutdown(osb, hangup_needed);
 
 	debugfs_remove(osb->osb_debug_root);
 
-	if (!mnt_err)
-		ocfs2_stop_heartbeat(osb);
+	if (hangup_needed)
+		ocfs2_cluster_hangup(osb->uuid_str, strlen(osb->uuid_str));
 
 	atomic_set(&osb->vol_state, VOLUME_DISMOUNTED);
 
 	if (ocfs2_mount_local(osb))
 		snprintf(nodestr, sizeof(nodestr), "local");
 	else
-		snprintf(nodestr, sizeof(nodestr), "%d", osb->node_num);
+		snprintf(nodestr, sizeof(nodestr), "%u", osb->node_num);
 
 	printk(KERN_INFO "ocfs2: Unmounting device (%s) on (node %s)\n",
 	       osb->dev_str, nodestr);
@@ -1355,7 +1375,6 @@
 	sb->s_fs_info = osb;
 	sb->s_op = &ocfs2_sops;
 	sb->s_export_op = &ocfs2_export_ops;
-	osb->osb_locking_proto = ocfs2_locking_protocol;
 	sb->s_time_gran = 1;
 	sb->s_flags |= MS_NOATIME;
 	/* this is needed to support O_LARGEFILE */
@@ -1368,7 +1387,6 @@
 	osb->s_sectsize_bits = blksize_bits(sector_size);
 	BUG_ON(!osb->s_sectsize_bits);
 
-	init_waitqueue_head(&osb->recovery_event);
 	spin_lock_init(&osb->dc_task_lock);
 	init_waitqueue_head(&osb->dc_event);
 	osb->dc_work_sequence = 0;
@@ -1376,6 +1394,7 @@
 	INIT_LIST_HEAD(&osb->blocked_lock_list);
 	osb->blocked_lock_count = 0;
 	spin_lock_init(&osb->osb_lock);
+	ocfs2_init_inode_steal_slot(osb);
 
 	atomic_set(&osb->alloc_stats.moves, 0);
 	atomic_set(&osb->alloc_stats.local_data, 0);
@@ -1388,24 +1407,23 @@
 	snprintf(osb->dev_str, sizeof(osb->dev_str), "%u,%u",
 		 MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev));
 
-	mutex_init(&osb->recovery_lock);
-
-	osb->disable_recovery = 0;
-	osb->recovery_thread_task = NULL;
+	status = ocfs2_recovery_init(osb);
+	if (status) {
+		mlog(ML_ERROR, "Unable to initialize recovery state\n");
+		mlog_errno(status);
+		goto bail;
+	}
 
 	init_waitqueue_head(&osb->checkpoint_event);
 	atomic_set(&osb->needs_checkpoint, 0);
 
 	osb->s_atime_quantum = OCFS2_DEFAULT_ATIME_QUANTUM;
 
-	osb->node_num = O2NM_INVALID_NODE_NUM;
 	osb->slot_num = OCFS2_INVALID_SLOT;
 
 	osb->local_alloc_state = OCFS2_LA_UNUSED;
 	osb->local_alloc_bh = NULL;
 
-	ocfs2_setup_hb_callbacks(osb);
-
 	init_waitqueue_head(&osb->osb_mount_event);
 
 	osb->vol_label = kmalloc(OCFS2_MAX_VOL_LABEL_LEN, GFP_KERNEL);
@@ -1455,6 +1473,25 @@
 		goto bail;
 	}
 
+	if (ocfs2_userspace_stack(osb)) {
+		memcpy(osb->osb_cluster_stack,
+		       OCFS2_RAW_SB(di)->s_cluster_info.ci_stack,
+		       OCFS2_STACK_LABEL_LEN);
+		osb->osb_cluster_stack[OCFS2_STACK_LABEL_LEN] = '\0';
+		if (strlen(osb->osb_cluster_stack) != OCFS2_STACK_LABEL_LEN) {
+			mlog(ML_ERROR,
+			     "couldn't mount because of an invalid "
+			     "cluster stack label (%s) \n",
+			     osb->osb_cluster_stack);
+			status = -EINVAL;
+			goto bail;
+		}
+	} else {
+		/* The empty string matches what classic tools that don't
+		 * know about s_cluster_info leave on disk. */
+		osb->osb_cluster_stack[0] = '\0';
+	}
+
 	get_random_bytes(&osb->s_next_generation, sizeof(u32));
 
 	/* FIXME
@@ -1724,8 +1761,7 @@
 
 	/* This function assumes that the caller has the main osb resource */
 
-	if (osb->slot_info)
-		ocfs2_free_slot_info(osb->slot_info);
+	ocfs2_free_slot_info(osb);
 
 	kfree(osb->osb_orphan_wipes);
 	/* FIXME
diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
index 5f66c44..817f596 100644
--- a/fs/sysfs/symlink.c
+++ b/fs/sysfs/symlink.c
@@ -87,7 +87,14 @@
 
 void sysfs_remove_link(struct kobject * kobj, const char * name)
 {
-	sysfs_hash_and_remove(kobj->sd, name);
+	struct sysfs_dirent *parent_sd = NULL;
+
+	if (!kobj)
+		parent_sd = &sysfs_root;
+	else
+		parent_sd = kobj->sd;
+
+	sysfs_hash_and_remove(parent_sd, name);
 }
 
 static int sysfs_get_target_path(struct sysfs_dirent *parent_sd,
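
The NULL-kobject case lets callers remove symlinks that live directly under
/sys, such as the legacy /sys/o2cb compatibility link.  A hedged sketch (the
wrapper name is invented):

	#include <linux/sysfs.h>

	static void drop_o2cb_compat_link(void)
	{
		/* a NULL kobj now means "parent is the sysfs root" */
		sysfs_remove_link(NULL, "o2cb");
	}
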
diff --git a/fs/xfs/Kconfig b/fs/xfs/Kconfig
index 35115bc..524021f 100644
--- a/fs/xfs/Kconfig
+++ b/fs/xfs/Kconfig
@@ -35,18 +35,6 @@
 	  with or without the generic quota support enabled (CONFIG_QUOTA) -
 	  they are completely independent subsystems.
 
-config XFS_SECURITY
-	bool "XFS Security Label support"
-	depends on XFS_FS
-	help
-	  Security labels support alternative access control models
-	  implemented by security modules like SELinux.  This option
-	  enables an extended attribute namespace for inode security
-	  labels in the XFS filesystem.
-
-	  If you are not using a security module that requires using
-	  extended attributes for inode security labels, say N.
-
 config XFS_POSIX_ACL
 	bool "XFS POSIX ACL support"
 	depends on XFS_FS
diff --git a/fs/xfs/linux-2.6/kmem.c b/fs/xfs/linux-2.6/kmem.c
index e040f1c..9b1bb17 100644
--- a/fs/xfs/linux-2.6/kmem.c
+++ b/fs/xfs/linux-2.6/kmem.c
@@ -37,7 +37,7 @@
 #ifdef DEBUG
 	if (unlikely(!(flags & KM_LARGE) && (size > PAGE_SIZE))) {
 		printk(KERN_WARNING "Large %s attempt, size=%ld\n",
-			__FUNCTION__, (long)size);
+			__func__, (long)size);
 		dump_stack();
 	}
 #endif
@@ -52,7 +52,7 @@
 		if (!(++retries % 100))
 			printk(KERN_ERR "XFS: possible memory allocation "
 					"deadlock in %s (mode:0x%x)\n",
-					__FUNCTION__, lflags);
+					__func__, lflags);
 		congestion_wait(WRITE, HZ/50);
 	} while (1);
 }
@@ -129,7 +129,7 @@
 		if (!(++retries % 100))
 			printk(KERN_ERR "XFS: possible memory allocation "
 					"deadlock in %s (mode:0x%x)\n",
-					__FUNCTION__, lflags);
+					__func__, lflags);
 		congestion_wait(WRITE, HZ/50);
 	} while (1);
 }
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index e051952..a55c3b2 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -243,8 +243,12 @@
 	size_t			size = ioend->io_size;
 
 	if (likely(!ioend->io_error)) {
-		if (!XFS_FORCED_SHUTDOWN(ip->i_mount))
-			xfs_iomap_write_unwritten(ip, offset, size);
+		if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+			int error;
+			error = xfs_iomap_write_unwritten(ip, offset, size);
+			if (error)
+				ioend->io_error = error;
+		}
 		xfs_setfilesize(ioend);
 	}
 	xfs_destroy_ioend(ioend);
@@ -1532,9 +1536,9 @@
 	struct xfs_inode	*ip = XFS_I(inode);
 
 	xfs_itrace_entry(XFS_I(inode));
-	xfs_rwlock(ip, VRWLOCK_READ);
+	xfs_ilock(ip, XFS_IOLOCK_SHARED);
 	xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF);
-	xfs_rwunlock(ip, VRWLOCK_READ);
+	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 	return generic_block_bmap(mapping, block, xfs_get_blocks);
 }
 
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index e347bfd..52f6846 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -400,7 +400,7 @@
 				printk(KERN_ERR
 					"XFS: possible memory allocation "
 					"deadlock in %s (mode:0x%x)\n",
-					__FUNCTION__, gfp_mask);
+					__func__, gfp_mask);
 
 			XFS_STATS_INC(xb_page_retries);
 			xfsbufd_wakeup(0, gfp_mask);
@@ -598,7 +598,7 @@
 		error = _xfs_buf_map_pages(bp, flags);
 		if (unlikely(error)) {
 			printk(KERN_WARNING "%s: failed to map pages\n",
-					__FUNCTION__);
+					__func__);
 			goto no_buffer;
 		}
 	}
@@ -778,7 +778,7 @@
 	error = _xfs_buf_map_pages(bp, XBF_MAPPED);
 	if (unlikely(error)) {
 		printk(KERN_WARNING "%s: failed to map pages\n",
-				__FUNCTION__);
+				__func__);
 		goto fail_free_mem;
 	}
 
@@ -1060,7 +1060,7 @@
 		bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC);
 		bp->b_flags |= flags & (XBF_DELWRI | XBF_ASYNC);
 		xfs_buf_delwri_queue(bp, 1);
-		return status;
+		return 0;
 	}
 
 	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index a3d207d..841d788 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -387,11 +387,15 @@
 	return error;
 }
 
-static inline int xfs_bdwrite(void *mp, xfs_buf_t *bp)
+/*
+ * No error can be returned from xfs_buf_iostart for delwri
+ * buffers as they are queued and no I/O is issued.
+ */
+static inline void xfs_bdwrite(void *mp, xfs_buf_t *bp)
 {
 	bp->b_strat = xfs_bdstrat_cb;
 	bp->b_fspriv3 = mp;
-	return xfs_buf_iostart(bp, XBF_DELWRI | XBF_ASYNC);
+	(void)xfs_buf_iostart(bp, XBF_DELWRI | XBF_ASYNC);
 }
 
 #define XFS_bdstrat(bp) xfs_buf_iorequest(bp)
diff --git a/fs/xfs/linux-2.6/xfs_cred.h b/fs/xfs/linux-2.6/xfs_cred.h
index e7f3da6..652721c 100644
--- a/fs/xfs/linux-2.6/xfs_cred.h
+++ b/fs/xfs/linux-2.6/xfs_cred.h
@@ -30,7 +30,7 @@
 extern struct cred *sys_cred;
 
 /* this is a hack.. (assumes sys_cred is the only cred_t in the system) */
-static __inline int capable_cred(cred_t *cr, int cid)
+static inline int capable_cred(cred_t *cr, int cid)
 {
 	return (cr == sys_cred) ? 1 : capable(cid);
 }
diff --git a/fs/xfs/linux-2.6/xfs_export.c b/fs/xfs/linux-2.6/xfs_export.c
index ca4f66c..265f016 100644
--- a/fs/xfs/linux-2.6/xfs_export.c
+++ b/fs/xfs/linux-2.6/xfs_export.c
@@ -22,6 +22,7 @@
 #include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
+#include "xfs_dir2.h"
 #include "xfs_dmapi.h"
 #include "xfs_mount.h"
 #include "xfs_export.h"
@@ -30,8 +31,6 @@
 #include "xfs_inode.h"
 #include "xfs_vfsops.h"
 
-static struct dentry dotdot = { .d_name.name = "..", .d_name.len = 2, };
-
 /*
  * Note that we only accept fileids which are long enough rather than allow
  * the parent generation number to default to zero.  XFS considers zero a
@@ -66,7 +65,7 @@
 	int			len;
 
 	/* Directories don't need their parent encoded, they have ".." */
-	if (S_ISDIR(inode->i_mode))
+	if (S_ISDIR(inode->i_mode) || !connectable)
 		fileid_type = FILEID_INO32_GEN;
 	else
 		fileid_type = FILEID_INO32_GEN_PARENT;
@@ -213,17 +212,16 @@
 	struct dentry		*child)
 {
 	int			error;
-	bhv_vnode_t		*cvp;
+	struct xfs_inode	*cip;
 	struct dentry		*parent;
 
-	cvp = NULL;
-	error = xfs_lookup(XFS_I(child->d_inode), &dotdot, &cvp);
+	error = xfs_lookup(XFS_I(child->d_inode), &xfs_name_dotdot, &cip);
 	if (unlikely(error))
 		return ERR_PTR(-error);
 
-	parent = d_alloc_anon(vn_to_inode(cvp));
+	parent = d_alloc_anon(cip->i_vnode);
 	if (unlikely(!parent)) {
-		VN_RELE(cvp);
+		iput(cip->i_vnode);
 		return ERR_PTR(-ENOMEM);
 	}
 	return parent;
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index edab1ff..0590524 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -469,16 +469,11 @@
 	struct inode	*inode)
 {
 	struct xfs_mount *mp = XFS_M(inode->i_sb);
+	struct xfs_inode *ip = XFS_I(inode);
 
-	if (unlikely(mp->m_flags & XFS_MOUNT_DMAPI)) {
-		if (DM_EVENT_ENABLED(XFS_I(inode), DM_EVENT_READ)) {
-			bhv_vnode_t *vp = vn_from_inode(inode);
-
-			return -XFS_SEND_DATA(mp, DM_EVENT_READ,
-						vp, 0, 0, 0, NULL);
-		}
-	}
-
+	if (unlikely(mp->m_flags & XFS_MOUNT_DMAPI) &&
+	             DM_EVENT_ENABLED(ip, DM_EVENT_READ))
+		return -XFS_SEND_DATA(mp, DM_EVENT_READ, ip, 0, 0, 0, NULL);
 	return 0;
 }
 #endif /* HAVE_FOP_OPEN_EXEC */
diff --git a/fs/xfs/linux-2.6/xfs_fs_subr.c b/fs/xfs/linux-2.6/xfs_fs_subr.c
index ac6d34c..1eefe61 100644
--- a/fs/xfs/linux-2.6/xfs_fs_subr.c
+++ b/fs/xfs/linux-2.6/xfs_fs_subr.c
@@ -17,18 +17,7 @@
  */
 #include "xfs.h"
 #include "xfs_vnodeops.h"
-
-/*
- * The following six includes are needed so that we can include
- * xfs_inode.h.  What a mess..
- */
 #include "xfs_bmap_btree.h"
-#include "xfs_inum.h"
-#include "xfs_dir2.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dinode.h"
-
 #include "xfs_inode.h"
 
 int  fs_noerr(void) { return 0; }
@@ -42,11 +31,10 @@
 	xfs_off_t	last,
 	int		fiopt)
 {
-	bhv_vnode_t	*vp = XFS_ITOV(ip);
-	struct inode	*inode = vn_to_inode(vp);
+	struct address_space *mapping = ip->i_vnode->i_mapping;
 
-	if (VN_CACHED(vp))
-		truncate_inode_pages(inode->i_mapping, first);
+	if (mapping->nrpages)
+		truncate_inode_pages(mapping, first);
 }
 
 int
@@ -56,15 +44,14 @@
 	xfs_off_t	last,
 	int		fiopt)
 {
-	bhv_vnode_t	*vp = XFS_ITOV(ip);
-	struct inode	*inode = vn_to_inode(vp);
+	struct address_space *mapping = ip->i_vnode->i_mapping;
 	int		ret = 0;
 
-	if (VN_CACHED(vp)) {
+	if (mapping->nrpages) {
 		xfs_iflags_clear(ip, XFS_ITRUNCATED);
-		ret = filemap_write_and_wait(inode->i_mapping);
+		ret = filemap_write_and_wait(mapping);
 		if (!ret)
-			truncate_inode_pages(inode->i_mapping, first);
+			truncate_inode_pages(mapping, first);
 	}
 	return ret;
 }
@@ -77,17 +64,16 @@
 	uint64_t	flags,
 	int		fiopt)
 {
-	bhv_vnode_t	*vp = XFS_ITOV(ip);
-	struct inode	*inode = vn_to_inode(vp);
+	struct address_space *mapping = ip->i_vnode->i_mapping;
 	int		ret = 0;
 	int		ret2;
 
-	if (VN_DIRTY(vp)) {
+	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
 		xfs_iflags_clear(ip, XFS_ITRUNCATED);
-		ret = filemap_fdatawrite(inode->i_mapping);
+		ret = filemap_fdatawrite(mapping);
 		if (flags & XFS_B_ASYNC)
 			return ret;
-		ret2 = filemap_fdatawait(inode->i_mapping);
+		ret2 = filemap_fdatawait(mapping);
 		if (!ret)
 			ret = ret2;
 	}
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index f34bd01..bf77597 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -651,314 +651,6 @@
 	return -error;
 }
 
-/* prototypes for a few of the stack-hungry cases that have
- * their own functions.  Functions are defined after their use
- * so gcc doesn't get fancy and inline them with -03 */
-
-STATIC int
-xfs_ioc_space(
-	struct xfs_inode	*ip,
-	struct inode		*inode,
-	struct file		*filp,
-	int			flags,
-	unsigned int		cmd,
-	void			__user *arg);
-
-STATIC int
-xfs_ioc_bulkstat(
-	xfs_mount_t		*mp,
-	unsigned int		cmd,
-	void			__user *arg);
-
-STATIC int
-xfs_ioc_fsgeometry_v1(
-	xfs_mount_t		*mp,
-	void			__user *arg);
-
-STATIC int
-xfs_ioc_fsgeometry(
-	xfs_mount_t		*mp,
-	void			__user *arg);
-
-STATIC int
-xfs_ioc_xattr(
-	xfs_inode_t		*ip,
-	struct file		*filp,
-	unsigned int		cmd,
-	void			__user *arg);
-
-STATIC int
-xfs_ioc_fsgetxattr(
-	xfs_inode_t		*ip,
-	int			attr,
-	void			__user *arg);
-
-STATIC int
-xfs_ioc_getbmap(
-	struct xfs_inode	*ip,
-	int			flags,
-	unsigned int		cmd,
-	void			__user *arg);
-
-STATIC int
-xfs_ioc_getbmapx(
-	struct xfs_inode	*ip,
-	void			__user *arg);
-
-int
-xfs_ioctl(
-	xfs_inode_t		*ip,
-	struct file		*filp,
-	int			ioflags,
-	unsigned int		cmd,
-	void			__user *arg)
-{
-	struct inode		*inode = filp->f_path.dentry->d_inode;
-	xfs_mount_t		*mp = ip->i_mount;
-	int			error;
-
-	xfs_itrace_entry(XFS_I(inode));
-	switch (cmd) {
-
-	case XFS_IOC_ALLOCSP:
-	case XFS_IOC_FREESP:
-	case XFS_IOC_RESVSP:
-	case XFS_IOC_UNRESVSP:
-	case XFS_IOC_ALLOCSP64:
-	case XFS_IOC_FREESP64:
-	case XFS_IOC_RESVSP64:
-	case XFS_IOC_UNRESVSP64:
-		/*
-		 * Only allow the sys admin to reserve space unless
-		 * unwritten extents are enabled.
-		 */
-		if (!xfs_sb_version_hasextflgbit(&mp->m_sb) &&
-		    !capable(CAP_SYS_ADMIN))
-			return -EPERM;
-
-		return xfs_ioc_space(ip, inode, filp, ioflags, cmd, arg);
-
-	case XFS_IOC_DIOINFO: {
-		struct dioattr	da;
-		xfs_buftarg_t	*target =
-			XFS_IS_REALTIME_INODE(ip) ?
-			mp->m_rtdev_targp : mp->m_ddev_targp;
-
-		da.d_mem = da.d_miniosz = 1 << target->bt_sshift;
-		da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1);
-
-		if (copy_to_user(arg, &da, sizeof(da)))
-			return -XFS_ERROR(EFAULT);
-		return 0;
-	}
-
-	case XFS_IOC_FSBULKSTAT_SINGLE:
-	case XFS_IOC_FSBULKSTAT:
-	case XFS_IOC_FSINUMBERS:
-		return xfs_ioc_bulkstat(mp, cmd, arg);
-
-	case XFS_IOC_FSGEOMETRY_V1:
-		return xfs_ioc_fsgeometry_v1(mp, arg);
-
-	case XFS_IOC_FSGEOMETRY:
-		return xfs_ioc_fsgeometry(mp, arg);
-
-	case XFS_IOC_GETVERSION:
-		return put_user(inode->i_generation, (int __user *)arg);
-
-	case XFS_IOC_FSGETXATTR:
-		return xfs_ioc_fsgetxattr(ip, 0, arg);
-	case XFS_IOC_FSGETXATTRA:
-		return xfs_ioc_fsgetxattr(ip, 1, arg);
-	case XFS_IOC_GETXFLAGS:
-	case XFS_IOC_SETXFLAGS:
-	case XFS_IOC_FSSETXATTR:
-		return xfs_ioc_xattr(ip, filp, cmd, arg);
-
-	case XFS_IOC_FSSETDM: {
-		struct fsdmidata	dmi;
-
-		if (copy_from_user(&dmi, arg, sizeof(dmi)))
-			return -XFS_ERROR(EFAULT);
-
-		error = xfs_set_dmattrs(ip, dmi.fsd_dmevmask,
-				dmi.fsd_dmstate);
-		return -error;
-	}
-
-	case XFS_IOC_GETBMAP:
-	case XFS_IOC_GETBMAPA:
-		return xfs_ioc_getbmap(ip, ioflags, cmd, arg);
-
-	case XFS_IOC_GETBMAPX:
-		return xfs_ioc_getbmapx(ip, arg);
-
-	case XFS_IOC_FD_TO_HANDLE:
-	case XFS_IOC_PATH_TO_HANDLE:
-	case XFS_IOC_PATH_TO_FSHANDLE:
-		return xfs_find_handle(cmd, arg);
-
-	case XFS_IOC_OPEN_BY_HANDLE:
-		return xfs_open_by_handle(mp, arg, filp, inode);
-
-	case XFS_IOC_FSSETDM_BY_HANDLE:
-		return xfs_fssetdm_by_handle(mp, arg, inode);
-
-	case XFS_IOC_READLINK_BY_HANDLE:
-		return xfs_readlink_by_handle(mp, arg, inode);
-
-	case XFS_IOC_ATTRLIST_BY_HANDLE:
-		return xfs_attrlist_by_handle(mp, arg, inode);
-
-	case XFS_IOC_ATTRMULTI_BY_HANDLE:
-		return xfs_attrmulti_by_handle(mp, arg, inode);
-
-	case XFS_IOC_SWAPEXT: {
-		error = xfs_swapext((struct xfs_swapext __user *)arg);
-		return -error;
-	}
-
-	case XFS_IOC_FSCOUNTS: {
-		xfs_fsop_counts_t out;
-
-		error = xfs_fs_counts(mp, &out);
-		if (error)
-			return -error;
-
-		if (copy_to_user(arg, &out, sizeof(out)))
-			return -XFS_ERROR(EFAULT);
-		return 0;
-	}
-
-	case XFS_IOC_SET_RESBLKS: {
-		xfs_fsop_resblks_t inout;
-		__uint64_t	   in;
-
-		if (!capable(CAP_SYS_ADMIN))
-			return -EPERM;
-
-		if (copy_from_user(&inout, arg, sizeof(inout)))
-			return -XFS_ERROR(EFAULT);
-
-		/* input parameter is passed in resblks field of structure */
-		in = inout.resblks;
-		error = xfs_reserve_blocks(mp, &in, &inout);
-		if (error)
-			return -error;
-
-		if (copy_to_user(arg, &inout, sizeof(inout)))
-			return -XFS_ERROR(EFAULT);
-		return 0;
-	}
-
-	case XFS_IOC_GET_RESBLKS: {
-		xfs_fsop_resblks_t out;
-
-		if (!capable(CAP_SYS_ADMIN))
-			return -EPERM;
-
-		error = xfs_reserve_blocks(mp, NULL, &out);
-		if (error)
-			return -error;
-
-		if (copy_to_user(arg, &out, sizeof(out)))
-			return -XFS_ERROR(EFAULT);
-
-		return 0;
-	}
-
-	case XFS_IOC_FSGROWFSDATA: {
-		xfs_growfs_data_t in;
-
-		if (!capable(CAP_SYS_ADMIN))
-			return -EPERM;
-
-		if (copy_from_user(&in, arg, sizeof(in)))
-			return -XFS_ERROR(EFAULT);
-
-		error = xfs_growfs_data(mp, &in);
-		return -error;
-	}
-
-	case XFS_IOC_FSGROWFSLOG: {
-		xfs_growfs_log_t in;
-
-		if (!capable(CAP_SYS_ADMIN))
-			return -EPERM;
-
-		if (copy_from_user(&in, arg, sizeof(in)))
-			return -XFS_ERROR(EFAULT);
-
-		error = xfs_growfs_log(mp, &in);
-		return -error;
-	}
-
-	case XFS_IOC_FSGROWFSRT: {
-		xfs_growfs_rt_t in;
-
-		if (!capable(CAP_SYS_ADMIN))
-			return -EPERM;
-
-		if (copy_from_user(&in, arg, sizeof(in)))
-			return -XFS_ERROR(EFAULT);
-
-		error = xfs_growfs_rt(mp, &in);
-		return -error;
-	}
-
-	case XFS_IOC_FREEZE:
-		if (!capable(CAP_SYS_ADMIN))
-			return -EPERM;
-
-		if (inode->i_sb->s_frozen == SB_UNFROZEN)
-			freeze_bdev(inode->i_sb->s_bdev);
-		return 0;
-
-	case XFS_IOC_THAW:
-		if (!capable(CAP_SYS_ADMIN))
-			return -EPERM;
-		if (inode->i_sb->s_frozen != SB_UNFROZEN)
-			thaw_bdev(inode->i_sb->s_bdev, inode->i_sb);
-		return 0;
-
-	case XFS_IOC_GOINGDOWN: {
-		__uint32_t in;
-
-		if (!capable(CAP_SYS_ADMIN))
-			return -EPERM;
-
-		if (get_user(in, (__uint32_t __user *)arg))
-			return -XFS_ERROR(EFAULT);
-
-		error = xfs_fs_goingdown(mp, in);
-		return -error;
-	}
-
-	case XFS_IOC_ERROR_INJECTION: {
-		xfs_error_injection_t in;
-
-		if (!capable(CAP_SYS_ADMIN))
-			return -EPERM;
-
-		if (copy_from_user(&in, arg, sizeof(in)))
-			return -XFS_ERROR(EFAULT);
-
-		error = xfs_errortag_add(in.errtag, mp);
-		return -error;
-	}
-
-	case XFS_IOC_ERROR_CLEARALL:
-		if (!capable(CAP_SYS_ADMIN))
-			return -EPERM;
-
-		error = xfs_errortag_clearall(mp, 1);
-		return -error;
-
-	default:
-		return -ENOTTY;
-	}
-}
-
 STATIC int
 xfs_ioc_space(
 	struct xfs_inode	*ip,
@@ -1179,85 +871,85 @@
 }
 
 STATIC int
-xfs_ioc_xattr(
+xfs_ioc_fssetxattr(
 	xfs_inode_t		*ip,
 	struct file		*filp,
-	unsigned int		cmd,
 	void			__user *arg)
 {
 	struct fsxattr		fa;
 	struct bhv_vattr	*vattr;
-	int			error = 0;
+	int			error;
 	int			attr_flags;
-	unsigned int		flags;
+
+	if (copy_from_user(&fa, arg, sizeof(fa)))
+		return -EFAULT;
 
 	vattr = kmalloc(sizeof(*vattr), GFP_KERNEL);
 	if (unlikely(!vattr))
 		return -ENOMEM;
 
-	switch (cmd) {
-	case XFS_IOC_FSSETXATTR: {
-		if (copy_from_user(&fa, arg, sizeof(fa))) {
-			error = -EFAULT;
-			break;
-		}
+	attr_flags = 0;
+	if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
+		attr_flags |= ATTR_NONBLOCK;
 
-		attr_flags = 0;
-		if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
-			attr_flags |= ATTR_NONBLOCK;
+	vattr->va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | XFS_AT_PROJID;
+	vattr->va_xflags  = fa.fsx_xflags;
+	vattr->va_extsize = fa.fsx_extsize;
+	vattr->va_projid  = fa.fsx_projid;
 
-		vattr->va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | XFS_AT_PROJID;
-		vattr->va_xflags  = fa.fsx_xflags;
-		vattr->va_extsize = fa.fsx_extsize;
-		vattr->va_projid  = fa.fsx_projid;
+	error = -xfs_setattr(ip, vattr, attr_flags, NULL);
+	if (!error)
+		vn_revalidate(XFS_ITOV(ip));	/* update flags */
+	kfree(vattr);
+	return error;
+}
 
-		error = xfs_setattr(ip, vattr, attr_flags, NULL);
-		if (likely(!error))
-			vn_revalidate(XFS_ITOV(ip));	/* update flags */
-		error = -error;
-		break;
-	}
+STATIC int
+xfs_ioc_getxflags(
+	xfs_inode_t		*ip,
+	void			__user *arg)
+{
+	unsigned int		flags;
 
-	case XFS_IOC_GETXFLAGS: {
-		flags = xfs_di2lxflags(ip->i_d.di_flags);
-		if (copy_to_user(arg, &flags, sizeof(flags)))
-			error = -EFAULT;
-		break;
-	}
+	flags = xfs_di2lxflags(ip->i_d.di_flags);
+	if (copy_to_user(arg, &flags, sizeof(flags)))
+		return -EFAULT;
+	return 0;
+}
 
-	case XFS_IOC_SETXFLAGS: {
-		if (copy_from_user(&flags, arg, sizeof(flags))) {
-			error = -EFAULT;
-			break;
-		}
+STATIC int
+xfs_ioc_setxflags(
+	xfs_inode_t		*ip,
+	struct file		*filp,
+	void			__user *arg)
+{
+	struct bhv_vattr	*vattr;
+	unsigned int		flags;
+	int			attr_flags;
+	int			error;
 
-		if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
-			      FS_NOATIME_FL | FS_NODUMP_FL | \
-			      FS_SYNC_FL)) {
-			error = -EOPNOTSUPP;
-			break;
-		}
+	if (copy_from_user(&flags, arg, sizeof(flags)))
+		return -EFAULT;
 
-		attr_flags = 0;
-		if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
-			attr_flags |= ATTR_NONBLOCK;
+	if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
+		      FS_NOATIME_FL | FS_NODUMP_FL | \
+		      FS_SYNC_FL))
+		return -EOPNOTSUPP;
 
-		vattr->va_mask = XFS_AT_XFLAGS;
-		vattr->va_xflags = xfs_merge_ioc_xflags(flags,
-							xfs_ip2xflags(ip));
+	vattr = kmalloc(sizeof(*vattr), GFP_KERNEL);
+	if (unlikely(!vattr))
+		return -ENOMEM;
 
-		error = xfs_setattr(ip, vattr, attr_flags, NULL);
-		if (likely(!error))
-			vn_revalidate(XFS_ITOV(ip));	/* update flags */
-		error = -error;
-		break;
-	}
+	attr_flags = 0;
+	if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
+		attr_flags |= ATTR_NONBLOCK;
 
-	default:
-		error = -ENOTTY;
-		break;
-	}
+	vattr->va_mask = XFS_AT_XFLAGS;
+	vattr->va_xflags = xfs_merge_ioc_xflags(flags, xfs_ip2xflags(ip));
 
+	error = -xfs_setattr(ip, vattr, attr_flags, NULL);
+	if (likely(!error))
+		vn_revalidate(XFS_ITOV(ip));	/* update flags */
 	kfree(vattr);
 	return error;
 }
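
The split-out handlers keep the same user ABI as the old switch.  A hedged
userspace sketch exercising the FSGETXATTR/FSSETXATTR pair (bump_projid() is
invented; struct fsxattr and the ioctl numbers come from xfs_fs.h):

	#include <sys/ioctl.h>
	#include <xfs/xfs_fs.h>

	static int bump_projid(int fd)
	{
		struct fsxattr fa;

		if (ioctl(fd, XFS_IOC_FSGETXATTR, &fa) < 0)
			return -1;
		fa.fsx_projid += 1;	/* change one field, write it back */
		return ioctl(fd, XFS_IOC_FSSETXATTR, &fa);
	}
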
@@ -1332,3 +1024,259 @@
 
 	return 0;
 }
+
+int
+xfs_ioctl(
+	xfs_inode_t		*ip,
+	struct file		*filp,
+	int			ioflags,
+	unsigned int		cmd,
+	void			__user *arg)
+{
+	struct inode		*inode = filp->f_path.dentry->d_inode;
+	xfs_mount_t		*mp = ip->i_mount;
+	int			error;
+
+	xfs_itrace_entry(XFS_I(inode));
+	switch (cmd) {
+
+	case XFS_IOC_ALLOCSP:
+	case XFS_IOC_FREESP:
+	case XFS_IOC_RESVSP:
+	case XFS_IOC_UNRESVSP:
+	case XFS_IOC_ALLOCSP64:
+	case XFS_IOC_FREESP64:
+	case XFS_IOC_RESVSP64:
+	case XFS_IOC_UNRESVSP64:
+		/*
+		 * Only allow the sys admin to reserve space unless
+		 * unwritten extents are enabled.
+		 */
+		if (!xfs_sb_version_hasextflgbit(&mp->m_sb) &&
+		    !capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+		return xfs_ioc_space(ip, inode, filp, ioflags, cmd, arg);
+
+	case XFS_IOC_DIOINFO: {
+		struct dioattr	da;
+		xfs_buftarg_t	*target =
+			XFS_IS_REALTIME_INODE(ip) ?
+			mp->m_rtdev_targp : mp->m_ddev_targp;
+
+		da.d_mem = da.d_miniosz = 1 << target->bt_sshift;
+		da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1);
+
+		if (copy_to_user(arg, &da, sizeof(da)))
+			return -XFS_ERROR(EFAULT);
+		return 0;
+	}
+
+	case XFS_IOC_FSBULKSTAT_SINGLE:
+	case XFS_IOC_FSBULKSTAT:
+	case XFS_IOC_FSINUMBERS:
+		return xfs_ioc_bulkstat(mp, cmd, arg);
+
+	case XFS_IOC_FSGEOMETRY_V1:
+		return xfs_ioc_fsgeometry_v1(mp, arg);
+
+	case XFS_IOC_FSGEOMETRY:
+		return xfs_ioc_fsgeometry(mp, arg);
+
+	case XFS_IOC_GETVERSION:
+		return put_user(inode->i_generation, (int __user *)arg);
+
+	case XFS_IOC_FSGETXATTR:
+		return xfs_ioc_fsgetxattr(ip, 0, arg);
+	case XFS_IOC_FSGETXATTRA:
+		return xfs_ioc_fsgetxattr(ip, 1, arg);
+	case XFS_IOC_FSSETXATTR:
+		return xfs_ioc_fssetxattr(ip, filp, arg);
+	case XFS_IOC_GETXFLAGS:
+		return xfs_ioc_getxflags(ip, arg);
+	case XFS_IOC_SETXFLAGS:
+		return xfs_ioc_setxflags(ip, filp, arg);
+
+	case XFS_IOC_FSSETDM: {
+		struct fsdmidata	dmi;
+
+		if (copy_from_user(&dmi, arg, sizeof(dmi)))
+			return -XFS_ERROR(EFAULT);
+
+		error = xfs_set_dmattrs(ip, dmi.fsd_dmevmask,
+				dmi.fsd_dmstate);
+		return -error;
+	}
+
+	case XFS_IOC_GETBMAP:
+	case XFS_IOC_GETBMAPA:
+		return xfs_ioc_getbmap(ip, ioflags, cmd, arg);
+
+	case XFS_IOC_GETBMAPX:
+		return xfs_ioc_getbmapx(ip, arg);
+
+	case XFS_IOC_FD_TO_HANDLE:
+	case XFS_IOC_PATH_TO_HANDLE:
+	case XFS_IOC_PATH_TO_FSHANDLE:
+		return xfs_find_handle(cmd, arg);
+
+	case XFS_IOC_OPEN_BY_HANDLE:
+		return xfs_open_by_handle(mp, arg, filp, inode);
+
+	case XFS_IOC_FSSETDM_BY_HANDLE:
+		return xfs_fssetdm_by_handle(mp, arg, inode);
+
+	case XFS_IOC_READLINK_BY_HANDLE:
+		return xfs_readlink_by_handle(mp, arg, inode);
+
+	case XFS_IOC_ATTRLIST_BY_HANDLE:
+		return xfs_attrlist_by_handle(mp, arg, inode);
+
+	case XFS_IOC_ATTRMULTI_BY_HANDLE:
+		return xfs_attrmulti_by_handle(mp, arg, inode);
+
+	case XFS_IOC_SWAPEXT: {
+		error = xfs_swapext((struct xfs_swapext __user *)arg);
+		return -error;
+	}
+
+	case XFS_IOC_FSCOUNTS: {
+		xfs_fsop_counts_t out;
+
+		error = xfs_fs_counts(mp, &out);
+		if (error)
+			return -error;
+
+		if (copy_to_user(arg, &out, sizeof(out)))
+			return -XFS_ERROR(EFAULT);
+		return 0;
+	}
+
+	case XFS_IOC_SET_RESBLKS: {
+		xfs_fsop_resblks_t inout;
+		__uint64_t	   in;
+
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+		if (copy_from_user(&inout, arg, sizeof(inout)))
+			return -XFS_ERROR(EFAULT);
+
+		/* input parameter is passed in resblks field of structure */
+		in = inout.resblks;
+		error = xfs_reserve_blocks(mp, &in, &inout);
+		if (error)
+			return -error;
+
+		if (copy_to_user(arg, &inout, sizeof(inout)))
+			return -XFS_ERROR(EFAULT);
+		return 0;
+	}
+
+	case XFS_IOC_GET_RESBLKS: {
+		xfs_fsop_resblks_t out;
+
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+		error = xfs_reserve_blocks(mp, NULL, &out);
+		if (error)
+			return -error;
+
+		if (copy_to_user(arg, &out, sizeof(out)))
+			return -XFS_ERROR(EFAULT);
+
+		return 0;
+	}
+
+	case XFS_IOC_FSGROWFSDATA: {
+		xfs_growfs_data_t in;
+
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+		if (copy_from_user(&in, arg, sizeof(in)))
+			return -XFS_ERROR(EFAULT);
+
+		error = xfs_growfs_data(mp, &in);
+		return -error;
+	}
+
+	case XFS_IOC_FSGROWFSLOG: {
+		xfs_growfs_log_t in;
+
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+		if (copy_from_user(&in, arg, sizeof(in)))
+			return -XFS_ERROR(EFAULT);
+
+		error = xfs_growfs_log(mp, &in);
+		return -error;
+	}
+
+	case XFS_IOC_FSGROWFSRT: {
+		xfs_growfs_rt_t in;
+
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+		if (copy_from_user(&in, arg, sizeof(in)))
+			return -XFS_ERROR(EFAULT);
+
+		error = xfs_growfs_rt(mp, &in);
+		return -error;
+	}
+
+	case XFS_IOC_FREEZE:
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+		if (inode->i_sb->s_frozen == SB_UNFROZEN)
+			freeze_bdev(inode->i_sb->s_bdev);
+		return 0;
+
+	case XFS_IOC_THAW:
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+		if (inode->i_sb->s_frozen != SB_UNFROZEN)
+			thaw_bdev(inode->i_sb->s_bdev, inode->i_sb);
+		return 0;
+
+	case XFS_IOC_GOINGDOWN: {
+		__uint32_t in;
+
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+		if (get_user(in, (__uint32_t __user *)arg))
+			return -XFS_ERROR(EFAULT);
+
+		error = xfs_fs_goingdown(mp, in);
+		return -error;
+	}
+
+	case XFS_IOC_ERROR_INJECTION: {
+		xfs_error_injection_t in;
+
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+		if (copy_from_user(&in, arg, sizeof(in)))
+			return -XFS_ERROR(EFAULT);
+
+		error = xfs_errortag_add(in.errtag, mp);
+		return -error;
+	}
+
+	case XFS_IOC_ERROR_CLEARALL:
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+		error = xfs_errortag_clearall(mp, 1);
+		return -error;
+
+	default:
+		return -ENOTTY;
+	}
+}
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index cc4abd3..0c958cf 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -62,12 +62,11 @@
 xfs_synchronize_atime(
 	xfs_inode_t	*ip)
 {
-	bhv_vnode_t	*vp;
+	struct inode	*inode = ip->i_vnode;
 
-	vp = XFS_ITOV_NULL(ip);
-	if (vp) {
-		ip->i_d.di_atime.t_sec = (__int32_t)vp->i_atime.tv_sec;
-		ip->i_d.di_atime.t_nsec = (__int32_t)vp->i_atime.tv_nsec;
+	if (inode) {
+		ip->i_d.di_atime.t_sec = (__int32_t)inode->i_atime.tv_sec;
+		ip->i_d.di_atime.t_nsec = (__int32_t)inode->i_atime.tv_nsec;
 	}
 }
 
@@ -80,11 +79,10 @@
 xfs_mark_inode_dirty_sync(
 	xfs_inode_t	*ip)
 {
-	bhv_vnode_t	*vp;
+	struct inode	*inode = ip->i_vnode;
 
-	vp = XFS_ITOV_NULL(ip);
-	if (vp)
-		mark_inode_dirty_sync(vn_to_inode(vp));
+	if (inode)
+		mark_inode_dirty_sync(inode);
 }
 
 /*
@@ -215,66 +213,62 @@
  */
 STATIC int
 xfs_init_security(
-	bhv_vnode_t	*vp,
+	struct inode	*inode,
 	struct inode	*dir)
 {
-	struct inode	*ip = vn_to_inode(vp);
+	struct xfs_inode *ip = XFS_I(inode);
 	size_t		length;
 	void		*value;
 	char		*name;
 	int		error;
 
-	error = security_inode_init_security(ip, dir, &name, &value, &length);
+	error = security_inode_init_security(inode, dir, &name,
+					     &value, &length);
 	if (error) {
 		if (error == -EOPNOTSUPP)
 			return 0;
 		return -error;
 	}
 
-	error = xfs_attr_set(XFS_I(ip), name, value,
-			length, ATTR_SECURE);
+	error = xfs_attr_set(ip, name, value, length, ATTR_SECURE);
 	if (!error)
-		xfs_iflags_set(XFS_I(ip), XFS_IMODIFIED);
+		xfs_iflags_set(ip, XFS_IMODIFIED);
 
 	kfree(name);
 	kfree(value);
 	return error;
 }
 
-/*
- * Determine whether a process has a valid fs_struct (kernel daemons
- * like knfsd don't have an fs_struct).
- *
- * XXX(hch):  nfsd is broken, better fix it instead.
- */
-STATIC_INLINE int
-xfs_has_fs_struct(struct task_struct *task)
+static void
+xfs_dentry_to_name(
+	struct xfs_name	*namep,
+	struct dentry	*dentry)
 {
-	return (task->fs != init_task.fs);
+	namep->name = dentry->d_name.name;
+	namep->len = dentry->d_name.len;
 }
 
 STATIC void
 xfs_cleanup_inode(
 	struct inode	*dir,
-	bhv_vnode_t	*vp,
+	struct inode	*inode,
 	struct dentry	*dentry,
 	int		mode)
 {
-	struct dentry   teardown = {};
+	struct xfs_name	teardown;
 
 	/* Oh, the horror.
 	 * If we can't add the ACL or we fail in
 	 * xfs_init_security we must back out.
 	 * ENOSPC can hit here, among other things.
 	 */
-	teardown.d_inode = vn_to_inode(vp);
-	teardown.d_name = dentry->d_name;
+	xfs_dentry_to_name(&teardown, dentry);
 
 	if (S_ISDIR(mode))
-		xfs_rmdir(XFS_I(dir), &teardown);
+		xfs_rmdir(XFS_I(dir), &teardown, XFS_I(inode));
 	else
-		xfs_remove(XFS_I(dir), &teardown);
-	VN_RELE(vp);
+		xfs_remove(XFS_I(dir), &teardown, XFS_I(inode));
+	iput(inode);
 }
 
 STATIC int
@@ -284,9 +278,10 @@
 	int		mode,
 	dev_t		rdev)
 {
-	struct inode	*ip;
-	bhv_vnode_t	*vp = NULL, *dvp = vn_from_inode(dir);
+	struct inode	*inode;
+	struct xfs_inode *ip = NULL;
 	xfs_acl_t	*default_acl = NULL;
+	struct xfs_name	name;
 	attrexists_t	test_default_acl = _ACL_DEFAULT_EXISTS;
 	int		error;
 
@@ -297,59 +292,67 @@
 	if (unlikely(!sysv_valid_dev(rdev) || MAJOR(rdev) & ~0x1ff))
 		return -EINVAL;
 
-	if (unlikely(test_default_acl && test_default_acl(dvp))) {
+	if (test_default_acl && test_default_acl(dir)) {
 		if (!_ACL_ALLOC(default_acl)) {
 			return -ENOMEM;
 		}
-		if (!_ACL_GET_DEFAULT(dvp, default_acl)) {
+		if (!_ACL_GET_DEFAULT(dir, default_acl)) {
 			_ACL_FREE(default_acl);
 			default_acl = NULL;
 		}
 	}
 
-	if (IS_POSIXACL(dir) && !default_acl && xfs_has_fs_struct(current))
+	xfs_dentry_to_name(&name, dentry);
+
+	if (IS_POSIXACL(dir) && !default_acl)
 		mode &= ~current->fs->umask;
 
 	switch (mode & S_IFMT) {
-	case S_IFCHR: case S_IFBLK: case S_IFIFO: case S_IFSOCK:
+	case S_IFCHR:
+	case S_IFBLK:
+	case S_IFIFO:
+	case S_IFSOCK:
 		rdev = sysv_encode_dev(rdev);
 	case S_IFREG:
-		error = xfs_create(XFS_I(dir), dentry, mode, rdev, &vp, NULL);
+		error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip, NULL);
 		break;
 	case S_IFDIR:
-		error = xfs_mkdir(XFS_I(dir), dentry, mode, &vp, NULL);
+		error = xfs_mkdir(XFS_I(dir), &name, mode, &ip, NULL);
 		break;
 	default:
 		error = EINVAL;
 		break;
 	}
 
-	if (unlikely(!error)) {
-		error = xfs_init_security(vp, dir);
-		if (error)
-			xfs_cleanup_inode(dir, vp, dentry, mode);
-	}
+	if (unlikely(error))
+		goto out_free_acl;
 
-	if (unlikely(default_acl)) {
-		if (!error) {
-			error = _ACL_INHERIT(vp, mode, default_acl);
-			if (!error)
-				xfs_iflags_set(XFS_I(vp), XFS_IMODIFIED);
-			else
-				xfs_cleanup_inode(dir, vp, dentry, mode);
-		}
+	inode = ip->i_vnode;
+
+	error = xfs_init_security(inode, dir);
+	if (unlikely(error))
+		goto out_cleanup_inode;
+
+	if (default_acl) {
+		error = _ACL_INHERIT(inode, mode, default_acl);
+		if (unlikely(error))
+			goto out_cleanup_inode;
+		xfs_iflags_set(ip, XFS_IMODIFIED);
 		_ACL_FREE(default_acl);
 	}
 
-	if (likely(!error)) {
-		ASSERT(vp);
-		ip = vn_to_inode(vp);
 
-		if (S_ISDIR(mode))
-			xfs_validate_fields(ip);
-		d_instantiate(dentry, ip);
-		xfs_validate_fields(dir);
-	}
+	if (S_ISDIR(mode))
+		xfs_validate_fields(inode);
+	d_instantiate(dentry, inode);
+	xfs_validate_fields(dir);
+	return -error;
+
+ out_cleanup_inode:
+	xfs_cleanup_inode(dir, inode, dentry, mode);
+ out_free_acl:
+	if (default_acl)
+		_ACL_FREE(default_acl);
 	return -error;
 }
 
@@ -378,13 +381,15 @@
 	struct dentry	*dentry,
 	struct nameidata *nd)
 {
-	bhv_vnode_t	*cvp;
+	struct xfs_inode *cip;
+	struct xfs_name	name;
 	int		error;
 
 	if (dentry->d_name.len >= MAXNAMELEN)
 		return ERR_PTR(-ENAMETOOLONG);
 
-	error = xfs_lookup(XFS_I(dir), dentry, &cvp);
+	xfs_dentry_to_name(&name, dentry);
+	error = xfs_lookup(XFS_I(dir), &name, &cip);
 	if (unlikely(error)) {
 		if (unlikely(error != ENOENT))
 			return ERR_PTR(-error);
@@ -392,7 +397,7 @@
 		return NULL;
 	}
 
-	return d_splice_alias(vn_to_inode(cvp), dentry);
+	return d_splice_alias(cip->i_vnode, dentry);
 }
 
 STATIC int
@@ -401,23 +406,24 @@
 	struct inode	*dir,
 	struct dentry	*dentry)
 {
-	struct inode	*ip;	/* inode of guy being linked to */
-	bhv_vnode_t	*vp;	/* vp of name being linked */
+	struct inode	*inode;	/* inode of guy being linked to */
+	struct xfs_name	name;
 	int		error;
 
-	ip = old_dentry->d_inode;	/* inode being linked to */
-	vp = vn_from_inode(ip);
+	inode = old_dentry->d_inode;
+	xfs_dentry_to_name(&name, dentry);
 
-	VN_HOLD(vp);
-	error = xfs_link(XFS_I(dir), vp, dentry);
+	igrab(inode);
+	error = xfs_link(XFS_I(dir), XFS_I(inode), &name);
 	if (unlikely(error)) {
-		VN_RELE(vp);
-	} else {
-		xfs_iflags_set(XFS_I(dir), XFS_IMODIFIED);
-		xfs_validate_fields(ip);
-		d_instantiate(dentry, ip);
+		iput(inode);
+		return -error;
 	}
-	return -error;
+
+	xfs_iflags_set(XFS_I(dir), XFS_IMODIFIED);
+	xfs_validate_fields(inode);
+	d_instantiate(dentry, inode);
+	return 0;
 }
 
 STATIC int
@@ -426,11 +432,13 @@
 	struct dentry	*dentry)
 {
 	struct inode	*inode;
+	struct xfs_name	name;
 	int		error;
 
 	inode = dentry->d_inode;
+	xfs_dentry_to_name(&name, dentry);
 
-	error = xfs_remove(XFS_I(dir), dentry);
+	error = xfs_remove(XFS_I(dir), &name, XFS_I(inode));
 	if (likely(!error)) {
 		xfs_validate_fields(dir);	/* size needs update */
 		xfs_validate_fields(inode);
@@ -444,29 +452,34 @@
 	struct dentry	*dentry,
 	const char	*symname)
 {
-	struct inode	*ip;
-	bhv_vnode_t	*cvp;	/* used to lookup symlink to put in dentry */
+	struct inode	*inode;
+	struct xfs_inode *cip = NULL;
+	struct xfs_name	name;
 	int		error;
 	mode_t		mode;
 
-	cvp = NULL;
-
 	mode = S_IFLNK |
 		(irix_symlink_mode ? 0777 & ~current->fs->umask : S_IRWXUGO);
+	xfs_dentry_to_name(&name, dentry);
 
-	error = xfs_symlink(XFS_I(dir), dentry, (char *)symname, mode,
-			    &cvp, NULL);
-	if (likely(!error && cvp)) {
-		error = xfs_init_security(cvp, dir);
-		if (likely(!error)) {
-			ip = vn_to_inode(cvp);
-			d_instantiate(dentry, ip);
-			xfs_validate_fields(dir);
-			xfs_validate_fields(ip);
-		} else {
-			xfs_cleanup_inode(dir, cvp, dentry, 0);
-		}
-	}
+	error = xfs_symlink(XFS_I(dir), &name, symname, mode, &cip, NULL);
+	if (unlikely(error))
+		goto out;
+
+	inode = cip->i_vnode;
+
+	error = xfs_init_security(inode, dir);
+	if (unlikely(error))
+		goto out_cleanup_inode;
+
+	d_instantiate(dentry, inode);
+	xfs_validate_fields(dir);
+	xfs_validate_fields(inode);
+	return 0;
+
+ out_cleanup_inode:
+	xfs_cleanup_inode(dir, inode, dentry, 0);
+ out:
 	return -error;
 }
 
@@ -476,9 +489,12 @@
 	struct dentry	*dentry)
 {
 	struct inode	*inode = dentry->d_inode;
+	struct xfs_name	name;
 	int		error;
 
-	error = xfs_rmdir(XFS_I(dir), dentry);
+	xfs_dentry_to_name(&name, dentry);
+
+	error = xfs_rmdir(XFS_I(dir), &name, XFS_I(inode));
 	if (likely(!error)) {
 		xfs_validate_fields(inode);
 		xfs_validate_fields(dir);
@@ -494,12 +510,15 @@
 	struct dentry	*ndentry)
 {
 	struct inode	*new_inode = ndentry->d_inode;
-	bhv_vnode_t	*tvp;	/* target directory */
+	struct xfs_name	oname;
+	struct xfs_name	nname;
 	int		error;
 
-	tvp = vn_from_inode(ndir);
+	xfs_dentry_to_name(&oname, odentry);
+	xfs_dentry_to_name(&nname, ndentry);
 
-	error = xfs_rename(XFS_I(odir), odentry, tvp, ndentry);
+	error = xfs_rename(XFS_I(odir), &oname, XFS_I(odentry->d_inode),
+							XFS_I(ndir), &nname);
 	if (likely(!error)) {
 		if (new_inode)
 			xfs_validate_fields(new_inode);
@@ -700,11 +719,19 @@
 	return -error;
 }
 
+/*
+ * block_truncate_page can return an error, but we can't propagate it
+ * at all here. Leave a complaint plus a stack trace in the syslog, because
+ * this could be bad; if it is, the error needs to be propagated further.
+ */
 STATIC void
 xfs_vn_truncate(
 	struct inode	*inode)
 {
-	block_truncate_page(inode->i_mapping, inode->i_size, xfs_get_blocks);
+	int	error;
+	error = block_truncate_page(inode->i_mapping, inode->i_size,
+							xfs_get_blocks);
+	WARN_ON(error);
 }
 
 STATIC int
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h
index 3ca39c4..e514332 100644
--- a/fs/xfs/linux-2.6/xfs_linux.h
+++ b/fs/xfs/linux-2.6/xfs_linux.h
@@ -99,7 +99,6 @@
 /*
  * Feature macros (disable/enable)
  */
-#undef  HAVE_REFCACHE	/* reference cache not needed for NFS in 2.6 */
 #define HAVE_SPLICE	/* a splice(2) exists in 2.6, but not in 2.4 */
 #ifdef CONFIG_SMP
 #define HAVE_PERCPU_SB	/* per cpu superblock counters are a 2.6 feature */
diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c
index 1663533..21c0dbc 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.c
+++ b/fs/xfs/linux-2.6/xfs_lrw.c
@@ -176,7 +176,6 @@
 {
 	struct file		*file = iocb->ki_filp;
 	struct inode		*inode = file->f_mapping->host;
-	bhv_vnode_t		*vp = XFS_ITOV(ip);
 	xfs_mount_t		*mp = ip->i_mount;
 	size_t			size = 0;
 	ssize_t			ret = 0;
@@ -228,11 +227,11 @@
 	xfs_ilock(ip, XFS_IOLOCK_SHARED);
 
 	if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) {
-		bhv_vrwlock_t locktype = VRWLOCK_READ;
 		int dmflags = FILP_DELAY_FLAG(file) | DM_SEM_FLAG_RD(ioflags);
+		int iolock = XFS_IOLOCK_SHARED;
 
-		ret = -XFS_SEND_DATA(mp, DM_EVENT_READ, vp, *offset, size,
-					dmflags, &locktype);
+		ret = -XFS_SEND_DATA(mp, DM_EVENT_READ, ip, *offset, size,
+					dmflags, &iolock);
 		if (ret) {
 			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 			if (unlikely(ioflags & IO_ISDIRECT))
@@ -242,7 +241,7 @@
 	}
 
 	if (unlikely(ioflags & IO_ISDIRECT)) {
-		if (VN_CACHED(vp))
+		if (inode->i_mapping->nrpages)
 			ret = xfs_flushinval_pages(ip, (*offset & PAGE_CACHE_MASK),
 						    -1, FI_REMAPF_LOCKED);
 		mutex_unlock(&inode->i_mutex);
@@ -276,7 +275,6 @@
 	int			flags,
 	int			ioflags)
 {
-	bhv_vnode_t		*vp = XFS_ITOV(ip);
 	xfs_mount_t		*mp = ip->i_mount;
 	ssize_t			ret;
 
@@ -287,11 +285,11 @@
 	xfs_ilock(ip, XFS_IOLOCK_SHARED);
 
 	if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) {
-		bhv_vrwlock_t locktype = VRWLOCK_READ;
+		int iolock = XFS_IOLOCK_SHARED;
 		int error;
 
-		error = XFS_SEND_DATA(mp, DM_EVENT_READ, vp, *ppos, count,
-					FILP_DELAY_FLAG(infilp), &locktype);
+		error = XFS_SEND_DATA(mp, DM_EVENT_READ, ip, *ppos, count,
+					FILP_DELAY_FLAG(infilp), &iolock);
 		if (error) {
 			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 			return -error;
@@ -317,7 +315,6 @@
 	int			flags,
 	int			ioflags)
 {
-	bhv_vnode_t		*vp = XFS_ITOV(ip);
 	xfs_mount_t		*mp = ip->i_mount;
 	ssize_t			ret;
 	struct inode		*inode = outfilp->f_mapping->host;
@@ -330,11 +327,11 @@
 	xfs_ilock(ip, XFS_IOLOCK_EXCL);
 
 	if (DM_EVENT_ENABLED(ip, DM_EVENT_WRITE) && !(ioflags & IO_INVIS)) {
-		bhv_vrwlock_t locktype = VRWLOCK_WRITE;
+		int iolock = XFS_IOLOCK_EXCL;
 		int error;
 
-		error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, vp, *ppos, count,
-					FILP_DELAY_FLAG(outfilp), &locktype);
+		error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, ip, *ppos, count,
+					FILP_DELAY_FLAG(outfilp), &iolock);
 		if (error) {
 			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
 			return -error;
@@ -573,14 +570,12 @@
 	struct file		*file = iocb->ki_filp;
 	struct address_space	*mapping = file->f_mapping;
 	struct inode		*inode = mapping->host;
-	bhv_vnode_t		*vp = XFS_ITOV(xip);
 	unsigned long		segs = nsegs;
 	xfs_mount_t		*mp;
 	ssize_t			ret = 0, error = 0;
 	xfs_fsize_t		isize, new_size;
 	int			iolock;
 	int			eventsent = 0;
-	bhv_vrwlock_t		locktype;
 	size_t			ocount = 0, count;
 	loff_t			pos;
 	int			need_i_mutex;
@@ -607,11 +602,9 @@
 relock:
 	if (ioflags & IO_ISDIRECT) {
 		iolock = XFS_IOLOCK_SHARED;
-		locktype = VRWLOCK_WRITE_DIRECT;
 		need_i_mutex = 0;
 	} else {
 		iolock = XFS_IOLOCK_EXCL;
-		locktype = VRWLOCK_WRITE;
 		need_i_mutex = 1;
 		mutex_lock(&inode->i_mutex);
 	}
@@ -634,9 +627,8 @@
 			dmflags |= DM_FLAGS_IMUX;
 
 		xfs_iunlock(xip, XFS_ILOCK_EXCL);
-		error = XFS_SEND_DATA(xip->i_mount, DM_EVENT_WRITE, vp,
-				      pos, count,
-				      dmflags, &locktype);
+		error = XFS_SEND_DATA(xip->i_mount, DM_EVENT_WRITE, xip,
+				      pos, count, dmflags, &iolock);
 		if (error) {
 			goto out_unlock_internal;
 		}
@@ -664,10 +656,9 @@
 			return XFS_ERROR(-EINVAL);
 		}
 
-		if (!need_i_mutex && (VN_CACHED(vp) || pos > xip->i_size)) {
+		if (!need_i_mutex && (mapping->nrpages || pos > xip->i_size)) {
 			xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
 			iolock = XFS_IOLOCK_EXCL;
-			locktype = VRWLOCK_WRITE;
 			need_i_mutex = 1;
 			mutex_lock(&inode->i_mutex);
 			xfs_ilock(xip, XFS_ILOCK_EXCL|iolock);
@@ -727,7 +718,7 @@
 	current->backing_dev_info = mapping->backing_dev_info;
 
 	if ((ioflags & IO_ISDIRECT)) {
-		if (VN_CACHED(vp)) {
+		if (mapping->nrpages) {
 			WARN_ON(need_i_mutex == 0);
 			xfs_inval_cached_trace(xip, pos, -1,
 					(pos & PAGE_CACHE_MASK), -1);
@@ -744,7 +735,6 @@
 			mutex_unlock(&inode->i_mutex);
 
 			iolock = XFS_IOLOCK_SHARED;
-			locktype = VRWLOCK_WRITE_DIRECT;
 			need_i_mutex = 0;
 		}
 
@@ -781,15 +771,15 @@
 
 	if (ret == -ENOSPC &&
 	    DM_EVENT_ENABLED(xip, DM_EVENT_NOSPACE) && !(ioflags & IO_INVIS)) {
-		xfs_rwunlock(xip, locktype);
+		xfs_iunlock(xip, iolock);
 		if (need_i_mutex)
 			mutex_unlock(&inode->i_mutex);
-		error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, vp,
-				DM_RIGHT_NULL, vp, DM_RIGHT_NULL, NULL, NULL,
+		error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, xip,
+				DM_RIGHT_NULL, xip, DM_RIGHT_NULL, NULL, NULL,
 				0, 0, 0); /* Delay flag intentionally  unused */
 		if (need_i_mutex)
 			mutex_lock(&inode->i_mutex);
-		xfs_rwlock(xip, locktype);
+		xfs_ilock(xip, iolock);
 		if (error)
 			goto out_unlock_internal;
 		pos = xip->i_size;
@@ -817,7 +807,8 @@
 	/* Handle various SYNC-type writes */
 	if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
 		int error2;
-		xfs_rwunlock(xip, locktype);
+
+		xfs_iunlock(xip, iolock);
 		if (need_i_mutex)
 			mutex_unlock(&inode->i_mutex);
 		error2 = sync_page_range(inode, mapping, pos, ret);
@@ -825,7 +816,7 @@
 			error = error2;
 		if (need_i_mutex)
 			mutex_lock(&inode->i_mutex);
-		xfs_rwlock(xip, locktype);
+		xfs_ilock(xip, iolock);
 		error2 = xfs_write_sync_logforce(mp, xip);
 		if (!error)
 			error = error2;
@@ -846,7 +837,7 @@
 			xip->i_d.di_size = xip->i_size;
 		xfs_iunlock(xip, XFS_ILOCK_EXCL);
 	}
-	xfs_rwunlock(xip, locktype);
+	xfs_iunlock(xip, iolock);
  out_unlock_mutex:
 	if (need_i_mutex)
 		mutex_unlock(&inode->i_mutex);
@@ -884,28 +875,23 @@
 }
 
 /*
- * Wrapper around bdstrat so that we can stop data
- * from going to disk in case we are shutting down the filesystem.
- * Typically user data goes thru this path; one of the exceptions
- * is the superblock.
+ * Wrapper around bdstrat so that we can stop data from going to disk in case
+ * we are shutting down the filesystem.  Typically user data goes through this
+ * path; one of the exceptions is the superblock.
  */
-int
+void
 xfsbdstrat(
 	struct xfs_mount	*mp,
 	struct xfs_buf		*bp)
 {
 	ASSERT(mp);
 	if (!XFS_FORCED_SHUTDOWN(mp)) {
-		/* Grio redirection would go here
-		 * if (XFS_BUF_IS_GRIO(bp)) {
-		 */
-
 		xfs_buf_iorequest(bp);
-		return 0;
+		return;
 	}
 
 	xfs_buftrace("XFSBDSTRAT IOERROR", bp);
-	return (xfs_bioerror_relse(bp));
+	xfs_bioerror_relse(bp);
 }
 
 /*
diff --git a/fs/xfs/linux-2.6/xfs_lrw.h b/fs/xfs/linux-2.6/xfs_lrw.h
index e200253..e1d498b 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.h
+++ b/fs/xfs/linux-2.6/xfs_lrw.h
@@ -68,7 +68,8 @@
 #define xfs_inval_cached_trace(ip, offset, len, first, last)
 #endif
 
-extern int xfsbdstrat(struct xfs_mount *, struct xfs_buf *);
+/* errors from xfsbdstrat() must be extracted from the buffer */
+extern void xfsbdstrat(struct xfs_mount *, struct xfs_buf *);
 extern int xfs_bdstrat_cb(struct xfs_buf *);
 extern int xfs_dev_is_read_only(struct xfs_mount *, char *);
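 
(Since xfsbdstrat() now returns void, synchronous callers must pull any error out of the buffer once the I/O completes, as the comment above says. A hedged sketch of that calling convention; xfs_iowait() is the buffer-wait helper of this era and returns the buffer's b_error field, but treat the exact helper name as an assumption:)

	xfsbdstrat(mp, bp);		/* shutdown errors are latched in bp */
	error = xfs_iowait(bp);		/* wait for completion; returns b_error */
	if (error)
		goto fail;		/* propagate it as before */
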
 
diff --git a/fs/xfs/linux-2.6/xfs_stats.h b/fs/xfs/linux-2.6/xfs_stats.h
index 8ba7a2f..afd0b0d 100644
--- a/fs/xfs/linux-2.6/xfs_stats.h
+++ b/fs/xfs/linux-2.6/xfs_stats.h
@@ -144,8 +144,8 @@
 # define XFS_STATS_DEC(count)
 # define XFS_STATS_ADD(count, inc)
 
-static __inline void xfs_init_procfs(void) { };
-static __inline void xfs_cleanup_procfs(void) { };
+static inline void xfs_init_procfs(void) { };
+static inline void xfs_cleanup_procfs(void) { };
 
 #endif	/* !CONFIG_PROC_FS */
 
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 8831d95..865eb70 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -896,7 +896,8 @@
 	struct inode		*inode,
 	int			sync)
 {
-	int			error = 0, flags = FLUSH_INODE;
+	int			error = 0;
+	int			flags = 0;
 
 	xfs_itrace_entry(XFS_I(inode));
 	if (sync) {
@@ -934,7 +935,7 @@
 		xfs_inactive(ip);
 		xfs_iflags_clear(ip, XFS_IMODIFIED);
 		if (xfs_reclaim(ip))
-			panic("%s: cannot reclaim 0x%p\n", __FUNCTION__, inode);
+			panic("%s: cannot reclaim 0x%p\n", __func__, inode);
 	}
 
 	ASSERT(XFS_I(inode) == NULL);
@@ -1027,8 +1028,7 @@
 	int		error;
 
 	if (!(mp->m_flags & XFS_MOUNT_RDONLY))
-		error = xfs_sync(mp, SYNC_FSDATA | SYNC_BDFLUSH | SYNC_ATTR |
-				     SYNC_REFCACHE | SYNC_SUPER);
+		error = xfs_sync(mp, SYNC_FSDATA | SYNC_BDFLUSH | SYNC_ATTR);
 	mp->m_sync_seq++;
 	wake_up(&mp->m_wait_single_sync_task);
 }
@@ -1306,7 +1306,7 @@
 	void			*data,
 	int			silent)
 {
-	struct inode		*rootvp;
+	struct inode		*root;
 	struct xfs_mount	*mp = NULL;
 	struct xfs_mount_args	*args = xfs_args_allocate(sb, silent);
 	int			error;
@@ -1344,19 +1344,18 @@
 	sb->s_time_gran = 1;
 	set_posix_acl_flag(sb);
 
-	rootvp = igrab(mp->m_rootip->i_vnode);
-	if (!rootvp) {
+	root = igrab(mp->m_rootip->i_vnode);
+	if (!root) {
 		error = ENOENT;
 		goto fail_unmount;
 	}
-
-	sb->s_root = d_alloc_root(vn_to_inode(rootvp));
-	if (!sb->s_root) {
-		error = ENOMEM;
+	if (is_bad_inode(root)) {
+		error = EINVAL;
 		goto fail_vnrele;
 	}
-	if (is_bad_inode(sb->s_root->d_inode)) {
-		error = EINVAL;
+	sb->s_root = d_alloc_root(root);
+	if (!sb->s_root) {
+		error = ENOMEM;
 		goto fail_vnrele;
 	}
 
@@ -1378,7 +1377,7 @@
 		dput(sb->s_root);
 		sb->s_root = NULL;
 	} else {
-		VN_RELE(rootvp);
+		iput(root);
 	}
 
 fail_unmount:
diff --git a/fs/xfs/linux-2.6/xfs_super.h b/fs/xfs/linux-2.6/xfs_super.h
index 3efcf45..3efb7c6 100644
--- a/fs/xfs/linux-2.6/xfs_super.h
+++ b/fs/xfs/linux-2.6/xfs_super.h
@@ -50,13 +50,7 @@
 # define set_posix_acl_flag(sb)	do { } while (0)
 #endif
 
-#ifdef CONFIG_XFS_SECURITY
-# define XFS_SECURITY_STRING	"security attributes, "
-# define ENOSECURITY		0
-#else
-# define XFS_SECURITY_STRING
-# define ENOSECURITY		EOPNOTSUPP
-#endif
+#define XFS_SECURITY_STRING	"security attributes, "
 
 #ifdef CONFIG_XFS_RT
 # define XFS_REALTIME_STRING	"realtime, "
diff --git a/fs/xfs/linux-2.6/xfs_vfs.h b/fs/xfs/linux-2.6/xfs_vfs.h
index 4da03a4..7e60c77 100644
--- a/fs/xfs/linux-2.6/xfs_vfs.h
+++ b/fs/xfs/linux-2.6/xfs_vfs.h
@@ -49,7 +49,6 @@
 #define SYNC_REFCACHE		0x0040  /* prune some of the nfs ref cache */
 #define SYNC_REMOUNT		0x0080  /* remount readonly, no dummy LRs */
 #define SYNC_IOWAIT		0x0100  /* wait for all I/O to complete */
-#define SYNC_SUPER		0x0200  /* flush superblock to disk */
 
 /*
  * When remounting a filesystem read-only or freezing the filesystem,
diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h
index b5ea418..8b4d63c 100644
--- a/fs/xfs/linux-2.6/xfs_vnode.h
+++ b/fs/xfs/linux-2.6/xfs_vnode.h
@@ -23,8 +23,6 @@
 struct xfs_iomap;
 struct attrlist_cursor_kern;
 
-typedef struct dentry	bhv_vname_t;
-typedef __u64		bhv_vnumber_t;
 typedef struct inode	bhv_vnode_t;
 
 #define VN_ISLNK(vp)	S_ISLNK((vp)->i_mode)
@@ -46,18 +44,6 @@
 }
 
 /*
- * Values for the vop_rwlock/rwunlock flags parameter.
- */
-typedef enum bhv_vrwlock {
-	VRWLOCK_NONE,
-	VRWLOCK_READ,
-	VRWLOCK_WRITE,
-	VRWLOCK_WRITE_DIRECT,
-	VRWLOCK_TRY_READ,
-	VRWLOCK_TRY_WRITE
-} bhv_vrwlock_t;
-
-/*
  * Return values for xfs_inactive.  A return value of
  * VN_INACTIVE_NOCACHE implies that the file system behavior
  * has disassociated its state and bhv_desc_t from the vnode.
@@ -73,12 +59,9 @@
 #define IO_INVIS	0x00020		/* don't update inode timestamps */
 
 /*
- * Flags for vop_iflush call
+ * Flags for xfs_inode_flush
  */
 #define FLUSH_SYNC		1	/* wait for flush to complete	*/
-#define FLUSH_INODE		2	/* flush the inode itself	*/
-#define FLUSH_LOG		4	/* force the last log entry for
-					 * this inode out to disk	*/
 
 /*
  * Flush/Invalidate options for vop_toss/flush/flushinval_pages.
@@ -226,13 +209,6 @@
 }
 
 /*
- * Vname handling macros.
- */
-#define VNAME(dentry)		((char *) (dentry)->d_name.name)
-#define VNAMELEN(dentry)	((dentry)->d_name.len)
-#define VNAME_TO_VNODE(dentry)	(vn_from_inode((dentry)->d_inode))
-
-/*
  * Dealing with bad inodes
  */
 static inline int VN_BAD(bhv_vnode_t *vp)
@@ -303,9 +279,9 @@
 extern void _xfs_itrace_ref(struct xfs_inode *, char *, int, inst_t *);
 extern void xfs_itrace_rele(struct xfs_inode *, char *, int, inst_t *);
 #define xfs_itrace_entry(ip)	\
-	_xfs_itrace_entry(ip, __FUNCTION__, (inst_t *)__return_address)
+	_xfs_itrace_entry(ip, __func__, (inst_t *)__return_address)
 #define xfs_itrace_exit(ip)	\
-	_xfs_itrace_exit(ip, __FUNCTION__, (inst_t *)__return_address)
+	_xfs_itrace_exit(ip, __func__, (inst_t *)__return_address)
 #define xfs_itrace_exit_tag(ip, tag)	\
 	_xfs_itrace_exit(ip, tag, (inst_t *)__return_address)
 #define xfs_itrace_ref(ip)	\
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c
index 665babc..631ebb3 100644
--- a/fs/xfs/quota/xfs_dquot.c
+++ b/fs/xfs/quota/xfs_dquot.c
@@ -1291,7 +1291,7 @@
 	if (flags & XFS_QMOPT_DELWRI) {
 		xfs_bdwrite(mp, bp);
 	} else if (flags & XFS_QMOPT_ASYNC) {
-		xfs_bawrite(mp, bp);
+		error = xfs_bawrite(mp, bp);
 	} else {
 		error = xfs_bwrite(mp, bp);
 	}
@@ -1439,9 +1439,7 @@
 	uint		flags)
 {
 	xfs_dqhash_t	*thishash;
-	xfs_mount_t	*mp;
-
-	mp = dqp->q_mount;
+	xfs_mount_t	*mp = dqp->q_mount;
 
 	ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp));
 	ASSERT(XFS_DQ_IS_HASH_LOCKED(dqp->q_hash));
@@ -1485,6 +1483,7 @@
 	 * we're unmounting, we do care, so we flush it and wait.
 	 */
 	if (XFS_DQ_IS_DIRTY(dqp)) {
+		int	error;
 		xfs_dqtrace_entry(dqp, "DQPURGE ->DQFLUSH: DQDIRTY");
 		/* dqflush unlocks dqflock */
 		/*
@@ -1495,7 +1494,10 @@
 		 * We don't care about getting disk errors here. We need
 		 * to purge this dquot anyway, so we go ahead regardless.
 		 */
-		(void) xfs_qm_dqflush(dqp, XFS_QMOPT_SYNC);
+		error = xfs_qm_dqflush(dqp, XFS_QMOPT_SYNC);
+		if (error)
+			xfs_fs_cmn_err(CE_WARN, mp,
+				"xfs_qm_dqpurge: dquot %p flush failed", dqp);
 		xfs_dqflock(dqp);
 	}
 	ASSERT(dqp->q_pincount == 0);
@@ -1580,12 +1582,18 @@
 		    XFS_INCORE_TRYLOCK);
 	if (bp != NULL) {
 		if (XFS_BUF_ISDELAYWRITE(bp)) {
+			int	error;
 			if (XFS_BUF_ISPINNED(bp)) {
 				xfs_log_force(dqp->q_mount,
 					      (xfs_lsn_t)0,
 					      XFS_LOG_FORCE);
 			}
-			xfs_bawrite(dqp->q_mount, bp);
+			error = xfs_bawrite(dqp->q_mount, bp);
+			if (error)
+				xfs_fs_cmn_err(CE_WARN, dqp->q_mount,
+					"xfs_qm_dqflock_pushbuf_wait: "
+					"pushbuf error %d on dqp %p, bp %p",
+					error, dqp, bp);
 		} else {
 			xfs_buf_relse(bp);
 		}
diff --git a/fs/xfs/quota/xfs_dquot_item.c b/fs/xfs/quota/xfs_dquot_item.c
index 1800e8d..36e05ca 100644
--- a/fs/xfs/quota/xfs_dquot_item.c
+++ b/fs/xfs/quota/xfs_dquot_item.c
@@ -146,6 +146,7 @@
 	xfs_dq_logitem_t	*logitem)
 {
 	xfs_dquot_t	*dqp;
+	int		error;
 
 	dqp = logitem->qli_dquot;
 
@@ -161,7 +162,11 @@
 	 * lock without sleeping, then there must not have been
 	 * anyone in the process of flushing the dquot.
 	 */
-	xfs_qm_dqflush(dqp, XFS_B_DELWRI);
+	error = xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI);
+	if (error)
+		xfs_fs_cmn_err(CE_WARN, dqp->q_mount,
+			"xfs_qm_dquot_logitem_push: push error %d on dqp %p",
+			error, dqp);
 	xfs_dqunlock(dqp);
 }
 
@@ -262,11 +267,16 @@
 					      XFS_LOG_FORCE);
 			}
 			if (dopush) {
+				int	error;
 #ifdef XFSRACEDEBUG
 				delay_for_intr();
 				delay(300);
 #endif
-				xfs_bawrite(mp, bp);
+				error = xfs_bawrite(mp, bp);
+				if (error)
+					xfs_fs_cmn_err(CE_WARN, mp,
+	"xfs_qm_dquot_logitem_pushbuf: pushbuf error %d on qip %p, bp %p",
+							error, qip, bp);
 			} else {
 				xfs_buf_relse(bp);
 			}
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 8e9c5ae..40ea564 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -304,8 +304,11 @@
  * necessary data structures like quotainfo.  This is also responsible for
  * running a quotacheck as necessary.  We are guaranteed that the superblock
  * is consistently read in at this point.
+ *
+ * If we fail here, the mount will continue with quota turned off. We don't
+ * need to indicate success or failure at all.
  */
-int
+void
 xfs_qm_mount_quotas(
 	xfs_mount_t	*mp,
 	int		mfsi_flags)
@@ -313,7 +316,6 @@
 	int		error = 0;
 	uint		sbf;
 
-
 	/*
 	 * If quotas on realtime volumes is not supported, we disable
 	 * quotas immediately.
@@ -332,7 +334,8 @@
 	 * Allocate the quotainfo structure inside the mount struct, and
 	 * create quotainode(s), and change/rev superblock if necessary.
 	 */
-	if ((error = xfs_qm_init_quotainfo(mp))) {
+	error = xfs_qm_init_quotainfo(mp);
+	if (error) {
 		/*
 		 * We must turn off quotas.
 		 */
@@ -344,12 +347,11 @@
 	 * If any of the quotas are not consistent, do a quotacheck.
 	 */
 	if (XFS_QM_NEED_QUOTACHECK(mp) &&
-		!(mfsi_flags & XFS_MFSI_NO_QUOTACHECK)) {
-		if ((error = xfs_qm_quotacheck(mp))) {
-			/* Quotacheck has failed and quotas have
-			 * been disabled.
-			 */
-			return XFS_ERROR(error);
+	    !(mfsi_flags & XFS_MFSI_NO_QUOTACHECK)) {
+		error = xfs_qm_quotacheck(mp);
+		if (error) {
+			/* Quotacheck failed and disabled quotas. */
+			return;
 		}
 	}
 	/* 
@@ -357,12 +359,10 @@
 	 * quotachecked status, since we won't be doing accounting for
 	 * that type anymore.
 	 */
-	if (!XFS_IS_UQUOTA_ON(mp)) {
+	if (!XFS_IS_UQUOTA_ON(mp))
 		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
-	}
-	if (!(XFS_IS_GQUOTA_ON(mp) || XFS_IS_PQUOTA_ON(mp))) {
+	if (!(XFS_IS_GQUOTA_ON(mp) || XFS_IS_PQUOTA_ON(mp)))
 		mp->m_qflags &= ~XFS_OQUOTA_CHKD;
-	}
 
  write_changes:
 	/*
@@ -392,7 +392,7 @@
 		xfs_fs_cmn_err(CE_WARN, mp,
 			"Failed to initialize disk quotas.");
 	}
-	return XFS_ERROR(error);
+	return;
 }
 
 /*
@@ -1438,7 +1438,7 @@
 }
 
 
-STATIC int
+STATIC void
 xfs_qm_reset_dqcounts(
 	xfs_mount_t	*mp,
 	xfs_buf_t	*bp,
@@ -1478,8 +1478,6 @@
 		ddq->d_rtbwarns = 0;
 		ddq = (xfs_disk_dquot_t *) ((xfs_dqblk_t *)ddq + 1);
 	}
-
-	return 0;
 }
 
 STATIC int
@@ -1520,7 +1518,7 @@
 		if (error)
 			break;
 
-		(void) xfs_qm_reset_dqcounts(mp, bp, firstid, type);
+		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
 		xfs_bdwrite(mp, bp);
 		/*
 		 * goto the next block.
@@ -1810,7 +1808,7 @@
 	 * Now release the inode. This will send it to 'inactive', and
 	 * possibly even free blocks.
 	 */
-	VN_RELE(XFS_ITOV(ip));
+	IRELE(ip);
 
 	/*
 	 * Goto next inode.
@@ -1880,6 +1878,14 @@
 	} while (! done);
 
 	/*
+	 * We've made all the changes that we need to make incore.
+	 * Flush them down to disk buffers if everything was updated
+	 * successfully.
+	 */
+	if (!error)
+		error = xfs_qm_dqflush_all(mp, XFS_QMOPT_DELWRI);
+
+	/*
 	 * We can get this error if we couldn't do a dquot allocation inside
 	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
 	 * dirty dquots that might be cached, we just want to get rid of them
@@ -1890,11 +1896,6 @@
 		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_QUOTAOFF);
 		goto error_return;
 	}
-	/*
-	 * We've made all the changes that we need to make incore.
-	 * Now flush_them down to disk buffers.
-	 */
-	xfs_qm_dqflush_all(mp, XFS_QMOPT_DELWRI);
 
 	/*
 	 * We didn't log anything, because if we crashed, we'll have to
@@ -1926,7 +1927,10 @@
 		ASSERT(mp->m_quotainfo != NULL);
 		ASSERT(xfs_Gqm != NULL);
 		xfs_qm_destroy_quotainfo(mp);
-		(void)xfs_mount_reset_sbqflags(mp);
+		if (xfs_mount_reset_sbqflags(mp)) {
+			cmn_err(CE_WARN, "XFS quotacheck %s: "
+				"Failed to reset quota flags.", mp->m_fsname);
+		}
 	} else {
 		cmn_err(CE_NOTE, "XFS quotacheck %s: Done.", mp->m_fsname);
 	}
@@ -1968,7 +1972,7 @@
 			if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
 					     0, 0, &gip, 0))) {
 				if (uip)
-					VN_RELE(XFS_ITOV(uip));
+					IRELE(uip);
 				return XFS_ERROR(error);
 			}
 		}
@@ -1999,7 +2003,7 @@
 					  sbflags | XFS_SB_GQUOTINO, flags);
 		if (error) {
 			if (uip)
-				VN_RELE(XFS_ITOV(uip));
+				IRELE(uip);
 
 			return XFS_ERROR(error);
 		}
@@ -2093,12 +2097,17 @@
 		 * dirty dquots.
 		 */
 		if (XFS_DQ_IS_DIRTY(dqp)) {
+			int	error;
 			xfs_dqtrace_entry(dqp, "DQSHAKE: DQDIRTY");
 			/*
 			 * We flush it delayed write, so don't bother
 			 * releasing the mplock.
 			 */
-			(void) xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI);
+			error = xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI);
+			if (error) {
+				xfs_fs_cmn_err(CE_WARN, dqp->q_mount,
+			"xfs_qm_dqflush_all: dquot %p flush failed", dqp);
+			}
 			xfs_dqunlock(dqp); /* dqflush unlocks dqflock */
 			dqp = dqp->dq_flnext;
 			continue;
@@ -2265,12 +2274,17 @@
 		 * dirty dquots.
 		 */
 		if (XFS_DQ_IS_DIRTY(dqp)) {
+			int	error;
 			xfs_dqtrace_entry(dqp, "DQRECLAIM: DQDIRTY");
 			/*
 			 * We flush it delayed write, so don't bother
 			 * releasing the freelist lock.
 			 */
-			(void) xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI);
+			error = xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI);
+			if (error) {
+				xfs_fs_cmn_err(CE_WARN, dqp->q_mount,
+			"xfs_qm_dqreclaim: dquot %p flush failed", dqp);
+			}
 			xfs_dqunlock(dqp); /* dqflush unlocks dqflock */
 			continue;
 		}
@@ -2378,9 +2392,9 @@
 	}
 
 	xfs_mod_sb(tp, flags);
-	(void) xfs_trans_commit(tp, 0);
+	error = xfs_trans_commit(tp, 0);
 
-	return 0;
+	return error;
 }
 
 
diff --git a/fs/xfs/quota/xfs_qm.h b/fs/xfs/quota/xfs_qm.h
index baf537c..cd2300e 100644
--- a/fs/xfs/quota/xfs_qm.h
+++ b/fs/xfs/quota/xfs_qm.h
@@ -165,7 +165,7 @@
 #define XFS_QM_RELE(xqm)	((xqm)->qm_nrefs--)
 
 extern void		xfs_qm_destroy_quotainfo(xfs_mount_t *);
-extern int		xfs_qm_mount_quotas(xfs_mount_t *, int);
+extern void		xfs_qm_mount_quotas(xfs_mount_t *, int);
 extern int		xfs_qm_quotacheck(xfs_mount_t *);
 extern void		xfs_qm_unmount_quotadestroy(xfs_mount_t *);
 extern int		xfs_qm_unmount_quotas(xfs_mount_t *);
diff --git a/fs/xfs/quota/xfs_qm_stats.h b/fs/xfs/quota/xfs_qm_stats.h
index a50ffab..5b964fc 100644
--- a/fs/xfs/quota/xfs_qm_stats.h
+++ b/fs/xfs/quota/xfs_qm_stats.h
@@ -45,8 +45,8 @@
 
 # define XQM_STATS_INC(count)	do { } while (0)
 
-static __inline void xfs_qm_init_procfs(void) { };
-static __inline void xfs_qm_cleanup_procfs(void) { };
+static inline void xfs_qm_init_procfs(void) { };
+static inline void xfs_qm_cleanup_procfs(void) { };
 
 #endif
 
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
index d2b8be7..8342823 100644
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ b/fs/xfs/quota/xfs_qm_syscalls.c
@@ -279,9 +279,12 @@
 
 	/*
 	 * Write the LI_QUOTAOFF log record, and do SB changes atomically,
-	 * and synchronously.
+	 * and synchronously. If we fail to write, we should abort the
+	 * operation as it cannot be recovered safely if we crash.
 	 */
-	xfs_qm_log_quotaoff(mp, &qoffstart, flags);
+	error = xfs_qm_log_quotaoff(mp, &qoffstart, flags);
+	if (error)
+		goto out_error;
 
 	/*
 	 * Next we clear the XFS_MOUNT_*DQ_ACTIVE bit(s) in the mount struct
@@ -337,7 +340,12 @@
 	 * So, we have QUOTAOFF start and end logitems; the start
 	 * logitem won't get overwritten until the end logitem appears...
 	 */
-	xfs_qm_log_quotaoff_end(mp, qoffstart, flags);
+	error = xfs_qm_log_quotaoff_end(mp, qoffstart, flags);
+	if (error) {
+		/* We're screwed now. Shutdown is the only option. */
+		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+		goto out_error;
+	}
 
 	/*
 	 * If quotas is completely disabled, close shop.
@@ -361,6 +369,7 @@
 		XFS_PURGE_INODE(XFS_QI_GQIP(mp));
 		XFS_QI_GQIP(mp) = NULL;
 	}
+out_error:
 	mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
 
 	return (error);
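
(In outline, the quota-off logging protocol after this change; a distillation of the hunk above, not new code in the patch:)

	error = xfs_qm_log_quotaoff(mp, &qoffstart, flags);	/* start record */
	if (error)
		goto out_error;		/* nothing logged yet: safe to abort */
	/* ...clear the ACTIVE quota flags, flush and purge dquots... */
	error = xfs_qm_log_quotaoff_end(mp, qoffstart, flags);	/* end record */
	if (error)
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);

(The start logitem won't be overwritten in the log until the matching end logitem appears, so failing after the start record leaves an unmatched QUOTAOFF behind; that is why the only safe response at that point is a shutdown.)
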
@@ -371,12 +380,11 @@
 	xfs_mount_t	*mp,
 	uint		flags)
 {
-	int		error;
+	int		error = 0, error2 = 0;
 	xfs_inode_t	*qip;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return XFS_ERROR(EPERM);
-	error = 0;
 	if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) {
 		qdprintk("qtrunc flags=%x m_qflags=%x\n", flags, mp->m_qflags);
 		return XFS_ERROR(EINVAL);
@@ -384,22 +392,22 @@
 
 	if ((flags & XFS_DQ_USER) && mp->m_sb.sb_uquotino != NULLFSINO) {
 		error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 0, 0, &qip, 0);
-		if (! error) {
-			(void) xfs_truncate_file(mp, qip);
-			VN_RELE(XFS_ITOV(qip));
+		if (!error) {
+			error = xfs_truncate_file(mp, qip);
+			IRELE(qip);
 		}
 	}
 
 	if ((flags & (XFS_DQ_GROUP|XFS_DQ_PROJ)) &&
 	    mp->m_sb.sb_gquotino != NULLFSINO) {
-		error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 0, 0, &qip, 0);
-		if (! error) {
-			(void) xfs_truncate_file(mp, qip);
-			VN_RELE(XFS_ITOV(qip));
+		error2 = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 0, 0, &qip, 0);
+		if (!error2) {
+			error2 = xfs_truncate_file(mp, qip);
+			IRELE(qip);
 		}
 	}
 
-	return (error);
+	return error ? error : error2;
 }
 
 
@@ -552,13 +560,13 @@
 		out->qs_uquota.qfs_nblks = uip->i_d.di_nblocks;
 		out->qs_uquota.qfs_nextents = uip->i_d.di_nextents;
 		if (tempuqip)
-			VN_RELE(XFS_ITOV(uip));
+			IRELE(uip);
 	}
 	if (gip) {
 		out->qs_gquota.qfs_nblks = gip->i_d.di_nblocks;
 		out->qs_gquota.qfs_nextents = gip->i_d.di_nextents;
 		if (tempgqip)
-			VN_RELE(XFS_ITOV(gip));
+			IRELE(gip);
 	}
 	if (mp->m_quotainfo) {
 		out->qs_incoredqs = XFS_QI_MPLNDQUOTS(mp);
@@ -726,12 +734,12 @@
 	xfs_trans_log_dquot(tp, dqp);
 
 	xfs_dqtrace_entry(dqp, "Q_SETQLIM: COMMIT");
-	xfs_trans_commit(tp, 0);
+	error = xfs_trans_commit(tp, 0);
 	xfs_qm_dqprint(dqp);
 	xfs_qm_dqrele(dqp);
 	mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
 
-	return (0);
+	return error;
 }
 
 STATIC int
@@ -1095,7 +1103,7 @@
 		 * inactive code in hell.
 		 */
 		if (vnode_refd)
-			VN_RELE(vp);
+			IRELE(ip);
 		XFS_MOUNT_ILOCK(mp);
 		/*
 		 * If an inode was inserted or removed, we gotta
diff --git a/fs/xfs/support/ktrace.c b/fs/xfs/support/ktrace.c
index 129067c..0b75d30 100644
--- a/fs/xfs/support/ktrace.c
+++ b/fs/xfs/support/ktrace.c
@@ -24,7 +24,7 @@
 void __init
 ktrace_init(int zentries)
 {
-	ktrace_zentries = zentries;
+	ktrace_zentries = roundup_pow_of_two(zentries);
 
 	ktrace_hdr_zone = kmem_zone_init(sizeof(ktrace_t),
 					"ktrace_hdr");
@@ -47,13 +47,16 @@
  * ktrace_alloc()
  *
  * Allocate a ktrace header and enough buffering for the given
- * number of entries.
+ * number of entries. Round the number of entries up to a
+ * power of 2 so we can do fast masking to get the index from
+ * the atomic index counter.
  */
 ktrace_t *
 ktrace_alloc(int nentries, unsigned int __nocast sleep)
 {
 	ktrace_t        *ktp;
 	ktrace_entry_t  *ktep;
+	int		entries;
 
 	ktp = (ktrace_t*)kmem_zone_alloc(ktrace_hdr_zone, sleep);
 
@@ -70,11 +73,12 @@
 	/*
 	 * Special treatment for buffers with the ktrace_zentries entries
 	 */
-	if (nentries == ktrace_zentries) {
+	entries = roundup_pow_of_two(nentries);
+	if (entries == ktrace_zentries) {
 		ktep = (ktrace_entry_t*)kmem_zone_zalloc(ktrace_ent_zone,
 							    sleep);
 	} else {
-		ktep = (ktrace_entry_t*)kmem_zalloc((nentries * sizeof(*ktep)),
+		ktep = (ktrace_entry_t*)kmem_zalloc((entries * sizeof(*ktep)),
 							    sleep | KM_LARGE);
 	}
 
@@ -91,8 +95,10 @@
 	}
 
 	ktp->kt_entries  = ktep;
-	ktp->kt_nentries = nentries;
-	ktp->kt_index    = 0;
+	ktp->kt_nentries = entries;
+	ASSERT(is_power_of_2(entries));
+	ktp->kt_index_mask = entries - 1;
+	atomic_set(&ktp->kt_index, 0);
 	ktp->kt_rollover = 0;
 	return ktp;
 }
@@ -151,8 +157,6 @@
 	void            *val14,
 	void            *val15)
 {
-	static DEFINE_SPINLOCK(wrap_lock);
-	unsigned long	flags;
 	int             index;
 	ktrace_entry_t  *ktep;
 
@@ -161,12 +165,8 @@
 	/*
 	 * Grab an entry by pushing the index up to the next one.
 	 */
-	spin_lock_irqsave(&wrap_lock, flags);
-	index = ktp->kt_index;
-	if (++ktp->kt_index == ktp->kt_nentries)
-		ktp->kt_index = 0;
-	spin_unlock_irqrestore(&wrap_lock, flags);
-
+	index = atomic_add_return(1, &ktp->kt_index);
+	index = (index - 1) & ktp->kt_index_mask;
 	if (!ktp->kt_rollover && index == ktp->kt_nentries - 1)
 		ktp->kt_rollover = 1;
 
@@ -199,11 +199,12 @@
 ktrace_nentries(
 	ktrace_t        *ktp)
 {
-	if (ktp == NULL) {
+	int	index;
+	if (ktp == NULL)
 		return 0;
-	}
 
-	return (ktp->kt_rollover ? ktp->kt_nentries : ktp->kt_index);
+	index = atomic_read(&ktp->kt_index) & ktp->kt_index_mask;
+	return (ktp->kt_rollover ? ktp->kt_nentries : index);
 }
 
 /*
@@ -228,7 +229,7 @@
 	int             nentries;
 
 	if (ktp->kt_rollover)
-		index = ktp->kt_index;
+		index = atomic_read(&ktp->kt_index) & ktp->kt_index_mask;
 	else
 		index = 0;
 
diff --git a/fs/xfs/support/ktrace.h b/fs/xfs/support/ktrace.h
index 56e72b4..741d694 100644
--- a/fs/xfs/support/ktrace.h
+++ b/fs/xfs/support/ktrace.h
@@ -30,7 +30,8 @@
  */
 typedef struct ktrace {
 	int		kt_nentries;	/* number of entries in trace buf */
-	int		kt_index;	/* current index in entries */
+	atomic_t	kt_index;	/* current index in entries */
+	unsigned int	kt_index_mask;
 	int		kt_rollover;
 	ktrace_entry_t	*kt_entries;	/* buffer of entries */
 } ktrace_t;
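
(The net effect of the two ktrace changes above is a lock-free ring buffer: a single atomic increment claims a slot, and the power-of-two size turns the wrap into a mask instead of a locked compare-and-reset. A standalone sketch of the idiom, in userspace C11 for illustration; the kernel code uses atomic_add_return():)

	#include <stdatomic.h>

	#define ENTRIES	 64			/* must be a power of two */
	#define IDX_MASK (ENTRIES - 1)

	static _Atomic unsigned int kt_index;

	/* Claim the next slot; many writers may race, no lock is taken.
	 * atomic_fetch_add() returns the pre-increment value, which matches
	 * atomic_add_return(1, ...) - 1 in the kernel code above. */
	static unsigned int ring_claim_slot(void)
	{
		return atomic_fetch_add(&kt_index, 1) & IDX_MASK;
	}
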
diff --git a/fs/xfs/xfs.h b/fs/xfs/xfs.h
index 540e4c9..765aaf6 100644
--- a/fs/xfs/xfs.h
+++ b/fs/xfs/xfs.h
@@ -22,7 +22,7 @@
 #define STATIC
 #define DEBUG 1
 #define XFS_BUF_LOCK_TRACKING 1
-/* #define QUOTADEBUG 1 */
+#define QUOTADEBUG 1
 #endif
 
 #ifdef CONFIG_XFS_TRACE
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index 7272fe3..8e130b9 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -307,12 +307,13 @@
 
 	VN_HOLD(vp);
 	error = xfs_acl_allow_set(vp, kind);
-	if (error)
-		goto out;
 
 	/* Incoming ACL exists, set file mode based on its value */
-	if (kind == _ACL_TYPE_ACCESS)
-		xfs_acl_setmode(vp, xfs_acl, &basicperms);
+	if (!error && kind == _ACL_TYPE_ACCESS)
+		error = xfs_acl_setmode(vp, xfs_acl, &basicperms);
+
+	if (error)
+		goto out;
 
 	/*
 	 * If we have more than std unix permissions, set up the actual attr.
@@ -323,7 +324,7 @@
 	if (!basicperms) {
 		xfs_acl_set_attr(vp, xfs_acl, kind, &error);
 	} else {
-		xfs_acl_vremove(vp, _ACL_TYPE_ACCESS);
+		error = -xfs_acl_vremove(vp, _ACL_TYPE_ACCESS);
 	}
 
 out:
@@ -707,7 +708,9 @@
 
 	memcpy(cacl, pdaclp, sizeof(xfs_acl_t));
 	xfs_acl_filter_mode(mode, cacl);
-	xfs_acl_setmode(vp, cacl, &basicperms);
+	error = xfs_acl_setmode(vp, cacl, &basicperms);
+	if (error)
+		goto out_error;
 
 	/*
 	 * Set the Default and Access ACL on the file.  The mode is already
@@ -720,6 +723,7 @@
 		xfs_acl_set_attr(vp, pdaclp, _ACL_TYPE_DEFAULT, &error);
 	if (!error && !basicperms)
 		xfs_acl_set_attr(vp, cacl, _ACL_TYPE_ACCESS, &error);
+out_error:
 	_ACL_FREE(cacl);
 	return error;
 }
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index bdbfbbe..1956f83 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -45,7 +45,7 @@
 #define	XFSA_FIXUP_BNO_OK	1
 #define	XFSA_FIXUP_CNT_OK	2
 
-STATIC int
+STATIC void
 xfs_alloc_search_busy(xfs_trans_t *tp,
 		    xfs_agnumber_t agno,
 		    xfs_agblock_t bno,
@@ -55,24 +55,24 @@
 ktrace_t *xfs_alloc_trace_buf;
 
 #define	TRACE_ALLOC(s,a)	\
-	xfs_alloc_trace_alloc(__FUNCTION__, s, a, __LINE__)
+	xfs_alloc_trace_alloc(__func__, s, a, __LINE__)
 #define	TRACE_FREE(s,a,b,x,f)	\
-	xfs_alloc_trace_free(__FUNCTION__, s, mp, a, b, x, f, __LINE__)
+	xfs_alloc_trace_free(__func__, s, mp, a, b, x, f, __LINE__)
 #define	TRACE_MODAGF(s,a,f)	\
-	xfs_alloc_trace_modagf(__FUNCTION__, s, mp, a, f, __LINE__)
-#define	TRACE_BUSY(__FUNCTION__,s,ag,agb,l,sl,tp)	\
-	xfs_alloc_trace_busy(__FUNCTION__, s, mp, ag, agb, l, sl, tp, XFS_ALLOC_KTRACE_BUSY, __LINE__)
-#define	TRACE_UNBUSY(__FUNCTION__,s,ag,sl,tp)	\
-	xfs_alloc_trace_busy(__FUNCTION__, s, mp, ag, -1, -1, sl, tp, XFS_ALLOC_KTRACE_UNBUSY, __LINE__)
-#define	TRACE_BUSYSEARCH(__FUNCTION__,s,ag,agb,l,sl,tp)	\
-	xfs_alloc_trace_busy(__FUNCTION__, s, mp, ag, agb, l, sl, tp, XFS_ALLOC_KTRACE_BUSYSEARCH, __LINE__)
+	xfs_alloc_trace_modagf(__func__, s, mp, a, f, __LINE__)
+#define	TRACE_BUSY(__func__,s,ag,agb,l,sl,tp)	\
+	xfs_alloc_trace_busy(__func__, s, mp, ag, agb, l, sl, tp, XFS_ALLOC_KTRACE_BUSY, __LINE__)
+#define	TRACE_UNBUSY(__func__,s,ag,sl,tp)	\
+	xfs_alloc_trace_busy(__func__, s, mp, ag, -1, -1, sl, tp, XFS_ALLOC_KTRACE_UNBUSY, __LINE__)
+#define	TRACE_BUSYSEARCH(__func__,s,ag,agb,l,tp)	\
+	xfs_alloc_trace_busy(__func__, s, mp, ag, agb, l, 0, tp, XFS_ALLOC_KTRACE_BUSYSEARCH, __LINE__)
 #else
 #define	TRACE_ALLOC(s,a)
 #define	TRACE_FREE(s,a,b,x,f)
 #define	TRACE_MODAGF(s,a,f)
 #define	TRACE_BUSY(s,a,ag,agb,l,sl,tp)
 #define	TRACE_UNBUSY(fname,s,ag,sl,tp)
-#define	TRACE_BUSYSEARCH(fname,s,ag,agb,l,sl,tp)
+#define	TRACE_BUSYSEARCH(fname,s,ag,agb,l,tp)
 #endif	/* XFS_ALLOC_TRACE */
 
 /*
@@ -93,7 +93,7 @@
  * Compute aligned version of the found extent.
  * Takes alignment and min length into account.
  */
-STATIC int				/* success (>= minlen) */
+STATIC void
 xfs_alloc_compute_aligned(
 	xfs_agblock_t	foundbno,	/* starting block in found extent */
 	xfs_extlen_t	foundlen,	/* length in found extent */
@@ -116,7 +116,6 @@
 	}
 	*resbno = bno;
 	*reslen = len;
-	return len >= minlen;
 }
 
 /*
@@ -837,9 +836,9 @@
 			if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
 				goto error0;
 			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
-			if (!xfs_alloc_compute_aligned(ltbno, ltlen,
-					args->alignment, args->minlen,
-					&ltbnoa, &ltlena))
+			xfs_alloc_compute_aligned(ltbno, ltlen, args->alignment,
+					args->minlen, &ltbnoa, &ltlena);
+			if (ltlena < args->minlen)
 				continue;
 			args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
 			xfs_alloc_fix_len(args);
@@ -958,9 +957,9 @@
 			if ((error = xfs_alloc_get_rec(bno_cur_lt, &ltbno, &ltlen, &i)))
 				goto error0;
 			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
-			if (xfs_alloc_compute_aligned(ltbno, ltlen,
-					args->alignment, args->minlen,
-					&ltbnoa, &ltlena))
+			xfs_alloc_compute_aligned(ltbno, ltlen, args->alignment,
+					args->minlen, &ltbnoa, &ltlena);
+			if (ltlena >= args->minlen)
 				break;
 			if ((error = xfs_alloc_decrement(bno_cur_lt, 0, &i)))
 				goto error0;
@@ -974,9 +973,9 @@
 			if ((error = xfs_alloc_get_rec(bno_cur_gt, &gtbno, &gtlen, &i)))
 				goto error0;
 			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
-			if (xfs_alloc_compute_aligned(gtbno, gtlen,
-					args->alignment, args->minlen,
-					&gtbnoa, &gtlena))
+			xfs_alloc_compute_aligned(gtbno, gtlen, args->alignment,
+					args->minlen, &gtbnoa, &gtlena);
+			if (gtlena >= args->minlen)
 				break;
 			if ((error = xfs_alloc_increment(bno_cur_gt, 0, &i)))
 				goto error0;
@@ -2562,9 +2561,10 @@
 
 
 /*
- * returns non-zero if any of (agno,bno):len is in a busy list
+ * If we find the extent in the busy list, force the log out to get the
+ * extent out of the busy list so the caller can use it straight away.
  */
-STATIC int
+STATIC void
 xfs_alloc_search_busy(xfs_trans_t *tp,
 		    xfs_agnumber_t agno,
 		    xfs_agblock_t bno,
@@ -2572,7 +2572,6 @@
 {
 	xfs_mount_t		*mp;
 	xfs_perag_busy_t	*bsy;
-	int			n;
 	xfs_agblock_t		uend, bend;
 	xfs_lsn_t		lsn;
 	int			cnt;
@@ -2585,21 +2584,18 @@
 	uend = bno + len - 1;
 
 	/* search pagb_list for this slot, skipping open slots */
-	for (bsy = mp->m_perag[agno].pagb_list, n = 0;
-	     cnt; bsy++, n++) {
+	for (bsy = mp->m_perag[agno].pagb_list; cnt; bsy++) {
 
 		/*
 		 * (start1,length1) within (start2, length2)
 		 */
 		if (bsy->busy_tp != NULL) {
 			bend = bsy->busy_start + bsy->busy_length - 1;
-			if ((bno > bend) ||
-			    (uend < bsy->busy_start)) {
+			if ((bno > bend) || (uend < bsy->busy_start)) {
 				cnt--;
 			} else {
 				TRACE_BUSYSEARCH("xfs_alloc_search_busy",
-						 "found1", agno, bno, len, n,
-						 tp);
+					 "found1", agno, bno, len, tp);
 				break;
 			}
 		}
@@ -2610,15 +2606,12 @@
 	 * transaction that freed the block
 	 */
 	if (cnt) {
-		TRACE_BUSYSEARCH("xfs_alloc_search_busy", "found", agno, bno, len, n, tp);
+		TRACE_BUSYSEARCH("xfs_alloc_search_busy", "found", agno, bno, len, tp);
 		lsn = bsy->busy_tp->t_commit_lsn;
 		spin_unlock(&mp->m_perag[agno].pagb_lock);
 		xfs_log_force(mp, lsn, XFS_LOG_FORCE|XFS_LOG_SYNC);
 	} else {
-		TRACE_BUSYSEARCH("xfs_alloc_search_busy", "not-found", agno, bno, len, n, tp);
-		n = -1;
+		TRACE_BUSYSEARCH("xfs_alloc_search_busy", "not-found", agno, bno, len, tp);
 		spin_unlock(&mp->m_perag[agno].pagb_lock);
 	}
-
-	return n;
 }
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c
index e58f321..36d781e 100644
--- a/fs/xfs/xfs_attr.c
+++ b/fs/xfs/xfs_attr.c
@@ -2647,14 +2647,6 @@
 }
 
 STATIC int
-attr_secure_capable(
-	bhv_vnode_t	*vp,
-	cred_t		*cred)
-{
-	return -ENOSECURITY;
-}
-
-STATIC int
 attr_system_set(
 	bhv_vnode_t *vp, char *name, void *data, size_t size, int xflags)
 {
@@ -2724,7 +2716,7 @@
 	.attr_get	= attr_generic_get,
 	.attr_set	= attr_generic_set,
 	.attr_remove	= attr_generic_remove,
-	.attr_capable	= attr_secure_capable,
+	.attr_capable	= (attrcapable_t)fs_noerr,
 };
 
 struct attrnames attr_user = {
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index 96ba6aa..303d41e 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -166,7 +166,7 @@
 
 	if (!(mp->m_flags & XFS_MOUNT_ATTR2)) {
 		if (bytes <= XFS_IFORK_ASIZE(dp))
-			return mp->m_attroffset >> 3;
+			return dp->i_d.di_forkoff;
 		return 0;
 	}
 
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 2def273..eb198c0 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -323,13 +323,13 @@
 	int		whichfork);	/* data or attr fork */
 
 #define	XFS_BMAP_TRACE_DELETE(d,ip,i,c,w)	\
-	xfs_bmap_trace_delete(__FUNCTION__,d,ip,i,c,w)
+	xfs_bmap_trace_delete(__func__,d,ip,i,c,w)
 #define	XFS_BMAP_TRACE_INSERT(d,ip,i,c,r1,r2,w)	\
-	xfs_bmap_trace_insert(__FUNCTION__,d,ip,i,c,r1,r2,w)
+	xfs_bmap_trace_insert(__func__,d,ip,i,c,r1,r2,w)
 #define	XFS_BMAP_TRACE_POST_UPDATE(d,ip,i,w)	\
-	xfs_bmap_trace_post_update(__FUNCTION__,d,ip,i,w)
+	xfs_bmap_trace_post_update(__func__,d,ip,i,w)
 #define	XFS_BMAP_TRACE_PRE_UPDATE(d,ip,i,w)	\
-	xfs_bmap_trace_pre_update(__FUNCTION__,d,ip,i,w)
+	xfs_bmap_trace_pre_update(__func__,d,ip,i,w)
 #else
 #define	XFS_BMAP_TRACE_DELETE(d,ip,i,c,w)
 #define	XFS_BMAP_TRACE_INSERT(d,ip,i,c,r1,r2,w)
@@ -2402,7 +2402,7 @@
 
 #define XFS_ALLOC_GAP_UNITS	4
 
-STATIC int
+STATIC void
 xfs_bmap_adjacent(
 	xfs_bmalloca_t	*ap)		/* bmap alloc argument struct */
 {
@@ -2548,7 +2548,6 @@
 			ap->rval = gotbno;
 	}
 #undef ISVALID
-	return 0;
 }
 
 STATIC int
@@ -4154,16 +4153,21 @@
 	 * number of leaf entries, is controlled by the type of di_nextents
 	 * (a signed 32-bit number, xfs_extnum_t), or by di_anextents
 	 * (a signed 16-bit number, xfs_aextnum_t).
+	 *
+	 * Note that we can no longer assume that, if we are in ATTR1, the
+	 * fork offset of all the inodes will be (m_attroffset >> 3), because
+	 * we could have mounted with ATTR2 and then mounted back with ATTR1,
+	 * keeping the di_forkoff values fixed but probably at various
+	 * positions. Therefore, for both ATTR1 and ATTR2 we have to assume
+	 * the worst-case scenario of only the minimum size being available.
 	 */
 	if (whichfork == XFS_DATA_FORK) {
 		maxleafents = MAXEXTNUM;
-		sz = (mp->m_flags & XFS_MOUNT_ATTR2) ?
-			XFS_BMDR_SPACE_CALC(MINDBTPTRS) : mp->m_attroffset;
+		sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
 	} else {
 		maxleafents = MAXAEXTNUM;
-		sz = (mp->m_flags & XFS_MOUNT_ATTR2) ?
-			XFS_BMDR_SPACE_CALC(MINABTPTRS) :
-			mp->m_sb.sb_inodesize - mp->m_attroffset;
+		sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
 	}
 	maxrootrecs = (int)XFS_BTREE_BLOCK_MAXRECS(sz, xfs_bmdr, 0);
 	minleafrecs = mp->m_bmap_dmnr[0];
@@ -5772,7 +5776,6 @@
 	int			error;		/* return value */
 	__int64_t		fixlen;		/* length for -1 case */
 	int			i;		/* extent number */
-	bhv_vnode_t		*vp;		/* corresponding vnode */
 	int			lock;		/* lock state */
 	xfs_bmbt_irec_t		*map;		/* buffer for user's data */
 	xfs_mount_t		*mp;		/* file system mount point */
@@ -5789,7 +5792,6 @@
 	int			bmapi_flags;	/* flags for xfs_bmapi */
 	__int32_t		oflags;		/* getbmapx bmv_oflags field */
 
-	vp = XFS_ITOV(ip);
 	mp = ip->i_mount;
 
 	whichfork = interface & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK;
@@ -5811,7 +5813,7 @@
 	if ((interface & BMV_IF_NO_DMAPI_READ) == 0 &&
 	    DM_EVENT_ENABLED(ip, DM_EVENT_READ) &&
 	    whichfork == XFS_DATA_FORK) {
-		error = XFS_SEND_DATA(mp, DM_EVENT_READ, vp, 0, 0, 0, NULL);
+		error = XFS_SEND_DATA(mp, DM_EVENT_READ, ip, 0, 0, 0, NULL);
 		if (error)
 			return XFS_ERROR(error);
 	}
@@ -5869,6 +5871,10 @@
 		/* xfs_fsize_t last_byte = xfs_file_last_byte(ip); */
 		error = xfs_flush_pages(ip, (xfs_off_t)0,
 					       -1, 0, FI_REMAPF);
+		if (error) {
+			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
+			return error;
+		}
 	}
 
 	ASSERT(whichfork == XFS_ATTR_FORK || ip->i_delayed_blks == 0);
@@ -6162,10 +6168,10 @@
 			}
 			if (*thispa == *pp) {
 				cmn_err(CE_WARN, "%s: thispa(%d) == pp(%d) %Ld",
-					__FUNCTION__, j, i,
+					__func__, j, i,
 					(unsigned long long)be64_to_cpu(*thispa));
 				panic("%s: ptrs are equal in node\n",
-					__FUNCTION__);
+					__func__);
 			}
 		}
 	}
@@ -6192,7 +6198,7 @@
 	xfs_mount_t		*mp;	/* file system mount structure */
 	__be64			*pp;	/* pointer to block address */
 	xfs_bmbt_rec_t		*ep;	/* pointer to current extent */
-	xfs_bmbt_rec_t		*lastp; /* pointer to previous extent */
+	xfs_bmbt_rec_t		last = {0, 0}; /* last extent in prev block */
 	xfs_bmbt_rec_t		*nextp;	/* pointer to next extent */
 	int			bp_release = 0;
 
@@ -6262,7 +6268,6 @@
 	/*
 	 * Loop over all leaf nodes checking that all extents are in the right order.
 	 */
-	lastp = NULL;
 	for (;;) {
 		xfs_fsblock_t	nextbno;
 		xfs_extnum_t	num_recs;
@@ -6283,18 +6288,16 @@
 		 */
 
 		ep = XFS_BTREE_REC_ADDR(xfs_bmbt, block, 1);
+		if (i) {
+			xfs_btree_check_rec(XFS_BTNUM_BMAP, &last, ep);
+		}
 		for (j = 1; j < num_recs; j++) {
 			nextp = XFS_BTREE_REC_ADDR(xfs_bmbt, block, j + 1);
-			if (lastp) {
-				xfs_btree_check_rec(XFS_BTNUM_BMAP,
-					(void *)lastp, (void *)ep);
-			}
-			xfs_btree_check_rec(XFS_BTNUM_BMAP, (void *)ep,
-				(void *)(nextp));
-			lastp = ep;
+			xfs_btree_check_rec(XFS_BTNUM_BMAP, ep, nextp);
 			ep = nextp;
 		}
 
+		last = *ep;
 		i += num_recs;
 		if (bp_release) {
 			bp_release = 0;
@@ -6325,13 +6328,13 @@
 	return;
 
 error0:
-	cmn_err(CE_WARN, "%s: at error0", __FUNCTION__);
+	cmn_err(CE_WARN, "%s: at error0", __func__);
 	if (bp_release)
 		xfs_trans_brelse(NULL, bp);
 error_norelse:
 	cmn_err(CE_WARN, "%s: BAD after btree leaves for %d extents",
-		__FUNCTION__, i);
-	panic("%s: CORRUPTED BTREE OR SOMETHING", __FUNCTION__);
+		__func__, i);
+	panic("%s: CORRUPTED BTREE OR SOMETHING", __func__);
 	return;
 }
 #endif
diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h
index 87224b7..6ff70cd 100644
--- a/fs/xfs/xfs_bmap.h
+++ b/fs/xfs/xfs_bmap.h
@@ -151,7 +151,7 @@
 	xfs_extnum_t		cnt,		/* count of entries in list */
 	int			whichfork);	/* data or attr fork */
 #define	XFS_BMAP_TRACE_EXLIST(ip,c,w)	\
-	xfs_bmap_trace_exlist(__FUNCTION__,ip,c,w)
+	xfs_bmap_trace_exlist(__func__,ip,c,w)
 #else
 #define	XFS_BMAP_TRACE_EXLIST(ip,c,w)
 #endif
diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c
index bd18987..4f0e849 100644
--- a/fs/xfs/xfs_bmap_btree.c
+++ b/fs/xfs/xfs_bmap_btree.c
@@ -275,21 +275,21 @@
 }
 
 #define	XFS_BMBT_TRACE_ARGBI(c,b,i)	\
-	xfs_bmbt_trace_argbi(__FUNCTION__, c, b, i, __LINE__)
+	xfs_bmbt_trace_argbi(__func__, c, b, i, __LINE__)
 #define	XFS_BMBT_TRACE_ARGBII(c,b,i,j)	\
-	xfs_bmbt_trace_argbii(__FUNCTION__, c, b, i, j, __LINE__)
+	xfs_bmbt_trace_argbii(__func__, c, b, i, j, __LINE__)
 #define	XFS_BMBT_TRACE_ARGFFFI(c,o,b,i,j)	\
-	xfs_bmbt_trace_argfffi(__FUNCTION__, c, o, b, i, j, __LINE__)
+	xfs_bmbt_trace_argfffi(__func__, c, o, b, i, j, __LINE__)
 #define	XFS_BMBT_TRACE_ARGI(c,i)	\
-	xfs_bmbt_trace_argi(__FUNCTION__, c, i, __LINE__)
+	xfs_bmbt_trace_argi(__func__, c, i, __LINE__)
 #define	XFS_BMBT_TRACE_ARGIFK(c,i,f,s)	\
-	xfs_bmbt_trace_argifk(__FUNCTION__, c, i, f, s, __LINE__)
+	xfs_bmbt_trace_argifk(__func__, c, i, f, s, __LINE__)
 #define	XFS_BMBT_TRACE_ARGIFR(c,i,f,r)	\
-	xfs_bmbt_trace_argifr(__FUNCTION__, c, i, f, r, __LINE__)
+	xfs_bmbt_trace_argifr(__func__, c, i, f, r, __LINE__)
 #define	XFS_BMBT_TRACE_ARGIK(c,i,k)	\
-	xfs_bmbt_trace_argik(__FUNCTION__, c, i, k, __LINE__)
+	xfs_bmbt_trace_argik(__func__, c, i, k, __LINE__)
 #define	XFS_BMBT_TRACE_CURSOR(c,s)	\
-	xfs_bmbt_trace_cursor(__FUNCTION__, c, s, __LINE__)
+	xfs_bmbt_trace_cursor(__func__, c, s, __LINE__)
 #else
 #define	XFS_BMBT_TRACE_ARGBI(c,b,i)
 #define	XFS_BMBT_TRACE_ARGBII(c,b,i,j)
@@ -2027,6 +2027,24 @@
 
 /*
  * Insert the current record at the point referenced by cur.
+ *
+ * A multi-level split of the tree on insert will invalidate the original
+ * cursor. It appears, however, that some callers assume that the cursor is
+ * always valid. Hence if we do a multi-level split we need to revalidate the
+ * cursor.
+ *
+ * When a split occurs, we will see a new cursor returned. Use that as a
+ * trigger to determine if we need to revalidate the original cursor. If we
+ * get a split, use the original irec to look up the path of the record we
+ * just inserted.
+ *
+ * Note that because the btree root is held in the inode, the level of the
+ * tree can change without a "split" occurring at the root level. What
+ * happens is that the root is migrated to an allocated block and the inode
+ * root is made to point at it. This means a single split can change the
+ * level of the tree (level 2 -> level 3) and invalidate the old cursor.
+ * Hence the level change should be accounted as a split so as to correctly
+ * trigger a revalidation of the old cursor.
  */
 int					/* error */
 xfs_bmbt_insert(
@@ -2039,11 +2057,14 @@
 	xfs_fsblock_t	nbno;
 	xfs_btree_cur_t	*ncur;
 	xfs_bmbt_rec_t	nrec;
+	xfs_bmbt_irec_t	oirec;		/* original irec */
 	xfs_btree_cur_t	*pcur;
+	int		splits = 0;
 
 	XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
 	level = 0;
 	nbno = NULLFSBLOCK;
+	oirec = cur->bc_rec.b;
 	xfs_bmbt_disk_set_all(&nrec, &cur->bc_rec.b);
 	ncur = NULL;
 	pcur = cur;
@@ -2052,11 +2073,13 @@
 				&i))) {
 			if (pcur != cur)
 				xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR);
-			XFS_BMBT_TRACE_CURSOR(cur, ERROR);
-			return error;
+			goto error0;
 		}
 		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
 		if (pcur != cur && (ncur || nbno == NULLFSBLOCK)) {
+			/* allocating a new root is effectively a split */
+			if (cur->bc_nlevels != pcur->bc_nlevels)
+				splits++;
 			cur->bc_nlevels = pcur->bc_nlevels;
 			cur->bc_private.b.allocated +=
 				pcur->bc_private.b.allocated;
@@ -2070,10 +2093,21 @@
 			xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR);
 		}
 		if (ncur) {
+			splits++;
 			pcur = ncur;
 			ncur = NULL;
 		}
 	} while (nbno != NULLFSBLOCK);
+
+	if (splits > 1) {
+		/* revalidate the old cursor as we had a multi-level split */
+		error = xfs_bmbt_lookup_eq(cur, oirec.br_startoff,
+				oirec.br_startblock, oirec.br_blockcount, &i);
+		if (error)
+			goto error0;
+		ASSERT(i == 1);
+	}
+
 	XFS_BMBT_TRACE_CURSOR(cur, EXIT);
 	*stat = i;
 	return 0;
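
(To restate the cursor-revalidation rule in compact form; this is pseudo-code distilling the logic above, not additional code in the patch:)

	splits = 0;
	while (the insert requires a new block at some level)
		splits++;		/* a root migration also counts */
	if (splits > 1)
		xfs_bmbt_lookup_eq(cur, orig.br_startoff, orig.br_startblock,
				   orig.br_blockcount, &stat);

(A single split leaves the original cursor's path intact; two or more mean an interior block the cursor references may have been reallocated, so the path is rebuilt by looking up the record that was just inserted.)
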
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 63debd1..53a71c6 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -645,7 +645,12 @@
 	bp = bip->bli_buf;
 
 	if (XFS_BUF_ISDELAYWRITE(bp)) {
-		xfs_bawrite(bip->bli_item.li_mountp, bp);
+		int	error;
+		error = xfs_bawrite(bip->bli_item.li_mountp, bp);
+		if (error)
+			xfs_fs_cmn_err(CE_WARN, bip->bli_item.li_mountp,
+			"xfs_buf_item_push: pushbuf error %d on bip %p, bp %p",
+					error, bip, bp);
 	} else {
 		xfs_buf_relse(bp);
 	}
diff --git a/fs/xfs/xfs_dir2.c b/fs/xfs/xfs_dir2.c
index e92e73f..7cb2652 100644
--- a/fs/xfs/xfs_dir2.c
+++ b/fs/xfs/xfs_dir2.c
@@ -44,6 +44,7 @@
 #include "xfs_error.h"
 #include "xfs_vnodeops.h"
 
+struct xfs_name xfs_name_dotdot = {"..", 2};
 
 void
 xfs_dir_mount(
@@ -146,8 +147,7 @@
 xfs_dir_createname(
 	xfs_trans_t		*tp,
 	xfs_inode_t		*dp,
-	char			*name,
-	int			namelen,
+	struct xfs_name		*name,
 	xfs_ino_t		inum,		/* new entry inode number */
 	xfs_fsblock_t		*first,		/* bmap's firstblock */
 	xfs_bmap_free_t		*flist,		/* bmap's freeblock list */
@@ -162,9 +162,9 @@
 		return rval;
 	XFS_STATS_INC(xs_dir_create);
 
-	args.name = name;
-	args.namelen = namelen;
-	args.hashval = xfs_da_hashname(name, namelen);
+	args.name = name->name;
+	args.namelen = name->len;
+	args.hashval = xfs_da_hashname(name->name, name->len);
 	args.inumber = inum;
 	args.dp = dp;
 	args.firstblock = first;
@@ -197,8 +197,7 @@
 xfs_dir_lookup(
 	xfs_trans_t	*tp,
 	xfs_inode_t	*dp,
-	char		*name,
-	int		namelen,
+	struct xfs_name	*name,
 	xfs_ino_t	*inum)		/* out: inode number */
 {
 	xfs_da_args_t	args;
@@ -207,18 +206,14 @@
 
 	ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR);
 	XFS_STATS_INC(xs_dir_lookup);
+	memset(&args, 0, sizeof(xfs_da_args_t));
 
-	args.name = name;
-	args.namelen = namelen;
-	args.hashval = xfs_da_hashname(name, namelen);
-	args.inumber = 0;
+	args.name = name->name;
+	args.namelen = name->len;
+	args.hashval = xfs_da_hashname(name->name, name->len);
 	args.dp = dp;
-	args.firstblock = NULL;
-	args.flist = NULL;
-	args.total = 0;
 	args.whichfork = XFS_DATA_FORK;
 	args.trans = tp;
-	args.justcheck = args.addname = 0;
 	args.oknoent = 1;
 
 	if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL)
@@ -247,8 +242,7 @@
 xfs_dir_removename(
 	xfs_trans_t	*tp,
 	xfs_inode_t	*dp,
-	char		*name,
-	int		namelen,
+	struct xfs_name	*name,
 	xfs_ino_t	ino,
 	xfs_fsblock_t	*first,		/* bmap's firstblock */
 	xfs_bmap_free_t	*flist,		/* bmap's freeblock list */
@@ -261,9 +255,9 @@
 	ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR);
 	XFS_STATS_INC(xs_dir_remove);
 
-	args.name = name;
-	args.namelen = namelen;
-	args.hashval = xfs_da_hashname(name, namelen);
+	args.name = name->name;
+	args.namelen = name->len;
+	args.hashval = xfs_da_hashname(name->name, name->len);
 	args.inumber = ino;
 	args.dp = dp;
 	args.firstblock = first;
@@ -329,8 +323,7 @@
 xfs_dir_replace(
 	xfs_trans_t	*tp,
 	xfs_inode_t	*dp,
-	char		*name,		/* name of entry to replace */
-	int		namelen,
+	struct xfs_name	*name,		/* name of entry to replace */
 	xfs_ino_t	inum,		/* new inode number */
 	xfs_fsblock_t	*first,		/* bmap's firstblock */
 	xfs_bmap_free_t	*flist,		/* bmap's freeblock list */
@@ -345,9 +338,9 @@
 	if ((rval = xfs_dir_ino_validate(tp->t_mountp, inum)))
 		return rval;
 
-	args.name = name;
-	args.namelen = namelen;
-	args.hashval = xfs_da_hashname(name, namelen);
+	args.name = name->name;
+	args.namelen = name->len;
+	args.hashval = xfs_da_hashname(name->name, name->len);
 	args.inumber = inum;
 	args.dp = dp;
 	args.firstblock = first;
@@ -374,28 +367,29 @@
 
 /*
  * See if this entry can be added to the directory without allocating space.
+ * The check is only made when the caller could not reserve space (resblks = 0).
  */
 int
 xfs_dir_canenter(
 	xfs_trans_t	*tp,
 	xfs_inode_t	*dp,
-	char		*name,		/* name of entry to add */
-	int		namelen)
+	struct xfs_name	*name,		/* name of entry to add */
+	uint		resblks)
 {
 	xfs_da_args_t	args;
 	int		rval;
 	int		v;		/* type-checking value */
 
-	ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR);
+	if (resblks)
+		return 0;
 
-	args.name = name;
-	args.namelen = namelen;
-	args.hashval = xfs_da_hashname(name, namelen);
-	args.inumber = 0;
+	ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR);
+	memset(&args, 0, sizeof(xfs_da_args_t));
+
+	args.name = name->name;
+	args.namelen = name->len;
+	args.hashval = xfs_da_hashname(name->name, name->len);
 	args.dp = dp;
-	args.firstblock = NULL;
-	args.flist = NULL;
-	args.total = 0;
 	args.whichfork = XFS_DATA_FORK;
 	args.trans = tp;
 	args.justcheck = args.addname = args.oknoent = 1;
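
Throughout xfs_dir2.c the (name, namelen) parameter pairs collapse into a single struct xfs_name, and xfs_dir_lookup()/xfs_dir_canenter() now memset() their args up front so the removed field-by-field zeroing is implied. A user-space sketch of both ideas, with hypothetical names only:

#include <stdio.h>
#include <string.h>

struct name_ref {
	const char	*name;
	int		len;
};

struct op_args {
	const char	*name;
	int		namelen;
	unsigned int	hashval;
	int		oknoent;
	/* ...many more fields, all zero unless explicitly set... */
};

static unsigned int hashname(const char *name, int len)
{
	unsigned int h = 0;

	while (len--)
		h = h * 31 + (unsigned char)*name++;
	return h;
}

static void setup_lookup(struct op_args *args, const struct name_ref *name)
{
	memset(args, 0, sizeof(*args));	/* one memset replaces many "= 0" */
	args->name = name->name;
	args->namelen = name->len;
	args->hashval = hashname(name->name, name->len);
	args->oknoent = 1;
}

int main(void)
{
	struct name_ref dotdot = { "..", 2 };
	struct op_args args;

	setup_lookup(&args, &dotdot);
	printf("hash(%.*s) = %u\n", args.namelen, args.name, args.hashval);
	return 0;
}
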
diff --git a/fs/xfs/xfs_dir2.h b/fs/xfs/xfs_dir2.h
index b265197..6392f93 100644
--- a/fs/xfs/xfs_dir2.h
+++ b/fs/xfs/xfs_dir2.h
@@ -59,6 +59,8 @@
  */
 typedef	xfs_off_t	xfs_dir2_off_t;
 
+extern struct xfs_name	xfs_name_dotdot;
+
 /*
  * Generic directory interface routines
  */
@@ -68,21 +70,21 @@
 extern int xfs_dir_init(struct xfs_trans *tp, struct xfs_inode *dp,
 				struct xfs_inode *pdp);
 extern int xfs_dir_createname(struct xfs_trans *tp, struct xfs_inode *dp,
-				char *name, int namelen, xfs_ino_t inum,
+				struct xfs_name *name, xfs_ino_t inum,
 				xfs_fsblock_t *first,
 				struct xfs_bmap_free *flist, xfs_extlen_t tot);
 extern int xfs_dir_lookup(struct xfs_trans *tp, struct xfs_inode *dp,
-				char *name, int namelen, xfs_ino_t *inum);
+				struct xfs_name *name, xfs_ino_t *inum);
 extern int xfs_dir_removename(struct xfs_trans *tp, struct xfs_inode *dp,
-				char *name, int namelen, xfs_ino_t ino,
+				struct xfs_name *name, xfs_ino_t ino,
 				xfs_fsblock_t *first,
 				struct xfs_bmap_free *flist, xfs_extlen_t tot);
 extern int xfs_dir_replace(struct xfs_trans *tp, struct xfs_inode *dp,
-				char *name, int namelen, xfs_ino_t inum,
+				struct xfs_name *name, xfs_ino_t inum,
 				xfs_fsblock_t *first,
 				struct xfs_bmap_free *flist, xfs_extlen_t tot);
 extern int xfs_dir_canenter(struct xfs_trans *tp, struct xfs_inode *dp,
-				char *name, int namelen);
+				struct xfs_name *name, uint resblks);
 extern int xfs_dir_ino_validate(struct xfs_mount *mp, xfs_ino_t ino);
 
 /*
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index eb03eab..3f3785b 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -73,7 +73,7 @@
 #define TRACE4(mp,t,a0,a1,a2,a3)	TRACE6(mp,t,a0,a1,a2,a3,0,0)
 #define TRACE5(mp,t,a0,a1,a2,a3,a4)	TRACE6(mp,t,a0,a1,a2,a3,a4,0)
 #define TRACE6(mp,t,a0,a1,a2,a3,a4,a5) \
-	xfs_filestreams_trace(mp, t, __FUNCTION__, __LINE__, \
+	xfs_filestreams_trace(mp, t, __func__, __LINE__, \
 				(__psunsigned_t)a0, (__psunsigned_t)a1, \
 				(__psunsigned_t)a2, (__psunsigned_t)a3, \
 				(__psunsigned_t)a4, (__psunsigned_t)a5)
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
index 5a146cb..a64dfbd 100644
--- a/fs/xfs/xfs_ialloc.c
+++ b/fs/xfs/xfs_ialloc.c
@@ -107,6 +107,16 @@
 /*
  * Allocation group level functions.
  */
+static inline int
+xfs_ialloc_cluster_alignment(
+	xfs_alloc_arg_t	*args)
+{
+	if (xfs_sb_version_hasalign(&args->mp->m_sb) &&
+	    args->mp->m_sb.sb_inoalignmt >=
+	     XFS_B_TO_FSBT(args->mp, XFS_INODE_CLUSTER_SIZE(args->mp)))
+		return args->mp->m_sb.sb_inoalignmt;
+	return 1;
+}
 
 /*
  * Allocate new inodes in the allocation group specified by agbp.
@@ -167,10 +177,24 @@
 		args.mod = args.total = args.wasdel = args.isfl =
 			args.userdata = args.minalignslop = 0;
 		args.prod = 1;
-		args.alignment = 1;
+
 		/*
-		 * Allow space for the inode btree to split.
+		 * We need to take into account alignment here to ensure that
+		 * we don't modify the free list if we fail to have an exact
+		 * block. If we don't have an exact match, and every other
+		 * allocation attempt fails, we'll end up cancelling
+		 * a dirty transaction and shutting down.
+		 *
+		 * For an exact allocation, alignment must be 1,
+		 * however we need to take cluster alignment into account when
+		 * fixing up the freelist. Use the minalignslop field to
+		 * indicate that extra blocks might be required for alignment,
+		 * but not to use them in the actual exact allocation.
 		 */
+		args.alignment = 1;
+		args.minalignslop = xfs_ialloc_cluster_alignment(&args) - 1;
+
+		/* Allow space for the inode btree to split. */
 		args.minleft = XFS_IN_MAXLEVELS(args.mp) - 1;
 		if ((error = xfs_alloc_vextent(&args)))
 			return error;
@@ -191,13 +215,8 @@
 			ASSERT(!(args.mp->m_flags & XFS_MOUNT_NOALIGN));
 			args.alignment = args.mp->m_dalign;
 			isaligned = 1;
-		} else if (xfs_sb_version_hasalign(&args.mp->m_sb) &&
-			   args.mp->m_sb.sb_inoalignmt >=
-			   XFS_B_TO_FSBT(args.mp,
-			  	XFS_INODE_CLUSTER_SIZE(args.mp)))
-				args.alignment = args.mp->m_sb.sb_inoalignmt;
-		else
-			args.alignment = 1;
+		} else
+			args.alignment = xfs_ialloc_cluster_alignment(&args);
 		/*
 		 * Need to figure out where to allocate the inode blocks.
 		 * Ideally they should be spaced out through the a.g.
@@ -230,12 +249,7 @@
 		args.agbno = be32_to_cpu(agi->agi_root);
 		args.fsbno = XFS_AGB_TO_FSB(args.mp,
 				be32_to_cpu(agi->agi_seqno), args.agbno);
-		if (xfs_sb_version_hasalign(&args.mp->m_sb) &&
-			args.mp->m_sb.sb_inoalignmt >=
-			XFS_B_TO_FSBT(args.mp, XFS_INODE_CLUSTER_SIZE(args.mp)))
-				args.alignment = args.mp->m_sb.sb_inoalignmt;
-		else
-			args.alignment = 1;
+		args.alignment = xfs_ialloc_cluster_alignment(&args);
 		if ((error = xfs_alloc_vextent(&args)))
 			return error;
 	}
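
The new xfs_ialloc_cluster_alignment() helper centralises the alignment computation that was repeated three times, and the exact-allocation path now carries the cluster slop in minalignslop rather than in the alignment itself. The helper's arithmetic in a standalone sketch with made-up geometry (not real superblock values):

#include <stdio.h>

/* mirrors the shape of xfs_ialloc_cluster_alignment(): use the inode
 * alignment only when the feature is present and it covers a cluster */
static int cluster_alignment(int has_align_feature, int sb_inoalignmt,
			     int cluster_blocks)
{
	if (has_align_feature && sb_inoalignmt >= cluster_blocks)
		return sb_inoalignmt;
	return 1;
}

int main(void)
{
	int align = cluster_alignment(1, 4, 2);	/* hypothetical geometry */

	/* exact allocation: alignment 1, but reserve slop so a later
	 * aligned retry cannot be starved by the freelist */
	printf("alignment = %d, minalignslop = %d\n", align, align - 1);
	return 0;
}
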
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index 8e09b71..e657c51 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -78,7 +78,6 @@
 	xfs_inode_t	*ip;
 	xfs_inode_t	*iq;
 	int		error;
-	xfs_icluster_t	*icl, *new_icl = NULL;
 	unsigned long	first_index, mask;
 	xfs_perag_t	*pag;
 	xfs_agino_t	agino;
@@ -229,11 +228,9 @@
 	}
 
 	/*
-	 * This is a bit messy - we preallocate everything we _might_
-	 * need before we pick up the ici lock. That way we don't have to
-	 * juggle locks and go all the way back to the start.
+	 * Preload the radix tree so we can insert safely under the
+	 * write spinlock.
 	 */
-	new_icl = kmem_zone_alloc(xfs_icluster_zone, KM_SLEEP);
 	if (radix_tree_preload(GFP_KERNEL)) {
 		xfs_idestroy(ip);
 		delay(1);
@@ -242,17 +239,6 @@
 	mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
 	first_index = agino & mask;
 	write_lock(&pag->pag_ici_lock);
-
-	/*
-	 * Find the cluster if it exists
-	 */
-	icl = NULL;
-	if (radix_tree_gang_lookup(&pag->pag_ici_root, (void**)&iq,
-							first_index, 1)) {
-		if ((XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) == first_index)
-			icl = iq->i_cluster;
-	}
-
 	/*
 	 * insert the new inode
 	 */
@@ -267,30 +253,13 @@
 	}
 
 	/*
-	 * These values _must_ be set before releasing ihlock!
+	 * These values _must_ be set before releasing the radix tree lock!
 	 */
 	ip->i_udquot = ip->i_gdquot = NULL;
 	xfs_iflags_set(ip, XFS_INEW);
 
-	ASSERT(ip->i_cluster == NULL);
-
-	if (!icl) {
-		spin_lock_init(&new_icl->icl_lock);
-		INIT_HLIST_HEAD(&new_icl->icl_inodes);
-		icl = new_icl;
-		new_icl = NULL;
-	} else {
-		ASSERT(!hlist_empty(&icl->icl_inodes));
-	}
-	spin_lock(&icl->icl_lock);
-	hlist_add_head(&ip->i_cnode, &icl->icl_inodes);
-	ip->i_cluster = icl;
-	spin_unlock(&icl->icl_lock);
-
 	write_unlock(&pag->pag_ici_lock);
 	radix_tree_preload_end();
-	if (new_icl)
-		kmem_zone_free(xfs_icluster_zone, new_icl);
 
 	/*
 	 * Link ip to its mount and thread it on the mount's inode list.
@@ -529,18 +498,6 @@
 	xfs_put_perag(mp, pag);
 
 	/*
-	 * Remove from cluster list
-	 */
-	mp = ip->i_mount;
-	spin_lock(&ip->i_cluster->icl_lock);
-	hlist_del(&ip->i_cnode);
-	spin_unlock(&ip->i_cluster->icl_lock);
-
-	/* was last inode in cluster? */
-	if (hlist_empty(&ip->i_cluster->icl_inodes))
-		kmem_zone_free(xfs_icluster_zone, ip->i_cluster);
-
-	/*
 	 * Remove from mount's inode list.
 	 */
 	XFS_MOUNT_ILOCK(mp);
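
With the inode-cluster structure gone, xfs_iget_core() leans entirely on radix_tree_preload() so the insert under pag_ici_lock cannot fail. The general shape, modelled in user space (all fallible work before the lock; the critical section only links), with invented names:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { long key; struct node *next; };

static struct node *head;
static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;

static int insert_key(long key)
{
	struct node *n = malloc(sizeof(*n));	/* "preload" outside the lock */

	if (!n)
		return -1;			/* fail before, never inside */
	n->key = key;

	pthread_mutex_lock(&tree_lock);		/* nothing below can fail */
	n->next = head;
	head = n;
	pthread_mutex_unlock(&tree_lock);
	return 0;
}

int main(void)
{
	if (insert_key(42))
		return 1;
	printf("inserted %ld\n", head->key);
	return 0;
}
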
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index f43a6e0..ca12acb 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -55,7 +55,6 @@
 
 kmem_zone_t *xfs_ifork_zone;
 kmem_zone_t *xfs_inode_zone;
-kmem_zone_t *xfs_icluster_zone;
 
 /*
  * Used in xfs_itruncate().  This is the maximum number of extents
@@ -126,6 +125,90 @@
 #endif
 
 /*
+ * Find the buffer associated with the given inode map
+ * We do basic validation checks on the buffer once it has been
+ * retrieved from disk.
+ */
+STATIC int
+xfs_imap_to_bp(
+	xfs_mount_t	*mp,
+	xfs_trans_t	*tp,
+	xfs_imap_t	*imap,
+	xfs_buf_t	**bpp,
+	uint		buf_flags,
+	uint		imap_flags)
+{
+	int		error;
+	int		i;
+	int		ni;
+	xfs_buf_t	*bp;
+
+	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
+				   (int)imap->im_len, buf_flags, &bp);
+	if (error) {
+		if (error != EAGAIN) {
+			cmn_err(CE_WARN,
+				"xfs_imap_to_bp: xfs_trans_read_buf()returned "
+				"an error %d on %s.  Returning error.",
+				error, mp->m_fsname);
+		} else {
+			ASSERT(buf_flags & XFS_BUF_TRYLOCK);
+		}
+		return error;
+	}
+
+	/*
+	 * Validate the magic number and version of every inode in the buffer
+	 * (if DEBUG kernel), or just the first inode in the buffer otherwise.
+	 */
+#ifdef DEBUG
+	ni = BBTOB(imap->im_len) >> mp->m_sb.sb_inodelog;
+#else	/* usual case */
+	ni = 1;
+#endif
+
+	for (i = 0; i < ni; i++) {
+		int		di_ok;
+		xfs_dinode_t	*dip;
+
+		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
+					(i << mp->m_sb.sb_inodelog));
+		di_ok = be16_to_cpu(dip->di_core.di_magic) == XFS_DINODE_MAGIC &&
+			    XFS_DINODE_GOOD_VERSION(dip->di_core.di_version);
+		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
+						XFS_ERRTAG_ITOBP_INOTOBP,
+						XFS_RANDOM_ITOBP_INOTOBP))) {
+			if (imap_flags & XFS_IMAP_BULKSTAT) {
+				xfs_trans_brelse(tp, bp);
+				return XFS_ERROR(EINVAL);
+			}
+			XFS_CORRUPTION_ERROR("xfs_imap_to_bp",
+						XFS_ERRLEVEL_HIGH, mp, dip);
+#ifdef DEBUG
+			cmn_err(CE_PANIC,
+					"Device %s - bad inode magic/vsn "
+					"daddr %lld #%d (magic=%x)",
+				XFS_BUFTARG_NAME(mp->m_ddev_targp),
+				(unsigned long long)imap->im_blkno, i,
+				be16_to_cpu(dip->di_core.di_magic));
+#endif
+			xfs_trans_brelse(tp, bp);
+			return XFS_ERROR(EFSCORRUPTED);
+		}
+	}
+
+	xfs_inobp_check(mp, bp);
+
+	/*
+	 * Mark the buffer as an inode buffer now that it looks good
+	 */
+	XFS_BUF_SET_VTYPE(bp, B_FS_INO);
+
+	*bpp = bp;
+	return 0;
+}
+
+/*
  * This routine is called to map an inode number within a file
  * system to the buffer containing the on-disk version of the
  * inode.  It returns a pointer to the buffer containing the
@@ -147,72 +230,19 @@
 	xfs_buf_t	**bpp,
 	int		*offset)
 {
-	int		di_ok;
 	xfs_imap_t	imap;
 	xfs_buf_t	*bp;
 	int		error;
-	xfs_dinode_t	*dip;
 
-	/*
-	 * Call the space management code to find the location of the
-	 * inode on disk.
-	 */
 	imap.im_blkno = 0;
 	error = xfs_imap(mp, tp, ino, &imap, XFS_IMAP_LOOKUP);
-	if (error != 0) {
-		cmn_err(CE_WARN,
-	"xfs_inotobp: xfs_imap()  returned an "
-	"error %d on %s.  Returning error.", error, mp->m_fsname);
+	if (error)
 		return error;
-	}
 
-	/*
-	 * If the inode number maps to a block outside the bounds of the
-	 * file system then return NULL rather than calling read_buf
-	 * and panicing when we get an error from the driver.
-	 */
-	if ((imap.im_blkno + imap.im_len) >
-	    XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) {
-		cmn_err(CE_WARN,
-	"xfs_inotobp: inode number (%llu + %d) maps to a block outside the bounds "
-	"of the file system %s.  Returning EINVAL.",
-			(unsigned long long)imap.im_blkno,
-			imap.im_len, mp->m_fsname);
-		return XFS_ERROR(EINVAL);
-	}
-
-	/*
-	 * Read in the buffer.  If tp is NULL, xfs_trans_read_buf() will
-	 * default to just a read_buf() call.
-	 */
-	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap.im_blkno,
-				   (int)imap.im_len, XFS_BUF_LOCK, &bp);
-
-	if (error) {
-		cmn_err(CE_WARN,
-	"xfs_inotobp: xfs_trans_read_buf()  returned an "
-	"error %d on %s.  Returning error.", error, mp->m_fsname);
+	error = xfs_imap_to_bp(mp, tp, &imap, &bp, XFS_BUF_LOCK, 0);
+	if (error)
 		return error;
-	}
-	dip = (xfs_dinode_t *)xfs_buf_offset(bp, 0);
-	di_ok =
-		be16_to_cpu(dip->di_core.di_magic) == XFS_DINODE_MAGIC &&
-		XFS_DINODE_GOOD_VERSION(dip->di_core.di_version);
-	if (unlikely(XFS_TEST_ERROR(!di_ok, mp, XFS_ERRTAG_ITOBP_INOTOBP,
-			XFS_RANDOM_ITOBP_INOTOBP))) {
-		XFS_CORRUPTION_ERROR("xfs_inotobp", XFS_ERRLEVEL_LOW, mp, dip);
-		xfs_trans_brelse(tp, bp);
-		cmn_err(CE_WARN,
-	"xfs_inotobp: XFS_TEST_ERROR()  returned an "
-	"error on %s.  Returning EFSCORRUPTED.",  mp->m_fsname);
-		return XFS_ERROR(EFSCORRUPTED);
-	}
 
-	xfs_inobp_check(mp, bp);
-
-	/*
-	 * Set *dipp to point to the on-disk inode in the buffer.
-	 */
 	*dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
 	*bpp = bp;
 	*offset = imap.im_boffset;
@@ -248,46 +278,21 @@
 	xfs_dinode_t	**dipp,
 	xfs_buf_t	**bpp,
 	xfs_daddr_t	bno,
-	uint		imap_flags)
+	uint		imap_flags,
+	uint		buf_flags)
 {
 	xfs_imap_t	imap;
 	xfs_buf_t	*bp;
 	int		error;
-	int		i;
-	int		ni;
 
 	if (ip->i_blkno == (xfs_daddr_t)0) {
-		/*
-		 * Call the space management code to find the location of the
-		 * inode on disk.
-		 */
 		imap.im_blkno = bno;
-		if ((error = xfs_imap(mp, tp, ip->i_ino, &imap,
-					XFS_IMAP_LOOKUP | imap_flags)))
+		error = xfs_imap(mp, tp, ip->i_ino, &imap,
+					XFS_IMAP_LOOKUP | imap_flags);
+		if (error)
 			return error;
 
 		/*
-		 * If the inode number maps to a block outside the bounds
-		 * of the file system then return NULL rather than calling
-		 * read_buf and panicing when we get an error from the
-		 * driver.
-		 */
-		if ((imap.im_blkno + imap.im_len) >
-		    XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) {
-#ifdef DEBUG
-			xfs_fs_cmn_err(CE_ALERT, mp, "xfs_itobp: "
-					"(imap.im_blkno (0x%llx) "
-					"+ imap.im_len (0x%llx)) > "
-					" XFS_FSB_TO_BB(mp, "
-					"mp->m_sb.sb_dblocks) (0x%llx)",
-					(unsigned long long) imap.im_blkno,
-					(unsigned long long) imap.im_len,
-					XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks));
-#endif /* DEBUG */
-			return XFS_ERROR(EINVAL);
-		}
-
-		/*
 		 * Fill in the fields in the inode that will be used to
 		 * map the inode to its buffer from now on.
 		 */
@@ -305,76 +310,17 @@
 	}
 	ASSERT(bno == 0 || bno == imap.im_blkno);
 
-	/*
-	 * Read in the buffer.  If tp is NULL, xfs_trans_read_buf() will
-	 * default to just a read_buf() call.
-	 */
-	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap.im_blkno,
-				   (int)imap.im_len, XFS_BUF_LOCK, &bp);
-	if (error) {
-#ifdef DEBUG
-		xfs_fs_cmn_err(CE_ALERT, mp, "xfs_itobp: "
-				"xfs_trans_read_buf() returned error %d, "
-				"imap.im_blkno 0x%llx, imap.im_len 0x%llx",
-				error, (unsigned long long) imap.im_blkno,
-				(unsigned long long) imap.im_len);
-#endif /* DEBUG */
+	error = xfs_imap_to_bp(mp, tp, &imap, &bp, buf_flags, imap_flags);
+	if (error)
 		return error;
+
+	if (!bp) {
+		ASSERT(buf_flags & XFS_BUF_TRYLOCK);
+		ASSERT(tp == NULL);
+		*bpp = NULL;
+		return EAGAIN;
 	}
 
-	/*
-	 * Validate the magic number and version of every inode in the buffer
-	 * (if DEBUG kernel) or the first inode in the buffer, otherwise.
-	 * No validation is done here in userspace (xfs_repair).
-	 */
-#if !defined(__KERNEL__)
-	ni = 0;
-#elif defined(DEBUG)
-	ni = BBTOB(imap.im_len) >> mp->m_sb.sb_inodelog;
-#else	/* usual case */
-	ni = 1;
-#endif
-
-	for (i = 0; i < ni; i++) {
-		int		di_ok;
-		xfs_dinode_t	*dip;
-
-		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
-					(i << mp->m_sb.sb_inodelog));
-		di_ok = be16_to_cpu(dip->di_core.di_magic) == XFS_DINODE_MAGIC &&
-			    XFS_DINODE_GOOD_VERSION(dip->di_core.di_version);
-		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
-						XFS_ERRTAG_ITOBP_INOTOBP,
-						XFS_RANDOM_ITOBP_INOTOBP))) {
-			if (imap_flags & XFS_IMAP_BULKSTAT) {
-				xfs_trans_brelse(tp, bp);
-				return XFS_ERROR(EINVAL);
-			}
-#ifdef DEBUG
-			cmn_err(CE_ALERT,
-					"Device %s - bad inode magic/vsn "
-					"daddr %lld #%d (magic=%x)",
-				XFS_BUFTARG_NAME(mp->m_ddev_targp),
-				(unsigned long long)imap.im_blkno, i,
-				be16_to_cpu(dip->di_core.di_magic));
-#endif
-			XFS_CORRUPTION_ERROR("xfs_itobp", XFS_ERRLEVEL_HIGH,
-					     mp, dip);
-			xfs_trans_brelse(tp, bp);
-			return XFS_ERROR(EFSCORRUPTED);
-		}
-	}
-
-	xfs_inobp_check(mp, bp);
-
-	/*
-	 * Mark the buffer as an inode buffer now that it looks good
-	 */
-	XFS_BUF_SET_VTYPE(bp, B_FS_INO);
-
-	/*
-	 * Set *dipp to point to the on-disk inode in the buffer.
-	 */
 	*dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
 	*bpp = bp;
 	return 0;
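
xfs_itobp() grows a buf_flags argument: with XFS_BUF_TRYLOCK the read may come back without a buffer, and the caller converts that into EAGAIN instead of sleeping. A user-space model of that contract, names invented for illustration:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER;

/* returns 0 with *got == 0 when a trylock would have blocked */
static int read_buf(int trylock, int *got)
{
	if (trylock) {
		if (pthread_mutex_trylock(&buf_lock)) {
			*got = 0;	/* like *bpp == NULL above */
			return 0;
		}
	} else {
		pthread_mutex_lock(&buf_lock);
	}
	*got = 1;
	return 0;
}

int main(void)
{
	int got;

	pthread_mutex_lock(&buf_lock);	/* someone else holds the buffer */
	(void)read_buf(1, &got);
	if (!got)
		printf("busy, caller would return EAGAIN (%d)\n", EAGAIN);
	return 0;
}
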
@@ -878,7 +824,7 @@
 	 * return NULL as well.  Set i_blkno to 0 so that xfs_itobp() will
 	 * know that this is a new incore inode.
 	 */
-	error = xfs_itobp(mp, tp, ip, &dip, &bp, bno, imap_flags);
+	error = xfs_itobp(mp, tp, ip, &dip, &bp, bno, imap_flags, XFS_BUF_LOCK);
 	if (error) {
 		kmem_zone_free(xfs_inode_zone, ip);
 		return error;
@@ -1518,51 +1464,50 @@
 }
 
 /*
- * Shrink the file to the given new_size.  The new
- * size must be smaller than the current size.
- * This will free up the underlying blocks
- * in the removed range after a call to xfs_itruncate_start()
- * or xfs_atruncate_start().
+ * Shrink the file to the given new_size.  The new size must be smaller than
+ * the current size.  This will free up the underlying blocks in the removed
+ * range after a call to xfs_itruncate_start() or xfs_atruncate_start().
  *
- * The transaction passed to this routine must have made
- * a permanent log reservation of at least XFS_ITRUNCATE_LOG_RES.
- * This routine may commit the given transaction and
- * start new ones, so make sure everything involved in
- * the transaction is tidy before calling here.
- * Some transaction will be returned to the caller to be
- * committed.  The incoming transaction must already include
- * the inode, and both inode locks must be held exclusively.
- * The inode must also be "held" within the transaction.  On
- * return the inode will be "held" within the returned transaction.
- * This routine does NOT require any disk space to be reserved
- * for it within the transaction.
+ * The transaction passed to this routine must have made a permanent log
+ * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
+ * given transaction and start new ones, so make sure everything involved in
+ * the transaction is tidy before calling here.  Some transaction will be
+ * returned to the caller to be committed.  The incoming transaction must
+ * already include the inode, and both inode locks must be held exclusively.
+ * The inode must also be "held" within the transaction.  On return the inode
+ * will be "held" within the returned transaction.  This routine does NOT
+ * require any disk space to be reserved for it within the transaction.
  *
- * The fork parameter must be either xfs_attr_fork or xfs_data_fork,
- * and it indicates the fork which is to be truncated.  For the
- * attribute fork we only support truncation to size 0.
+ * The fork parameter must be either xfs_attr_fork or xfs_data_fork, and it
+ * indicates the fork which is to be truncated.  For the attribute fork we only
+ * support truncation to size 0.
  *
- * We use the sync parameter to indicate whether or not the first
- * transaction we perform might have to be synchronous.  For the attr fork,
- * it needs to be so if the unlink of the inode is not yet known to be
- * permanent in the log.  This keeps us from freeing and reusing the
- * blocks of the attribute fork before the unlink of the inode becomes
- * permanent.
+ * We use the sync parameter to indicate whether or not the first transaction
+ * we perform might have to be synchronous.  For the attr fork, it needs to be
+ * so if the unlink of the inode is not yet known to be permanent in the log.
+ * This keeps us from freeing and reusing the blocks of the attribute fork
+ * before the unlink of the inode becomes permanent.
  *
- * For the data fork, we normally have to run synchronously if we're
- * being called out of the inactive path or we're being called
- * out of the create path where we're truncating an existing file.
- * Either way, the truncate needs to be sync so blocks don't reappear
- * in the file with altered data in case of a crash.  wsync filesystems
- * can run the first case async because anything that shrinks the inode
- * has to run sync so by the time we're called here from inactive, the
- * inode size is permanently set to 0.
+ * For the data fork, we normally have to run synchronously if we're being
+ * called out of the inactive path or we're being called out of the create path
+ * where we're truncating an existing file.  Either way, the truncate needs to
+ * be sync so blocks don't reappear in the file with altered data in case of a
+ * crash.  wsync filesystems can run the first case async because anything that
+ * shrinks the inode has to run sync so by the time we're called here from
+ * inactive, the inode size is permanently set to 0.
  *
- * Calls from the truncate path always need to be sync unless we're
- * in a wsync filesystem and the file has already been unlinked.
+ * Calls from the truncate path always need to be sync unless we're in a wsync
+ * filesystem and the file has already been unlinked.
  *
- * The caller is responsible for correctly setting the sync parameter.
- * It gets too hard for us to guess here which path we're being called
- * out of just based on inode state.
+ * The caller is responsible for correctly setting the sync parameter.  It gets
+ * too hard for us to guess here which path we're being called out of just
+ * based on inode state.
+ *
+ * If we get an error, we must return with the inode locked and linked into the
+ * current transaction. This keeps things simple for the higher level code,
+ * because it always knows that the inode is locked and held in the transaction
+ * that returns to it whether errors occur or not.  We don't mark the inode
+ * dirty on error so that transactions can be easily aborted if possible.
  */
 int
 xfs_itruncate_finish(
@@ -1741,65 +1686,51 @@
 		 */
 		error = xfs_bmap_finish(tp, &free_list, &committed);
 		ntp = *tp;
+		if (committed) {
+			/* link the inode into the next xact in the chain */
+			xfs_trans_ijoin(ntp, ip,
+					XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
+			xfs_trans_ihold(ntp, ip);
+		}
+
 		if (error) {
 			/*
-			 * If the bmap finish call encounters an error,
-			 * return to the caller where the transaction
-			 * can be properly aborted.  We just need to
-			 * make sure we're not holding any resources
-			 * that we were not when we came in.
+			 * If the bmap finish call encounters an error, return
+			 * to the caller where the transaction can be properly
+			 * aborted.  We just need to make sure we're not
+			 * holding any resources that we were not when we came
+			 * in.
 			 *
-			 * Aborting from this point might lose some
-			 * blocks in the file system, but oh well.
+			 * Aborting from this point might lose some blocks in
+			 * the file system, but oh well.
 			 */
 			xfs_bmap_cancel(&free_list);
-			if (committed) {
-				/*
-				 * If the passed in transaction committed
-				 * in xfs_bmap_finish(), then we want to
-				 * add the inode to this one before returning.
-				 * This keeps things simple for the higher
-				 * level code, because it always knows that
-				 * the inode is locked and held in the
-				 * transaction that returns to it whether
-				 * errors occur or not.  We don't mark the
-				 * inode dirty so that this transaction can
-				 * be easily aborted if possible.
-				 */
-				xfs_trans_ijoin(ntp, ip,
-					XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
-				xfs_trans_ihold(ntp, ip);
-			}
 			return error;
 		}
 
 		if (committed) {
 			/*
-			 * The first xact was committed,
-			 * so add the inode to the new one.
-			 * Mark it dirty so it will be logged
-			 * and moved forward in the log as
-			 * part of every commit.
+			 * Mark the inode dirty so it will be logged and
+			 * moved forward in the log as part of every commit.
 			 */
-			xfs_trans_ijoin(ntp, ip,
-					XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
-			xfs_trans_ihold(ntp, ip);
 			xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
 		}
+
 		ntp = xfs_trans_dup(ntp);
-		(void) xfs_trans_commit(*tp, 0);
+		error = xfs_trans_commit(*tp, 0);
 		*tp = ntp;
-		error = xfs_trans_reserve(ntp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
-					  XFS_TRANS_PERM_LOG_RES,
-					  XFS_ITRUNCATE_LOG_COUNT);
-		/*
-		 * Add the inode being truncated to the next chained
-		 * transaction.
-		 */
+
+		/* link the inode into the next transaction in the chain */
 		xfs_trans_ijoin(ntp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
 		xfs_trans_ihold(ntp, ip);
+
+		if (!error)
+			error = xfs_trans_reserve(ntp, 0,
+					XFS_ITRUNCATE_LOG_RES(mp), 0,
+					XFS_TRANS_PERM_LOG_RES,
+					XFS_ITRUNCATE_LOG_COUNT);
 		if (error)
-			return (error);
+			return error;
 	}
 	/*
 	 * Only update the size in the case of the data fork, but
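
The xfs_itruncate_finish() rework above always rejoins the inode to the next transaction in the chain before inspecting the commit or reserve errors, so every return path hands back a live transaction with the inode attached. An abstract sketch of that ordering, with stand-in types:

#include <stdio.h>

struct xact { int joined; };

static int commit(struct xact *t)  { (void)t; return 0; }	/* may fail */
static int reserve(struct xact *t) { (void)t; return 0; }	/* may fail */

/* roll from cur to next; the inode stand-in is joined to next
 * unconditionally, so callers can abort cleanly either way */
static int roll(struct xact *cur, struct xact *next)
{
	int error = commit(cur);

	next->joined = 1;		/* join BEFORE the error checks */
	if (!error)
		error = reserve(next);
	return error;
}

int main(void)
{
	struct xact a = { 1 }, b = { 0 };
	int error = roll(&a, &b);

	printf("error=%d, still joined=%d\n", error, b.joined);
	return 0;
}
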
@@ -1967,7 +1898,7 @@
 		 * Here we put the head pointer into our next pointer,
 		 * and then we fall through to point the head at us.
 		 */
-		error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0);
+		error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0, XFS_BUF_LOCK);
 		if (error)
 			return error;
 
@@ -2075,7 +2006,7 @@
 		 * of dealing with the buffer when there is no need to
 		 * change it.
 		 */
-		error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0);
+		error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0, XFS_BUF_LOCK);
 		if (error) {
 			cmn_err(CE_WARN,
 				"xfs_iunlink_remove: xfs_itobp()  returned an error %d on %s.  Returning error.",
@@ -2137,7 +2068,7 @@
 		 * Now last_ibp points to the buffer previous to us on
 		 * the unlinked list.  Pull us from the list.
 		 */
-		error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0);
+		error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0, XFS_BUF_LOCK);
 		if (error) {
 			cmn_err(CE_WARN,
 				"xfs_iunlink_remove: xfs_itobp()  returned an error %d on %s.  Returning error.",
@@ -2172,13 +2103,6 @@
 	return 0;
 }
 
-STATIC_INLINE int xfs_inode_clean(xfs_inode_t *ip)
-{
-	return (((ip->i_itemp == NULL) ||
-		!(ip->i_itemp->ili_format.ilf_fields & XFS_ILOG_ALL)) &&
-		(ip->i_update_core == 0));
-}
-
 STATIC void
 xfs_ifree_cluster(
 	xfs_inode_t	*free_ip,
@@ -2400,7 +2324,7 @@
 
 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 
-	error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, 0, 0);
+	error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, 0, 0, XFS_BUF_LOCK);
 	if (error)
 		return error;
 
@@ -2678,14 +2602,31 @@
 	fsbno = imap->im_blkno ?
 		XFS_DADDR_TO_FSB(mp, imap->im_blkno) : NULLFSBLOCK;
 	error = xfs_dilocate(mp, tp, ino, &fsbno, &len, &off, flags);
-	if (error != 0) {
+	if (error)
 		return error;
-	}
+
 	imap->im_blkno = XFS_FSB_TO_DADDR(mp, fsbno);
 	imap->im_len = XFS_FSB_TO_BB(mp, len);
 	imap->im_agblkno = XFS_FSB_TO_AGBNO(mp, fsbno);
 	imap->im_ioffset = (ushort)off;
 	imap->im_boffset = (ushort)(off << mp->m_sb.sb_inodelog);
+
+	/*
+	 * If the inode number maps to a block outside the bounds
+	 * of the file system then return NULL rather than calling
+	 * read_buf and panicking when we get an error from the
+	 * driver.
+	 */
+	if ((imap->im_blkno + imap->im_len) >
+	    XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) {
+		xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: "
+			"(imap->im_blkno (0x%llx) + imap->im_len (0x%llx)) > "
+			" XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks) (0x%llx)",
+			(unsigned long long) imap->im_blkno,
+			(unsigned long long) imap->im_len,
+			XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks));
+		return EINVAL;
+	}
 	return 0;
 }
 
@@ -2826,38 +2767,41 @@
 }
 
 /*
- * This is called to wait for the given inode to be unpinned.
- * It will sleep until this happens.  The caller must have the
- * inode locked in at least shared mode so that the buffer cannot
- * be subsequently pinned once someone is waiting for it to be
- * unpinned.
+ * This is called to unpin an inode. It can be directed to wait or to return
+ * immediately without waiting for the inode to be unpinned.  The caller must
+ * have the inode locked in at least shared mode so that the buffer cannot be
+ * subsequently pinned once someone is waiting for it to be unpinned.
  */
 STATIC void
+__xfs_iunpin_wait(
+	xfs_inode_t	*ip,
+	int		wait)
+{
+	xfs_inode_log_item_t	*iip = ip->i_itemp;
+
+	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE | MR_ACCESS));
+	if (atomic_read(&ip->i_pincount) == 0)
+		return;
+
+	/* Give the log a push to start the unpinning I/O */
+	xfs_log_force(ip->i_mount, (iip && iip->ili_last_lsn) ?
+				iip->ili_last_lsn : 0, XFS_LOG_FORCE);
+	if (wait)
+		wait_event(ip->i_ipin_wait, (atomic_read(&ip->i_pincount) == 0));
+}
+
+static inline void
 xfs_iunpin_wait(
 	xfs_inode_t	*ip)
 {
-	xfs_inode_log_item_t	*iip;
-	xfs_lsn_t	lsn;
+	__xfs_iunpin_wait(ip, 1);
+}
 
-	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE | MR_ACCESS));
-
-	if (atomic_read(&ip->i_pincount) == 0) {
-		return;
-	}
-
-	iip = ip->i_itemp;
-	if (iip && iip->ili_last_lsn) {
-		lsn = iip->ili_last_lsn;
-	} else {
-		lsn = (xfs_lsn_t)0;
-	}
-
-	/*
-	 * Give the log a push so we don't wait here too long.
-	 */
-	xfs_log_force(ip->i_mount, lsn, XFS_LOG_FORCE);
-
-	wait_event(ip->i_ipin_wait, (atomic_read(&ip->i_pincount) == 0));
+static inline void
+xfs_iunpin_nowait(
+	xfs_inode_t	*ip)
+{
+	__xfs_iunpin_wait(ip, 0);
 }
 
 
@@ -2932,7 +2876,7 @@
  * format indicates the current state of the fork.
  */
 /*ARGSUSED*/
-STATIC int
+STATIC void
 xfs_iflush_fork(
 	xfs_inode_t		*ip,
 	xfs_dinode_t		*dip,
@@ -2953,16 +2897,16 @@
 	static const short	extflag[2] =
 		{ XFS_ILOG_DEXT, XFS_ILOG_AEXT };
 
-	if (iip == NULL)
-		return 0;
+	if (!iip)
+		return;
 	ifp = XFS_IFORK_PTR(ip, whichfork);
 	/*
 	 * This can happen if we gave up in iformat in an error path,
 	 * for the attribute fork.
 	 */
-	if (ifp == NULL) {
+	if (!ifp) {
 		ASSERT(whichfork == XFS_ATTR_FORK);
-		return 0;
+		return;
 	}
 	cp = XFS_DFORK_PTR(dip, whichfork);
 	mp = ip->i_mount;
@@ -3023,8 +2967,145 @@
 		ASSERT(0);
 		break;
 	}
+}
 
+STATIC int
+xfs_iflush_cluster(
+	xfs_inode_t	*ip,
+	xfs_buf_t	*bp)
+{
+	xfs_mount_t		*mp = ip->i_mount;
+	xfs_perag_t		*pag = xfs_get_perag(mp, ip->i_ino);
+	unsigned long		first_index, mask;
+	int			ilist_size;
+	xfs_inode_t		**ilist;
+	xfs_inode_t		*iq;
+	int			nr_found;
+	int			clcount = 0;
+	int			bufwasdelwri;
+	int			i;
+
+	ASSERT(pag->pagi_inodeok);
+	ASSERT(pag->pag_ici_init);
+
+	ilist_size = XFS_INODE_CLUSTER_SIZE(mp) * sizeof(xfs_inode_t *);
+	ilist = kmem_alloc(ilist_size, KM_MAYFAIL);
+	if (!ilist)
+		return 0;
+
+	mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
+	first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
+	read_lock(&pag->pag_ici_lock);
+	/* really need a gang lookup range call here */
+	nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)ilist,
+					first_index,
+					XFS_INODE_CLUSTER_SIZE(mp));
+	if (nr_found == 0)
+		goto out_free;
+
+	for (i = 0; i < nr_found; i++) {
+		iq = ilist[i];
+		if (iq == ip)
+			continue;
+		/* if the inode lies outside this cluster, we're done. */
+		if ((XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index)
+			break;
+		/*
+		 * Do an un-protected check to see if the inode is dirty and
+		 * is a candidate for flushing.  These checks will be repeated
+		 * later after the appropriate locks are acquired.
+		 */
+		if (xfs_inode_clean(iq) && xfs_ipincount(iq) == 0)
+			continue;
+
+		/*
+		 * Try to get locks.  If any are unavailable or it is pinned,
+		 * then this inode cannot be flushed and is skipped.
+		 */
+
+		if (!xfs_ilock_nowait(iq, XFS_ILOCK_SHARED))
+			continue;
+		if (!xfs_iflock_nowait(iq)) {
+			xfs_iunlock(iq, XFS_ILOCK_SHARED);
+			continue;
+		}
+		if (xfs_ipincount(iq)) {
+			xfs_ifunlock(iq);
+			xfs_iunlock(iq, XFS_ILOCK_SHARED);
+			continue;
+		}
+
+		/*
+		 * arriving here means that this inode can be flushed.  First
+		 * re-check that it's dirty before flushing.
+		 */
+		if (!xfs_inode_clean(iq)) {
+			int	error;
+			error = xfs_iflush_int(iq, bp);
+			if (error) {
+				xfs_iunlock(iq, XFS_ILOCK_SHARED);
+				goto cluster_corrupt_out;
+			}
+			clcount++;
+		} else {
+			xfs_ifunlock(iq);
+		}
+		xfs_iunlock(iq, XFS_ILOCK_SHARED);
+	}
+
+	if (clcount) {
+		XFS_STATS_INC(xs_icluster_flushcnt);
+		XFS_STATS_ADD(xs_icluster_flushinode, clcount);
+	}
+
+out_free:
+	read_unlock(&pag->pag_ici_lock);
+	kmem_free(ilist, ilist_size);
 	return 0;
+
+
+cluster_corrupt_out:
+	/*
+	 * Corruption detected in the clustering loop.  Invalidate the
+	 * inode buffer and shut down the filesystem.
+	 */
+	read_unlock(&pag->pag_ici_lock);
+	/*
+	 * Clean up the buffer.  If it was B_DELWRI, just release it --
+	 * brelse can handle it with no problems.  If not, shut down the
+	 * filesystem before releasing the buffer.
+	 */
+	bufwasdelwri = XFS_BUF_ISDELAYWRITE(bp);
+	if (bufwasdelwri)
+		xfs_buf_relse(bp);
+
+	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+
+	if (!bufwasdelwri) {
+		/*
+		 * Just like incore_relse: if we have b_iodone functions,
+		 * mark the buffer as an error and call them.  Otherwise
+		 * mark it as stale and brelse.
+		 */
+		if (XFS_BUF_IODONE_FUNC(bp)) {
+			XFS_BUF_CLR_BDSTRAT_FUNC(bp);
+			XFS_BUF_UNDONE(bp);
+			XFS_BUF_STALE(bp);
+			XFS_BUF_SHUT(bp);
+			XFS_BUF_ERROR(bp,EIO);
+			xfs_biodone(bp);
+		} else {
+			XFS_BUF_STALE(bp);
+			xfs_buf_relse(bp);
+		}
+	}
+
+	/*
+	 * Unlocks the flush lock
+	 */
+	xfs_iflush_abort(iq);
+	kmem_free(ilist, ilist_size);
+	return XFS_ERROR(EFSCORRUPTED);
 }
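
The clustering loop moved into xfs_iflush_cluster() keeps its old locking discipline: peek at each neighbour unlocked, then take every lock with a non-blocking attempt and skip anything busy or pinned, since flushing neighbours is opportunistic, not required. A user-space model with hypothetical names:

#include <pthread.h>
#include <stdio.h>

#define NR 4

struct ino { pthread_mutex_t lock; int dirty; };

static int flush_cluster(struct ino *inos, int n)
{
	int flushed = 0;

	for (int i = 0; i < n; i++) {
		if (!inos[i].dirty)
			continue;			/* unlocked peek */
		if (pthread_mutex_trylock(&inos[i].lock))
			continue;			/* busy: just skip */
		if (inos[i].dirty) {			/* re-check held */
			inos[i].dirty = 0;
			flushed++;
		}
		pthread_mutex_unlock(&inos[i].lock);
	}
	return flushed;
}

int main(void)
{
	struct ino inos[NR] = {
		{ PTHREAD_MUTEX_INITIALIZER, 1 },
		{ PTHREAD_MUTEX_INITIALIZER, 0 },
		{ PTHREAD_MUTEX_INITIALIZER, 1 },
		{ PTHREAD_MUTEX_INITIALIZER, 0 },
	};

	printf("flushed %d inodes\n", flush_cluster(inos, NR));
	return 0;
}
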
 
 /*
@@ -3046,11 +3127,7 @@
 	xfs_dinode_t		*dip;
 	xfs_mount_t		*mp;
 	int			error;
-	/* REFERENCED */
-	xfs_inode_t		*iq;
-	int			clcount;	/* count of inodes clustered */
-	int			bufwasdelwri;
-	struct hlist_node	*entry;
+	int			noblock = (flags == XFS_IFLUSH_ASYNC_NOBLOCK);
 	enum { INT_DELWRI = (1 << 0), INT_ASYNC = (1 << 1) };
 
 	XFS_STATS_INC(xs_iflush_count);
@@ -3067,8 +3144,7 @@
 	 * If the inode isn't dirty, then just release the inode
 	 * flush lock and do nothing.
 	 */
-	if ((ip->i_update_core == 0) &&
-	    ((iip == NULL) || !(iip->ili_format.ilf_fields & XFS_ILOG_ALL))) {
+	if (xfs_inode_clean(ip)) {
 		ASSERT((iip != NULL) ?
 			 !(iip->ili_item.li_flags & XFS_LI_IN_AIL) : 1);
 		xfs_ifunlock(ip);
@@ -3076,11 +3152,21 @@
 	}
 
 	/*
-	 * We can't flush the inode until it is unpinned, so
-	 * wait for it.  We know noone new can pin it, because
-	 * we are holding the inode lock shared and you need
-	 * to hold it exclusively to pin the inode.
+	 * We can't flush the inode until it is unpinned, so wait for it if we
+	 * are allowed to block.  We know no one new can pin it, because we are
+	 * holding the inode lock shared and you need to hold it exclusively to
+	 * pin the inode.
+	 *
+	 * If we are not allowed to block, force the log out asynchronously so
+	 * that when we come back the inode will be unpinned. If other inodes
+	 * in the same cluster are dirty, they will probably write the inode
+	 * out for us if they occur after the log force completes.
 	 */
+	if (noblock && xfs_ipincount(ip)) {
+		xfs_iunpin_nowait(ip);
+		xfs_ifunlock(ip);
+		return EAGAIN;
+	}
 	xfs_iunpin_wait(ip);
 
 	/*
@@ -3097,15 +3183,6 @@
 	}
 
 	/*
-	 * Get the buffer containing the on-disk inode.
-	 */
-	error = xfs_itobp(mp, NULL, ip, &dip, &bp, 0, 0);
-	if (error) {
-		xfs_ifunlock(ip);
-		return error;
-	}
-
-	/*
 	 * Decide how buffer will be flushed out.  This is done before
 	 * the call to xfs_iflush_int because this field is zeroed by it.
 	 */
@@ -3121,6 +3198,7 @@
 		case XFS_IFLUSH_DELWRI_ELSE_SYNC:
 			flags = 0;
 			break;
+		case XFS_IFLUSH_ASYNC_NOBLOCK:
 		case XFS_IFLUSH_ASYNC:
 		case XFS_IFLUSH_DELWRI_ELSE_ASYNC:
 			flags = INT_ASYNC;
@@ -3140,6 +3218,7 @@
 		case XFS_IFLUSH_DELWRI:
 			flags = INT_DELWRI;
 			break;
+		case XFS_IFLUSH_ASYNC_NOBLOCK:
 		case XFS_IFLUSH_ASYNC:
 			flags = INT_ASYNC;
 			break;
@@ -3154,94 +3233,41 @@
 	}
 
 	/*
+	 * Get the buffer containing the on-disk inode.
+	 */
+	error = xfs_itobp(mp, NULL, ip, &dip, &bp, 0, 0,
+				noblock ? XFS_BUF_TRYLOCK : XFS_BUF_LOCK);
+	if (error || !bp) {
+		xfs_ifunlock(ip);
+		return error;
+	}
+
+	/*
 	 * First flush out the inode that xfs_iflush was called with.
 	 */
 	error = xfs_iflush_int(ip, bp);
-	if (error) {
+	if (error)
 		goto corrupt_out;
-	}
+
+	/*
+	 * If the buffer is pinned then push on the log now so we won't
+	 * get stuck waiting in the write for too long.
+	 */
+	if (XFS_BUF_ISPINNED(bp))
+		xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
 
 	/*
 	 * inode clustering:
 	 * see if other inodes can be gathered into this write
 	 */
-	spin_lock(&ip->i_cluster->icl_lock);
-	ip->i_cluster->icl_buf = bp;
-
-	clcount = 0;
-	hlist_for_each_entry(iq, entry, &ip->i_cluster->icl_inodes, i_cnode) {
-		if (iq == ip)
-			continue;
-
-		/*
-		 * Do an un-protected check to see if the inode is dirty and
-		 * is a candidate for flushing.  These checks will be repeated
-		 * later after the appropriate locks are acquired.
-		 */
-		iip = iq->i_itemp;
-		if ((iq->i_update_core == 0) &&
-		    ((iip == NULL) ||
-		     !(iip->ili_format.ilf_fields & XFS_ILOG_ALL)) &&
-		      xfs_ipincount(iq) == 0) {
-			continue;
-		}
-
-		/*
-		 * Try to get locks.  If any are unavailable,
-		 * then this inode cannot be flushed and is skipped.
-		 */
-
-		/* get inode locks (just i_lock) */
-		if (xfs_ilock_nowait(iq, XFS_ILOCK_SHARED)) {
-			/* get inode flush lock */
-			if (xfs_iflock_nowait(iq)) {
-				/* check if pinned */
-				if (xfs_ipincount(iq) == 0) {
-					/* arriving here means that
-					 * this inode can be flushed.
-					 * first re-check that it's
-					 * dirty
-					 */
-					iip = iq->i_itemp;
-					if ((iq->i_update_core != 0)||
-					    ((iip != NULL) &&
-					     (iip->ili_format.ilf_fields & XFS_ILOG_ALL))) {
-						clcount++;
-						error = xfs_iflush_int(iq, bp);
-						if (error) {
-							xfs_iunlock(iq,
-								    XFS_ILOCK_SHARED);
-							goto cluster_corrupt_out;
-						}
-					} else {
-						xfs_ifunlock(iq);
-					}
-				} else {
-					xfs_ifunlock(iq);
-				}
-			}
-			xfs_iunlock(iq, XFS_ILOCK_SHARED);
-		}
-	}
-	spin_unlock(&ip->i_cluster->icl_lock);
-
-	if (clcount) {
-		XFS_STATS_INC(xs_icluster_flushcnt);
-		XFS_STATS_ADD(xs_icluster_flushinode, clcount);
-	}
-
-	/*
-	 * If the buffer is pinned then push on the log so we won't
-	 * get stuck waiting in the write for too long.
-	 */
-	if (XFS_BUF_ISPINNED(bp)){
-		xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
-	}
+	error = xfs_iflush_cluster(ip, bp);
+	if (error)
+		goto cluster_corrupt_out;
 
 	if (flags & INT_DELWRI) {
 		xfs_bdwrite(mp, bp);
 	} else if (flags & INT_ASYNC) {
-		xfs_bawrite(mp, bp);
+		error = xfs_bawrite(mp, bp);
 	} else {
 		error = xfs_bwrite(mp, bp);
 	}
@@ -3250,52 +3276,11 @@
 corrupt_out:
 	xfs_buf_relse(bp);
 	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
-	xfs_iflush_abort(ip);
-	/*
-	 * Unlocks the flush lock
-	 */
-	return XFS_ERROR(EFSCORRUPTED);
-
 cluster_corrupt_out:
-	/* Corruption detected in the clustering loop.  Invalidate the
-	 * inode buffer and shut down the filesystem.
-	 */
-	spin_unlock(&ip->i_cluster->icl_lock);
-
-	/*
-	 * Clean up the buffer.  If it was B_DELWRI, just release it --
-	 * brelse can handle it with no problems.  If not, shut down the
-	 * filesystem before releasing the buffer.
-	 */
-	if ((bufwasdelwri= XFS_BUF_ISDELAYWRITE(bp))) {
-		xfs_buf_relse(bp);
-	}
-
-	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
-
-	if(!bufwasdelwri)  {
-		/*
-		 * Just like incore_relse: if we have b_iodone functions,
-		 * mark the buffer as an error and call them.  Otherwise
-		 * mark it as stale and brelse.
-		 */
-		if (XFS_BUF_IODONE_FUNC(bp)) {
-			XFS_BUF_CLR_BDSTRAT_FUNC(bp);
-			XFS_BUF_UNDONE(bp);
-			XFS_BUF_STALE(bp);
-			XFS_BUF_SHUT(bp);
-			XFS_BUF_ERROR(bp,EIO);
-			xfs_biodone(bp);
-		} else {
-			XFS_BUF_STALE(bp);
-			xfs_buf_relse(bp);
-		}
-	}
-
-	xfs_iflush_abort(iq);
 	/*
 	 * Unlocks the flush lock
 	 */
+	xfs_iflush_abort(ip);
 	return XFS_ERROR(EFSCORRUPTED);
 }
 
@@ -3325,8 +3310,7 @@
 	 * If the inode isn't dirty, then just release the inode
 	 * flush lock and do nothing.
 	 */
-	if ((ip->i_update_core == 0) &&
-	    ((iip == NULL) || !(iip->ili_format.ilf_fields & XFS_ILOG_ALL))) {
+	if (xfs_inode_clean(ip)) {
 		xfs_ifunlock(ip);
 		return 0;
 	}
@@ -3459,16 +3443,9 @@
 		}
 	}
 
-	if (xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK, bp) == EFSCORRUPTED) {
-		goto corrupt_out;
-	}
-
-	if (XFS_IFORK_Q(ip)) {
-		/*
-		 * The only error from xfs_iflush_fork is on the data fork.
-		 */
-		(void) xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK, bp);
-	}
+	xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK, bp);
+	if (XFS_IFORK_Q(ip))
+		xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK, bp);
 	xfs_inobp_check(mp, bp);
 
 	/*
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index bfcd72c..93c3769 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -133,19 +133,6 @@
 } dm_attrs_t;
 
 /*
- * This is the xfs inode cluster structure.  This structure is used by
- * xfs_iflush to find inodes that share a cluster and can be flushed to disk at
- * the same time.
- */
-typedef struct xfs_icluster {
-	struct hlist_head	icl_inodes;	/* list of inodes on cluster */
-	xfs_daddr_t		icl_blkno;	/* starting block number of
-						 * the cluster */
-	struct xfs_buf		*icl_buf;	/* the inode buffer */
-	spinlock_t		icl_lock;	/* inode list lock */
-} xfs_icluster_t;
-
-/*
  * This is the xfs in-core inode structure.
  * Most of the on-disk inode is embedded in the i_d field.
  *
@@ -240,10 +227,6 @@
 	atomic_t		i_pincount;	/* inode pin count */
 	wait_queue_head_t	i_ipin_wait;	/* inode pinning wait queue */
 	spinlock_t		i_flags_lock;	/* inode i_flags lock */
-#ifdef HAVE_REFCACHE
-	struct xfs_inode	**i_refcache;	/* ptr to entry in ref cache */
-	struct xfs_inode	*i_release;	/* inode to unref */
-#endif
 	/* Miscellaneous state. */
 	unsigned short		i_flags;	/* see defined flags below */
 	unsigned char		i_update_core;	/* timestamps/size is dirty */
@@ -252,8 +235,6 @@
 	unsigned int		i_delayed_blks;	/* count of delay alloc blks */
 
 	xfs_icdinode_t		i_d;		/* most of ondisk inode */
-	xfs_icluster_t		*i_cluster;	/* cluster list header */
-	struct hlist_node	i_cnode;	/* cluster link node */
 
 	xfs_fsize_t		i_size;		/* in-memory size */
 	xfs_fsize_t		i_new_size;	/* size when write completes */
@@ -461,6 +442,7 @@
 #define	XFS_IFLUSH_SYNC			3
 #define	XFS_IFLUSH_ASYNC		4
 #define	XFS_IFLUSH_DELWRI		5
+#define	XFS_IFLUSH_ASYNC_NOBLOCK	6
 
 /*
  * Flags for xfs_itruncate_start().
@@ -515,7 +497,7 @@
  */
 int		xfs_itobp(struct xfs_mount *, struct xfs_trans *,
 			  xfs_inode_t *, struct xfs_dinode **, struct xfs_buf **,
-			  xfs_daddr_t, uint);
+			  xfs_daddr_t, uint, uint);
 int		xfs_iread(struct xfs_mount *, struct xfs_trans *, xfs_ino_t,
 			  xfs_inode_t **, xfs_daddr_t, uint);
 int		xfs_iread_extents(struct xfs_trans *, xfs_inode_t *, int);
@@ -597,7 +579,6 @@
 #define	xfs_inobp_check(mp, bp)
 #endif /* DEBUG */
 
-extern struct kmem_zone	*xfs_icluster_zone;
 extern struct kmem_zone	*xfs_ifork_zone;
 extern struct kmem_zone	*xfs_inode_zone;
 extern struct kmem_zone	*xfs_ili_zone;
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 2c775b4..93b5db4 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -40,6 +40,7 @@
 #include "xfs_btree.h"
 #include "xfs_ialloc.h"
 #include "xfs_rw.h"
+#include "xfs_error.h"
 
 
 kmem_zone_t	*xfs_ili_zone;		/* inode log item zone */
@@ -813,7 +814,12 @@
 					      XFS_LOG_FORCE);
 			}
 			if (dopush) {
-				xfs_bawrite(mp, bp);
+				int	error;
+				error = xfs_bawrite(mp, bp);
+				if (error)
+					xfs_fs_cmn_err(CE_WARN, mp,
+		"xfs_inode_item_pushbuf: pushbuf error %d on iip %p, bp %p",
+							error, iip, bp);
 			} else {
 				xfs_buf_relse(bp);
 			}
diff --git a/fs/xfs/xfs_inode_item.h b/fs/xfs/xfs_inode_item.h
index bfe92ea..4051307 100644
--- a/fs/xfs/xfs_inode_item.h
+++ b/fs/xfs/xfs_inode_item.h
@@ -168,6 +168,14 @@
 	return (w == XFS_DATA_FORK ? XFS_ILOG_DEXT : XFS_ILOG_AEXT);
 }
 
+static inline int xfs_inode_clean(xfs_inode_t *ip)
+{
+	return (!ip->i_itemp ||
+		!(ip->i_itemp->ili_format.ilf_fields & XFS_ILOG_ALL)) &&
+	       !ip->i_update_core;
+}
+
+
 #ifdef __KERNEL__
 
 extern void xfs_inode_item_init(struct xfs_inode *, struct xfs_mount *);
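
The new xfs_inode_clean() helper replaces three open-coded copies of the same dirty test (see the xfs_iflush()/xfs_iflush_int() hunks above). The consolidation in miniature, with invented field names:

#include <stdio.h>

#define LOG_ALL 0xffffu

struct log_item { unsigned int fields; };
struct inode { struct log_item *itemp; int update_core; };

/* one predicate instead of three hand-copied expressions */
static inline int inode_clean(const struct inode *ip)
{
	return (!ip->itemp || !(ip->itemp->fields & LOG_ALL)) &&
	       !ip->update_core;
}

int main(void)
{
	struct inode ip = { NULL, 0 };

	printf("clean=%d\n", inode_clean(&ip));
	return 0;
}
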
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index fde37f8..fb3cf11 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -802,8 +802,11 @@
 			 */
 			nimaps = 1;
 			end_fsb = XFS_B_TO_FSB(mp, ip->i_size);
-			xfs_bmap_last_offset(NULL, ip, &last_block,
-				XFS_DATA_FORK);
+			error = xfs_bmap_last_offset(NULL, ip, &last_block,
+							XFS_DATA_FORK);
+			if (error)
+				goto trans_cancel;
+
 			last_block = XFS_FILEOFF_MAX(last_block, end_fsb);
 			if ((map_start_fsb + count_fsb) > last_block) {
 				count_fsb = last_block - map_start_fsb;
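
The xfs_iomap.c hunk routes a previously ignored xfs_bmap_last_offset() failure into the function's existing trans_cancel path. The shape of that fix in standalone C, with stand-in names:

#include <stdio.h>

static int last_offset(long *out)
{
	*out = 128;
	return 0;		/* or an errno-style error */
}

static int map_range(long start, long count)
{
	long last;
	int error;

	error = last_offset(&last);
	if (error)
		goto trans_cancel;	/* previously silently ignored */

	if (start + count > last)
		count = last - start;
	printf("mapping %ld blocks\n", count);
	return 0;

trans_cancel:
	/* cancel the transaction, drop locks, ... */
	return error;
}

int main(void)
{
	return map_range(100, 50) ? 1 : 0;
}
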
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index f615e04..eb85bde 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -129,7 +129,7 @@
 	return error;
 }
 
-STATIC int
+STATIC void
 xfs_bulkstat_one_dinode(
 	xfs_mount_t	*mp,		/* mount point for filesystem */
 	xfs_ino_t	ino,		/* inode number to get data for */
@@ -198,8 +198,6 @@
 		buf->bs_blocks = be64_to_cpu(dic->di_nblocks);
 		break;
 	}
-
-	return 0;
 }
 
 STATIC int
@@ -614,7 +612,8 @@
 							xfs_buf_relse(bp);
 						error = xfs_itobp(mp, NULL, ip,
 								&dip, &bp, bno,
-								XFS_IMAP_BULKSTAT);
+								XFS_IMAP_BULKSTAT,
+								XFS_BUF_LOCK);
 						if (!error)
 							clustidx = ip->i_boffset / mp->m_sb.sb_inodesize;
 						kmem_zone_free(xfs_inode_zone, ip);
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 31f2b04..afaee30 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -41,6 +41,7 @@
 #include "xfs_inode.h"
 #include "xfs_rw.h"
 
+kmem_zone_t	*xfs_log_ticket_zone;
 
 #define xlog_write_adv_cnt(ptr, len, off, bytes) \
 	{ (ptr) += (bytes); \
@@ -73,8 +74,6 @@
 				       xlog_ticket_t	*ticket,
 				       int		*continued_write,
 				       int		*logoffsetp);
-STATIC void xlog_state_put_ticket(xlog_t	*log,
-				  xlog_ticket_t *tic);
 STATIC int  xlog_state_release_iclog(xlog_t		*log,
 				     xlog_in_core_t	*iclog);
 STATIC void xlog_state_switch_iclogs(xlog_t		*log,
@@ -101,7 +100,6 @@
 
 
 /* local ticket functions */
-STATIC void		xlog_state_ticket_alloc(xlog_t *log);
 STATIC xlog_ticket_t	*xlog_ticket_get(xlog_t *log,
 					 int	unit_bytes,
 					 int	count,
@@ -330,7 +328,7 @@
 		 */
 		xlog_trace_loggrant(log, ticket, "xfs_log_done: (non-permanent)");
 		xlog_ungrant_log_space(log, ticket);
-		xlog_state_put_ticket(log, ticket);
+		xlog_ticket_put(log, ticket);
 	} else {
 		xlog_trace_loggrant(log, ticket, "xfs_log_done: (permanent)");
 		xlog_regrant_reserve_log_space(log, ticket);
@@ -384,7 +382,27 @@
 		return xlog_state_sync_all(log, flags, log_flushed);
 	else
 		return xlog_state_sync(log, lsn, flags, log_flushed);
-}	/* xfs_log_force */
+}	/* _xfs_log_force */
+
+/*
+ * Wrapper for _xfs_log_force(), to be used when caller doesn't care
+ * about errors or whether the log was flushed or not. This is the normal
+ * interface to use when trying to unpin items or move the log forward.
+ */
+void
+xfs_log_force(
+	xfs_mount_t	*mp,
+	xfs_lsn_t	lsn,
+	uint		flags)
+{
+	int	error;
+	error = _xfs_log_force(mp, lsn, flags, NULL);
+	if (error) {
+		xfs_fs_cmn_err(CE_WARN, mp, "xfs_log_force: "
+			"error %d returned.", error);
+	}
+}
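
xfs_log_force() becomes a void convenience wrapper over _xfs_log_force() for the many callers that cannot act on an error beyond logging it. The split in miniature, in user space with invented names:

#include <stdio.h>

static int _log_force(int flags)	/* stands in for _xfs_log_force() */
{
	(void)flags;
	return 0;			/* or an errno-style error */
}

static void log_force(int flags)	/* stands in for xfs_log_force() */
{
	int error = _log_force(flags);

	if (error)
		fprintf(stderr, "log_force: error %d returned.\n", error);
}

int main(void)
{
	log_force(0);
	return 0;
}
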
+
 
 /*
  * Attaches a new iclog I/O completion callback routine during
@@ -397,12 +415,10 @@
 	       void		  *iclog_hndl,	/* iclog to hang callback off */
 	       xfs_log_callback_t *cb)
 {
-	xlog_t *log = mp->m_log;
 	xlog_in_core_t	  *iclog = (xlog_in_core_t *)iclog_hndl;
 	int	abortflg;
 
-	cb->cb_next = NULL;
-	spin_lock(&log->l_icloglock);
+	spin_lock(&iclog->ic_callback_lock);
 	abortflg = (iclog->ic_state & XLOG_STATE_IOERROR);
 	if (!abortflg) {
 		ASSERT_ALWAYS((iclog->ic_state == XLOG_STATE_ACTIVE) ||
@@ -411,7 +427,7 @@
 		*(iclog->ic_callback_tail) = cb;
 		iclog->ic_callback_tail = &(cb->cb_next);
 	}
-	spin_unlock(&log->l_icloglock);
+	spin_unlock(&iclog->ic_callback_lock);
 	return abortflg;
 }	/* xfs_log_notify */
 
@@ -471,6 +487,8 @@
 		/* may sleep if need to allocate more tickets */
 		internal_ticket = xlog_ticket_get(log, unit_bytes, cnt,
 						  client, flags);
+		if (!internal_ticket)
+			return XFS_ERROR(ENOMEM);
 		internal_ticket->t_trans_type = t_type;
 		*ticket = internal_ticket;
 		xlog_trace_loggrant(log, internal_ticket, 
@@ -636,7 +654,8 @@
 	if (mp->m_flags & XFS_MOUNT_RDONLY)
 		return 0;
 
-	xfs_log_force(mp, 0, XFS_LOG_FORCE|XFS_LOG_SYNC);
+	error = _xfs_log_force(mp, 0, XFS_LOG_FORCE|XFS_LOG_SYNC, NULL);
+	ASSERT(error || !(XLOG_FORCED_SHUTDOWN(log)));
 
 #ifdef DEBUG
 	first_iclog = iclog = log->l_iclog;
@@ -675,10 +694,10 @@
 
 		spin_lock(&log->l_icloglock);
 		iclog = log->l_iclog;
-		iclog->ic_refcnt++;
+		atomic_inc(&iclog->ic_refcnt);
 		spin_unlock(&log->l_icloglock);
 		xlog_state_want_sync(log, iclog);
-		(void) xlog_state_release_iclog(log, iclog);
+		error = xlog_state_release_iclog(log, iclog);
 
 		spin_lock(&log->l_icloglock);
 		if (!(iclog->ic_state == XLOG_STATE_ACTIVE ||
@@ -695,7 +714,7 @@
 		if (tic) {
 			xlog_trace_loggrant(log, tic, "unmount rec");
 			xlog_ungrant_log_space(log, tic);
-			xlog_state_put_ticket(log, tic);
+			xlog_ticket_put(log, tic);
 		}
 	} else {
 		/*
@@ -713,11 +732,11 @@
 		 */
 		spin_lock(&log->l_icloglock);
 		iclog = log->l_iclog;
-		iclog->ic_refcnt++;
+		atomic_inc(&iclog->ic_refcnt);
 		spin_unlock(&log->l_icloglock);
 
 		xlog_state_want_sync(log, iclog);
-		(void) xlog_state_release_iclog(log, iclog);
+		error = xlog_state_release_iclog(log, iclog);
 
 		spin_lock(&log->l_icloglock);
 
@@ -732,7 +751,7 @@
 		}
 	}
 
-	return 0;
+	return error;
 }	/* xfs_log_unmount_write */
 
 /*
@@ -1210,7 +1229,6 @@
 	spin_lock_init(&log->l_icloglock);
 	spin_lock_init(&log->l_grant_lock);
 	initnsema(&log->l_flushsema, 0, "ic-flush");
-	xlog_state_ticket_alloc(log);  /* wait until after icloglock inited */
 
 	/* log record size must be multiple of BBSIZE; see xlog_rec_header_t */
 	ASSERT((XFS_BUF_SIZE(bp) & BBMASK) == 0);
@@ -1240,9 +1258,9 @@
 		XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1);
 		iclog->ic_bp = bp;
 		iclog->hic_data = bp->b_addr;
-
+#ifdef DEBUG
 		log->l_iclog_bak[i] = (xfs_caddr_t)&(iclog->ic_header);
-
+#endif
 		head = &iclog->ic_header;
 		memset(head, 0, sizeof(xlog_rec_header_t));
 		head->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
@@ -1253,10 +1271,11 @@
 		head->h_fmt = cpu_to_be32(XLOG_FMT);
 		memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t));
 
-
 		iclog->ic_size = XFS_BUF_SIZE(bp) - log->l_iclog_hsize;
 		iclog->ic_state = XLOG_STATE_ACTIVE;
 		iclog->ic_log = log;
+		atomic_set(&iclog->ic_refcnt, 0);
+		spin_lock_init(&iclog->ic_callback_lock);
 		iclog->ic_callback_tail = &(iclog->ic_callback);
 		iclog->ic_datap = (char *)iclog->hic_data + log->l_iclog_hsize;
 
@@ -1405,7 +1424,7 @@
 	int		v2 = xfs_sb_version_haslogv2(&log->l_mp->m_sb);
 
 	XFS_STATS_INC(xs_log_writes);
-	ASSERT(iclog->ic_refcnt == 0);
+	ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
 
 	/* Add for LR header */
 	count_init = log->l_iclog_hsize + iclog->ic_offset;
@@ -1538,7 +1557,6 @@
 xlog_dealloc_log(xlog_t *log)
 {
 	xlog_in_core_t	*iclog, *next_iclog;
-	xlog_ticket_t	*tic, *next_tic;
 	int		i;
 
 	iclog = log->l_iclog;
@@ -1559,22 +1577,6 @@
 	spinlock_destroy(&log->l_icloglock);
 	spinlock_destroy(&log->l_grant_lock);
 
-	/* XXXsup take a look at this again. */
-	if ((log->l_ticket_cnt != log->l_ticket_tcnt)  &&
-	    !XLOG_FORCED_SHUTDOWN(log)) {
-		xfs_fs_cmn_err(CE_WARN, log->l_mp,
-			"xlog_dealloc_log: (cnt: %d, total: %d)",
-			log->l_ticket_cnt, log->l_ticket_tcnt);
-		/* ASSERT(log->l_ticket_cnt == log->l_ticket_tcnt); */
-
-	} else {
-		tic = log->l_unmount_free;
-		while (tic) {
-			next_tic = tic->t_next;
-			kmem_free(tic, PAGE_SIZE);
-			tic = next_tic;
-		}
-	}
 	xfs_buf_free(log->l_xbuf);
 #ifdef XFS_LOG_TRACE
 	if (log->l_trace != NULL) {
@@ -1987,7 +1989,7 @@
 		if (iclog->ic_state == XLOG_STATE_DIRTY) {
 			iclog->ic_state	= XLOG_STATE_ACTIVE;
 			iclog->ic_offset       = 0;
-			iclog->ic_callback	= NULL;   /* don't need to free */
+			ASSERT(iclog->ic_callback == NULL);
 			/*
 			 * If the number of ops in this iclog indicate it just
 			 * contains the dummy transaction, we can
@@ -2190,37 +2192,40 @@
 					be64_to_cpu(iclog->ic_header.h_lsn);
 				spin_unlock(&log->l_grant_lock);
 
-				/*
-				 * Keep processing entries in the callback list
-				 * until we come around and it is empty.  We
-				 * need to atomically see that the list is
-				 * empty and change the state to DIRTY so that
-				 * we don't miss any more callbacks being added.
-				 */
-				spin_lock(&log->l_icloglock);
 			} else {
+				spin_unlock(&log->l_icloglock);
 				ioerrors++;
 			}
-			cb = iclog->ic_callback;
 
+			/*
+			 * Keep processing entries in the callback list until
+			 * we come around and it is empty.  We need to
+			 * atomically see that the list is empty and change the
+			 * state to DIRTY so that we don't miss any more
+			 * callbacks being added.
+			 */
+			spin_lock(&iclog->ic_callback_lock);
+			cb = iclog->ic_callback;
 			while (cb) {
 				iclog->ic_callback_tail = &(iclog->ic_callback);
 				iclog->ic_callback = NULL;
-				spin_unlock(&log->l_icloglock);
+				spin_unlock(&iclog->ic_callback_lock);
 
 				/* perform callbacks in the order given */
 				for (; cb; cb = cb_next) {
 					cb_next = cb->cb_next;
 					cb->cb_func(cb->cb_arg, aborted);
 				}
-				spin_lock(&log->l_icloglock);
+				spin_lock(&iclog->ic_callback_lock);
 				cb = iclog->ic_callback;
 			}
 
 			loopdidcallbacks++;
 			funcdidcallbacks++;
 
+			spin_lock(&log->l_icloglock);
 			ASSERT(iclog->ic_callback == NULL);
+			spin_unlock(&iclog->ic_callback_lock);
 			if (!(iclog->ic_state & XLOG_STATE_IOERROR))
 				iclog->ic_state = XLOG_STATE_DIRTY;
 
@@ -2241,7 +2246,7 @@
 			repeats = 0;
 			xfs_fs_cmn_err(CE_WARN, log->l_mp,
 				"%s: possible infinite loop (%d iterations)",
-				__FUNCTION__, flushcnt);
+				__func__, flushcnt);
 		}
 	} while (!ioerrors && loopdidcallbacks);
 
@@ -2309,7 +2314,7 @@
 
 	ASSERT(iclog->ic_state == XLOG_STATE_SYNCING ||
 	       iclog->ic_state == XLOG_STATE_IOERROR);
-	ASSERT(iclog->ic_refcnt == 0);
+	ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
 	ASSERT(iclog->ic_bwritecnt == 1 || iclog->ic_bwritecnt == 2);
 
 
@@ -2391,7 +2396,7 @@
 	ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
 	head = &iclog->ic_header;
 
-	iclog->ic_refcnt++;			/* prevents sync */
+	atomic_inc(&iclog->ic_refcnt);	/* prevents sync */
 	log_offset = iclog->ic_offset;
 
 	/* On the 1st write to an iclog, figure out lsn.  This works
@@ -2423,12 +2428,12 @@
 		xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
 
 		/* If I'm the only one writing to this iclog, sync it to disk */
-		if (iclog->ic_refcnt == 1) {
+		if (atomic_read(&iclog->ic_refcnt) == 1) {
 			spin_unlock(&log->l_icloglock);
 			if ((error = xlog_state_release_iclog(log, iclog)))
 				return error;
 		} else {
-			iclog->ic_refcnt--;
+			atomic_dec(&iclog->ic_refcnt);
 			spin_unlock(&log->l_icloglock);
 		}
 		goto restart;
@@ -2792,18 +2797,6 @@
 
 
 /*
- * Atomically put back used ticket.
- */
-STATIC void
-xlog_state_put_ticket(xlog_t	    *log,
-		      xlog_ticket_t *tic)
-{
-	spin_lock(&log->l_icloglock);
-	xlog_ticket_put(log, tic);
-	spin_unlock(&log->l_icloglock);
-}	/* xlog_state_put_ticket */
-
-/*
  * Flush iclog to disk if this is the last reference to the given iclog and
  * the WANT_SYNC bit is set.
  *
@@ -2813,33 +2806,35 @@
  *
  */
 STATIC int
-xlog_state_release_iclog(xlog_t		*log,
-			 xlog_in_core_t	*iclog)
+xlog_state_release_iclog(
+	xlog_t		*log,
+	xlog_in_core_t	*iclog)
 {
 	int		sync = 0;	/* do we sync? */
 
-	xlog_assign_tail_lsn(log->l_mp);
+	if (iclog->ic_state & XLOG_STATE_IOERROR)
+		return XFS_ERROR(EIO);
 
-	spin_lock(&log->l_icloglock);
+	ASSERT(atomic_read(&iclog->ic_refcnt) > 0);
+	if (!atomic_dec_and_lock(&iclog->ic_refcnt, &log->l_icloglock))
+		return 0;
 
 	if (iclog->ic_state & XLOG_STATE_IOERROR) {
 		spin_unlock(&log->l_icloglock);
 		return XFS_ERROR(EIO);
 	}
-
-	ASSERT(iclog->ic_refcnt > 0);
 	ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE ||
 	       iclog->ic_state == XLOG_STATE_WANT_SYNC);
 
-	if (--iclog->ic_refcnt == 0 &&
-	    iclog->ic_state == XLOG_STATE_WANT_SYNC) {
+	if (iclog->ic_state == XLOG_STATE_WANT_SYNC) {
+		/* update tail before writing to iclog */
+		xlog_assign_tail_lsn(log->l_mp);
 		sync++;
 		iclog->ic_state = XLOG_STATE_SYNCING;
 		iclog->ic_header.h_tail_lsn = cpu_to_be64(log->l_tail_lsn);
 		xlog_verify_tail_lsn(log, iclog, log->l_tail_lsn);
 		/* cycle incremented when incrementing curr_block */
 	}
-
 	spin_unlock(&log->l_icloglock);
 
 	/*
@@ -2849,11 +2844,9 @@
 	 * this iclog has consistent data, so we ignore IOERROR
 	 * flags after this point.
 	 */
-	if (sync) {
+	if (sync)
 		return xlog_sync(log, iclog);
-	}
 	return 0;
-
 }	/* xlog_state_release_iclog */
 
 
@@ -2953,7 +2946,8 @@
 		 * previous iclog and go to sleep.
 		 */
 		if (iclog->ic_state == XLOG_STATE_DIRTY ||
-		    (iclog->ic_refcnt == 0 && iclog->ic_offset == 0)) {
+		    (atomic_read(&iclog->ic_refcnt) == 0
+		     && iclog->ic_offset == 0)) {
 			iclog = iclog->ic_prev;
 			if (iclog->ic_state == XLOG_STATE_ACTIVE ||
 			    iclog->ic_state == XLOG_STATE_DIRTY)
@@ -2961,14 +2955,14 @@
 			else
 				goto maybe_sleep;
 		} else {
-			if (iclog->ic_refcnt == 0) {
+			if (atomic_read(&iclog->ic_refcnt) == 0) {
 				/* We are the only one with access to this
 				 * iclog.  Flush it out now.  There should
 				 * be a roundoff of zero to show that someone
 				 * has already taken care of the roundoff from
 				 * the previous sync.
 				 */
-				iclog->ic_refcnt++;
+				atomic_inc(&iclog->ic_refcnt);
 				lsn = be64_to_cpu(iclog->ic_header.h_lsn);
 				xlog_state_switch_iclogs(log, iclog, 0);
 				spin_unlock(&log->l_icloglock);
@@ -3100,7 +3094,7 @@
 			already_slept = 1;
 			goto try_again;
 		} else {
-			iclog->ic_refcnt++;
+			atomic_inc(&iclog->ic_refcnt);
 			xlog_state_switch_iclogs(log, iclog, 0);
 			spin_unlock(&log->l_icloglock);
 			if (xlog_state_release_iclog(log, iclog))
@@ -3172,92 +3166,19 @@
  */
 
 /*
- *	Algorithm doesn't take into account page size. ;-(
- */
-STATIC void
-xlog_state_ticket_alloc(xlog_t *log)
-{
-	xlog_ticket_t	*t_list;
-	xlog_ticket_t	*next;
-	xfs_caddr_t	buf;
-	uint		i = (PAGE_SIZE / sizeof(xlog_ticket_t)) - 2;
-
-	/*
-	 * The kmem_zalloc may sleep, so we shouldn't be holding the
-	 * global lock.  XXXmiken: may want to use zone allocator.
-	 */
-	buf = (xfs_caddr_t) kmem_zalloc(PAGE_SIZE, KM_SLEEP);
-
-	spin_lock(&log->l_icloglock);
-
-	/* Attach 1st ticket to Q, so we can keep track of allocated memory */
-	t_list = (xlog_ticket_t *)buf;
-	t_list->t_next = log->l_unmount_free;
-	log->l_unmount_free = t_list++;
-	log->l_ticket_cnt++;
-	log->l_ticket_tcnt++;
-
-	/* Next ticket becomes first ticket attached to ticket free list */
-	if (log->l_freelist != NULL) {
-		ASSERT(log->l_tail != NULL);
-		log->l_tail->t_next = t_list;
-	} else {
-		log->l_freelist = t_list;
-	}
-	log->l_ticket_cnt++;
-	log->l_ticket_tcnt++;
-
-	/* Cycle through rest of alloc'ed memory, building up free Q */
-	for ( ; i > 0; i--) {
-		next = t_list + 1;
-		t_list->t_next = next;
-		t_list = next;
-		log->l_ticket_cnt++;
-		log->l_ticket_tcnt++;
-	}
-	t_list->t_next = NULL;
-	log->l_tail = t_list;
-	spin_unlock(&log->l_icloglock);
-}	/* xlog_state_ticket_alloc */
-
-
-/*
- * Put ticket into free list
- *
- * Assumption: log lock is held around this call.
+ * Free a used ticket.
  */
 STATIC void
 xlog_ticket_put(xlog_t		*log,
 		xlog_ticket_t	*ticket)
 {
 	sv_destroy(&ticket->t_sema);
-
-	/*
-	 * Don't think caching will make that much difference.  It's
-	 * more important to make debug easier.
-	 */
-#if 0
-	/* real code will want to use LIFO for caching */
-	ticket->t_next = log->l_freelist;
-	log->l_freelist = ticket;
-	/* no need to clear fields */
-#else
-	/* When we debug, it is easier if tickets are cycled */
-	ticket->t_next     = NULL;
-	if (log->l_tail) {
-		log->l_tail->t_next = ticket;
-	} else {
-		ASSERT(log->l_freelist == NULL);
-		log->l_freelist = ticket;
-	}
-	log->l_tail	    = ticket;
-#endif /* DEBUG */
-	log->l_ticket_cnt++;
+	kmem_zone_free(xfs_log_ticket_zone, ticket);
 }	/* xlog_ticket_put */
 
 
 /*
- * Grab ticket off freelist or allocation some more
+ * Allocate and initialise a new log ticket.
  */
 STATIC xlog_ticket_t *
 xlog_ticket_get(xlog_t		*log,
@@ -3269,21 +3190,9 @@
 	xlog_ticket_t	*tic;
 	uint		num_headers;
 
- alloc:
-	if (log->l_freelist == NULL)
-		xlog_state_ticket_alloc(log);		/* potentially sleep */
-
-	spin_lock(&log->l_icloglock);
-	if (log->l_freelist == NULL) {
-		spin_unlock(&log->l_icloglock);
-		goto alloc;
-	}
-	tic		= log->l_freelist;
-	log->l_freelist	= tic->t_next;
-	if (log->l_freelist == NULL)
-		log->l_tail = NULL;
-	log->l_ticket_cnt--;
-	spin_unlock(&log->l_icloglock);
+	tic = kmem_zone_zalloc(xfs_log_ticket_zone, KM_SLEEP|KM_MAYFAIL);
+	if (!tic)
+		return NULL;
 
 	/*
 	 * Permanent reservations have up to 'cnt'-1 active log operations
@@ -3611,8 +3520,8 @@
 	 * before we mark the filesystem SHUTDOWN and wake
 	 * everybody up to tell the bad news.
 	 */
-	spin_lock(&log->l_grant_lock);
 	spin_lock(&log->l_icloglock);
+	spin_lock(&log->l_grant_lock);
 	mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
 	XFS_BUF_DONE(mp->m_sb_bp);
 	/*
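
Note: the hunk above reorders the shutdown path to take l_icloglock before l_grant_lock, matching the nesting used elsewhere in the log code. Two paths that acquire the same pair of spinlocks in opposite orders can deadlock (ABBA), with each CPU holding one lock and spinning on the other. The rule, illustrated with generic lock names:

	/* Every path must nest the inner lock inside the outer, never
	 * the reverse. */
	spin_lock(&outer_lock);		/* e.g. l_icloglock */
	spin_lock(&inner_lock);		/* e.g. l_grant_lock */
	/* ... update state covered by both locks ... */
	spin_unlock(&inner_lock);
	spin_unlock(&outer_lock);
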
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index 4cdac04..d1d678e 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -142,8 +142,9 @@
 			 xfs_lsn_t	lsn,
 			 uint		flags,
 			 int		*log_forced);
-#define xfs_log_force(mp, lsn, flags) \
-	_xfs_log_force(mp, lsn, flags, NULL);
+void	  xfs_log_force(struct xfs_mount	*mp,
+			xfs_lsn_t		lsn,
+			uint			flags);
 int	  xfs_log_mount(struct xfs_mount	*mp,
 			struct xfs_buftarg	*log_target,
 			xfs_daddr_t		start_block,
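
Note: the removed xfs_log_force() macro expanded with a trailing semicolon, so a use like `if (cond) xfs_log_force(...); else ...` would not compile, and callers got no argument type checking. A real function avoids both problems; one plausible body (the actual implementation may also report errors) is:

	void
	xfs_log_force(
		struct xfs_mount	*mp,
		xfs_lsn_t		lsn,
		uint			flags)
	{
		(void)_xfs_log_force(mp, lsn, flags, NULL);
	}
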
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index c6244cc..8952a39 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -242,7 +242,7 @@
 
 typedef struct xlog_ticket {
 	sv_t		   t_sema;	 /* sleep on this semaphore      : 20 */
- 	struct xlog_ticket *t_next;	 /*			         :4|8 */
+	struct xlog_ticket *t_next;	 /*			         :4|8 */
 	struct xlog_ticket *t_prev;	 /*				 :4|8 */
 	xlog_tid_t	   t_tid;	 /* transaction identifier	 : 4  */
 	int		   t_curr_res;	 /* current reservation in bytes : 4  */
@@ -324,6 +324,19 @@
  * - ic_offset is the current number of bytes written to in this iclog.
  * - ic_refcnt is bumped when someone is writing to the log.
  * - ic_state is the state of the iclog.
+ *
+ * Because of cacheline contention on large machines, we need to separate
+ * various resources onto different cachelines. To start with, make the
+ * structure cacheline aligned. The following fields can be contended on
+ * by independent processes:
+ *
+ *	- ic_callback_*
+ *	- ic_refcnt
+ *	- fields protected by the global l_icloglock
+ *
+ * so we need to ensure that these fields are located in separate cachelines.
+ * We'll put all the read-only and l_icloglock fields in the first cacheline,
+ * and move everything else out to subsequent cachelines.
  */
 typedef struct xlog_iclog_fields {
 	sv_t			ic_forcesema;
@@ -332,17 +345,22 @@
 	struct xlog_in_core	*ic_prev;
 	struct xfs_buf		*ic_bp;
 	struct log		*ic_log;
-	xfs_log_callback_t	*ic_callback;
-	xfs_log_callback_t	**ic_callback_tail;
-#ifdef XFS_LOG_TRACE
-	struct ktrace		*ic_trace;
-#endif
 	int			ic_size;
 	int			ic_offset;
-	int			ic_refcnt;
 	int			ic_bwritecnt;
 	ushort_t		ic_state;
 	char			*ic_datap;	/* pointer to iclog data */
+#ifdef XFS_LOG_TRACE
+	struct ktrace		*ic_trace;
+#endif
+
+	/* Callback structures need their own cacheline */
+	spinlock_t		ic_callback_lock ____cacheline_aligned_in_smp;
+	xfs_log_callback_t	*ic_callback;
+	xfs_log_callback_t	**ic_callback_tail;
+
+	/* reference counts need their own cacheline */
+	atomic_t		ic_refcnt ____cacheline_aligned_in_smp;
 } xlog_iclog_fields_t;
 
 typedef union xlog_in_core2 {
@@ -366,6 +384,7 @@
 #define	ic_bp		hic_fields.ic_bp
 #define	ic_log		hic_fields.ic_log
 #define	ic_callback	hic_fields.ic_callback
+#define	ic_callback_lock hic_fields.ic_callback_lock
 #define	ic_callback_tail hic_fields.ic_callback_tail
 #define	ic_trace	hic_fields.ic_trace
 #define	ic_size		hic_fields.ic_size
@@ -383,55 +402,11 @@
  * that round off problems won't occur when releasing partial reservations.
  */
 typedef struct log {
-	/* The following block of fields are changed while holding icloglock */
-	sema_t			l_flushsema;    /* iclog flushing semaphore */
-	int			l_flushcnt;	/* # of procs waiting on this
-						 * sema */
-	int			l_ticket_cnt;	/* free ticket count */
-	int			l_ticket_tcnt;	/* total ticket count */
-	int			l_covered_state;/* state of "covering disk
-						 * log entries" */
-	xlog_ticket_t		*l_freelist;    /* free list of tickets */
-	xlog_ticket_t		*l_unmount_free;/* kmem_free these addresses */
-	xlog_ticket_t		*l_tail;        /* free list of tickets */
-	xlog_in_core_t		*l_iclog;       /* head log queue	*/
-	spinlock_t		l_icloglock;    /* grab to change iclog state */
-	xfs_lsn_t		l_tail_lsn;     /* lsn of 1st LR with unflushed
-						 * buffers */
-	xfs_lsn_t		l_last_sync_lsn;/* lsn of last LR on disk */
+	/* The following fields don't need locking */
 	struct xfs_mount	*l_mp;	        /* mount point */
 	struct xfs_buf		*l_xbuf;        /* extra buffer for log
 						 * wrapping */
 	struct xfs_buftarg	*l_targ;        /* buftarg of log */
-	xfs_daddr_t		l_logBBstart;   /* start block of log */
-	int			l_logsize;      /* size of log in bytes */
-	int			l_logBBsize;    /* size of log in BB chunks */
-	int			l_curr_cycle;   /* Cycle number of log writes */
-	int			l_prev_cycle;   /* Cycle number before last
-						 * block increment */
-	int			l_curr_block;   /* current logical log block */
-	int			l_prev_block;   /* previous logical log block */
-	int			l_iclog_size;	/* size of log in bytes */
-	int			l_iclog_size_log; /* log power size of log */
-	int			l_iclog_bufs;	/* number of iclog buffers */
-
-	/* The following field are used for debugging; need to hold icloglock */
-	char			*l_iclog_bak[XLOG_MAX_ICLOGS];
-
-	/* The following block of fields are changed while holding grant_lock */
-	spinlock_t		l_grant_lock;
-	xlog_ticket_t		*l_reserve_headq;
-	xlog_ticket_t		*l_write_headq;
-	int			l_grant_reserve_cycle;
-	int			l_grant_reserve_bytes;
-	int			l_grant_write_cycle;
-	int			l_grant_write_bytes;
-
-	/* The following fields don't need locking */
-#ifdef XFS_LOG_TRACE
-	struct ktrace		*l_trace;
-	struct ktrace		*l_grant_trace;
-#endif
 	uint			l_flags;
 	uint			l_quotaoffs_flag; /* XFS_DQ_*, for QUOTAOFFs */
 	struct xfs_buf_cancel	**l_buf_cancel_table;
@@ -440,6 +415,50 @@
 	uint			l_sectbb_log;   /* log2 of sector size in BBs */
 	uint			l_sectbb_mask;  /* sector size (in BBs)
 						 * alignment mask */
+	int			l_iclog_size;	/* size of log in bytes */
+	int			l_iclog_size_log; /* log power size of log */
+	int			l_iclog_bufs;	/* number of iclog buffers */
+	xfs_daddr_t		l_logBBstart;   /* start block of log */
+	int			l_logsize;      /* size of log in bytes */
+	int			l_logBBsize;    /* size of log in BB chunks */
+
+	/* The following block of fields are changed while holding icloglock */
+	sema_t			l_flushsema ____cacheline_aligned_in_smp;
+						/* iclog flushing semaphore */
+	int			l_flushcnt;	/* # of procs waiting on this
+						 * sema */
+	int			l_covered_state;/* state of "covering disk
+						 * log entries" */
+	xlog_in_core_t		*l_iclog;       /* head log queue	*/
+	spinlock_t		l_icloglock;    /* grab to change iclog state */
+	xfs_lsn_t		l_tail_lsn;     /* lsn of 1st LR with unflushed
+						 * buffers */
+	xfs_lsn_t		l_last_sync_lsn;/* lsn of last LR on disk */
+	int			l_curr_cycle;   /* Cycle number of log writes */
+	int			l_prev_cycle;   /* Cycle number before last
+						 * block increment */
+	int			l_curr_block;   /* current logical log block */
+	int			l_prev_block;   /* previous logical log block */
+
+	/* The following block of fields are changed while holding grant_lock */
+	spinlock_t		l_grant_lock ____cacheline_aligned_in_smp;
+	xlog_ticket_t		*l_reserve_headq;
+	xlog_ticket_t		*l_write_headq;
+	int			l_grant_reserve_cycle;
+	int			l_grant_reserve_bytes;
+	int			l_grant_write_cycle;
+	int			l_grant_write_bytes;
+
+#ifdef XFS_LOG_TRACE
+	struct ktrace		*l_trace;
+	struct ktrace		*l_grant_trace;
+#endif
+
+	/* The following fields are used for debugging; need to hold icloglock */
+#ifdef DEBUG
+	char			*l_iclog_bak[XLOG_MAX_ICLOGS];
+#endif
+
 } xlog_t;
 
 #define XLOG_FORCED_SHUTDOWN(log)	((log)->l_flags & XLOG_IO_ERROR)
@@ -459,6 +478,8 @@
 extern void	 xlog_put_bp(struct xfs_buf *);
 extern int	 xlog_bread(xlog_t *, xfs_daddr_t, int, struct xfs_buf *);
 
+extern kmem_zone_t	*xfs_log_ticket_zone;
+
 /* iclog tracing */
 #define XLOG_TRACE_GRAB_FLUSH  1
 #define XLOG_TRACE_REL_FLUSH   2
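
Note: the ____cacheline_aligned_in_smp annotations added to xlog_iclog_fields and struct log implement the comment above: fields written by independent CPUs are pushed onto separate cachelines, so a store to one (say, ic_refcnt) does not invalidate the line holding another (the callback list), which is the false-sharing problem. The generic shape of the technique, with illustrative names:

	struct hot_object {
		/* read-mostly fields share the first cacheline */
		int			size;
		void			*data;

		/* a contended lock and the list it protects get their own line */
		spinlock_t		cb_lock ____cacheline_aligned_in_smp;
		struct list_head	callbacks;

		/* the refcount bounces independently; separate it as well */
		atomic_t		refcnt ____cacheline_aligned_in_smp;
	};
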
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index b2b70eb..e65ab4a 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -46,6 +46,7 @@
 #include "xfs_trans_priv.h"
 #include "xfs_quota.h"
 #include "xfs_rw.h"
+#include "xfs_utils.h"
 
 STATIC int	xlog_find_zeroed(xlog_t *, xfs_daddr_t *);
 STATIC int	xlog_clear_stale_blocks(xlog_t *, xfs_lsn_t);
@@ -120,7 +121,8 @@
 	XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp);
 
 	xfsbdstrat(log->l_mp, bp);
-	if ((error = xfs_iowait(bp)))
+	error = xfs_iowait(bp);
+	if (error)
 		xfs_ioerror_alert("xlog_bread", log->l_mp,
 				  bp, XFS_BUF_ADDR(bp));
 	return error;
@@ -191,7 +193,7 @@
 {
 	int			b;
 
-	cmn_err(CE_DEBUG, "%s:  SB : uuid = ", __FUNCTION__);
+	cmn_err(CE_DEBUG, "%s:  SB : uuid = ", __func__);
 	for (b = 0; b < 16; b++)
 		cmn_err(CE_DEBUG, "%02x", ((uchar_t *)&mp->m_sb.sb_uuid)[b]);
 	cmn_err(CE_DEBUG, ", fmt = %d\n", XLOG_FMT);
@@ -1160,10 +1162,14 @@
 		if (j == 0 && (start_block + endcount > ealign)) {
 			offset = XFS_BUF_PTR(bp);
 			balign = BBTOB(ealign - start_block);
-			XFS_BUF_SET_PTR(bp, offset + balign, BBTOB(sectbb));
-			if ((error = xlog_bread(log, ealign, sectbb, bp)))
+			error = XFS_BUF_SET_PTR(bp, offset + balign,
+						BBTOB(sectbb));
+			if (!error)
+				error = xlog_bread(log, ealign, sectbb, bp);
+			if (!error)
+				error = XFS_BUF_SET_PTR(bp, offset, bufblks);
+			if (error)
 				break;
-			XFS_BUF_SET_PTR(bp, offset, bufblks);
 		}
 
 		offset = xlog_align(log, start_block, endcount, bp);
@@ -2280,7 +2286,9 @@
 		 * invalidate the buffer when we write it out below.
 		 */
 		imap.im_blkno = 0;
-		xfs_imap(log->l_mp, NULL, ino, &imap, 0);
+		error = xfs_imap(log->l_mp, NULL, ino, &imap, 0);
+		if (error)
+			goto error;
 	}
 
 	/*
@@ -2964,7 +2972,7 @@
  * Process an extent free intent item that was recovered from
  * the log.  We need to free the extents that it describes.
  */
-STATIC void
+STATIC int
 xlog_recover_process_efi(
 	xfs_mount_t		*mp,
 	xfs_efi_log_item_t	*efip)
@@ -2972,6 +2980,7 @@
 	xfs_efd_log_item_t	*efdp;
 	xfs_trans_t		*tp;
 	int			i;
+	int			error = 0;
 	xfs_extent_t		*extp;
 	xfs_fsblock_t		startblock_fsb;
 
@@ -2995,23 +3004,32 @@
 			 * free the memory associated with it.
 			 */
 			xfs_efi_release(efip, efip->efi_format.efi_nextents);
-			return;
+			return XFS_ERROR(EIO);
 		}
 	}
 
 	tp = xfs_trans_alloc(mp, 0);
-	xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, 0, 0);
+	error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, 0, 0);
+	if (error)
+		goto abort_error;
 	efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
 
 	for (i = 0; i < efip->efi_format.efi_nextents; i++) {
 		extp = &(efip->efi_format.efi_extents[i]);
-		xfs_free_extent(tp, extp->ext_start, extp->ext_len);
+		error = xfs_free_extent(tp, extp->ext_start, extp->ext_len);
+		if (error)
+			goto abort_error;
 		xfs_trans_log_efd_extent(tp, efdp, extp->ext_start,
 					 extp->ext_len);
 	}
 
 	efip->efi_flags |= XFS_EFI_RECOVERED;
-	xfs_trans_commit(tp, 0);
+	error = xfs_trans_commit(tp, 0);
+	return error;
+
+abort_error:
+	xfs_trans_cancel(tp, XFS_TRANS_ABORT);
+	return error;
 }
 
 /*
@@ -3059,7 +3077,7 @@
  * everything already in the AIL, we stop processing as soon as
  * we see something other than an EFI in the AIL.
  */
-STATIC void
+STATIC int
 xlog_recover_process_efis(
 	xlog_t			*log)
 {
@@ -3067,6 +3085,7 @@
 	xfs_efi_log_item_t	*efip;
 	int			gen;
 	xfs_mount_t		*mp;
+	int			error = 0;
 
 	mp = log->l_mp;
 	spin_lock(&mp->m_ail_lock);
@@ -3091,11 +3110,14 @@
 		}
 
 		spin_unlock(&mp->m_ail_lock);
-		xlog_recover_process_efi(mp, efip);
+		error = xlog_recover_process_efi(mp, efip);
+		if (error)
+			return error;
 		spin_lock(&mp->m_ail_lock);
 		lip = xfs_trans_next_ail(mp, lip, &gen, NULL);
 	}
 	spin_unlock(&mp->m_ail_lock);
+	return error;
 }
 
 /*
@@ -3115,21 +3137,18 @@
 	int		error;
 
 	tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET);
-	xfs_trans_reserve(tp, 0, XFS_CLEAR_AGI_BUCKET_LOG_RES(mp), 0, 0, 0);
-
-	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
+	error = xfs_trans_reserve(tp, 0, XFS_CLEAR_AGI_BUCKET_LOG_RES(mp), 0, 0, 0);
+	if (!error)
+		error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
 				   XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
 				   XFS_FSS_TO_BB(mp, 1), 0, &agibp);
-	if (error) {
-		xfs_trans_cancel(tp, XFS_TRANS_ABORT);
-		return;
-	}
+	if (error)
+		goto out_abort;
 
+	error = EINVAL;
 	agi = XFS_BUF_TO_AGI(agibp);
-	if (be32_to_cpu(agi->agi_magicnum) != XFS_AGI_MAGIC) {
-		xfs_trans_cancel(tp, XFS_TRANS_ABORT);
-		return;
-	}
+	if (be32_to_cpu(agi->agi_magicnum) != XFS_AGI_MAGIC)
+		goto out_abort;
 
 	agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
 	offset = offsetof(xfs_agi_t, agi_unlinked) +
@@ -3137,7 +3156,17 @@
 	xfs_trans_log_buf(tp, agibp, offset,
 			  (offset + sizeof(xfs_agino_t) - 1));
 
-	(void) xfs_trans_commit(tp, 0);
+	error = xfs_trans_commit(tp, 0);
+	if (error)
+		goto out_error;
+	return;
+
+out_abort:
+	xfs_trans_cancel(tp, XFS_TRANS_ABORT);
+out_error:
+	xfs_fs_cmn_err(CE_WARN, mp, "xlog_recover_clear_agi_bucket: "
+			"failed to clear agi %d. Continuing.", agno);
+	return;
 }
 
 /*
@@ -3214,7 +3243,8 @@
 					 * next inode in the bucket.
 					 */
 					error = xfs_itobp(mp, NULL, ip, &dip,
-							&ibp, 0, 0);
+							&ibp, 0, 0,
+							XFS_BUF_LOCK);
 					ASSERT(error || (dip != NULL));
 				}
 
@@ -3247,7 +3277,7 @@
 					if (ip->i_d.di_mode == 0)
 						xfs_iput_new(ip, 0);
 					else
-						VN_RELE(XFS_ITOV(ip));
+						IRELE(ip);
 				} else {
 					/*
 					 * We can't read in the inode
@@ -3445,7 +3475,7 @@
 	    (!rhead->h_version ||
 	    (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
 		xlog_warn("XFS: %s: unrecognised log version (%d).",
-			__FUNCTION__, be32_to_cpu(rhead->h_version));
+			__func__, be32_to_cpu(rhead->h_version));
 		return XFS_ERROR(EIO);
 	}
 
@@ -3604,15 +3634,19 @@
 				 *   _first_, then the log start (LR header end)
 				 *   - order is important.
 				 */
+				wrapped_hblks = hblks - split_hblks;
 				bufaddr = XFS_BUF_PTR(hbp);
-				XFS_BUF_SET_PTR(hbp,
+				error = XFS_BUF_SET_PTR(hbp,
 						bufaddr + BBTOB(split_hblks),
 						BBTOB(hblks - split_hblks));
-				wrapped_hblks = hblks - split_hblks;
-				error = xlog_bread(log, 0, wrapped_hblks, hbp);
+				if (!error)
+					error = xlog_bread(log, 0,
+							wrapped_hblks, hbp);
+				if (!error)
+					error = XFS_BUF_SET_PTR(hbp, bufaddr,
+							BBTOB(hblks));
 				if (error)
 					goto bread_err2;
-				XFS_BUF_SET_PTR(hbp, bufaddr, BBTOB(hblks));
 				if (!offset)
 					offset = xlog_align(log, 0,
 							wrapped_hblks, hbp);
@@ -3664,13 +3698,18 @@
 				 *   - order is important.
 				 */
 				bufaddr = XFS_BUF_PTR(dbp);
-				XFS_BUF_SET_PTR(dbp,
+				error = XFS_BUF_SET_PTR(dbp,
 						bufaddr + BBTOB(split_bblks),
 						BBTOB(bblks - split_bblks));
-				if ((error = xlog_bread(log, wrapped_hblks,
-						bblks - split_bblks, dbp)))
+				if (!error)
+					error = xlog_bread(log, wrapped_hblks,
+							bblks - split_bblks,
+							dbp);
+				if (!error)
+					error = XFS_BUF_SET_PTR(dbp, bufaddr,
+							h_size);
+				if (error)
 					goto bread_err2;
-				XFS_BUF_SET_PTR(dbp, bufaddr, h_size);
 				if (!offset)
 					offset = xlog_align(log, wrapped_hblks,
 						bblks - split_bblks, dbp);
@@ -3826,7 +3865,8 @@
 	XFS_BUF_READ(bp);
 	XFS_BUF_UNASYNC(bp);
 	xfsbdstrat(log->l_mp, bp);
-	if ((error = xfs_iowait(bp))) {
+	error = xfs_iowait(bp);
+	if (error) {
 		xfs_ioerror_alert("xlog_do_recover",
 				  log->l_mp, bp, XFS_BUF_ADDR(bp));
 		ASSERT(0);
@@ -3917,7 +3957,14 @@
 	 * rather than accepting new requests.
 	 */
 	if (log->l_flags & XLOG_RECOVERY_NEEDED) {
-		xlog_recover_process_efis(log);
+		int	error;
+		error = xlog_recover_process_efis(log);
+		if (error) {
+			cmn_err(CE_ALERT,
+				"Failed to recover EFIs on filesystem: %s",
+				log->l_mp->m_fsname);
+			return error;
+		}
 		/*
 		 * Sync the log to get all the EFIs out of the AIL.
 		 * This isn't absolutely necessary, but it helps in
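
Note: the recovery changes above follow one conversion pattern used throughout this series: helpers that used to return void now return int, every xfs_trans_* call is checked, and failures unwind through a single abort label so the transaction is either committed or cancelled, never leaked. Condensed from xlog_recover_process_efi():

	tp = xfs_trans_alloc(mp, 0);
	error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, 0, 0);
	if (error)
		goto abort_error;
	/* ... logged work; any failure jumps to abort_error ... */
	return xfs_trans_commit(tp, 0);	/* commit itself can fail */

abort_error:
	xfs_trans_cancel(tp, XFS_TRANS_ABORT);	/* back out the reservation */
	return error;
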
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 8ed164e..2fec452 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -43,8 +43,9 @@
 #include "xfs_rw.h"
 #include "xfs_quota.h"
 #include "xfs_fsops.h"
+#include "xfs_utils.h"
 
-STATIC void	xfs_mount_log_sb(xfs_mount_t *, __int64_t);
+STATIC int	xfs_mount_log_sb(xfs_mount_t *, __int64_t);
 STATIC int	xfs_uuid_mount(xfs_mount_t *);
 STATIC void	xfs_uuid_unmount(xfs_mount_t *mp);
 STATIC void	xfs_unmountfs_wait(xfs_mount_t *);
@@ -57,7 +58,7 @@
 STATIC void	xfs_icsb_sync_counters(xfs_mount_t *);
 STATIC int	xfs_icsb_modify_counters(xfs_mount_t *, xfs_sb_field_t,
 						int64_t, int);
-STATIC int	xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t);
+STATIC void	xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t);
 
 #else
 
@@ -956,7 +957,6 @@
 {
 	xfs_sb_t	*sbp = &(mp->m_sb);
 	xfs_inode_t	*rip;
-	bhv_vnode_t	*rvp = NULL;
 	__uint64_t	resblks;
 	__int64_t	update_flags = 0LL;
 	uint		quotamount, quotaflags;
@@ -964,11 +964,6 @@
 	int		uuid_mounted = 0;
 	int		error = 0;
 
-	if (mp->m_sb_bp == NULL) {
-		error = xfs_readsb(mp, mfsi_flags);
-		if (error)
-			return error;
-	}
 	xfs_mount_common(mp, sbp);
 
 	/*
@@ -1163,7 +1158,6 @@
 	}
 
 	ASSERT(rip != NULL);
-	rvp = XFS_ITOV(rip);
 
 	if (unlikely((rip->i_d.di_mode & S_IFMT) != S_IFDIR)) {
 		cmn_err(CE_WARN, "XFS: corrupted root inode");
@@ -1195,8 +1189,13 @@
 	/*
 	 * If fs is not mounted readonly, then update the superblock changes.
 	 */
-	if (update_flags && !(mp->m_flags & XFS_MOUNT_RDONLY))
-		xfs_mount_log_sb(mp, update_flags);
+	if (update_flags && !(mp->m_flags & XFS_MOUNT_RDONLY)) {
+		error = xfs_mount_log_sb(mp, update_flags);
+		if (error) {
+			cmn_err(CE_WARN, "XFS: failed to write sb changes");
+			goto error4;
+		}
+	}
 
 	/*
 	 * Initialise the XFS quota management subsystem for this mount
@@ -1233,12 +1232,15 @@
 	 *
 	 * We default to 5% or 1024 fsbs of space reserved, whichever is smaller.
 	 * This may drive us straight to ENOSPC on mount, but that implies
-	 * we were already there on the last unmount.
+	 * we were already there on the last unmount. Warn if this occurs.
 	 */
 	resblks = mp->m_sb.sb_dblocks;
 	do_div(resblks, 20);
 	resblks = min_t(__uint64_t, resblks, 1024);
-	xfs_reserve_blocks(mp, &resblks, NULL);
+	error = xfs_reserve_blocks(mp, &resblks, NULL);
+	if (error)
+		cmn_err(CE_WARN, "XFS: Unable to allocate reserve blocks. "
+				"Continuing without a reserve pool.");
 
 	return 0;
 
@@ -1246,7 +1248,7 @@
 	/*
 	 * Free up the root inode.
 	 */
-	VN_RELE(rvp);
+	IRELE(rip);
  error3:
 	xfs_log_unmount_dealloc(mp);
  error2:
@@ -1274,6 +1276,7 @@
 xfs_unmountfs(xfs_mount_t *mp, struct cred *cr)
 {
 	__uint64_t	resblks;
+	int		error = 0;
 
 	/*
 	 * We can potentially deadlock here if we have an inode cluster
@@ -1317,9 +1320,15 @@
 	 * value does not matter....
 	 */
 	resblks = 0;
-	xfs_reserve_blocks(mp, &resblks, NULL);
+	error = xfs_reserve_blocks(mp, &resblks, NULL);
+	if (error)
+		cmn_err(CE_WARN, "XFS: Unable to free reserved block pool. "
+				"Freespace may not be correct on next mount.");
 
-	xfs_log_sbcount(mp, 1);
+	error = xfs_log_sbcount(mp, 1);
+	if (error)
+		cmn_err(CE_WARN, "XFS: Unable to update superblock counters. "
+				"Freespace may not be correct on next mount.");
 	xfs_unmountfs_writesb(mp);
 	xfs_unmountfs_wait(mp); 		/* wait for async bufs */
 	xfs_log_unmount(mp);			/* Done! No more fs ops. */
@@ -1411,9 +1420,8 @@
 	xfs_mod_sb(tp, XFS_SB_IFREE | XFS_SB_ICOUNT | XFS_SB_FDBLOCKS);
 	if (sync)
 		xfs_trans_set_sync(tp);
-	xfs_trans_commit(tp, 0);
-
-	return 0;
+	error = xfs_trans_commit(tp, 0);
+	return error;
 }
 
 STATIC void
@@ -1462,7 +1470,6 @@
 		XFS_BUF_UNASYNC(sbp);
 		ASSERT(XFS_BUF_TARGET(sbp) == mp->m_ddev_targp);
 		xfsbdstrat(mp, sbp);
-		/* Nevermind errors we might get here. */
 		error = xfs_iowait(sbp);
 		if (error)
 			xfs_ioerror_alert("xfs_unmountfs_writesb",
@@ -1911,24 +1918,27 @@
  * be altered by the mount options, as well as any potential sb_features2
  * fixup. Only the first superblock is updated.
  */
-STATIC void
+STATIC int
 xfs_mount_log_sb(
 	xfs_mount_t	*mp,
 	__int64_t	fields)
 {
 	xfs_trans_t	*tp;
+	int		error;
 
 	ASSERT(fields & (XFS_SB_UNIT | XFS_SB_WIDTH | XFS_SB_UUID |
 			 XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2));
 
 	tp = xfs_trans_alloc(mp, XFS_TRANS_SB_UNIT);
-	if (xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
-				XFS_DEFAULT_LOG_COUNT)) {
+	error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
+				XFS_DEFAULT_LOG_COUNT);
+	if (error) {
 		xfs_trans_cancel(tp, 0);
-		return;
+		return error;
 	}
 	xfs_mod_sb(tp, fields);
-	xfs_trans_commit(tp, 0);
+	error = xfs_trans_commit(tp, 0);
+	return error;
 }
 
 
@@ -2189,7 +2199,7 @@
 	return test_bit(field, &mp->m_icsb_counters);
 }
 
-STATIC int
+STATIC void
 xfs_icsb_disable_counter(
 	xfs_mount_t	*mp,
 	xfs_sb_field_t	field)
@@ -2207,7 +2217,7 @@
 	 * the m_icsb_mutex.
 	 */
 	if (xfs_icsb_counter_disabled(mp, field))
-		return 0;
+		return;
 
 	xfs_icsb_lock_all_counters(mp);
 	if (!test_and_set_bit(field, &mp->m_icsb_counters)) {
@@ -2230,8 +2240,6 @@
 	}
 
 	xfs_icsb_unlock_all_counters(mp);
-
-	return 0;
 }
 
 STATIC void
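
Note: the xfs_mountfs error handling above distinguishes fatal failures from degraded-but-usable ones. A superblock update that cannot be logged aborts the mount, while a failure to fill the reserve block pool only warns, because the filesystem still operates correctly without it. Condensed from the hunks above:

	error = xfs_mount_log_sb(mp, update_flags);
	if (error) {
		cmn_err(CE_WARN, "XFS: failed to write sb changes");
		goto error4;			/* cannot mount safely */
	}

	error = xfs_reserve_blocks(mp, &resblks, NULL);
	if (error)				/* usable, just less resilient */
		cmn_err(CE_WARN, "XFS: Unable to allocate reserve blocks. "
				"Continuing without a reserve pool.");
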
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 1d8a472..1ed5751 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -66,17 +66,17 @@
  * Prototypes and functions for the Data Migration subsystem.
  */
 
-typedef int	(*xfs_send_data_t)(int, bhv_vnode_t *,
-			xfs_off_t, size_t, int, bhv_vrwlock_t *);
+typedef int	(*xfs_send_data_t)(int, struct xfs_inode *,
+			xfs_off_t, size_t, int, int *);
 typedef int	(*xfs_send_mmap_t)(struct vm_area_struct *, uint);
-typedef int	(*xfs_send_destroy_t)(bhv_vnode_t *, dm_right_t);
+typedef int	(*xfs_send_destroy_t)(struct xfs_inode *, dm_right_t);
 typedef int	(*xfs_send_namesp_t)(dm_eventtype_t, struct xfs_mount *,
-			bhv_vnode_t *,
-			dm_right_t, bhv_vnode_t *, dm_right_t,
-			char *, char *, mode_t, int, int);
+			struct xfs_inode *, dm_right_t,
+			struct xfs_inode *, dm_right_t,
+			const char *, const char *, mode_t, int, int);
 typedef int	(*xfs_send_mount_t)(struct xfs_mount *, dm_right_t,
 			char *, char *);
-typedef void	(*xfs_send_unmount_t)(struct xfs_mount *, bhv_vnode_t *,
+typedef void	(*xfs_send_unmount_t)(struct xfs_mount *, struct xfs_inode *,
 			dm_right_t, mode_t, int, int);
 
 typedef struct xfs_dmops {
@@ -88,20 +88,20 @@
 	xfs_send_unmount_t	xfs_send_unmount;
 } xfs_dmops_t;
 
-#define XFS_SEND_DATA(mp, ev,vp,off,len,fl,lock) \
-	(*(mp)->m_dm_ops->xfs_send_data)(ev,vp,off,len,fl,lock)
+#define XFS_SEND_DATA(mp, ev,ip,off,len,fl,lock) \
+	(*(mp)->m_dm_ops->xfs_send_data)(ev,ip,off,len,fl,lock)
 #define XFS_SEND_MMAP(mp, vma,fl) \
 	(*(mp)->m_dm_ops->xfs_send_mmap)(vma,fl)
-#define XFS_SEND_DESTROY(mp, vp,right) \
-	(*(mp)->m_dm_ops->xfs_send_destroy)(vp,right)
+#define XFS_SEND_DESTROY(mp, ip,right) \
+	(*(mp)->m_dm_ops->xfs_send_destroy)(ip,right)
 #define XFS_SEND_NAMESP(mp, ev,b1,r1,b2,r2,n1,n2,mode,rval,fl) \
 	(*(mp)->m_dm_ops->xfs_send_namesp)(ev,NULL,b1,r1,b2,r2,n1,n2,mode,rval,fl)
 #define XFS_SEND_PREUNMOUNT(mp,b1,r1,b2,r2,n1,n2,mode,rval,fl) \
 	(*(mp)->m_dm_ops->xfs_send_namesp)(DM_EVENT_PREUNMOUNT,mp,b1,r1,b2,r2,n1,n2,mode,rval,fl)
 #define XFS_SEND_MOUNT(mp,right,path,name) \
 	(*(mp)->m_dm_ops->xfs_send_mount)(mp,right,path,name)
-#define XFS_SEND_UNMOUNT(mp, vp,right,mode,rval,fl) \
-	(*(mp)->m_dm_ops->xfs_send_unmount)(mp,vp,right,mode,rval,fl)
+#define XFS_SEND_UNMOUNT(mp, ip,right,mode,rval,fl) \
+	(*(mp)->m_dm_ops->xfs_send_unmount)(mp,ip,right,mode,rval,fl)
 
 
 /*
@@ -220,7 +220,7 @@
 #endif
 
 typedef struct xfs_ail {
-	xfs_ail_entry_t		xa_ail;
+	struct list_head	xa_ail;
 	uint			xa_gen;
 	struct task_struct	*xa_task;
 	xfs_lsn_t		xa_target;
@@ -401,7 +401,7 @@
 
 /*
  * Allow large block sizes to be reported to userspace programs if the
- * "largeio" mount option is used. 
+ * "largeio" mount option is used.
  *
  * If compatibility mode is specified, simply return the basic unit of caching
  * so that we don't get inefficient read/modify/write I/O from user apps.
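
Note: the DMAPI typedefs above now pass struct xfs_inode instead of bhv_vnode_t, but the shape of the interface is unchanged: an ops vector (xfs_dmops_t) of function pointers, reached through wrapper macros, so a kernel built without DMAPI can install no-op stubs. A sketch of that pattern with illustrative names, not the actual xfs_dmops code:

	struct demo_dmops {
		int	(*send_data)(int ev, struct xfs_inode *ip,
				     xfs_off_t off, size_t len,
				     int flags, int *lockflags);
	};

	/* stub used when the event subsystem is not configured */
	static int demo_send_data_stub(int ev, struct xfs_inode *ip,
				       xfs_off_t off, size_t len,
				       int flags, int *lockflags)
	{
		return 0;	/* succeed without doing anything */
	}

	static const struct demo_dmops demo_stub_ops = {
		.send_data = demo_send_data_stub,
	};
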
diff --git a/fs/xfs/xfs_rename.c b/fs/xfs/xfs_rename.c
index 7eb157a..ee37189 100644
--- a/fs/xfs/xfs_rename.c
+++ b/fs/xfs/xfs_rename.c
@@ -36,7 +36,6 @@
 #include "xfs_bmap.h"
 #include "xfs_error.h"
 #include "xfs_quota.h"
-#include "xfs_refcache.h"
 #include "xfs_utils.h"
 #include "xfs_trans_space.h"
 #include "xfs_vnodeops.h"
@@ -84,25 +83,23 @@
  */
 STATIC int
 xfs_lock_for_rename(
-	xfs_inode_t	*dp1,	/* old (source) directory inode */
-	xfs_inode_t	*dp2,	/* new (target) directory inode */
-	bhv_vname_t	*vname1,/* old entry name */
-	bhv_vname_t	*vname2,/* new entry name */
-	xfs_inode_t	**ipp1,	/* inode of old entry */
-	xfs_inode_t	**ipp2,	/* inode of new entry, if it
+	xfs_inode_t	*dp1,	/* in: old (source) directory inode */
+	xfs_inode_t	*dp2,	/* in: new (target) directory inode */
+	xfs_inode_t	*ip1,	/* in: inode of old entry */
+	struct xfs_name	*name2,	/* in: new entry name */
+	xfs_inode_t	**ipp2,	/* out: inode of new entry, if it
 				   already exists, NULL otherwise. */
-	xfs_inode_t	**i_tab,/* array of inode returned, sorted */
-	int		*num_inodes)  /* number of inodes in array */
+	xfs_inode_t	**i_tab,	/* out: array of inodes returned, sorted */
+	int		*num_inodes)  /* out: number of inodes in array */
 {
-	xfs_inode_t		*ip1, *ip2, *temp;
+	xfs_inode_t		*ip2 = NULL;
+	xfs_inode_t		*temp;
 	xfs_ino_t		inum1, inum2;
 	int			error;
 	int			i, j;
 	uint			lock_mode;
 	int			diff_dirs = (dp1 != dp2);
 
-	ip2 = NULL;
-
 	/*
 	 * First, find out the current inums of the entries so that we
 	 * can determine the initial locking order.  We'll have to
@@ -110,27 +107,20 @@
 	 * to see if we still have the right inodes, directories, etc.
 	 */
 	lock_mode = xfs_ilock_map_shared(dp1);
-	error = xfs_get_dir_entry(vname1, &ip1);
-	if (error) {
-		xfs_iunlock_map_shared(dp1, lock_mode);
-		return error;
-	}
+	IHOLD(ip1);
+	xfs_itrace_ref(ip1);
 
 	inum1 = ip1->i_ino;
 
-	ASSERT(ip1);
-	xfs_itrace_ref(ip1);
-
 	/*
 	 * Unlock dp1 and lock dp2 if they are different.
 	 */
-
 	if (diff_dirs) {
 		xfs_iunlock_map_shared(dp1, lock_mode);
 		lock_mode = xfs_ilock_map_shared(dp2);
 	}
 
-	error = xfs_dir_lookup_int(dp2, lock_mode, vname2, &inum2, &ip2);
+	error = xfs_dir_lookup_int(dp2, lock_mode, name2, &inum2, &ip2);
 	if (error == ENOENT) {		/* target does not need to exist. */
 		inum2 = 0;
 	} else if (error) {
@@ -162,6 +152,7 @@
 		*num_inodes = 4;
 		i_tab[3] = ip2;
 	}
+	*ipp2 = i_tab[3];
 
 	/*
 	 * Sort the elements via bubble sort.  (Remember, there are at
@@ -199,21 +190,6 @@
 		xfs_lock_inodes(i_tab, *num_inodes, 0, XFS_ILOCK_SHARED);
 	}
 
-	/*
-	 * Set the return value. Null out any unused entries in i_tab.
-	 */
-	*ipp1 = *ipp2 = NULL;
-	for (i=0; i < *num_inodes; i++) {
-		if (i_tab[i]->i_ino == inum1) {
-			*ipp1 = i_tab[i];
-		}
-		if (i_tab[i]->i_ino == inum2) {
-			*ipp2 = i_tab[i];
-		}
-	}
-	for (;i < 4; i++) {
-		i_tab[i] = NULL;
-	}
 	return 0;
 }
 
@@ -223,13 +199,13 @@
 int
 xfs_rename(
 	xfs_inode_t	*src_dp,
-	bhv_vname_t	*src_vname,
-	bhv_vnode_t	*target_dir_vp,
-	bhv_vname_t	*target_vname)
+	struct xfs_name	*src_name,
+	xfs_inode_t	*src_ip,
+	xfs_inode_t	*target_dp,
+	struct xfs_name	*target_name)
 {
-	bhv_vnode_t	*src_dir_vp = XFS_ITOV(src_dp);
 	xfs_trans_t	*tp;
-	xfs_inode_t	*target_dp, *src_ip, *target_ip;
+	xfs_inode_t	*target_ip;
 	xfs_mount_t	*mp = src_dp->i_mount;
 	int		new_parent;		/* moving to a new dir */
 	int		src_is_directory;	/* src_name is a directory */
@@ -243,29 +219,16 @@
 	int		spaceres;
 	int		target_link_zero = 0;
 	int		num_inodes;
-	char		*src_name = VNAME(src_vname);
-	char		*target_name = VNAME(target_vname);
-	int		src_namelen = VNAMELEN(src_vname);
-	int		target_namelen = VNAMELEN(target_vname);
 
 	xfs_itrace_entry(src_dp);
-	xfs_itrace_entry(xfs_vtoi(target_dir_vp));
-
-	/*
-	 * Find the XFS behavior descriptor for the target directory
-	 * vnode since it was not handed to us.
-	 */
-	target_dp = xfs_vtoi(target_dir_vp);
-	if (target_dp == NULL) {
-		return XFS_ERROR(EXDEV);
-	}
+	xfs_itrace_entry(target_dp);
 
 	if (DM_EVENT_ENABLED(src_dp, DM_EVENT_RENAME) ||
 	    DM_EVENT_ENABLED(target_dp, DM_EVENT_RENAME)) {
 		error = XFS_SEND_NAMESP(mp, DM_EVENT_RENAME,
-					src_dir_vp, DM_RIGHT_NULL,
-					target_dir_vp, DM_RIGHT_NULL,
-					src_name, target_name,
+					src_dp, DM_RIGHT_NULL,
+					target_dp, DM_RIGHT_NULL,
+					src_name->name, target_name->name,
 					0, 0, 0);
 		if (error) {
 			return error;
@@ -282,10 +245,8 @@
 	 * does not exist in the source directory.
 	 */
 	tp = NULL;
-	error = xfs_lock_for_rename(src_dp, target_dp, src_vname,
-			target_vname, &src_ip, &target_ip, inodes,
-			&num_inodes);
-
+	error = xfs_lock_for_rename(src_dp, target_dp, src_ip, target_name,
+					&target_ip, inodes, &num_inodes);
 	if (error) {
 		/*
 		 * We have nothing locked, no inode references, and
@@ -331,7 +292,7 @@
 	XFS_BMAP_INIT(&free_list, &first_block);
 	tp = xfs_trans_alloc(mp, XFS_TRANS_RENAME);
 	cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
-	spaceres = XFS_RENAME_SPACE_RES(mp, target_namelen);
+	spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
 	error = xfs_trans_reserve(tp, spaceres, XFS_RENAME_LOG_RES(mp), 0,
 			XFS_TRANS_PERM_LOG_RES, XFS_RENAME_LOG_COUNT);
 	if (error == ENOSPC) {
@@ -365,10 +326,10 @@
 	 * them when they unlock the inodes.  Also, we need to be careful
 	 * not to add an inode to the transaction more than once.
 	 */
-	VN_HOLD(src_dir_vp);
+	IHOLD(src_dp);
 	xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
 	if (new_parent) {
-		VN_HOLD(target_dir_vp);
+		IHOLD(target_dp);
 		xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
 	}
 	if ((src_ip != src_dp) && (src_ip != target_dp)) {
@@ -389,9 +350,8 @@
 		 * If there's no space reservation, check the entry will
 		 * fit before actually inserting it.
 		 */
-		if (spaceres == 0 &&
-		    (error = xfs_dir_canenter(tp, target_dp, target_name,
-						target_namelen)))
+		error = xfs_dir_canenter(tp, target_dp, target_name, spaceres);
+		if (error)
 			goto error_return;
 		/*
 		 * If target does not exist and the rename crosses
@@ -399,8 +359,8 @@
 		 * to account for the ".." reference from the new entry.
 		 */
 		error = xfs_dir_createname(tp, target_dp, target_name,
-					   target_namelen, src_ip->i_ino,
-					   &first_block, &free_list, spaceres);
+						src_ip->i_ino, &first_block,
+						&free_list, spaceres);
 		if (error == ENOSPC)
 			goto error_return;
 		if (error)
@@ -439,7 +399,7 @@
 		 * name at the destination directory, remove it first.
 		 */
 		error = xfs_dir_replace(tp, target_dp, target_name,
-					target_namelen, src_ip->i_ino,
+					src_ip->i_ino,
 					&first_block, &free_list, spaceres);
 		if (error)
 			goto abort_return;
@@ -476,7 +436,8 @@
 		 * Rewrite the ".." entry to point to the new
 		 * directory.
 		 */
-		error = xfs_dir_replace(tp, src_ip, "..", 2, target_dp->i_ino,
+		error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
+					target_dp->i_ino,
 					&first_block, &free_list, spaceres);
 		ASSERT(error != EEXIST);
 		if (error)
@@ -512,8 +473,8 @@
 			goto abort_return;
 	}
 
-	error = xfs_dir_removename(tp, src_dp, src_name, src_namelen,
-			src_ip->i_ino, &first_block, &free_list, spaceres);
+	error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
+					&first_block, &free_list, spaceres);
 	if (error)
 		goto abort_return;
 	xfs_ichgtime(src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
@@ -580,10 +541,8 @@
 	 * the vnode references.
 	 */
 	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
-	if (target_ip != NULL) {
-		xfs_refcache_purge_ip(target_ip);
+	if (target_ip != NULL)
 		IRELE(target_ip);
-	}
 	/*
 	 * Let interposed file systems know about removed links.
 	 */
@@ -598,9 +557,9 @@
 	if (DM_EVENT_ENABLED(src_dp, DM_EVENT_POSTRENAME) ||
 	    DM_EVENT_ENABLED(target_dp, DM_EVENT_POSTRENAME)) {
 		(void) XFS_SEND_NAMESP (mp, DM_EVENT_POSTRENAME,
-					src_dir_vp, DM_RIGHT_NULL,
-					target_dir_vp, DM_RIGHT_NULL,
-					src_name, target_name,
+					src_dp, DM_RIGHT_NULL,
+					target_dp, DM_RIGHT_NULL,
+					src_name->name, target_name->name,
 					0, error, 0);
 	}
 	return error;
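
Note: xfs_lock_for_rename() gathers up to four inodes (both directories, the source, and an existing target if any), sorts them by inode number, and locks them in that order. Any global ordering rule makes two concurrent renames acquire shared inodes in the same sequence, so they cannot deadlock against each other. A simplified sketch of the sort-then-lock step (helper name illustrative):

	static void lock_inodes_sorted(xfs_inode_t **tab, int n)
	{
		int		i, j;
		xfs_inode_t	*tmp;

		/* n is at most 4, so a bubble sort is fine */
		for (i = 0; i < n - 1; i++)
			for (j = 1; j < n - i; j++)
				if (tab[j]->i_ino < tab[j - 1]->i_ino) {
					tmp = tab[j];
					tab[j] = tab[j - 1];
					tab[j - 1] = tmp;
				}
		xfs_lock_inodes(tab, n, 0, XFS_ILOCK_SHARED);
	}
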
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 47082c0..a0dc6e5 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -44,6 +44,7 @@
 #include "xfs_rw.h"
 #include "xfs_inode_item.h"
 #include "xfs_trans_space.h"
+#include "xfs_utils.h"
 
 
 /*
@@ -123,14 +124,14 @@
 				XFS_GROWRTALLOC_LOG_RES(mp), 0,
 				XFS_TRANS_PERM_LOG_RES,
 				XFS_DEFAULT_PERM_LOG_COUNT)))
-			goto error_exit;
+			goto error_cancel;
 		cancelflags = XFS_TRANS_RELEASE_LOG_RES;
 		/*
 		 * Lock the inode.
 		 */
 		if ((error = xfs_trans_iget(mp, tp, ino, 0,
 						XFS_ILOCK_EXCL, &ip)))
-			goto error_exit;
+			goto error_cancel;
 		XFS_BMAP_INIT(&flist, &firstblock);
 		/*
 		 * Allocate blocks to the bitmap file.
@@ -143,14 +144,16 @@
 		if (!error && nmap < 1)
 			error = XFS_ERROR(ENOSPC);
 		if (error)
-			goto error_exit;
+			goto error_cancel;
 		/*
 		 * Free any blocks freed up in the transaction, then commit.
 		 */
 		error = xfs_bmap_finish(&tp, &flist, &committed);
 		if (error)
-			goto error_exit;
-		xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+			goto error_cancel;
+		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+		if (error)
+			goto error;
 		/*
 		 * Now we need to clear the allocated blocks.
 		 * Do this one block per transaction, to keep it simple.
@@ -165,13 +168,13 @@
 			 */
 			if ((error = xfs_trans_reserve(tp, 0,
 					XFS_GROWRTZERO_LOG_RES(mp), 0, 0, 0)))
-				goto error_exit;
+				goto error_cancel;
 			/*
 			 * Lock the bitmap inode.
 			 */
 			if ((error = xfs_trans_iget(mp, tp, ino, 0,
 							XFS_ILOCK_EXCL, &ip)))
-				goto error_exit;
+				goto error_cancel;
 			/*
 			 * Get a buffer for the block.
 			 */
@@ -180,14 +183,16 @@
 				mp->m_bsize, 0);
 			if (bp == NULL) {
 				error = XFS_ERROR(EIO);
-				goto error_exit;
+				goto error_cancel;
 			}
 			memset(XFS_BUF_PTR(bp), 0, mp->m_sb.sb_blocksize);
 			xfs_trans_log_buf(tp, bp, 0, mp->m_sb.sb_blocksize - 1);
 			/*
 			 * Commit the transaction.
 			 */
-			xfs_trans_commit(tp, 0);
+			error = xfs_trans_commit(tp, 0);
+			if (error)
+				goto error;
 		}
 		/*
 		 * Go on to the next extent, if any.
@@ -195,8 +200,9 @@
 		oblocks = map.br_startoff + map.br_blockcount;
 	}
 	return 0;
-error_exit:
+error_cancel:
 	xfs_trans_cancel(tp, cancelflags);
+error:
 	return error;
 }
 
@@ -1875,6 +1881,7 @@
 	xfs_trans_t	*tp;		/* transaction pointer */
 
 	sbp = &mp->m_sb;
+	cancelflags = 0;
 	/*
 	 * Initial error checking.
 	 */
@@ -2041,13 +2048,15 @@
 		 */
 		mp->m_rsumlevels = nrsumlevels;
 		mp->m_rsumsize = nrsumsize;
-		/*
-		 * Commit the transaction.
-		 */
-		xfs_trans_commit(tp, 0);
+
+		error = xfs_trans_commit(tp, 0);
+		if (error) {
+			tp = NULL;
+			break;
+		}
 	}
 
-	if (error)
+	if (error && tp)
 		xfs_trans_cancel(tp, cancelflags);
 
 	/*
@@ -2278,7 +2287,7 @@
 	ASSERT(sbp->sb_rsumino != NULLFSINO);
 	error = xfs_iget(mp, NULL, sbp->sb_rsumino, 0, 0, &mp->m_rsumip, 0);
 	if (error) {
-		VN_RELE(XFS_ITOV(mp->m_rbmip));
+		IRELE(mp->m_rbmip);
 		return error;
 	}
 	ASSERT(mp->m_rsumip != NULL);
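
Note: the growfs loop above captures a subtle transaction rule: xfs_trans_commit() consumes the transaction whether it succeeds or fails, so cancelling it afterwards would be a use-after-free. Hence tp is cleared before breaking out, and the cleanup cancel is guarded:

	error = xfs_trans_commit(tp, 0);
	if (error) {
		tp = NULL;		/* commit consumed the transaction */
		break;
	}
	...
	if (error && tp)		/* cancel only if never committed */
		xfs_trans_cancel(tp, cancelflags);
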
diff --git a/fs/xfs/xfs_rw.c b/fs/xfs/xfs_rw.c
index cd3ece6..b0f31c0 100644
--- a/fs/xfs/xfs_rw.c
+++ b/fs/xfs/xfs_rw.c
@@ -126,11 +126,11 @@
 		 * when we return.
 		 */
 		if (iip && iip->ili_last_lsn) {
-			xfs_log_force(mp, iip->ili_last_lsn,
-					XFS_LOG_FORCE | XFS_LOG_SYNC);
+			error = _xfs_log_force(mp, iip->ili_last_lsn,
+					XFS_LOG_FORCE | XFS_LOG_SYNC, NULL);
 		} else if (xfs_ipincount(ip) > 0) {
-			xfs_log_force(mp, (xfs_lsn_t)0,
-					XFS_LOG_FORCE | XFS_LOG_SYNC);
+			error = _xfs_log_force(mp, (xfs_lsn_t)0,
+					XFS_LOG_FORCE | XFS_LOG_SYNC, NULL);
 		}
 
 	} else {
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 7f40628..0804207 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -113,13 +113,8 @@
 struct xfs_trans;
 struct xfs_dquot_acct;
 
-typedef struct xfs_ail_entry {
-	struct xfs_log_item	*ail_forw;	/* AIL forw pointer */
-	struct xfs_log_item	*ail_back;	/* AIL back pointer */
-} xfs_ail_entry_t;
-
 typedef struct xfs_log_item {
-	xfs_ail_entry_t			li_ail;		/* AIL pointers */
+	struct list_head		li_ail;		/* AIL pointers */
 	xfs_lsn_t			li_lsn;		/* last on-disk lsn */
 	struct xfs_log_item_desc	*li_desc;	/* ptr to current desc*/
 	struct xfs_mount		*li_mountp;	/* ptr to fs mount */
@@ -341,7 +336,6 @@
 	unsigned int		t_rtx_res;	/* # of rt extents resvd */
 	unsigned int		t_rtx_res_used;	/* # of resvd rt extents used */
 	xfs_log_ticket_t	t_ticket;	/* log mgr ticket */
-	sema_t			t_sema;		/* sema for commit completion */
 	xfs_lsn_t		t_lsn;		/* log seq num of start of
 						 * transaction. */
 	xfs_lsn_t		t_commit_lsn;	/* log seq num of end of
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index 76d470d..1f77c00 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -28,13 +28,13 @@
 #include "xfs_trans_priv.h"
 #include "xfs_error.h"
 
-STATIC void xfs_ail_insert(xfs_ail_entry_t *, xfs_log_item_t *);
-STATIC xfs_log_item_t * xfs_ail_delete(xfs_ail_entry_t *, xfs_log_item_t *);
-STATIC xfs_log_item_t * xfs_ail_min(xfs_ail_entry_t *);
-STATIC xfs_log_item_t * xfs_ail_next(xfs_ail_entry_t *, xfs_log_item_t *);
+STATIC void xfs_ail_insert(xfs_ail_t *, xfs_log_item_t *);
+STATIC xfs_log_item_t * xfs_ail_delete(xfs_ail_t *, xfs_log_item_t *);
+STATIC xfs_log_item_t * xfs_ail_min(xfs_ail_t *);
+STATIC xfs_log_item_t * xfs_ail_next(xfs_ail_t *, xfs_log_item_t *);
 
 #ifdef DEBUG
-STATIC void xfs_ail_check(xfs_ail_entry_t *, xfs_log_item_t *);
+STATIC void xfs_ail_check(xfs_ail_t *, xfs_log_item_t *);
 #else
 #define	xfs_ail_check(a,l)
 #endif /* DEBUG */
@@ -57,7 +57,7 @@
 	xfs_log_item_t	*lip;
 
 	spin_lock(&mp->m_ail_lock);
-	lip = xfs_ail_min(&(mp->m_ail.xa_ail));
+	lip = xfs_ail_min(&mp->m_ail);
 	if (lip == NULL) {
 		lsn = (xfs_lsn_t)0;
 	} else {
@@ -91,7 +91,7 @@
 {
 	xfs_log_item_t		*lip;
 
-	lip = xfs_ail_min(&mp->m_ail.xa_ail);
+	lip = xfs_ail_min(&mp->m_ail);
 	if (lip && !XFS_FORCED_SHUTDOWN(mp)) {
 		if (XFS_LSN_CMP(threshold_lsn, mp->m_ail.xa_target) > 0)
 			xfsaild_wakeup(mp, threshold_lsn);
@@ -111,15 +111,17 @@
 {
 	xfs_log_item_t	*lip;
 
-	lip = xfs_ail_min(&(mp->m_ail.xa_ail));
+	lip = xfs_ail_min(&mp->m_ail);
 	*gen = (int)mp->m_ail.xa_gen;
 	if (lsn == 0)
 		return lip;
 
-	while (lip && (XFS_LSN_CMP(lip->li_lsn, lsn) < 0))
-		lip = lip->li_ail.ail_forw;
+	list_for_each_entry(lip, &mp->m_ail.xa_ail, li_ail) {
+		if (XFS_LSN_CMP(lip->li_lsn, lsn) >= 0)
+			return lip;
+	}
 
-	return lip;
+	return NULL;
 }
 
 /*
@@ -329,7 +331,7 @@
 	 * the call to xfs_log_move_tail() doesn't do anything if there's
 	 * not enough free space to wake people up so we're safe calling it.
 	 */
-	min_lip = xfs_ail_min(&mp->m_ail.xa_ail);
+	min_lip = xfs_ail_min(&mp->m_ail);
 
 	if (min_lip == lip)
 		xfs_log_move_tail(mp, 1);
@@ -357,15 +359,13 @@
 	xfs_log_item_t	*lip,
 	xfs_lsn_t	lsn) __releases(mp->m_ail_lock)
 {
-	xfs_ail_entry_t		*ailp;
 	xfs_log_item_t		*dlip=NULL;
 	xfs_log_item_t		*mlip;	/* ptr to minimum lip */
 
-	ailp = &(mp->m_ail.xa_ail);
-	mlip = xfs_ail_min(ailp);
+	mlip = xfs_ail_min(&mp->m_ail);
 
 	if (lip->li_flags & XFS_LI_IN_AIL) {
-		dlip = xfs_ail_delete(ailp, lip);
+		dlip = xfs_ail_delete(&mp->m_ail, lip);
 		ASSERT(dlip == lip);
 	} else {
 		lip->li_flags |= XFS_LI_IN_AIL;
@@ -373,11 +373,11 @@
 
 	lip->li_lsn = lsn;
 
-	xfs_ail_insert(ailp, lip);
+	xfs_ail_insert(&mp->m_ail, lip);
 	mp->m_ail.xa_gen++;
 
 	if (mlip == dlip) {
-		mlip = xfs_ail_min(&(mp->m_ail.xa_ail));
+		mlip = xfs_ail_min(&mp->m_ail);
 		spin_unlock(&mp->m_ail_lock);
 		xfs_log_move_tail(mp, mlip->li_lsn);
 	} else {
@@ -407,14 +407,12 @@
 	xfs_mount_t	*mp,
 	xfs_log_item_t	*lip) __releases(mp->m_ail_lock)
 {
-	xfs_ail_entry_t		*ailp;
 	xfs_log_item_t		*dlip;
 	xfs_log_item_t		*mlip;
 
 	if (lip->li_flags & XFS_LI_IN_AIL) {
-		ailp = &(mp->m_ail.xa_ail);
-		mlip = xfs_ail_min(ailp);
-		dlip = xfs_ail_delete(ailp, lip);
+		mlip = xfs_ail_min(&mp->m_ail);
+		dlip = xfs_ail_delete(&mp->m_ail, lip);
 		ASSERT(dlip == lip);
 
 
@@ -423,7 +421,7 @@
 		mp->m_ail.xa_gen++;
 
 		if (mlip == dlip) {
-			mlip = xfs_ail_min(&(mp->m_ail.xa_ail));
+			mlip = xfs_ail_min(&mp->m_ail);
 			spin_unlock(&mp->m_ail_lock);
 			xfs_log_move_tail(mp, (mlip ? mlip->li_lsn : 0));
 		} else {
@@ -440,7 +438,7 @@
 		else {
 			xfs_cmn_err(XFS_PTAG_AILDELETE, CE_ALERT, mp,
 		"%s: attempting to delete a log item that is not in the AIL",
-					__FUNCTION__);
+					__func__);
 			spin_unlock(&mp->m_ail_lock);
 			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
 		}
@@ -461,7 +459,7 @@
 {
 	xfs_log_item_t	*lip;
 
-	lip = xfs_ail_min(&(mp->m_ail.xa_ail));
+	lip = xfs_ail_min(&mp->m_ail);
 	*gen = (int)mp->m_ail.xa_gen;
 
 	return lip;
@@ -485,9 +483,9 @@
 
 	ASSERT(mp && lip && gen);
 	if (mp->m_ail.xa_gen == *gen) {
-		nlip = xfs_ail_next(&(mp->m_ail.xa_ail), lip);
+		nlip = xfs_ail_next(&mp->m_ail, lip);
 	} else {
-		nlip = xfs_ail_min(&(mp->m_ail).xa_ail);
+		nlip = xfs_ail_min(&mp->m_ail);
 		*gen = (int)mp->m_ail.xa_gen;
 		if (restarts != NULL) {
 			XFS_STATS_INC(xs_push_ail_restarts);
@@ -517,8 +515,7 @@
 xfs_trans_ail_init(
 	xfs_mount_t	*mp)
 {
-	mp->m_ail.xa_ail.ail_forw = (xfs_log_item_t*)&mp->m_ail.xa_ail;
-	mp->m_ail.xa_ail.ail_back = (xfs_log_item_t*)&mp->m_ail.xa_ail;
+	INIT_LIST_HEAD(&mp->m_ail.xa_ail);
 	return xfsaild_start(mp);
 }
 
@@ -537,7 +534,7 @@
  */
 STATIC void
 xfs_ail_insert(
-	xfs_ail_entry_t	*base,
+	xfs_ail_t	*ailp,
 	xfs_log_item_t	*lip)
 /* ARGSUSED */
 {
@@ -546,27 +543,22 @@
 	/*
 	 * If the list is empty, just insert the item.
 	 */
-	if (base->ail_back == (xfs_log_item_t*)base) {
-		base->ail_forw = lip;
-		base->ail_back = lip;
-		lip->li_ail.ail_forw = (xfs_log_item_t*)base;
-		lip->li_ail.ail_back = (xfs_log_item_t*)base;
+	if (list_empty(&ailp->xa_ail)) {
+		list_add(&lip->li_ail, &ailp->xa_ail);
 		return;
 	}
 
-	next_lip = base->ail_back;
-	while ((next_lip != (xfs_log_item_t*)base) &&
-	       (XFS_LSN_CMP(next_lip->li_lsn, lip->li_lsn) > 0)) {
-		next_lip = next_lip->li_ail.ail_back;
+	list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) {
+		if (XFS_LSN_CMP(next_lip->li_lsn, lip->li_lsn) <= 0)
+			break;
 	}
-	ASSERT((next_lip == (xfs_log_item_t*)base) ||
-	       (XFS_LSN_CMP(next_lip->li_lsn, lip->li_lsn) <= 0));
-	lip->li_ail.ail_forw = next_lip->li_ail.ail_forw;
-	lip->li_ail.ail_back = next_lip;
-	next_lip->li_ail.ail_forw = lip;
-	lip->li_ail.ail_forw->li_ail.ail_back = lip;
 
-	xfs_ail_check(base, lip);
+	ASSERT((&next_lip->li_ail == &ailp->xa_ail) ||
+	       (XFS_LSN_CMP(next_lip->li_lsn, lip->li_lsn) <= 0));
+
+	list_add(&lip->li_ail, &next_lip->li_ail);
+
+	xfs_ail_check(ailp, lip);
 	return;
 }
 
@@ -576,15 +568,13 @@
 /*ARGSUSED*/
 STATIC xfs_log_item_t *
 xfs_ail_delete(
-	xfs_ail_entry_t	*base,
+	xfs_ail_t	*ailp,
 	xfs_log_item_t	*lip)
 /* ARGSUSED */
 {
-	xfs_ail_check(base, lip);
-	lip->li_ail.ail_forw->li_ail.ail_back = lip->li_ail.ail_back;
-	lip->li_ail.ail_back->li_ail.ail_forw = lip->li_ail.ail_forw;
-	lip->li_ail.ail_forw = NULL;
-	lip->li_ail.ail_back = NULL;
+	xfs_ail_check(ailp, lip);
+
+	list_del(&lip->li_ail);
 
 	return lip;
 }
@@ -595,14 +585,13 @@
  */
 STATIC xfs_log_item_t *
 xfs_ail_min(
-	xfs_ail_entry_t	*base)
+	xfs_ail_t	*ailp)
 /* ARGSUSED */
 {
-	register xfs_log_item_t *forw = base->ail_forw;
-	if (forw == (xfs_log_item_t*)base) {
+	if (list_empty(&ailp->xa_ail))
 		return NULL;
-	}
-	return forw;
+
+	return list_first_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
 }
 
 /*
@@ -612,15 +601,14 @@
  */
 STATIC xfs_log_item_t *
 xfs_ail_next(
-	xfs_ail_entry_t	*base,
+	xfs_ail_t	*ailp,
 	xfs_log_item_t	*lip)
 /* ARGSUSED */
 {
-	if (lip->li_ail.ail_forw == (xfs_log_item_t*)base) {
+	if (lip->li_ail.next == &ailp->xa_ail)
 		return NULL;
-	}
-	return lip->li_ail.ail_forw;
 
+	return list_first_entry(&lip->li_ail, xfs_log_item_t, li_ail);
 }
 
 #ifdef DEBUG
@@ -629,57 +617,40 @@
  */
 STATIC void
 xfs_ail_check(
-	xfs_ail_entry_t *base,
+	xfs_ail_t	*ailp,
 	xfs_log_item_t	*lip)
 {
 	xfs_log_item_t	*prev_lip;
 
-	prev_lip = base->ail_forw;
-	if (prev_lip == (xfs_log_item_t*)base) {
-		/*
-		 * Make sure the pointers are correct when the list
-		 * is empty.
-		 */
-		ASSERT(base->ail_back == (xfs_log_item_t*)base);
+	if (list_empty(&ailp->xa_ail))
 		return;
-	}
 
 	/*
 	 * Check the next and previous entries are valid.
 	 */
 	ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
-	prev_lip = lip->li_ail.ail_back;
-	if (prev_lip != (xfs_log_item_t*)base) {
-		ASSERT(prev_lip->li_ail.ail_forw == lip);
+	prev_lip = list_entry(lip->li_ail.prev, xfs_log_item_t, li_ail);
+	if (&prev_lip->li_ail != &ailp->xa_ail)
 		ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
-	}
-	prev_lip = lip->li_ail.ail_forw;
-	if (prev_lip != (xfs_log_item_t*)base) {
-		ASSERT(prev_lip->li_ail.ail_back == lip);
+
+	prev_lip = list_entry(lip->li_ail.next, xfs_log_item_t, li_ail);
+	if (&prev_lip->li_ail != &ailp->xa_ail)
 		ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0);
-	}
 
 
 #ifdef XFS_TRANS_DEBUG
 	/*
-	 * Walk the list checking forward and backward pointers,
-	 * lsn ordering, and that every entry has the XFS_LI_IN_AIL
-	 * flag set. This is really expensive, so only do it when
-	 * specifically debugging the transaction subsystem.
+	 * Walk the list checking lsn ordering, and that every entry has the
+	 * XFS_LI_IN_AIL flag set. This is really expensive, so only do it
+	 * when specifically debugging the transaction subsystem.
 	 */
-	prev_lip = (xfs_log_item_t*)base;
-	while (lip != (xfs_log_item_t*)base) {
-		if (prev_lip != (xfs_log_item_t*)base) {
-			ASSERT(prev_lip->li_ail.ail_forw == lip);
+	prev_lip = list_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
+	list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
+		if (&prev_lip->li_ail != &ailp->xa_ail)
 			ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
-		}
-		ASSERT(lip->li_ail.ail_back == prev_lip);
 		ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
 		prev_lip = lip;
-		lip = lip->li_ail.ail_forw;
 	}
-	ASSERT(lip == (xfs_log_item_t*)base);
-	ASSERT(base->ail_back == prev_lip);
 #endif /* XFS_TRANS_DEBUG */
 }
 #endif /* DEBUG */
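
Note: with the AIL now on a standard list_head, the LSN-sorted insert walks the list in reverse, because newly inserted items usually carry the largest LSN and so land near the tail after only a step or two. Condensed from xfs_ail_insert() above:

	list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) {
		if (XFS_LSN_CMP(next_lip->li_lsn, lip->li_lsn) <= 0)
			break;
	}
	/*
	 * If the loop ran off the front, &next_lip->li_ail equals the
	 * list head itself and list_add() inserts lip at the front;
	 * otherwise lip lands immediately after next_lip.
	 */
	list_add(&lip->li_ail, &next_lip->li_ail);
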
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 60b6b89..cb0c583 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -304,7 +304,8 @@
 	if (tp == NULL) {
 		bp = xfs_buf_read_flags(target, blkno, len, flags | BUF_BUSY);
 		if (!bp)
-			return XFS_ERROR(ENOMEM);
+			return (flags & XFS_BUF_TRYLOCK) ?
+					EAGAIN : XFS_ERROR(ENOMEM);
 
 		if ((bp != NULL) && (XFS_BUF_GETERROR(bp) != 0)) {
 			xfs_ioerror_alert("xfs_trans_read_buf", mp,
@@ -353,17 +354,15 @@
 			ASSERT(!XFS_BUF_ISASYNC(bp));
 			XFS_BUF_READ(bp);
 			xfsbdstrat(tp->t_mountp, bp);
-			xfs_iowait(bp);
-			if (XFS_BUF_GETERROR(bp) != 0) {
+			error = xfs_iowait(bp);
+			if (error) {
 				xfs_ioerror_alert("xfs_trans_read_buf", mp,
 						  bp, blkno);
-				error = XFS_BUF_GETERROR(bp);
 				xfs_buf_relse(bp);
 				/*
-				 * We can gracefully recover from most
-				 * read errors. Ones we can't are those
-				 * that happen after the transaction's
-				 * already dirty.
+				 * We can gracefully recover from most read
+				 * errors. Ones we can't are those that happen
+				 * after the transaction's already dirty.
 				 */
 				if (tp->t_flags & XFS_TRANS_DIRTY)
 					xfs_force_shutdown(tp->t_mountp,
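
Note: the xfs_trans_read_buf() change above also fixes the error code for non-blocking reads: failing to get the buffer under XFS_BUF_TRYLOCK is an expected try-again-later condition, not an out-of-memory condition, so callers can now tell the two apart:

	bp = xfs_buf_read_flags(target, blkno, len, flags | BUF_BUSY);
	if (!bp)
		return (flags & XFS_BUF_TRYLOCK) ?
				EAGAIN : XFS_ERROR(ENOMEM);
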
diff --git a/fs/xfs/xfs_types.h b/fs/xfs/xfs_types.h
index 5c89be4..0f51916 100644
--- a/fs/xfs/xfs_types.h
+++ b/fs/xfs/xfs_types.h
@@ -160,4 +160,9 @@
 	XFS_BTNUM_MAX
 } xfs_btnum_t;
 
+struct xfs_name {
+	const char	*name;
+	int		len;
+};
+
 #endif	/* __XFS_TYPES_H__ */
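
Note: struct xfs_name bundles the (pointer, length) pair that the directory code previously threaded through as separate arguments, which is why xfs_dir_lookup, xfs_dir_createname and friends lose a parameter in this series. A hypothetical caller builds one on the stack, for example from a VFS dentry:

	struct xfs_name	xname;
	xfs_ino_t	inum;
	int		error;

	xname.name = dentry->d_name.name;	/* referenced, not copied */
	xname.len = dentry->d_name.len;
	error = xfs_dir_lookup(NULL, dp, &xname, &inum);
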
diff --git a/fs/xfs/xfs_utils.c b/fs/xfs/xfs_utils.c
index 18a85e7..2b8dc7e 100644
--- a/fs/xfs/xfs_utils.c
+++ b/fs/xfs/xfs_utils.c
@@ -40,34 +40,12 @@
 #include "xfs_itable.h"
 #include "xfs_utils.h"
 
-/*
- * xfs_get_dir_entry is used to get a reference to an inode given
- * its parent directory inode and the name of the file.	 It does
- * not lock the child inode, and it unlocks the directory before
- * returning.  The directory's generation number is returned for
- * use by a later call to xfs_lock_dir_and_entry.
- */
-int
-xfs_get_dir_entry(
-	bhv_vname_t	*dentry,
-	xfs_inode_t	**ipp)
-{
-	bhv_vnode_t	*vp;
-
-	vp = VNAME_TO_VNODE(dentry);
-
-	*ipp = xfs_vtoi(vp);
-	if (!*ipp)
-		return XFS_ERROR(ENOENT);
-	VN_HOLD(vp);
-	return 0;
-}
 
 int
 xfs_dir_lookup_int(
 	xfs_inode_t	*dp,
 	uint		lock_mode,
-	bhv_vname_t	*dentry,
+	struct xfs_name	*name,
 	xfs_ino_t	*inum,
 	xfs_inode_t	**ipp)
 {
@@ -75,7 +53,7 @@
 
 	xfs_itrace_entry(dp);
 
-	error = xfs_dir_lookup(NULL, dp, VNAME(dentry), VNAMELEN(dentry), inum);
+	error = xfs_dir_lookup(NULL, dp, name, inum);
 	if (!error) {
 		/*
 		 * Unlock the directory. We do this because we can't
diff --git a/fs/xfs/xfs_utils.h b/fs/xfs/xfs_utils.h
index f857fcc..175b126 100644
--- a/fs/xfs/xfs_utils.h
+++ b/fs/xfs/xfs_utils.h
@@ -21,15 +21,14 @@
 #define IRELE(ip)	VN_RELE(XFS_ITOV(ip))
 #define IHOLD(ip)	VN_HOLD(XFS_ITOV(ip))
 
-extern int xfs_get_dir_entry (bhv_vname_t *, xfs_inode_t **);
-extern int xfs_dir_lookup_int (xfs_inode_t *, uint, bhv_vname_t *, xfs_ino_t *,
-				xfs_inode_t **);
-extern int xfs_truncate_file (xfs_mount_t *, xfs_inode_t *);
-extern int xfs_dir_ialloc (xfs_trans_t **, xfs_inode_t *, mode_t, xfs_nlink_t,
+extern int xfs_dir_lookup_int(xfs_inode_t *, uint, struct xfs_name *,
+				xfs_ino_t *, xfs_inode_t **);
+extern int xfs_truncate_file(xfs_mount_t *, xfs_inode_t *);
+extern int xfs_dir_ialloc(xfs_trans_t **, xfs_inode_t *, mode_t, xfs_nlink_t,
 				xfs_dev_t, cred_t *, prid_t, int,
 				xfs_inode_t **, int *);
-extern int xfs_droplink (xfs_trans_t *, xfs_inode_t *);
-extern int xfs_bumplink (xfs_trans_t *, xfs_inode_t *);
-extern void xfs_bump_ino_vers2 (xfs_trans_t *, xfs_inode_t *);
+extern int xfs_droplink(xfs_trans_t *, xfs_inode_t *);
+extern int xfs_bumplink(xfs_trans_t *, xfs_inode_t *);
+extern void xfs_bump_ino_vers2(xfs_trans_t *, xfs_inode_t *);
 
 #endif	/* __XFS_UTILS_H__ */
diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c
index 7094caf..fc48158 100644
--- a/fs/xfs/xfs_vfsops.c
+++ b/fs/xfs/xfs_vfsops.c
@@ -43,7 +43,6 @@
 #include "xfs_error.h"
 #include "xfs_bmap.h"
 #include "xfs_rw.h"
-#include "xfs_refcache.h"
 #include "xfs_buf_item.h"
 #include "xfs_log_priv.h"
 #include "xfs_dir2_trace.h"
@@ -56,6 +55,7 @@
 #include "xfs_fsops.h"
 #include "xfs_vnodeops.h"
 #include "xfs_vfsops.h"
+#include "xfs_utils.h"
 
 
 int __init
@@ -69,15 +69,17 @@
 	/*
 	 * Initialize all of the zone allocators we use.
 	 */
+	xfs_log_ticket_zone = kmem_zone_init(sizeof(xlog_ticket_t),
+						"xfs_log_ticket");
 	xfs_bmap_free_item_zone = kmem_zone_init(sizeof(xfs_bmap_free_item_t),
-						 "xfs_bmap_free_item");
+						"xfs_bmap_free_item");
 	xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t),
-					    "xfs_btree_cur");
-	xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans");
-	xfs_da_state_zone =
-		kmem_zone_init(sizeof(xfs_da_state_t), "xfs_da_state");
+						"xfs_btree_cur");
+	xfs_da_state_zone = kmem_zone_init(sizeof(xfs_da_state_t),
+						"xfs_da_state");
 	xfs_dabuf_zone = kmem_zone_init(sizeof(xfs_dabuf_t), "xfs_dabuf");
 	xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork");
+	xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans");
 	xfs_acl_zone_init(xfs_acl_zone, "xfs_acl");
 	xfs_mru_cache_init();
 	xfs_filestream_init();
@@ -113,9 +115,6 @@
 	xfs_ili_zone =
 		kmem_zone_init_flags(sizeof(xfs_inode_log_item_t), "xfs_ili",
 					KM_ZONE_SPREAD, NULL);
-	xfs_icluster_zone =
-		kmem_zone_init_flags(sizeof(xfs_icluster_t), "xfs_icluster",
-					KM_ZONE_SPREAD, NULL);
 
 	/*
 	 * Allocate global trace buffers.
@@ -153,11 +152,9 @@
 	extern kmem_zone_t	*xfs_inode_zone;
 	extern kmem_zone_t	*xfs_efd_zone;
 	extern kmem_zone_t	*xfs_efi_zone;
-	extern kmem_zone_t	*xfs_icluster_zone;
 
 	xfs_cleanup_procfs();
 	xfs_sysctl_unregister();
-	xfs_refcache_destroy();
 	xfs_filestream_uninit();
 	xfs_mru_cache_uninit();
 	xfs_acl_zone_destroy(xfs_acl_zone);
@@ -189,7 +186,6 @@
 	kmem_zone_destroy(xfs_efi_zone);
 	kmem_zone_destroy(xfs_ifork_zone);
 	kmem_zone_destroy(xfs_ili_zone);
-	kmem_zone_destroy(xfs_icluster_zone);
 }
 
 /*
@@ -573,7 +569,7 @@
 #ifdef HAVE_DMAPI
 	if (mp->m_flags & XFS_MOUNT_DMAPI) {
 		error = XFS_SEND_PREUNMOUNT(mp,
-				rvp, DM_RIGHT_NULL, rvp, DM_RIGHT_NULL,
+				rip, DM_RIGHT_NULL, rip, DM_RIGHT_NULL,
 				NULL, NULL, 0, 0,
 				(mp->m_dmevmask & (1<<DM_EVENT_PREUNMOUNT))?
 					0:DM_FLAGS_UNWANTED);
@@ -584,11 +580,6 @@
 					0 : DM_FLAGS_UNWANTED;
 	}
 #endif
-	/*
-	 * First blow any referenced inode from this file system
-	 * out of the reference cache, and delete the timer.
-	 */
-	xfs_refcache_purge_mp(mp);
 
 	/*
 	 * Blow away any referenced inode in the filestreams cache.
@@ -607,7 +598,7 @@
 	/*
 	 * Drop the reference count
 	 */
-	VN_RELE(rvp);
+	IRELE(rip);
 
 	/*
 	 * If we're forcing a shutdown, typically because of a media error,
@@ -629,7 +620,7 @@
 		/* Note: mp structure must still exist for
 		 * XFS_SEND_UNMOUNT() call.
 		 */
-		XFS_SEND_UNMOUNT(mp, error == 0 ? rvp : NULL,
+		XFS_SEND_UNMOUNT(mp, error == 0 ? rip : NULL,
 			DM_RIGHT_NULL, 0, error, unmount_event_flags);
 	}
 	if (xfs_unmountfs_needed) {
@@ -646,13 +637,12 @@
 	return XFS_ERROR(error);
 }
 
-STATIC int
+STATIC void
 xfs_quiesce_fs(
 	xfs_mount_t		*mp)
 {
 	int			count = 0, pincount;
 
-	xfs_refcache_purge_mp(mp);
 	xfs_flush_buftarg(mp->m_ddev_targp, 0);
 	xfs_finish_reclaim_all(mp, 0);
 
@@ -671,8 +661,6 @@
 			count++;
 		}
 	} while (count < 2);
-
-	return 0;
 }
 
 /*
@@ -684,6 +672,8 @@
 xfs_attr_quiesce(
 	xfs_mount_t	*mp)
 {
+	int	error = 0;
+
 	/* wait for all modifications to complete */
 	while (atomic_read(&mp->m_active_trans) > 0)
 		delay(100);
@@ -694,7 +684,11 @@
 	ASSERT_ALWAYS(atomic_read(&mp->m_active_trans) == 0);
 
 	/* Push the superblock and write an unmount record */
-	xfs_log_sbcount(mp, 1);
+	error = xfs_log_sbcount(mp, 1);
+	if (error)
+		xfs_fs_cmn_err(CE_WARN, mp,
+				"xfs_attr_quiesce: failed to log sb changes. "
+				"Frozen image may not be consistent.");
 	xfs_log_unmount_write(mp);
 	xfs_unmountfs_writesb(mp);
 }
@@ -790,8 +784,8 @@
 		goto fscorrupt_out2;
 
 	if (rbmip) {
-		VN_RELE(XFS_ITOV(rbmip));
-		VN_RELE(XFS_ITOV(rsumip));
+		IRELE(rbmip);
+		IRELE(rsumip);
 	}
 
 	xfs_iunlock(rip, XFS_ILOCK_EXCL);
@@ -1169,10 +1163,10 @@
 			 * above, then wait until after we've unlocked
 			 * the inode to release the reference.  This is
 			 * because we can be already holding the inode
-			 * lock when VN_RELE() calls xfs_inactive().
+			 * lock when IRELE() calls xfs_inactive().
 			 *
 			 * Make sure to drop the mount lock before calling
-			 * VN_RELE() so that we don't trip over ourselves if
+			 * IRELE() so that we don't trip over ourselves if
 			 * we have to go for the mount lock again in the
 			 * inactive code.
 			 */
@@ -1180,7 +1174,7 @@
 				IPOINTER_INSERT(ip, mp);
 			}
 
-			VN_RELE(vp);
+			IRELE(ip);
 
 			vnode_refed = B_FALSE;
 		}
@@ -1323,30 +1317,8 @@
 	}
 
 	/*
-	 * If this is the periodic sync, then kick some entries out of
-	 * the reference cache.  This ensures that idle entries are
-	 * eventually kicked out of the cache.
-	 */
-	if (flags & SYNC_REFCACHE) {
-		if (flags & SYNC_WAIT)
-			xfs_refcache_purge_mp(mp);
-		else
-			xfs_refcache_purge_some(mp);
-	}
-
-	/*
-	 * If asked, update the disk superblock with incore counter values if we
-	 * are using non-persistent counters so that they don't get too far out
-	 * of sync if we crash or get a forced shutdown. We don't want to force
-	 * this to disk, just get a transaction into the iclogs....
-	 */
-	if (flags & SYNC_SUPER)
-		xfs_log_sbcount(mp, 0);
-
-	/*
 	 * Now check to see if the log needs a "dummy" transaction.
 	 */
-
 	if (!(flags & SYNC_REMOUNT) && xfs_log_need_covered(mp)) {
 		xfs_trans_t *tp;
 		xfs_inode_t *ip;
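
Two small error-handling shifts run through this file: xfs_quiesce_fs() becomes void because it could only ever return 0, while xfs_attr_quiesce() now captures the result of xfs_log_sbcount() and warns rather than silently discarding it, since a freeze that failed to log superblock counters may leave an inconsistent frozen image. A sketch of that warn-but-continue pattern (the stub standing in for xfs_log_sbcount() is hypothetical):

    #include <stdio.h>

    /* Hypothetical stand-in for xfs_log_sbcount(): 0 on success, errno
     * on failure. */
    static int log_sbcount_stub(int force)
    {
        return force ? 0 : 5;   /* pretend EIO when not forced */
    }

    static void quiesce(int force)
    {
        int error = log_sbcount_stub(force);

        /* Mirror the new code: don't abort the freeze, but don't be
         * silent either -- the frozen image may not be consistent. */
        if (error)
            fprintf(stderr,
                    "quiesce: failed to log sb changes (%d); "
                    "frozen image may not be consistent\n", error);
    }

    int main(void)
    {
        quiesce(0);
        quiesce(1);
        return 0;
    }
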
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 64c5953..6650601 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -48,7 +48,6 @@
 #include "xfs_quota.h"
 #include "xfs_utils.h"
 #include "xfs_rtalloc.h"
-#include "xfs_refcache.h"
 #include "xfs_trans_space.h"
 #include "xfs_log_priv.h"
 #include "xfs_filestream.h"
@@ -327,7 +326,7 @@
 		if (DM_EVENT_ENABLED(ip, DM_EVENT_TRUNCATE) &&
 		    !(flags & ATTR_DMI)) {
 			int dmflags = AT_DELAY_FLAG(flags) | DM_SEM_FLAG_WR;
-			code = XFS_SEND_DATA(mp, DM_EVENT_TRUNCATE, vp,
+			code = XFS_SEND_DATA(mp, DM_EVENT_TRUNCATE, ip,
 				vap->va_size, 0, dmflags, NULL);
 			if (code) {
 				lock_flags = 0;
@@ -634,6 +633,15 @@
 	 * Truncate file.  Must have write permission and not be a directory.
 	 */
 	if (mask & XFS_AT_SIZE) {
+		/*
+		 * Only change the c/mtime if we are changing the size
+		 * or we are explicitly asked to change it. This handles
+		 * the semantic difference between truncate() and ftruncate()
+		 * as implemented in the VFS.
+		 */
+		if (vap->va_size != ip->i_size || (mask & XFS_AT_CTIME))
+			timeflags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
+
 		if (vap->va_size > ip->i_size) {
 			xfs_igrow_finish(tp, ip, vap->va_size,
 			    !(flags & ATTR_DMI));
@@ -662,10 +670,6 @@
 			 */
 			xfs_iflags_set(ip, XFS_ITRUNCATED);
 		}
-		/*
-		 * Have to do this even if the file's size doesn't change.
-		 */
-		timeflags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
 	}
 
 	/*
@@ -877,7 +881,7 @@
 
 	if (DM_EVENT_ENABLED(ip, DM_EVENT_ATTRIBUTE) &&
 	    !(flags & ATTR_DMI)) {
-		(void) XFS_SEND_NAMESP(mp, DM_EVENT_ATTRIBUTE, vp, DM_RIGHT_NULL,
+		(void) XFS_SEND_NAMESP(mp, DM_EVENT_ATTRIBUTE, ip, DM_RIGHT_NULL,
 					NULL, DM_RIGHT_NULL, NULL, NULL,
 					0, 0, AT_DELAY_FLAG(flags));
 	}
@@ -1443,28 +1447,22 @@
 	tp = *tpp;
 	mp = ip->i_mount;
 	ASSERT(ip->i_d.di_forkoff != 0);
-	xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+	if (error)
+		goto error_unlock;
 
 	error = xfs_attr_inactive(ip);
-	if (error) {
-		*tpp = NULL;
-		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
-		return error; /* goto out */
-	}
+	if (error)
+		goto error_unlock;
 
 	tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
 	error = xfs_trans_reserve(tp, 0,
 				  XFS_IFREE_LOG_RES(mp),
 				  0, XFS_TRANS_PERM_LOG_RES,
 				  XFS_INACTIVE_LOG_COUNT);
-	if (error) {
-		ASSERT(XFS_FORCED_SHUTDOWN(mp));
-		xfs_trans_cancel(tp, 0);
-		*tpp = NULL;
-		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
-		return error;
-	}
+	if (error)
+		goto error_cancel;
 
 	xfs_ilock(ip, XFS_ILOCK_EXCL);
 	xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
@@ -1475,6 +1473,14 @@
 
 	*tpp = tp;
 	return 0;
+
+error_cancel:
+	ASSERT(XFS_FORCED_SHUTDOWN(mp));
+	xfs_trans_cancel(tp, 0);
+error_unlock:
+	*tpp = NULL;
+	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+	return error;
 }
 
 int
@@ -1520,12 +1526,6 @@
 			xfs_flush_pages(ip, 0, -1, XFS_B_ASYNC, FI_NONE);
 	}
 
-#ifdef HAVE_REFCACHE
-	/* If we are in the NFS reference cache then don't do this now */
-	if (ip->i_refcache)
-		return 0;
-#endif
-
 	if (ip->i_d.di_nlink != 0) {
 		if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) &&
 		     ((ip->i_size > 0) || (VN_CACHED(vp) > 0 ||
@@ -1588,9 +1588,8 @@
 
 	mp = ip->i_mount;
 
-	if (ip->i_d.di_nlink == 0 && DM_EVENT_ENABLED(ip, DM_EVENT_DESTROY)) {
-		(void) XFS_SEND_DESTROY(mp, vp, DM_RIGHT_NULL);
-	}
+	if (ip->i_d.di_nlink == 0 && DM_EVENT_ENABLED(ip, DM_EVENT_DESTROY))
+		XFS_SEND_DESTROY(mp, ip, DM_RIGHT_NULL);
 
 	error = 0;
 
@@ -1744,11 +1743,18 @@
 		XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
 
 		/*
-		 * Just ignore errors at this point.  There is
-		 * nothing we can do except to try to keep going.
+		 * Just ignore errors at this point.  There is nothing we can
+		 * do except to try to keep going. Make sure it's not a silent
+		 * error.
 		 */
-		(void) xfs_bmap_finish(&tp,  &free_list, &committed);
-		(void) xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+		error = xfs_bmap_finish(&tp,  &free_list, &committed);
+		if (error)
+			xfs_fs_cmn_err(CE_NOTE, mp, "xfs_inactive: "
+				"xfs_bmap_finish() returned error %d", error);
+		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+		if (error)
+			xfs_fs_cmn_err(CE_NOTE, mp, "xfs_inactive: "
+				"xfs_trans_commit() returned error %d", error);
 	}
 	/*
 	 * Release the dquots held by inode, if any.
@@ -1765,8 +1771,8 @@
 int
 xfs_lookup(
 	xfs_inode_t		*dp,
-	bhv_vname_t		*dentry,
-	bhv_vnode_t		**vpp)
+	struct xfs_name		*name,
+	xfs_inode_t		**ipp)
 {
 	xfs_inode_t		*ip;
 	xfs_ino_t		e_inum;
@@ -1779,9 +1785,9 @@
 		return XFS_ERROR(EIO);
 
 	lock_mode = xfs_ilock_map_shared(dp);
-	error = xfs_dir_lookup_int(dp, lock_mode, dentry, &e_inum, &ip);
+	error = xfs_dir_lookup_int(dp, lock_mode, name, &e_inum, &ip);
 	if (!error) {
-		*vpp = XFS_ITOV(ip);
+		*ipp = ip;
 		xfs_itrace_ref(ip);
 	}
 	xfs_iunlock_map_shared(dp, lock_mode);
@@ -1791,19 +1797,16 @@
 int
 xfs_create(
 	xfs_inode_t		*dp,
-	bhv_vname_t		*dentry,
+	struct xfs_name		*name,
 	mode_t			mode,
 	xfs_dev_t		rdev,
-	bhv_vnode_t		**vpp,
+	xfs_inode_t		**ipp,
 	cred_t			*credp)
 {
-	char			*name = VNAME(dentry);
-	xfs_mount_t	        *mp = dp->i_mount;
-	bhv_vnode_t		*dir_vp = XFS_ITOV(dp);
+	xfs_mount_t		*mp = dp->i_mount;
 	xfs_inode_t		*ip;
-	bhv_vnode_t	        *vp = NULL;
 	xfs_trans_t		*tp;
-	int                     error;
+	int			error;
 	xfs_bmap_free_t		free_list;
 	xfs_fsblock_t		first_block;
 	boolean_t		unlock_dp_on_error = B_FALSE;
@@ -1813,17 +1816,14 @@
 	xfs_prid_t		prid;
 	struct xfs_dquot	*udqp, *gdqp;
 	uint			resblks;
-	int			namelen;
 
-	ASSERT(!*vpp);
+	ASSERT(!*ipp);
 	xfs_itrace_entry(dp);
 
-	namelen = VNAMELEN(dentry);
-
 	if (DM_EVENT_ENABLED(dp, DM_EVENT_CREATE)) {
 		error = XFS_SEND_NAMESP(mp, DM_EVENT_CREATE,
-				dir_vp, DM_RIGHT_NULL, NULL,
-				DM_RIGHT_NULL, name, NULL,
+				dp, DM_RIGHT_NULL, NULL,
+				DM_RIGHT_NULL, name->name, NULL,
 				mode, 0, 0);
 
 		if (error)
@@ -1855,7 +1855,7 @@
 
 	tp = xfs_trans_alloc(mp, XFS_TRANS_CREATE);
 	cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
-	resblks = XFS_CREATE_SPACE_RES(mp, namelen);
+	resblks = XFS_CREATE_SPACE_RES(mp, name->len);
 	/*
 	 * Initially assume that the file does not exist and
 	 * reserve the resources for that case.  If that is not
@@ -1888,7 +1888,8 @@
 	if (error)
 		goto error_return;
 
-	if (resblks == 0 && (error = xfs_dir_canenter(tp, dp, name, namelen)))
+	error = xfs_dir_canenter(tp, dp, name, resblks);
+	if (error)
 		goto error_return;
 	error = xfs_dir_ialloc(&tp, dp, mode, 1,
 			rdev, credp, prid, resblks > 0,
@@ -1914,11 +1915,11 @@
 	 * the transaction cancel unlocking dp so don't do it explicitly in the
 	 * error path.
 	 */
-	VN_HOLD(dir_vp);
+	IHOLD(dp);
 	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
 	unlock_dp_on_error = B_FALSE;
 
-	error = xfs_dir_createname(tp, dp, name, namelen, ip->i_ino,
+	error = xfs_dir_createname(tp, dp, name, ip->i_ino,
 					&first_block, &free_list, resblks ?
 					resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
 	if (error) {
@@ -1952,7 +1953,6 @@
 	 * vnode to the caller, we bump the vnode ref count now.
 	 */
 	IHOLD(ip);
-	vp = XFS_ITOV(ip);
 
 	error = xfs_bmap_finish(&tp, &free_list, &committed);
 	if (error) {
@@ -1970,17 +1970,17 @@
 	XFS_QM_DQRELE(mp, udqp);
 	XFS_QM_DQRELE(mp, gdqp);
 
-	*vpp = vp;
+	*ipp = ip;
 
 	/* Fallthrough to std_return with error = 0  */
 
 std_return:
-	if ((*vpp || (error != 0 && dm_event_sent != 0)) &&
+	if ((*ipp || (error != 0 && dm_event_sent != 0)) &&
 	    DM_EVENT_ENABLED(dp, DM_EVENT_POSTCREATE)) {
 		(void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTCREATE,
-			dir_vp, DM_RIGHT_NULL,
-			*vpp ? vp:NULL,
-			DM_RIGHT_NULL, name, NULL,
+			dp, DM_RIGHT_NULL,
+			*ipp ? ip : NULL,
+			DM_RIGHT_NULL, name->name, NULL,
 			mode, error, 0);
 	}
 	return error;
@@ -2272,46 +2272,32 @@
 int
 xfs_remove(
 	xfs_inode_t             *dp,
-	bhv_vname_t		*dentry)
+	struct xfs_name		*name,
+	xfs_inode_t		*ip)
 {
-	bhv_vnode_t		*dir_vp = XFS_ITOV(dp);
-	char			*name = VNAME(dentry);
 	xfs_mount_t		*mp = dp->i_mount;
-	xfs_inode_t             *ip;
 	xfs_trans_t             *tp = NULL;
 	int                     error = 0;
 	xfs_bmap_free_t         free_list;
 	xfs_fsblock_t           first_block;
 	int			cancel_flags;
 	int			committed;
-	int			dm_di_mode = 0;
 	int			link_zero;
 	uint			resblks;
-	int			namelen;
 
 	xfs_itrace_entry(dp);
 
 	if (XFS_FORCED_SHUTDOWN(mp))
 		return XFS_ERROR(EIO);
 
-	namelen = VNAMELEN(dentry);
-
-	if (!xfs_get_dir_entry(dentry, &ip)) {
-	        dm_di_mode = ip->i_d.di_mode;
-		IRELE(ip);
-	}
-
 	if (DM_EVENT_ENABLED(dp, DM_EVENT_REMOVE)) {
-		error = XFS_SEND_NAMESP(mp, DM_EVENT_REMOVE, dir_vp,
-					DM_RIGHT_NULL, NULL, DM_RIGHT_NULL,
-					name, NULL, dm_di_mode, 0, 0);
+		error = XFS_SEND_NAMESP(mp, DM_EVENT_REMOVE, dp, DM_RIGHT_NULL,
+					NULL, DM_RIGHT_NULL, name->name, NULL,
+					ip->i_d.di_mode, 0, 0);
 		if (error)
 			return error;
 	}
 
-	/* From this point on, return through std_return */
-	ip = NULL;
-
 	/*
 	 * We need to get a reference to ip before we get our log
 	 * reservation. The reason for this is that we cannot call
@@ -2324,13 +2310,7 @@
 	 * when we call xfs_iget.  Instead we get an unlocked reference
 	 * to the inode before getting our log reservation.
 	 */
-	error = xfs_get_dir_entry(dentry, &ip);
-	if (error) {
-		REMOVE_DEBUG_TRACE(__LINE__);
-		goto std_return;
-	}
-
-	dm_di_mode = ip->i_d.di_mode;
+	IHOLD(ip);
 
 	xfs_itrace_entry(ip);
 	xfs_itrace_ref(ip);
@@ -2398,7 +2378,7 @@
 	 * Entry must exist since we did a lookup in xfs_lock_dir_and_entry.
 	 */
 	XFS_BMAP_INIT(&free_list, &first_block);
-	error = xfs_dir_removename(tp, dp, name, namelen, ip->i_ino,
+	error = xfs_dir_removename(tp, dp, name, ip->i_ino,
 					&first_block, &free_list, 0);
 	if (error) {
 		ASSERT(error != ENOENT);
@@ -2449,14 +2429,6 @@
 	}
 
 	/*
-	 * Before we drop our extra reference to the inode, purge it
-	 * from the refcache if it is there.  By waiting until afterwards
-	 * to do the IRELE, we ensure that we won't go inactive in the
-	 * xfs_refcache_purge_ip routine (although that would be OK).
-	 */
-	xfs_refcache_purge_ip(ip);
-
-	/*
 	 * If we are using filestreams, kill the stream association.
 	 * If the file is still open it may get a new one but that
 	 * will get killed on last close in xfs_close() so we don't
@@ -2472,9 +2444,9 @@
  std_return:
 	if (DM_EVENT_ENABLED(dp, DM_EVENT_POSTREMOVE)) {
 		(void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTREMOVE,
-				dir_vp, DM_RIGHT_NULL,
+				dp, DM_RIGHT_NULL,
 				NULL, DM_RIGHT_NULL,
-				name, NULL, dm_di_mode, error, 0);
+				name->name, NULL, ip->i_d.di_mode, error, 0);
 	}
 	return error;
 
@@ -2495,14 +2467,6 @@
 	cancel_flags |= XFS_TRANS_ABORT;
 	xfs_trans_cancel(tp, cancel_flags);
 
-	/*
-	 * Before we drop our extra reference to the inode, purge it
-	 * from the refcache if it is there.  By waiting until afterwards
-	 * to do the IRELE, we ensure that we won't go inactive in the
-	 * xfs_refcache_purge_ip routine (although that would be OK).
-	 */
-	xfs_refcache_purge_ip(ip);
-
 	IRELE(ip);
 
 	goto std_return;
@@ -2511,12 +2475,10 @@
 int
 xfs_link(
 	xfs_inode_t		*tdp,
-	bhv_vnode_t		*src_vp,
-	bhv_vname_t		*dentry)
+	xfs_inode_t		*sip,
+	struct xfs_name		*target_name)
 {
-	bhv_vnode_t		*target_dir_vp = XFS_ITOV(tdp);
 	xfs_mount_t		*mp = tdp->i_mount;
-	xfs_inode_t		*sip = xfs_vtoi(src_vp);
 	xfs_trans_t		*tp;
 	xfs_inode_t		*ips[2];
 	int			error;
@@ -2525,23 +2487,20 @@
 	int			cancel_flags;
 	int			committed;
 	int			resblks;
-	char			*target_name = VNAME(dentry);
-	int			target_namelen;
 
 	xfs_itrace_entry(tdp);
-	xfs_itrace_entry(xfs_vtoi(src_vp));
+	xfs_itrace_entry(sip);
 
-	target_namelen = VNAMELEN(dentry);
-	ASSERT(!VN_ISDIR(src_vp));
+	ASSERT(!S_ISDIR(sip->i_d.di_mode));
 
 	if (XFS_FORCED_SHUTDOWN(mp))
 		return XFS_ERROR(EIO);
 
 	if (DM_EVENT_ENABLED(tdp, DM_EVENT_LINK)) {
 		error = XFS_SEND_NAMESP(mp, DM_EVENT_LINK,
-					target_dir_vp, DM_RIGHT_NULL,
-					src_vp, DM_RIGHT_NULL,
-					target_name, NULL, 0, 0, 0);
+					tdp, DM_RIGHT_NULL,
+					sip, DM_RIGHT_NULL,
+					target_name->name, NULL, 0, 0, 0);
 		if (error)
 			return error;
 	}
@@ -2556,7 +2515,7 @@
 
 	tp = xfs_trans_alloc(mp, XFS_TRANS_LINK);
 	cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
-	resblks = XFS_LINK_SPACE_RES(mp, target_namelen);
+	resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
 	error = xfs_trans_reserve(tp, resblks, XFS_LINK_LOG_RES(mp), 0,
 			XFS_TRANS_PERM_LOG_RES, XFS_LINK_LOG_COUNT);
 	if (error == ENOSPC) {
@@ -2584,8 +2543,8 @@
 	 * xfs_trans_cancel will both unlock the inodes and
 	 * decrement the associated ref counts.
 	 */
-	VN_HOLD(src_vp);
-	VN_HOLD(target_dir_vp);
+	IHOLD(sip);
+	IHOLD(tdp);
 	xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
 	xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);
 
@@ -2608,15 +2567,14 @@
 		goto error_return;
 	}
 
-	if (resblks == 0 &&
-	    (error = xfs_dir_canenter(tp, tdp, target_name, target_namelen)))
+	error = xfs_dir_canenter(tp, tdp, target_name, resblks);
+	if (error)
 		goto error_return;
 
 	XFS_BMAP_INIT(&free_list, &first_block);
 
-	error = xfs_dir_createname(tp, tdp, target_name, target_namelen,
-				   sip->i_ino, &first_block, &free_list,
-				   resblks);
+	error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
+					&first_block, &free_list, resblks);
 	if (error)
 		goto abort_return;
 	xfs_ichgtime(tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
@@ -2650,9 +2608,9 @@
 std_return:
 	if (DM_EVENT_ENABLED(sip, DM_EVENT_POSTLINK)) {
 		(void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTLINK,
-				target_dir_vp, DM_RIGHT_NULL,
-				src_vp, DM_RIGHT_NULL,
-				target_name, NULL, 0, error, 0);
+				tdp, DM_RIGHT_NULL,
+				sip, DM_RIGHT_NULL,
+				target_name->name, NULL, 0, error, 0);
 	}
 	return error;
 
@@ -2669,17 +2627,13 @@
 int
 xfs_mkdir(
 	xfs_inode_t             *dp,
-	bhv_vname_t		*dentry,
+	struct xfs_name		*dir_name,
 	mode_t			mode,
-	bhv_vnode_t		**vpp,
+	xfs_inode_t		**ipp,
 	cred_t			*credp)
 {
-	bhv_vnode_t		*dir_vp = XFS_ITOV(dp);
-	char			*dir_name = VNAME(dentry);
-	int			dir_namelen = VNAMELEN(dentry);
 	xfs_mount_t		*mp = dp->i_mount;
 	xfs_inode_t		*cdp;	/* inode of created dir */
-	bhv_vnode_t		*cvp;	/* vnode of created dir */
 	xfs_trans_t		*tp;
 	int			cancel_flags;
 	int			error;
@@ -2700,8 +2654,8 @@
 
 	if (DM_EVENT_ENABLED(dp, DM_EVENT_CREATE)) {
 		error = XFS_SEND_NAMESP(mp, DM_EVENT_CREATE,
-					dir_vp, DM_RIGHT_NULL, NULL,
-					DM_RIGHT_NULL, dir_name, NULL,
+					dp, DM_RIGHT_NULL, NULL,
+					DM_RIGHT_NULL, dir_name->name, NULL,
 					mode, 0, 0);
 		if (error)
 			return error;
@@ -2730,7 +2684,7 @@
 
 	tp = xfs_trans_alloc(mp, XFS_TRANS_MKDIR);
 	cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
-	resblks = XFS_MKDIR_SPACE_RES(mp, dir_namelen);
+	resblks = XFS_MKDIR_SPACE_RES(mp, dir_name->len);
 	error = xfs_trans_reserve(tp, resblks, XFS_MKDIR_LOG_RES(mp), 0,
 				  XFS_TRANS_PERM_LOG_RES, XFS_MKDIR_LOG_COUNT);
 	if (error == ENOSPC) {
@@ -2762,8 +2716,8 @@
 	if (error)
 		goto error_return;
 
-	if (resblks == 0 &&
-	    (error = xfs_dir_canenter(tp, dp, dir_name, dir_namelen)))
+	error = xfs_dir_canenter(tp, dp, dir_name, resblks);
+	if (error)
 		goto error_return;
 	/*
 	 * create the directory inode.
@@ -2786,15 +2740,15 @@
 	 * from here on will result in the transaction cancel
 	 * unlocking dp so don't do it explicitly in the error path.
 	 */
-	VN_HOLD(dir_vp);
+	IHOLD(dp);
 	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
 	unlock_dp_on_error = B_FALSE;
 
 	XFS_BMAP_INIT(&free_list, &first_block);
 
-	error = xfs_dir_createname(tp, dp, dir_name, dir_namelen, cdp->i_ino,
-				   &first_block, &free_list, resblks ?
-				   resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
+	error = xfs_dir_createname(tp, dp, dir_name, cdp->i_ino,
+					&first_block, &free_list, resblks ?
+					resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
 	if (error) {
 		ASSERT(error != ENOSPC);
 		goto error1;
@@ -2817,11 +2771,9 @@
 	if (error)
 		goto error2;
 
-	cvp = XFS_ITOV(cdp);
-
 	created = B_TRUE;
 
-	*vpp = cvp;
+	*ipp = cdp;
 	IHOLD(cdp);
 
 	/*
@@ -2858,10 +2810,10 @@
 	if ((created || (error != 0 && dm_event_sent != 0)) &&
 	    DM_EVENT_ENABLED(dp, DM_EVENT_POSTCREATE)) {
 		(void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTCREATE,
-					dir_vp, DM_RIGHT_NULL,
-					created ? XFS_ITOV(cdp):NULL,
+					dp, DM_RIGHT_NULL,
+					created ? cdp : NULL,
 					DM_RIGHT_NULL,
-					dir_name, NULL,
+					dir_name->name, NULL,
 					mode, error, 0);
 	}
 	return error;
@@ -2885,20 +2837,17 @@
 int
 xfs_rmdir(
 	xfs_inode_t             *dp,
-	bhv_vname_t		*dentry)
+	struct xfs_name		*name,
+	xfs_inode_t		*cdp)
 {
 	bhv_vnode_t		*dir_vp = XFS_ITOV(dp);
-	char			*name = VNAME(dentry);
-	int			namelen = VNAMELEN(dentry);
 	xfs_mount_t		*mp = dp->i_mount;
-  	xfs_inode_t             *cdp;   /* child directory */
 	xfs_trans_t             *tp;
 	int                     error;
 	xfs_bmap_free_t         free_list;
 	xfs_fsblock_t           first_block;
 	int			cancel_flags;
 	int			committed;
-	int			dm_di_mode = S_IFDIR;
 	int			last_cdp_link;
 	uint			resblks;
 
@@ -2907,24 +2856,15 @@
 	if (XFS_FORCED_SHUTDOWN(mp))
 		return XFS_ERROR(EIO);
 
-	if (!xfs_get_dir_entry(dentry, &cdp)) {
-	        dm_di_mode = cdp->i_d.di_mode;
-		IRELE(cdp);
-	}
-
 	if (DM_EVENT_ENABLED(dp, DM_EVENT_REMOVE)) {
 		error = XFS_SEND_NAMESP(mp, DM_EVENT_REMOVE,
-					dir_vp, DM_RIGHT_NULL,
-					NULL, DM_RIGHT_NULL,
-					name, NULL, dm_di_mode, 0, 0);
+					dp, DM_RIGHT_NULL,
+					NULL, DM_RIGHT_NULL, name->name,
+					NULL, cdp->i_d.di_mode, 0, 0);
 		if (error)
 			return XFS_ERROR(error);
 	}
 
-	/* Return through std_return after this point. */
-
-	cdp = NULL;
-
 	/*
 	 * We need to get a reference to cdp before we get our log
 	 * reservation.  The reason for this is that we cannot call
@@ -2937,13 +2877,7 @@
 	 * when we call xfs_iget.  Instead we get an unlocked reference
 	 * to the inode before getting our log reservation.
 	 */
-	error = xfs_get_dir_entry(dentry, &cdp);
-	if (error) {
-		REMOVE_DEBUG_TRACE(__LINE__);
-		goto std_return;
-	}
-	mp = dp->i_mount;
-	dm_di_mode = cdp->i_d.di_mode;
+	IHOLD(cdp);
 
 	/*
 	 * Get the dquots for the inodes.
@@ -3020,7 +2954,7 @@
 		goto error_return;
 	}
 
-	error = xfs_dir_removename(tp, dp, name, namelen, cdp->i_ino,
+	error = xfs_dir_removename(tp, dp, name, cdp->i_ino,
 					&first_block, &free_list, resblks);
 	if (error)
 		goto error1;
@@ -3098,9 +3032,9 @@
  std_return:
 	if (DM_EVENT_ENABLED(dp, DM_EVENT_POSTREMOVE)) {
 		(void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTREMOVE,
-					dir_vp, DM_RIGHT_NULL,
+					dp, DM_RIGHT_NULL,
 					NULL, DM_RIGHT_NULL,
-					name, NULL, dm_di_mode,
+					name->name, NULL, cdp->i_d.di_mode,
 					error, 0);
 	}
 	return error;
@@ -3118,13 +3052,12 @@
 int
 xfs_symlink(
 	xfs_inode_t		*dp,
-	bhv_vname_t		*dentry,
-	char			*target_path,
+	struct xfs_name		*link_name,
+	const char		*target_path,
 	mode_t			mode,
-	bhv_vnode_t		**vpp,
+	xfs_inode_t		**ipp,
 	cred_t			*credp)
 {
-	bhv_vnode_t		*dir_vp = XFS_ITOV(dp);
 	xfs_mount_t		*mp = dp->i_mount;
 	xfs_trans_t		*tp;
 	xfs_inode_t		*ip;
@@ -3140,17 +3073,15 @@
 	int			nmaps;
 	xfs_bmbt_irec_t		mval[SYMLINK_MAPS];
 	xfs_daddr_t		d;
-	char			*cur_chunk;
+	const char		*cur_chunk;
 	int			byte_cnt;
 	int			n;
 	xfs_buf_t		*bp;
 	xfs_prid_t		prid;
 	struct xfs_dquot	*udqp, *gdqp;
 	uint			resblks;
-	char			*link_name = VNAME(dentry);
-	int			link_namelen;
 
-	*vpp = NULL;
+	*ipp = NULL;
 	error = 0;
 	ip = NULL;
 	tp = NULL;
@@ -3160,44 +3091,17 @@
 	if (XFS_FORCED_SHUTDOWN(mp))
 		return XFS_ERROR(EIO);
 
-	link_namelen = VNAMELEN(dentry);
-
 	/*
 	 * Check component lengths of the target path name.
 	 */
 	pathlen = strlen(target_path);
 	if (pathlen >= MAXPATHLEN)      /* total string too long */
 		return XFS_ERROR(ENAMETOOLONG);
-	if (pathlen >= MAXNAMELEN) {    /* is any component too long? */
-		int len, total;
-		char *path;
-
-		for (total = 0, path = target_path; total < pathlen;) {
-			/*
-			 * Skip any slashes.
-			 */
-			while(*path == '/') {
-				total++;
-				path++;
-			}
-
-			/*
-			 * Count up to the next slash or end of path.
-			 * Error out if the component is bigger than MAXNAMELEN.
-			 */
-			for(len = 0; *path != '/' && total < pathlen;total++, path++) {
-				if (++len >= MAXNAMELEN) {
-					error = ENAMETOOLONG;
-					return error;
-				}
-			}
-		}
-	}
 
 	if (DM_EVENT_ENABLED(dp, DM_EVENT_SYMLINK)) {
-		error = XFS_SEND_NAMESP(mp, DM_EVENT_SYMLINK, dir_vp,
+		error = XFS_SEND_NAMESP(mp, DM_EVENT_SYMLINK, dp,
 					DM_RIGHT_NULL, NULL, DM_RIGHT_NULL,
-					link_name, target_path, 0, 0, 0);
+					link_name->name, target_path, 0, 0, 0);
 		if (error)
 			return error;
 	}
@@ -3229,7 +3133,7 @@
 		fs_blocks = 0;
 	else
 		fs_blocks = XFS_B_TO_FSB(mp, pathlen);
-	resblks = XFS_SYMLINK_SPACE_RES(mp, link_namelen, fs_blocks);
+	resblks = XFS_SYMLINK_SPACE_RES(mp, link_name->len, fs_blocks);
 	error = xfs_trans_reserve(tp, resblks, XFS_SYMLINK_LOG_RES(mp), 0,
 			XFS_TRANS_PERM_LOG_RES, XFS_SYMLINK_LOG_COUNT);
 	if (error == ENOSPC && fs_blocks == 0) {
@@ -3263,8 +3167,8 @@
 	/*
 	 * Check for ability to enter directory entry, if no space reserved.
 	 */
-	if (resblks == 0 &&
-	    (error = xfs_dir_canenter(tp, dp, link_name, link_namelen)))
+	error = xfs_dir_canenter(tp, dp, link_name, resblks);
+	if (error)
 		goto error_return;
 	/*
 	 * Initialize the bmap freelist prior to calling either
@@ -3289,7 +3193,7 @@
 	 * transaction cancel unlocking dp so don't do it explicitly in the
 	 * error path.
 	 */
-	VN_HOLD(dir_vp);
+	IHOLD(dp);
 	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
 	unlock_dp_on_error = B_FALSE;
 
@@ -3356,8 +3260,8 @@
 	/*
 	 * Create the directory entry for the symlink.
 	 */
-	error = xfs_dir_createname(tp, dp, link_name, link_namelen, ip->i_ino,
-				   &first_block, &free_list, resblks);
+	error = xfs_dir_createname(tp, dp, link_name, ip->i_ino,
+					&first_block, &free_list, resblks);
 	if (error)
 		goto error1;
 	xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
@@ -3399,19 +3303,14 @@
 std_return:
 	if (DM_EVENT_ENABLED(dp, DM_EVENT_POSTSYMLINK)) {
 		(void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTSYMLINK,
-					dir_vp, DM_RIGHT_NULL,
-					error ? NULL : XFS_ITOV(ip),
-					DM_RIGHT_NULL, link_name, target_path,
-					0, error, 0);
+					dp, DM_RIGHT_NULL,
+					error ? NULL : ip,
+					DM_RIGHT_NULL, link_name->name,
+					target_path, 0, error, 0);
 	}
 
-	if (!error) {
-		bhv_vnode_t *vp;
-
-		ASSERT(ip);
-		vp = XFS_ITOV(ip);
-		*vpp = vp;
-	}
+	if (!error)
+		*ipp = ip;
 	return error;
 
  error2:
@@ -3431,60 +3330,11 @@
 }
 
 int
-xfs_rwlock(
-	xfs_inode_t	*ip,
-	bhv_vrwlock_t	locktype)
-{
-	if (S_ISDIR(ip->i_d.di_mode))
-		return 1;
-	if (locktype == VRWLOCK_WRITE) {
-		xfs_ilock(ip, XFS_IOLOCK_EXCL);
-	} else if (locktype == VRWLOCK_TRY_READ) {
-		return xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED);
-	} else if (locktype == VRWLOCK_TRY_WRITE) {
-		return xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL);
-	} else {
-		ASSERT((locktype == VRWLOCK_READ) ||
-		       (locktype == VRWLOCK_WRITE_DIRECT));
-		xfs_ilock(ip, XFS_IOLOCK_SHARED);
-	}
-
-	return 1;
-}
-
-
-void
-xfs_rwunlock(
-	xfs_inode_t     *ip,
-	bhv_vrwlock_t	locktype)
-{
- 	if (S_ISDIR(ip->i_d.di_mode))
-  		return;
-	if (locktype == VRWLOCK_WRITE) {
-		/*
-		 * In the write case, we may have added a new entry to
-		 * the reference cache.  This might store a pointer to
-		 * an inode to be released in this inode.  If it is there,
-		 * clear the pointer and release the inode after unlocking
-		 * this one.
-		 */
-		xfs_refcache_iunlock(ip, XFS_IOLOCK_EXCL);
-	} else {
-		ASSERT((locktype == VRWLOCK_READ) ||
-		       (locktype == VRWLOCK_WRITE_DIRECT));
-		xfs_iunlock(ip, XFS_IOLOCK_SHARED);
-	}
-	return;
-}
-
-
-int
 xfs_inode_flush(
 	xfs_inode_t	*ip,
 	int		flags)
 {
 	xfs_mount_t	*mp = ip->i_mount;
-	xfs_inode_log_item_t *iip = ip->i_itemp;
 	int		error = 0;
 
 	if (XFS_FORCED_SHUTDOWN(mp))
@@ -3494,33 +3344,9 @@
 	 * Bypass inodes which have already been cleaned by
 	 * the inode flush clustering code inside xfs_iflush
 	 */
-	if ((ip->i_update_core == 0) &&
-	    ((iip == NULL) || !(iip->ili_format.ilf_fields & XFS_ILOG_ALL)))
+	if (xfs_inode_clean(ip))
 		return 0;
 
-	if (flags & FLUSH_LOG) {
-		if (iip && iip->ili_last_lsn) {
-			xlog_t		*log = mp->m_log;
-			xfs_lsn_t	sync_lsn;
-			int		log_flags = XFS_LOG_FORCE;
-
-			spin_lock(&log->l_grant_lock);
-			sync_lsn = log->l_last_sync_lsn;
-			spin_unlock(&log->l_grant_lock);
-
-			if ((XFS_LSN_CMP(iip->ili_last_lsn, sync_lsn) > 0)) {
-				if (flags & FLUSH_SYNC)
-					log_flags |= XFS_LOG_SYNC;
-				error = xfs_log_force(mp, iip->ili_last_lsn, log_flags);
-				if (error)
-					return error;
-			}
-
-			if (ip->i_update_core == 0)
-				return 0;
-		}
-	}
-
 	/*
 	 * We make this non-blocking if the inode is contended,
 	 * return EAGAIN to indicate to the caller that they
@@ -3528,30 +3354,22 @@
 	 * blocking on inodes inside another operation right
 	 * now, they get caught later by xfs_sync.
 	 */
-	if (flags & FLUSH_INODE) {
-		int	flush_flags;
-
-		if (flags & FLUSH_SYNC) {
-			xfs_ilock(ip, XFS_ILOCK_SHARED);
-			xfs_iflock(ip);
-		} else if (xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
-			if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip)) {
-				xfs_iunlock(ip, XFS_ILOCK_SHARED);
-				return EAGAIN;
-			}
-		} else {
+	if (flags & FLUSH_SYNC) {
+		xfs_ilock(ip, XFS_ILOCK_SHARED);
+		xfs_iflock(ip);
+	} else if (xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
+		if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip)) {
+			xfs_iunlock(ip, XFS_ILOCK_SHARED);
 			return EAGAIN;
 		}
-
-		if (flags & FLUSH_SYNC)
-			flush_flags = XFS_IFLUSH_SYNC;
-		else
-			flush_flags = XFS_IFLUSH_ASYNC;
-
-		error = xfs_iflush(ip, flush_flags);
-		xfs_iunlock(ip, XFS_ILOCK_SHARED);
+	} else {
+		return EAGAIN;
 	}
 
+	error = xfs_iflush(ip, (flags & FLUSH_SYNC) ? XFS_IFLUSH_SYNC
+						    : XFS_IFLUSH_ASYNC_NOBLOCK);
+	xfs_iunlock(ip, XFS_ILOCK_SHARED);
+
 	return error;
 }
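
The rewritten xfs_inode_flush() drops the FLUSH_LOG/FLUSH_INODE flag maze; what survives is the simple shape above: block for a synchronous flush, otherwise try the lock and return EAGAIN if it is contended, so the caller backs off and lets xfs_sync catch the inode later. The same shape in portable C, offered as an analogy only:

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t ilock = PTHREAD_MUTEX_INITIALIZER;

    /* Sync callers block; async callers never wait on a contended lock. */
    static int flush_inode(int sync)
    {
        if (sync) {
            pthread_mutex_lock(&ilock);
        } else if (pthread_mutex_trylock(&ilock) != 0) {
            return EAGAIN;   /* caller retries on a later pass */
        }

        /* ... write back the inode here ... */

        pthread_mutex_unlock(&ilock);
        return 0;
    }

    int main(void)
    {
        printf("async flush -> %d\n", flush_inode(0));
        printf("sync flush  -> %d\n", flush_inode(1));
        return 0;
    }
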
 
@@ -3694,12 +3512,12 @@
 	 * We get the flush lock regardless, though, just to make sure
 	 * we don't free it while it is being flushed.
 	 */
-	if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
-		if (!locked) {
-			xfs_ilock(ip, XFS_ILOCK_EXCL);
-			xfs_iflock(ip);
-		}
+	if (!locked) {
+		xfs_ilock(ip, XFS_ILOCK_EXCL);
+		xfs_iflock(ip);
+	}
 
+	if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
 		if (ip->i_update_core ||
 		    ((ip->i_itemp != NULL) &&
 		     (ip->i_itemp->ili_format.ilf_fields != 0))) {
@@ -3719,17 +3537,11 @@
 		ASSERT(ip->i_update_core == 0);
 		ASSERT(ip->i_itemp == NULL ||
 		       ip->i_itemp->ili_format.ilf_fields == 0);
-		xfs_iunlock(ip, XFS_ILOCK_EXCL);
-	} else if (locked) {
-		/*
-		 * We are not interested in doing an iflush if we're
-		 * in the process of shutting down the filesystem forcibly.
-		 * So, just reclaim the inode.
-		 */
-		xfs_ifunlock(ip);
-		xfs_iunlock(ip, XFS_ILOCK_EXCL);
 	}
 
+	xfs_ifunlock(ip);
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+
  reclaim:
 	xfs_ireclaim(ip);
 	return 0;
@@ -3845,9 +3657,8 @@
 		end_dmi_offset = offset+len;
 		if (end_dmi_offset > ip->i_size)
 			end_dmi_offset = ip->i_size;
-		error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, XFS_ITOV(ip),
-			offset, end_dmi_offset - offset,
-			0, NULL);
+		error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, ip, offset,
+				      end_dmi_offset - offset, 0, NULL);
 		if (error)
 			return error;
 	}
@@ -3956,8 +3767,8 @@
 	if (error == ENOSPC && (attr_flags & ATTR_DMI) == 0 &&
 	    DM_EVENT_ENABLED(ip, DM_EVENT_NOSPACE)) {
 		error = XFS_SEND_NAMESP(mp, DM_EVENT_NOSPACE,
-				XFS_ITOV(ip), DM_RIGHT_NULL,
-				XFS_ITOV(ip), DM_RIGHT_NULL,
+				ip, DM_RIGHT_NULL,
+				ip, DM_RIGHT_NULL,
 				NULL, NULL, 0, 0, 0); /* Delay flag intentionally unused */
 		if (error == 0)
 			goto retry;	/* Maybe DMAPI app. has made space */
@@ -4021,7 +3832,8 @@
 		XFS_BUF_READ(bp);
 		XFS_BUF_SET_ADDR(bp, XFS_FSB_TO_DB(ip, imap.br_startblock));
 		xfsbdstrat(mp, bp);
-		if ((error = xfs_iowait(bp))) {
+		error = xfs_iowait(bp);
+		if (error) {
 			xfs_ioerror_alert("xfs_zero_remaining_bytes(read)",
 					  mp, bp, XFS_BUF_ADDR(bp));
 			break;
@@ -4033,7 +3845,8 @@
 		XFS_BUF_UNREAD(bp);
 		XFS_BUF_WRITE(bp);
 		xfsbdstrat(mp, bp);
-		if ((error = xfs_iowait(bp))) {
+		error = xfs_iowait(bp);
+		if (error) {
 			xfs_ioerror_alert("xfs_zero_remaining_bytes(write)",
 					  mp, bp, XFS_BUF_ADDR(bp));
 			break;
@@ -4102,7 +3915,7 @@
 	    DM_EVENT_ENABLED(ip, DM_EVENT_WRITE)) {
 		if (end_dmi_offset > ip->i_size)
 			end_dmi_offset = ip->i_size;
-		error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, vp,
+		error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, ip,
 				offset, end_dmi_offset - offset,
 				AT_DELAY_FLAG(attr_flags), NULL);
 		if (error)
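
Throughout this file the patch also converts fire-and-forget calls — (void) xfs_trans_commit(), unchecked xfs_bmap_finish() — into checked ones that at least log the failure, and it consolidates duplicated error paths into single unwind sequences (see the new error_cancel/error_unlock labels in xfs_inactive_attrs()). The unwind idiom, sketched generically:

    #include <stdio.h>
    #include <stdlib.h>

    /* Each failure jumps to the label that undoes exactly what has been
     * set up so far, so the cleanup code exists once instead of being
     * copied into every error check. */
    static int do_work(void)
    {
        char *a, *b;
        int error;

        a = malloc(64);
        if (!a) {
            error = -1;
            goto error_out;
        }

        b = malloc(64);
        if (!b) {
            error = -2;
            goto error_free_a;   /* undo only what succeeded */
        }

        /* ... real work ... */
        free(b);
        free(a);
        return 0;

    error_free_a:
        free(a);
    error_out:
        return error;
    }

    int main(void)
    {
        return do_work() ? EXIT_FAILURE : EXIT_SUCCESS;
    }
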
diff --git a/fs/xfs/xfs_vnodeops.h b/fs/xfs/xfs_vnodeops.h
index 4e3970f..24c5392 100644
--- a/fs/xfs/xfs_vnodeops.h
+++ b/fs/xfs/xfs_vnodeops.h
@@ -23,31 +23,32 @@
 		xfs_off_t stop);
 int xfs_release(struct xfs_inode *ip);
 int xfs_inactive(struct xfs_inode *ip);
-int xfs_lookup(struct xfs_inode *dp, bhv_vname_t *dentry,
-		bhv_vnode_t **vpp);
-int xfs_create(struct xfs_inode *dp, bhv_vname_t *dentry, mode_t mode,
-		xfs_dev_t rdev, bhv_vnode_t **vpp, struct cred *credp);
-int xfs_remove(struct xfs_inode *dp, bhv_vname_t	*dentry);
-int xfs_link(struct xfs_inode *tdp, bhv_vnode_t *src_vp,
-		bhv_vname_t *dentry);
-int xfs_mkdir(struct xfs_inode *dp, bhv_vname_t *dentry,
-		mode_t mode, bhv_vnode_t **vpp, struct cred *credp);
-int xfs_rmdir(struct xfs_inode *dp, bhv_vname_t *dentry);
+int xfs_lookup(struct xfs_inode *dp, struct xfs_name *name,
+		struct xfs_inode **ipp);
+int xfs_create(struct xfs_inode *dp, struct xfs_name *name, mode_t mode,
+		xfs_dev_t rdev, struct xfs_inode **ipp, struct cred *credp);
+int xfs_remove(struct xfs_inode *dp, struct xfs_name *name,
+		struct xfs_inode *ip);
+int xfs_link(struct xfs_inode *tdp, struct xfs_inode *sip,
+		struct xfs_name *target_name);
+int xfs_mkdir(struct xfs_inode *dp, struct xfs_name *dir_name,
+		mode_t mode, struct xfs_inode **ipp, struct cred *credp);
+int xfs_rmdir(struct xfs_inode *dp, struct xfs_name *name,
+		struct xfs_inode *cdp);
 int xfs_readdir(struct xfs_inode	*dp, void *dirent, size_t bufsize,
 		       xfs_off_t *offset, filldir_t filldir);
-int xfs_symlink(struct xfs_inode *dp, bhv_vname_t *dentry,
-		char *target_path, mode_t mode, bhv_vnode_t **vpp,
+int xfs_symlink(struct xfs_inode *dp, struct xfs_name *link_name,
+		const char *target_path, mode_t mode, struct xfs_inode **ipp,
 		struct cred *credp);
-int xfs_rwlock(struct xfs_inode *ip, bhv_vrwlock_t locktype);
-void xfs_rwunlock(struct xfs_inode *ip, bhv_vrwlock_t locktype);
 int xfs_inode_flush(struct xfs_inode *ip, int flags);
 int xfs_set_dmattrs(struct xfs_inode *ip, u_int evmask, u_int16_t state);
 int xfs_reclaim(struct xfs_inode *ip);
 int xfs_change_file_space(struct xfs_inode *ip, int cmd,
 		xfs_flock64_t *bf, xfs_off_t offset,
 		struct cred *credp, int	attr_flags);
-int xfs_rename(struct xfs_inode *src_dp, bhv_vname_t *src_vname,
-		bhv_vnode_t *target_dir_vp, bhv_vname_t *target_vname);
+int xfs_rename(struct xfs_inode *src_dp, struct xfs_name *src_name,
+		struct xfs_inode *src_ip, struct xfs_inode *target_dp,
+		struct xfs_name *target_name);
 int xfs_attr_get(struct xfs_inode *ip, const char *name, char *value,
 		int *valuelenp, int flags, cred_t *cred);
 int xfs_attr_set(struct xfs_inode *dp, const char *name, char *value,
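
These prototypes complete the switch: every namespace operation now speaks xfs_inode_t and struct xfs_name rather than bhv_vnode_t and bhv_vname_t, leaving the Linux-specific glue layer as the only place that touches dentries and VFS inodes. A hedged sketch of what a glue-layer lookup is expected to look like (XFS_I() and ITOV() stand in for whatever inode-mapping helpers that layer provides; this is not an excerpt from the patch):

    static struct dentry *
    vn_lookup(struct inode *dir, struct dentry *dentry)
    {
        struct xfs_name   xname;
        struct xfs_inode  *cip = NULL;
        int               error;

        /* Build the xfs_name from the VFS dentry. */
        xname.name = (const char *)dentry->d_name.name;
        xname.len = (int)dentry->d_name.len;

        error = xfs_lookup(XFS_I(dir), &xname, &cip);
        if (error)
            return ERR_PTR(-error);   /* XFS returns positive errnos */
        return d_splice_alias(ITOV(cip), dentry);
    }
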
diff --git a/include/asm-alpha/ide.h b/include/asm-alpha/ide.h
index b7bf68d..f44129a 100644
--- a/include/asm-alpha/ide.h
+++ b/include/asm-alpha/ide.h
@@ -13,9 +13,6 @@
 
 #ifdef __KERNEL__
 
-
-#define IDE_ARCH_OBSOLETE_DEFAULTS
-
 static inline int ide_default_irq(unsigned long base)
 {
 	switch (base) {
@@ -40,14 +37,6 @@
 	}
 }
 
-#define ide_default_io_ctl(base)	((base) + 0x206) /* obsolete */
-
-#ifdef CONFIG_PCI
-#define ide_init_default_irq(base)	(0)
-#else
-#define ide_init_default_irq(base)	ide_default_irq(base)
-#endif
-
 #include <asm-generic/ide_iops.h>
 
 #endif /* __KERNEL__ */
diff --git a/include/asm-alpha/semaphore.h b/include/asm-alpha/semaphore.h
index f1e9278..d9b2034 100644
--- a/include/asm-alpha/semaphore.h
+++ b/include/asm-alpha/semaphore.h
@@ -1,149 +1 @@
-#ifndef _ALPHA_SEMAPHORE_H
-#define _ALPHA_SEMAPHORE_H
-
-/*
- * SMP- and interrupt-safe semaphores..
- *
- * (C) Copyright 1996 Linus Torvalds
- * (C) Copyright 1996, 2000 Richard Henderson
- */
-
-#include <asm/current.h>
-#include <asm/system.h>
-#include <asm/atomic.h>
-#include <linux/compiler.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-struct semaphore {
-	atomic_t count;
-	wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n)			\
-{								\
-	.count	= ATOMIC_INIT(n),				\
-  	.wait	= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait),	\
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count)		\
-	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name)		__DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init(struct semaphore *sem, int val)
-{
-	/*
-	 * Logically, 
-	 *   *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
-	 * except that gcc produces better initializing by parts yet.
-	 */
-
-	atomic_set(&sem->count, val);
-	init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
-	sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
-	sema_init(sem, 0);
-}
-
-extern void down(struct semaphore *);
-extern void __down_failed(struct semaphore *);
-extern int  down_interruptible(struct semaphore *);
-extern int  __down_failed_interruptible(struct semaphore *);
-extern int  down_trylock(struct semaphore *);
-extern void up(struct semaphore *);
-extern void __up_wakeup(struct semaphore *);
-
-/*
- * Hidden out of line code is fun, but extremely messy.  Rely on newer
- * compilers to do a respectable job with this.  The contention cases
- * are handled out of line in arch/alpha/kernel/semaphore.c.
- */
-
-static inline void __down(struct semaphore *sem)
-{
-	long count;
-	might_sleep();
-	count = atomic_dec_return(&sem->count);
-	if (unlikely(count < 0))
-		__down_failed(sem);
-}
-
-static inline int __down_interruptible(struct semaphore *sem)
-{
-	long count;
-	might_sleep();
-	count = atomic_dec_return(&sem->count);
-	if (unlikely(count < 0))
-		return __down_failed_interruptible(sem);
-	return 0;
-}
-
-/*
- * down_trylock returns 0 on success, 1 if we failed to get the lock.
- */
-
-static inline int __down_trylock(struct semaphore *sem)
-{
-	long ret;
-
-	/* "Equivalent" C:
-
-	   do {
-		ret = ldl_l;
-		--ret;
-		if (ret < 0)
-			break;
-		ret = stl_c = ret;
-	   } while (ret == 0);
-	*/
-	__asm__ __volatile__(
-		"1:	ldl_l	%0,%1\n"
-		"	subl	%0,1,%0\n"
-		"	blt	%0,2f\n"
-		"	stl_c	%0,%1\n"
-		"	beq	%0,3f\n"
-		"	mb\n"
-		"2:\n"
-		".subsection 2\n"
-		"3:	br	1b\n"
-		".previous"
-		: "=&r" (ret), "=m" (sem->count)
-		: "m" (sem->count));
-
-	return ret < 0;
-}
-
-static inline void __up(struct semaphore *sem)
-{
-	if (unlikely(atomic_inc_return(&sem->count) <= 0))
-		__up_wakeup(sem);
-}
-
-#if !defined(CONFIG_DEBUG_SEMAPHORE)
-extern inline void down(struct semaphore *sem)
-{
-	__down(sem);
-}
-extern inline int down_interruptible(struct semaphore *sem)
-{
-	return __down_interruptible(sem);
-}
-extern inline int down_trylock(struct semaphore *sem)
-{
-	return __down_trylock(sem);
-}
-extern inline void up(struct semaphore *sem)
-{
-	__up(sem);
-}
-#endif
-
-#endif
+#include <linux/semaphore.h>
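
This one-line file sets the pattern for the rest of the series: every architecture's hand-rolled semaphore — including the alpha LL/SC assembly above — is retired in favor of the single generic implementation in <linux/semaphore.h>. What all the per-arch versions shared was the same uncontended fast path: atomically adjust the count and take the slow path (sleep or wake) only when it crosses zero. Sketched with C11 atomics in user space (illustrative; the generic kernel version actually serializes with a spinlock):

    #include <stdatomic.h>
    #include <stdio.h>

    struct sem {
        atomic_int count;
    };

    /* Slow-path stubs; real code would sleep on / wake a wait queue. */
    static void down_slow(struct sem *s) { (void)s; /* sleep */ }
    static void up_slow(struct sem *s)   { (void)s; /* wake one */ }

    static void down(struct sem *s)
    {
        /* Fast path mirrors the removed alpha __down(): fall into the
         * slow path only when the decremented count goes negative. */
        if (atomic_fetch_sub(&s->count, 1) - 1 < 0)
            down_slow(s);
    }

    static void up(struct sem *s)
    {
        /* Matches alpha __up(): wake someone if anyone was waiting. */
        if (atomic_fetch_add(&s->count, 1) + 1 <= 0)
            up_slow(s);
    }

    int main(void)
    {
        struct sem s = { ATOMIC_VAR_INIT(1) };

        down(&s);   /* uncontended: acquires immediately */
        up(&s);
        printf("count back to %d\n", atomic_load(&s.count));
        return 0;
    }
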
diff --git a/include/asm-arm/arch-iop13xx/adma.h b/include/asm-arm/arch-iop13xx/adma.h
index efd9a5e..90d14ee 100644
--- a/include/asm-arm/arch-iop13xx/adma.h
+++ b/include/asm-arm/arch-iop13xx/adma.h
@@ -454,11 +454,6 @@
 	__raw_writel(adma_accr, ADMA_ACCR(chan));
 }
 
-static inline void iop_chan_idle(int busy, struct iop_adma_chan *chan)
-{
-	do { } while (0);
-}
-
 static inline u32 iop_chan_get_status(struct iop_adma_chan *chan)
 {
 	return __raw_readl(ADMA_ACSR(chan));
diff --git a/include/asm-arm/hardware/iop3xx-adma.h b/include/asm-arm/hardware/iop3xx-adma.h
index 5c529e6..84d635b 100644
--- a/include/asm-arm/hardware/iop3xx-adma.h
+++ b/include/asm-arm/hardware/iop3xx-adma.h
@@ -767,20 +767,12 @@
 static inline void iop_chan_append(struct iop_adma_chan *chan)
 {
 	u32 dma_chan_ctrl;
-	/* workaround dropped interrupts on 3xx */
-	mod_timer(&chan->cleanup_watchdog, jiffies + msecs_to_jiffies(3));
 
 	dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
 	dma_chan_ctrl |= 0x2;
 	__raw_writel(dma_chan_ctrl, DMA_CCR(chan));
 }
 
-static inline void iop_chan_idle(int busy, struct iop_adma_chan *chan)
-{
-	if (!busy)
-		del_timer(&chan->cleanup_watchdog);
-}
-
 static inline u32 iop_chan_get_status(struct iop_adma_chan *chan)
 {
 	return __raw_readl(DMA_CSR(chan));
diff --git a/include/asm-arm/hardware/iop_adma.h b/include/asm-arm/hardware/iop_adma.h
index ca8e71f..cb7e361 100644
--- a/include/asm-arm/hardware/iop_adma.h
+++ b/include/asm-arm/hardware/iop_adma.h
@@ -51,7 +51,6 @@
  * @common: common dmaengine channel object members
  * @last_used: place holder for allocation to continue from where it left off
  * @all_slots: complete domain of slots usable by the channel
- * @cleanup_watchdog: workaround missed interrupts on iop3xx
  * @slots_allocated: records the actual size of the descriptor slot pool
  * @irq_tasklet: bottom half where iop_adma_slot_cleanup runs
  */
@@ -65,7 +64,6 @@
 	struct dma_chan common;
 	struct iop_adma_desc_slot *last_used;
 	struct list_head all_slots;
-	struct timer_list cleanup_watchdog;
 	int slots_allocated;
 	struct tasklet_struct irq_tasklet;
 };
diff --git a/include/asm-arm/ide.h b/include/asm-arm/ide.h
index f348fcf..88f4d23 100644
--- a/include/asm-arm/ide.h
+++ b/include/asm-arm/ide.h
@@ -17,14 +17,6 @@
 #define MAX_HWIFS	4
 #endif
 
-#if !defined(CONFIG_ARCH_L7200)
-# ifdef CONFIG_ARCH_CLPS7500
-#  define ide_default_io_ctl(base)	((base) + 0x206) /* obsolete */
-# else
-#  define ide_default_io_ctl(base)	(0)
-# endif
-#endif /* !ARCH_L7200 */
-
 #define __ide_mm_insw(port,addr,len)	readsw(port,addr,len)
 #define __ide_mm_insl(port,addr,len)	readsl(port,addr,len)
 #define __ide_mm_outsw(port,addr,len)	writesw(port,addr,len)
diff --git a/include/asm-arm/semaphore-helper.h b/include/asm-arm/semaphore-helper.h
deleted file mode 100644
index 1d7f198..0000000
--- a/include/asm-arm/semaphore-helper.h
+++ /dev/null
@@ -1,84 +0,0 @@
-#ifndef ASMARM_SEMAPHORE_HELPER_H
-#define ASMARM_SEMAPHORE_HELPER_H
-
-/*
- * These two _must_ execute atomically wrt each other.
- */
-static inline void wake_one_more(struct semaphore * sem)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	if (atomic_read(&sem->count) <= 0)
-		sem->waking++;
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-}
-
-static inline int waking_non_zero(struct semaphore *sem)
-{
-	unsigned long flags;
-	int ret = 0;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	if (sem->waking > 0) {
-		sem->waking--;
-		ret = 1;
-	}
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-	return ret;
-}
-
-/*
- * waking non zero interruptible
- *	1	got the lock
- *	0	go to sleep
- *	-EINTR	interrupted
- *
- * We must undo the sem->count down_interruptible() increment while we are
- * protected by the spinlock in order to make this atomic_inc() with the
- * atomic_read() in wake_one_more(), otherwise we can race. -arca
- */
-static inline int waking_non_zero_interruptible(struct semaphore *sem,
-						struct task_struct *tsk)
-{
-	unsigned long flags;
-	int ret = 0;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	if (sem->waking > 0) {
-		sem->waking--;
-		ret = 1;
-	} else if (signal_pending(tsk)) {
-		atomic_inc(&sem->count);
-		ret = -EINTR;
-	}
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-	return ret;	
-}
-
-/*
- * waking_non_zero_try_lock:
- *	1	failed to lock
- *	0	got the lock
- *
- * We must undo the sem->count down_interruptible() increment while we are
- * protected by the spinlock in order to make this atomic_inc() with the
- * atomic_read() in wake_one_more(), otherwise we can race. -arca
- */
-static inline int waking_non_zero_trylock(struct semaphore *sem)
-{
-	unsigned long flags;
-	int ret = 1;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	if (sem->waking <= 0)
-		atomic_inc(&sem->count);
-	else {
-		sem->waking--;
-		ret = 0;
-	}
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-	return ret;
-}
-
-#endif
diff --git a/include/asm-arm/semaphore.h b/include/asm-arm/semaphore.h
index 1c8b441..d9b2034 100644
--- a/include/asm-arm/semaphore.h
+++ b/include/asm-arm/semaphore.h
@@ -1,98 +1 @@
-/*
- * linux/include/asm-arm/semaphore.h
- */
-#ifndef __ASM_ARM_SEMAPHORE_H
-#define __ASM_ARM_SEMAPHORE_H
-
-#include <linux/linkage.h>
-#include <linux/spinlock.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-#include <asm/atomic.h>
-#include <asm/locks.h>
-
-struct semaphore {
-	atomic_t count;
-	int sleepers;
-	wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INIT(name, cnt)				\
-{								\
-	.count	= ATOMIC_INIT(cnt),				\
-	.wait	= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait),	\
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count)	\
-	struct semaphore name = __SEMAPHORE_INIT(name,count)
-
-#define DECLARE_MUTEX(name)		__DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init(struct semaphore *sem, int val)
-{
-	atomic_set(&sem->count, val);
-	sem->sleepers = 0;
-	init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX(struct semaphore *sem)
-{
-	sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED(struct semaphore *sem)
-{
-	sema_init(sem, 0);
-}
-
-/*
- * special register calling convention
- */
-asmlinkage void __down_failed(void);
-asmlinkage int  __down_interruptible_failed(void);
-asmlinkage int  __down_trylock_failed(void);
-asmlinkage void __up_wakeup(void);
-
-extern void __down(struct semaphore * sem);
-extern int  __down_interruptible(struct semaphore * sem);
-extern int  __down_trylock(struct semaphore * sem);
-extern void __up(struct semaphore * sem);
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "__down" is the actual routine that waits...
- */
-static inline void down(struct semaphore * sem)
-{
-	might_sleep();
-	__down_op(sem, __down_failed);
-}
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "__down_interruptible" is the actual routine that waits...
- */
-static inline int down_interruptible (struct semaphore * sem)
-{
-	might_sleep();
-	return __down_op_ret(sem, __down_interruptible_failed);
-}
-
-static inline int down_trylock(struct semaphore *sem)
-{
-	return __down_op_ret(sem, __down_trylock_failed);
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-static inline void up(struct semaphore * sem)
-{
-	__up_op(sem, __up_wakeup);
-}
-
-#endif
+#include <linux/semaphore.h>
diff --git a/include/asm-avr32/semaphore.h b/include/asm-avr32/semaphore.h
index feaf1d4..d9b2034 100644
--- a/include/asm-avr32/semaphore.h
+++ b/include/asm-avr32/semaphore.h
@@ -1,108 +1 @@
-/*
- * SMP- and interrupt-safe semaphores.
- *
- * Copyright (C) 2006 Atmel Corporation
- *
- * Based on include/asm-i386/semaphore.h
- *   Copyright (C) 1996 Linus Torvalds
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __ASM_AVR32_SEMAPHORE_H
-#define __ASM_AVR32_SEMAPHORE_H
-
-#include <linux/linkage.h>
-
-#include <asm/system.h>
-#include <asm/atomic.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-struct semaphore {
-	atomic_t count;
-	int sleepers;
-	wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n)				\
-{									\
-	.count		= ATOMIC_INIT(n),				\
-	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)	\
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
-	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
-	atomic_set(&sem->count, val);
-	sem->sleepers = 0;
-	init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
-	sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
-	sema_init(sem, 0);
-}
-
-void __down(struct semaphore * sem);
-int  __down_interruptible(struct semaphore * sem);
-void __up(struct semaphore * sem);
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "__down_failed" is a special asm handler that calls the C
- * routine that actually waits. See arch/i386/kernel/semaphore.c
- */
-static inline void down(struct semaphore * sem)
-{
-	might_sleep();
-	if (unlikely(atomic_dec_return (&sem->count) < 0))
-		__down (sem);
-}
-
-/*
- * Interruptible try to acquire a semaphore.  If we obtained
- * it, return zero.  If we were interrupted, returns -EINTR
- */
-static inline int down_interruptible(struct semaphore * sem)
-{
-	int ret = 0;
-
-	might_sleep();
-	if (unlikely(atomic_dec_return (&sem->count) < 0))
-		ret = __down_interruptible (sem);
-	return ret;
-}
-
-/*
- * Non-blockingly attempt to down() a semaphore.
- * Returns zero if we acquired it
- */
-static inline int down_trylock(struct semaphore * sem)
-{
-	return atomic_dec_if_positive(&sem->count) < 0;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-static inline void up(struct semaphore * sem)
-{
-	if (unlikely(atomic_inc_return (&sem->count) <= 0))
-		__up (sem);
-}
-
-#endif /*__ASM_AVR32_SEMAPHORE_H */
+#include <linux/semaphore.h>
diff --git a/include/asm-blackfin/ide.h b/include/asm-blackfin/ide.h
index 121e272..5b88de1 100644
--- a/include/asm-blackfin/ide.h
+++ b/include/asm-blackfin/ide.h
@@ -19,10 +19,6 @@
 
 #define MAX_HWIFS	1
 
-/* Legacy ... BLK_DEV_IDECS */
-#define ide_default_io_ctl(base)	((base) + 0x206) /* obsolete */
-
-
 #include <asm-generic/ide_iops.h>
 
 /****************************************************************************/
diff --git a/include/asm-blackfin/semaphore-helper.h b/include/asm-blackfin/semaphore-helper.h
deleted file mode 100644
index 9082b0d..0000000
--- a/include/asm-blackfin/semaphore-helper.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/* Based on M68K version,	Lineo Inc.	May 2001 */
-
-#ifndef _BFIN_SEMAPHORE_HELPER_H
-#define _BFIN_SEMAPHORE_HELPER_H
-
-/*
- * SMP- and interrupt-safe semaphores helper functions.
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- */
-
-#include <asm/errno.h>
-
-/*
- * These two _must_ execute atomically wrt each other.
- */
-static inline void wake_one_more(struct semaphore *sem)
-{
-	atomic_inc(&sem->waking);
-}
-
-static inline int waking_non_zero(struct semaphore *sem)
-{
-	int ret;
-	unsigned long flags = 0;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	ret = 0;
-	if (atomic_read(&sem->waking) > 0) {
-		atomic_dec(&sem->waking);
-		ret = 1;
-	}
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-	return ret;
-}
-
-/*
- * waking_non_zero_interruptible:
- *	1	got the lock
- *	0	go to sleep
- *	-EINTR	interrupted
- */
-static inline int waking_non_zero_interruptible(struct semaphore *sem,
-						struct task_struct *tsk)
-{
-	int ret = 0;
-	unsigned long flags = 0;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	if (atomic_read(&sem->waking) > 0) {
-		atomic_dec(&sem->waking);
-		ret = 1;
-	} else if (signal_pending(tsk)) {
-		atomic_inc(&sem->count);
-		ret = -EINTR;
-	}
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-	return ret;
-}
-
-/*
- * waking_non_zero_trylock:
- *	1	failed to lock
- *	0	got the lock
- */
-static inline int waking_non_zero_trylock(struct semaphore *sem)
-{
-	int ret = 1;
-	unsigned long flags = 0;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	if (atomic_read(&sem->waking) > 0) {
-		atomic_dec(&sem->waking);
-		ret = 0;
-	} else
-		atomic_inc(&sem->count);
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-	return ret;
-}
-
-#endif				/* _BFIN_SEMAPHORE_HELPER_H */
diff --git a/include/asm-blackfin/semaphore.h b/include/asm-blackfin/semaphore.h
index 533f90f..d9b2034 100644
--- a/include/asm-blackfin/semaphore.h
+++ b/include/asm-blackfin/semaphore.h
@@ -1,105 +1 @@
-#ifndef _BFIN_SEMAPHORE_H
-#define _BFIN_SEMAPHORE_H
-
-#ifndef __ASSEMBLY__
-
-#include <linux/linkage.h>
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-#include <linux/rwsem.h>
-#include <asm/atomic.h>
-
-/*
- * Interrupt-safe semaphores..
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- * BFIN version by akbar hussain Lineo Inc  April 2001
- *
- */
-
-struct semaphore {
-	atomic_t count;
-	int sleepers;
-	wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n)				\
-{									\
-	.count		= ATOMIC_INIT(n),				\
-	.sleepers	= 0,						\
-	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)	\
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
-	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init(struct semaphore *sem, int val)
-{
-	*sem = (struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val);
-}
-
-static inline void init_MUTEX(struct semaphore *sem)
-{
-	sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED(struct semaphore *sem)
-{
-	sema_init(sem, 0);
-}
-
-asmlinkage void __down(struct semaphore *sem);
-asmlinkage int __down_interruptible(struct semaphore *sem);
-asmlinkage int __down_trylock(struct semaphore *sem);
-asmlinkage void __up(struct semaphore *sem);
-
-extern spinlock_t semaphore_wake_lock;
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "down_failed" is a special asm handler that calls the C
- * routine that actually waits.
- */
-static inline void down(struct semaphore *sem)
-{
-	might_sleep();
-	if (atomic_dec_return(&sem->count) < 0)
-		__down(sem);
-}
-
-static inline int down_interruptible(struct semaphore *sem)
-{
-	int ret = 0;
-
-	might_sleep();
-	if (atomic_dec_return(&sem->count) < 0)
-		ret = __down_interruptible(sem);
-	return (ret);
-}
-
-static inline int down_trylock(struct semaphore *sem)
-{
-	int ret = 0;
-
-	if (atomic_dec_return(&sem->count) < 0)
-		ret = __down_trylock(sem);
-	return ret;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-static inline void up(struct semaphore *sem)
-{
-	if (atomic_inc_return(&sem->count) <= 0)
-		__up(sem);
-}
-
-#endif				/* __ASSEMBLY__ */
-#endif				/* _BFIN_SEMAPHORE_H */
+#include <linux/semaphore.h>
diff --git a/include/asm-cris/semaphore-helper.h b/include/asm-cris/semaphore-helper.h
deleted file mode 100644
index 27bfeca1..0000000
--- a/include/asm-cris/semaphore-helper.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/* $Id: semaphore-helper.h,v 1.3 2001/03/26 15:00:33 orjanf Exp $
- *
- * SMP- and interrupt-safe semaphores helper functions. Generic versions, no
- * optimizations whatsoever... 
- *
- */
-
-#ifndef _ASM_SEMAPHORE_HELPER_H
-#define _ASM_SEMAPHORE_HELPER_H
-
-#include <asm/atomic.h>
-#include <linux/errno.h>
-
-#define read(a) ((a)->counter)
-#define inc(a) (((a)->counter)++)
-#define dec(a) (((a)->counter)--)
-
-#define count_inc(a) ((*(a))++)
-
-/*
- * These two _must_ execute atomically wrt each other.
- */
-static inline void wake_one_more(struct semaphore * sem)
-{
-	atomic_inc(&sem->waking);
-}
-
-static inline int waking_non_zero(struct semaphore *sem)
-{
-	unsigned long flags;
-	int ret = 0;
-
-	local_irq_save(flags);
-	if (read(&sem->waking) > 0) {
-		dec(&sem->waking);
-		ret = 1;
-	}
-	local_irq_restore(flags);
-	return ret;
-}
-
-static inline int waking_non_zero_interruptible(struct semaphore *sem,
-						struct task_struct *tsk)
-{
-	int ret = 0;
-	unsigned long flags;
-
-	local_irq_save(flags);
-	if (read(&sem->waking) > 0) {
-		dec(&sem->waking);
-		ret = 1;
-	} else if (signal_pending(tsk)) {
-		inc(&sem->count);
-		ret = -EINTR;
-	}
-	local_irq_restore(flags);
-	return ret;
-}
-
-static inline int waking_non_zero_trylock(struct semaphore *sem)
-{
-        int ret = 1;
-	unsigned long flags;
-
-	local_irq_save(flags);
-	if (read(&sem->waking) <= 0)
-		inc(&sem->count);
-	else {
-		dec(&sem->waking);
-		ret = 0;
-	}
-	local_irq_restore(flags);
-	return ret;
-}
-
-#endif /* _ASM_SEMAPHORE_HELPER_H */
-
-
diff --git a/include/asm-cris/semaphore.h b/include/asm-cris/semaphore.h
index 31a4ac4..d9b2034 100644
--- a/include/asm-cris/semaphore.h
+++ b/include/asm-cris/semaphore.h
@@ -1,133 +1 @@
-/* $Id: semaphore.h,v 1.3 2001/05/08 13:54:09 bjornw Exp $ */
-
-/* On the i386 these are coded in asm, perhaps we should as well. Later.. */
-
-#ifndef _CRIS_SEMAPHORE_H
-#define _CRIS_SEMAPHORE_H
-
-#define RW_LOCK_BIAS             0x01000000
-
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-#include <linux/rwsem.h>
-
-#include <asm/system.h>
-#include <asm/atomic.h>
-
-/*
- * CRIS semaphores, implemented in C-only so far. 
- */
-
-struct semaphore {
-	atomic_t count;
-	atomic_t waking;
-	wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n)				\
-{									\
-	.count		= ATOMIC_INIT(n),				\
-	.waking		= ATOMIC_INIT(0),				\
-	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)    \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
-        struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init(struct semaphore *sem, int val)
-{
-	*sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
-        sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
-        sema_init(sem, 0);
-}
-
-extern void __down(struct semaphore * sem);
-extern int __down_interruptible(struct semaphore * sem);
-extern int __down_trylock(struct semaphore * sem);
-extern void __up(struct semaphore * sem);
-
-/* notice - we probably can do cli/sti here instead of saving */
-
-static inline void down(struct semaphore * sem)
-{
-	unsigned long flags;
-	int failed;
-
-	might_sleep();
-
-	/* atomically decrement the semaphores count, and if its negative, we wait */
-	cris_atomic_save(sem, flags);
-	failed = --(sem->count.counter) < 0;
-	cris_atomic_restore(sem, flags);
-	if(failed) {
-		__down(sem);
-	}
-}
-
-/*
- * This version waits in interruptible state so that the waiting
- * process can be killed.  The down_interruptible routine
- * returns negative for signalled and zero for semaphore acquired.
- */
-
-static inline int down_interruptible(struct semaphore * sem)
-{
-	unsigned long flags;
-	int failed;
-
-	might_sleep();
-
-	/* atomically decrement the semaphores count, and if its negative, we wait */
-	cris_atomic_save(sem, flags);
-	failed = --(sem->count.counter) < 0;
-	cris_atomic_restore(sem, flags);
-	if(failed)
-		failed = __down_interruptible(sem);
-	return(failed);
-}
-
-static inline int down_trylock(struct semaphore * sem)
-{
-	unsigned long flags;
-	int failed;
-
-	cris_atomic_save(sem, flags);
-	failed = --(sem->count.counter) < 0;
-	cris_atomic_restore(sem, flags);
-	if(failed)
-		failed = __down_trylock(sem);
-	return(failed);
-
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-static inline void up(struct semaphore * sem)
-{  
-	unsigned long flags;
-	int wakeup;
-
-	/* atomically increment the semaphores count, and if it was negative, we wake people */
-	cris_atomic_save(sem, flags);
-	wakeup = ++(sem->count.counter) <= 0;
-	cris_atomic_restore(sem, flags);
-	if(wakeup) {
-		__up(sem);
-	}
-}
-
-#endif
+#include <linux/semaphore.h>
diff --git a/include/asm-frv/semaphore.h b/include/asm-frv/semaphore.h
index d7aaa19..d9b2034 100644
--- a/include/asm-frv/semaphore.h
+++ b/include/asm-frv/semaphore.h
@@ -1,155 +1 @@
-/* semaphore.h: semaphores for the FR-V
- *
- * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#ifndef _ASM_SEMAPHORE_H
-#define _ASM_SEMAPHORE_H
-
-#define RW_LOCK_BIAS		 0x01000000
-
-#ifndef __ASSEMBLY__
-
-#include <linux/linkage.h>
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-#include <linux/rwsem.h>
-
-/*
- * the semaphore definition
- * - if counter is >0 then there are tokens available on the semaphore for down to collect
- * - if counter is <=0 then there are no spare tokens, and anyone that wants one must wait
- * - if wait_list is not empty, then there are processes waiting for the semaphore
- */
-struct semaphore {
-	unsigned		counter;
-	spinlock_t		wait_lock;
-	struct list_head	wait_list;
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	unsigned		__magic;
-#endif
-};
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-# define __SEM_DEBUG_INIT(name) , (long)&(name).__magic
-#else
-# define __SEM_DEBUG_INIT(name)
-#endif
-
-
-#define __SEMAPHORE_INITIALIZER(name,count) \
-{ count, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) __SEM_DEBUG_INIT(name) }
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
-	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
-	*sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
-	sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
-	sema_init(sem, 0);
-}
-
-extern void __down(struct semaphore *sem, unsigned long flags);
-extern int  __down_interruptible(struct semaphore *sem, unsigned long flags);
-extern void __up(struct semaphore *sem);
-
-static inline void down(struct semaphore *sem)
-{
-	unsigned long flags;
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	CHECK_MAGIC(sem->__magic);
-#endif
-
-	spin_lock_irqsave(&sem->wait_lock, flags);
-	if (likely(sem->counter > 0)) {
-		sem->counter--;
-		spin_unlock_irqrestore(&sem->wait_lock, flags);
-	}
-	else {
-		__down(sem, flags);
-	}
-}
-
-static inline int down_interruptible(struct semaphore *sem)
-{
-	unsigned long flags;
-	int ret = 0;
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	CHECK_MAGIC(sem->__magic);
-#endif
-
-	spin_lock_irqsave(&sem->wait_lock, flags);
-	if (likely(sem->counter > 0)) {
-		sem->counter--;
-		spin_unlock_irqrestore(&sem->wait_lock, flags);
-	}
-	else {
-		ret = __down_interruptible(sem, flags);
-	}
-	return ret;
-}
-
-/*
- * non-blockingly attempt to down() a semaphore.
- * - returns zero if we acquired it
- */
-static inline int down_trylock(struct semaphore *sem)
-{
-	unsigned long flags;
-	int success = 0;
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	CHECK_MAGIC(sem->__magic);
-#endif
-
-	spin_lock_irqsave(&sem->wait_lock, flags);
-	if (sem->counter > 0) {
-		sem->counter--;
-		success = 1;
-	}
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
-	return !success;
-}
-
-static inline void up(struct semaphore *sem)
-{
-	unsigned long flags;
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	CHECK_MAGIC(sem->__magic);
-#endif
-
-	spin_lock_irqsave(&sem->wait_lock, flags);
-	if (!list_empty(&sem->wait_list))
-		__up(sem);
-	else
-		sem->counter++;
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
-}
-
-static inline int sem_getcount(struct semaphore *sem)
-{
-	return sem->counter;
-}
-
-#endif /* __ASSEMBLY__ */
-
-#endif
+#include <linux/semaphore.h>
diff --git a/include/asm-h8300/semaphore-helper.h b/include/asm-h8300/semaphore-helper.h
deleted file mode 100644
index 4fea36b..0000000
--- a/include/asm-h8300/semaphore-helper.h
+++ /dev/null
@@ -1,85 +0,0 @@
-#ifndef _H8300_SEMAPHORE_HELPER_H
-#define _H8300_SEMAPHORE_HELPER_H
-
-/*
- * SMP- and interrupt-safe semaphores helper functions.
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- * based on
- * m68k version by Andreas Schwab
- */
-
-#include <linux/errno.h>
-
-/*
- * These two _must_ execute atomically wrt each other.
- */
-static inline void wake_one_more(struct semaphore * sem)
-{
-	atomic_inc((atomic_t *)&sem->sleepers);
-}
-
-static inline int waking_non_zero(struct semaphore *sem)
-{
-	int ret;
-	unsigned long flags;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	ret = 0;
-	if (sem->sleepers > 0) {
-		sem->sleepers--;
-		ret = 1;
-	}
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-	return ret;
-}
-
-/*
- * waking_non_zero_interruptible:
- *	1	got the lock
- *	0	go to sleep
- *	-EINTR	interrupted
- */
-static inline int waking_non_zero_interruptible(struct semaphore *sem,
-						struct task_struct *tsk)
-{
-	int ret;
-	unsigned long flags;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	ret = 0;
-	if (sem->sleepers > 0) {
-		sem->sleepers--;
-		ret = 1;
-	} else if (signal_pending(tsk)) {
-		atomic_inc(&sem->count);
-		ret = -EINTR;
-	}
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-	return ret;
-}
-
-/*
- * waking_non_zero_trylock:
- *	1	failed to lock
- *	0	got the lock
- */
-static inline int waking_non_zero_trylock(struct semaphore *sem)
-{
-	int ret;
-	unsigned long flags;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	ret = 1;
-	if (sem->sleepers <= 0)
-		atomic_inc(&sem->count);
-	else {
-		sem->sleepers--;
-		ret = 0;
-	}
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-	return ret;
-}
-
-#endif
diff --git a/include/asm-h8300/semaphore.h b/include/asm-h8300/semaphore.h
index f3ffff8..d9b2034 100644
--- a/include/asm-h8300/semaphore.h
+++ b/include/asm-h8300/semaphore.h
@@ -1,190 +1 @@
-#ifndef _H8300_SEMAPHORE_H
-#define _H8300_SEMAPHORE_H
-
-#define RW_LOCK_BIAS		 0x01000000
-
-#ifndef __ASSEMBLY__
-
-#include <linux/linkage.h>
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-#include <linux/rwsem.h>
-
-#include <asm/system.h>
-#include <asm/atomic.h>
-
-/*
- * Interrupt-safe semaphores..
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- * H8/300 version by Yoshinori Sato
- */
-
-
-struct semaphore {
-	atomic_t count;
-	int sleepers;
-	wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n)				\
-{									\
-	.count		= ATOMIC_INIT(n),				\
-	.sleepers	= 0,						\
-	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)	\
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
-	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
-	*sem = (struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
-	sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
-	sema_init(sem, 0);
-}
-
-asmlinkage void __down_failed(void /* special register calling convention */);
-asmlinkage int  __down_failed_interruptible(void  /* params in registers */);
-asmlinkage int  __down_failed_trylock(void  /* params in registers */);
-asmlinkage void __up_wakeup(void /* special register calling convention */);
-
-asmlinkage void __down(struct semaphore * sem);
-asmlinkage int  __down_interruptible(struct semaphore * sem);
-asmlinkage int  __down_trylock(struct semaphore * sem);
-asmlinkage void __up(struct semaphore * sem);
-
-extern spinlock_t semaphore_wake_lock;
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "down_failed" is a special asm handler that calls the C
- * routine that actually waits. See arch/m68k/lib/semaphore.S
- */
-static inline void down(struct semaphore * sem)
-{
-	register atomic_t *count asm("er0");
-
-	might_sleep();
-
-	count = &(sem->count);
-	__asm__ __volatile__(
-		"stc ccr,r3l\n\t"
-		"orc #0x80,ccr\n\t"
-		"mov.l %2, er1\n\t"
-		"dec.l #1,er1\n\t"
-		"mov.l er1,%0\n\t"
-		"bpl 1f\n\t"
-		"ldc r3l,ccr\n\t"
-		"mov.l %1,er0\n\t"
-		"jsr @___down\n\t"
-		"bra 2f\n"
-		"1:\n\t"
-		"ldc r3l,ccr\n"
-		"2:"
-		: "=m"(*count)
-		: "g"(sem),"m"(*count)
-		: "cc",  "er1", "er2", "er3");
-}
-
-static inline int down_interruptible(struct semaphore * sem)
-{
-	register atomic_t *count asm("er0");
-
-	might_sleep();
-
-	count = &(sem->count);
-	__asm__ __volatile__(
-		"stc ccr,r1l\n\t"
-		"orc #0x80,ccr\n\t"
-		"mov.l %3, er2\n\t"
-		"dec.l #1,er2\n\t"
-		"mov.l er2,%1\n\t"
-		"bpl 1f\n\t"
-		"ldc r1l,ccr\n\t"
-		"mov.l %2,er0\n\t"
-		"jsr @___down_interruptible\n\t"
-		"bra 2f\n"
-		"1:\n\t"
-		"ldc r1l,ccr\n\t"
-		"sub.l %0,%0\n\t"
-		"2:\n\t"
-		: "=r" (count),"=m" (*count)
-		: "g"(sem),"m"(*count)
-		: "cc", "er1", "er2", "er3");
-	return (int)count;
-}
-
-static inline int down_trylock(struct semaphore * sem)
-{
-	register atomic_t *count asm("er0");
-
-	count = &(sem->count);
-	__asm__ __volatile__(
-		"stc ccr,r3l\n\t"
-		"orc #0x80,ccr\n\t"
-		"mov.l %3,er2\n\t"
-		"dec.l #1,er2\n\t"
-		"mov.l er2,%0\n\t"
-		"bpl 1f\n\t"
-		"ldc r3l,ccr\n\t"
-		"jmp @3f\n\t"
-		LOCK_SECTION_START(".align 2\n\t")
-		"3:\n\t"
-		"mov.l %2,er0\n\t"
-		"jsr @___down_trylock\n\t"
-		"jmp @2f\n\t"
-		LOCK_SECTION_END
-		"1:\n\t"
-		"ldc r3l,ccr\n\t"
-		"sub.l %1,%1\n"
-		"2:"
-		: "=m" (*count),"=r"(count)
-		: "g"(sem),"m"(*count)
-		: "cc", "er1","er2", "er3");
-	return (int)count;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-static inline void up(struct semaphore * sem)
-{
-	register atomic_t *count asm("er0");
-
-	count = &(sem->count);
-	__asm__ __volatile__(
-		"stc ccr,r3l\n\t"
-		"orc #0x80,ccr\n\t"
-		"mov.l %2,er1\n\t"
-		"inc.l #1,er1\n\t"
-		"mov.l er1,%0\n\t"
-		"ldc r3l,ccr\n\t"
-		"sub.l er2,er2\n\t"
-		"cmp.l er2,er1\n\t"
-		"bgt 1f\n\t"
-		"mov.l %1,er0\n\t"
-		"jsr @___up\n"
-		"1:"
-		: "=m"(*count)
-		: "g"(sem),"m"(*count)
-		: "cc", "er1", "er2", "er3");
-}
-
-#endif /* __ASSEMBLY__ */
-
-#endif
+#include <linux/semaphore.h>
diff --git a/include/asm-ia64/acpi.h b/include/asm-ia64/acpi.h
index cd1cc39..fcfad32 100644
--- a/include/asm-ia64/acpi.h
+++ b/include/asm-ia64/acpi.h
@@ -35,6 +35,7 @@
 #include <linux/init.h>
 #include <linux/numa.h>
 #include <asm/system.h>
+#include <asm/numa.h>
 
 #define COMPILER_DEPENDENT_INT64	long
 #define COMPILER_DEPENDENT_UINT64	unsigned long
@@ -115,7 +116,11 @@
 extern void set_cpei_target_cpu(unsigned int cpu);
 extern unsigned int get_cpei_target_cpu(void);
 extern void prefill_possible_map(void);
+#ifdef CONFIG_ACPI_HOTPLUG_CPU
 extern int additional_cpus;
+#else
+#define additional_cpus 0
+#endif
 
 #ifdef CONFIG_ACPI_NUMA
 #if MAX_NUMNODES > 256
@@ -129,6 +134,34 @@
 
 #define acpi_unlazy_tlb(x)
 
+#ifdef CONFIG_ACPI_NUMA
+extern cpumask_t early_cpu_possible_map;
+#define for_each_possible_early_cpu(cpu)  \
+	for_each_cpu_mask((cpu), early_cpu_possible_map)
+
+static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus)
+{
+	int low_cpu, high_cpu;
+	int cpu;
+	int next_nid = 0;
+
+	low_cpu = cpus_weight(early_cpu_possible_map);
+
+	high_cpu = max(low_cpu, min_cpus);
+	high_cpu = min(high_cpu + reserve_cpus, NR_CPUS);
+
+	for (cpu = low_cpu; cpu < high_cpu; cpu++) {
+		cpu_set(cpu, early_cpu_possible_map);
+		if (node_cpuid[cpu].nid == NUMA_NO_NODE) {
+			node_cpuid[cpu].nid = next_nid;
+			next_nid++;
+			if (next_nid >= num_online_nodes())
+				next_nid = 0;
+		}
+	}
+}
+#endif /* CONFIG_ACPI_NUMA */
+
 #endif /*__KERNEL__*/
 
 #endif /*_ASM_ACPI_H*/
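
per_cpu_scan_finalize() above pads early_cpu_possible_map out to max(detected CPUs, min_cpus) plus reserve_cpus and round-robins the padding CPUs across the online nodes. A worked example with hypothetical numbers: if SRAT scanning found 4 CPUs on a 2-node box and the caller passes min_cpus = 2 and reserve_cpus = 4, then low_cpu = 4, high_cpu = min(max(4, 2) + 4, NR_CPUS) = 8, and CPUs 4-7 are added with nids 0, 1, 0, 1.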
diff --git a/include/asm-ia64/cputime.h b/include/asm-ia64/cputime.h
index 72400a7..f9abdec 100644
--- a/include/asm-ia64/cputime.h
+++ b/include/asm-ia64/cputime.h
@@ -1,6 +1,110 @@
+/*
+ * include/asm-ia64/cputime.h:
+ *		Definitions for measuring cputime on ia64 machines.
+ *
+ * Based on <asm-powerpc/cputime.h>.
+ *
+ * Copyright (C) 2007 FUJITSU LIMITED
+ * Copyright (C) 2007 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * If we have CONFIG_VIRT_CPU_ACCOUNTING, we measure cpu time in nsec.
+ * Otherwise we measure cpu time in jiffies using the generic definitions.
+ */
+
 #ifndef __IA64_CPUTIME_H
 #define __IA64_CPUTIME_H
 
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
 #include <asm-generic/cputime.h>
+#else
 
+#include <linux/time.h>
+#include <linux/jiffies.h>
+#include <asm/processor.h>
+
+typedef u64 cputime_t;
+typedef u64 cputime64_t;
+
+#define cputime_zero			((cputime_t)0)
+#define cputime_max			((~((cputime_t)0) >> 1) - 1)
+#define cputime_add(__a, __b)		((__a) +  (__b))
+#define cputime_sub(__a, __b)		((__a) -  (__b))
+#define cputime_div(__a, __n)		((__a) /  (__n))
+#define cputime_halve(__a)		((__a) >> 1)
+#define cputime_eq(__a, __b)		((__a) == (__b))
+#define cputime_gt(__a, __b)		((__a) >  (__b))
+#define cputime_ge(__a, __b)		((__a) >= (__b))
+#define cputime_lt(__a, __b)		((__a) <  (__b))
+#define cputime_le(__a, __b)		((__a) <= (__b))
+
+#define cputime64_zero			((cputime64_t)0)
+#define cputime64_add(__a, __b)		((__a) + (__b))
+#define cputime64_sub(__a, __b)		((__a) - (__b))
+#define cputime_to_cputime64(__ct)	(__ct)
+
+/*
+ * Convert cputime <-> jiffies (HZ)
+ */
+#define cputime_to_jiffies(__ct)	((__ct) / (NSEC_PER_SEC / HZ))
+#define jiffies_to_cputime(__jif)	((__jif) * (NSEC_PER_SEC / HZ))
+#define cputime64_to_jiffies64(__ct)	((__ct) / (NSEC_PER_SEC / HZ))
+#define jiffies64_to_cputime64(__jif)	((__jif) * (NSEC_PER_SEC / HZ))
+
+/*
+ * Convert cputime <-> milliseconds
+ */
+#define cputime_to_msecs(__ct)		((__ct) / NSEC_PER_MSEC)
+#define msecs_to_cputime(__msecs)	((__msecs) * NSEC_PER_MSEC)
+
+/*
+ * Convert cputime <-> seconds
+ */
+#define cputime_to_secs(__ct)		((__ct) / NSEC_PER_SEC)
+#define secs_to_cputime(__secs)		((__secs) * NSEC_PER_SEC)
+
+/*
+ * Convert cputime <-> timespec (nsec)
+ */
+static inline cputime_t timespec_to_cputime(const struct timespec *val)
+{
+	cputime_t ret = val->tv_sec * NSEC_PER_SEC;
+	return (ret + val->tv_nsec);
+}
+static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
+{
+	val->tv_sec  = ct / NSEC_PER_SEC;
+	val->tv_nsec = ct % NSEC_PER_SEC;
+}
+
+/*
+ * Convert cputime <-> timeval (msec)
+ */
+static inline cputime_t timeval_to_cputime(struct timeval *val)
+{
+	cputime_t ret = val->tv_sec * NSEC_PER_SEC;
+	return (ret + val->tv_usec * NSEC_PER_USEC);
+}
+static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
+{
+	val->tv_sec = ct / NSEC_PER_SEC;
+	val->tv_usec = (ct % NSEC_PER_SEC) / NSEC_PER_USEC;
+}
+
+/*
+ * Convert cputime <-> clock (USER_HZ)
+ */
+#define cputime_to_clock_t(__ct)	((__ct) / (NSEC_PER_SEC / USER_HZ))
+#define clock_t_to_cputime(__x)		((__x) * (NSEC_PER_SEC / USER_HZ))
+
+/*
+ * Convert cputime64 to clock.
+ */
+#define cputime64_to_clock_t(__ct)      cputime_to_clock_t((cputime_t)__ct)
+
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
 #endif /* __IA64_CPUTIME_H */
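
Under CONFIG_VIRT_CPU_ACCOUNTING a cputime_t is a plain nanosecond count, so every conversion above is a single multiply or divide. A minimal sketch of the round-trips (the values are illustrative):

	struct timespec ts = { .tv_sec = 1, .tv_nsec = 500000 };
	cputime_t ct = timespec_to_cputime(&ts);	/* 1000500000 nsec */
	unsigned long ms = cputime_to_msecs(ct);	/* 1000 msec, remainder truncated */
	clock_t ticks = cputime_to_clock_t(ct);		/* 1000500000 / (NSEC_PER_SEC / USER_HZ) */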
diff --git a/include/asm-ia64/elf.h b/include/asm-ia64/elf.h
index f8e83ec..5e0c1a6 100644
--- a/include/asm-ia64/elf.h
+++ b/include/asm-ia64/elf.h
@@ -26,6 +26,7 @@
 #define ELF_ARCH	EM_IA_64
 
 #define USE_ELF_CORE_DUMP
+#define CORE_DUMP_USE_REGSET
 
 /* Least-significant four bits of ELF header's e_flags are OS-specific.  The bits are
    interpreted as follows by Linux: */
@@ -154,6 +155,30 @@
 #define ELF_NGREG	128	/* we really need just 72 but let's leave some headroom... */
 #define ELF_NFPREG	128	/* f0 and f1 could be omitted, but so what... */
 
+/* elf_gregset_t register offsets */
+#define ELF_GR_0_OFFSET     0
+#define ELF_NAT_OFFSET     (32 * sizeof(elf_greg_t))
+#define ELF_PR_OFFSET      (33 * sizeof(elf_greg_t))
+#define ELF_BR_0_OFFSET    (34 * sizeof(elf_greg_t))
+#define ELF_CR_IIP_OFFSET  (42 * sizeof(elf_greg_t))
+#define ELF_CFM_OFFSET     (43 * sizeof(elf_greg_t))
+#define ELF_CR_IPSR_OFFSET (44 * sizeof(elf_greg_t))
+#define ELF_GR_OFFSET(i)   (ELF_GR_0_OFFSET + (i) * sizeof(elf_greg_t))
+#define ELF_BR_OFFSET(i)   (ELF_BR_0_OFFSET + (i) * sizeof(elf_greg_t))
+#define ELF_AR_RSC_OFFSET  (45 * sizeof(elf_greg_t))
+#define ELF_AR_BSP_OFFSET  (46 * sizeof(elf_greg_t))
+#define ELF_AR_BSPSTORE_OFFSET (47 * sizeof(elf_greg_t))
+#define ELF_AR_RNAT_OFFSET (48 * sizeof(elf_greg_t))
+#define ELF_AR_CCV_OFFSET  (49 * sizeof(elf_greg_t))
+#define ELF_AR_UNAT_OFFSET (50 * sizeof(elf_greg_t))
+#define ELF_AR_FPSR_OFFSET (51 * sizeof(elf_greg_t))
+#define ELF_AR_PFS_OFFSET  (52 * sizeof(elf_greg_t))
+#define ELF_AR_LC_OFFSET   (53 * sizeof(elf_greg_t))
+#define ELF_AR_EC_OFFSET   (54 * sizeof(elf_greg_t))
+#define ELF_AR_CSD_OFFSET  (55 * sizeof(elf_greg_t))
+#define ELF_AR_SSD_OFFSET  (56 * sizeof(elf_greg_t))
+#define ELF_AR_END_OFFSET  (57 * sizeof(elf_greg_t))
+
 typedef unsigned long elf_fpxregset_t;
 
 typedef unsigned long elf_greg_t;
@@ -183,12 +208,6 @@
 
 struct task_struct;
 
-extern int dump_task_regs(struct task_struct *, elf_gregset_t *);
-extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
-
-#define ELF_CORE_COPY_TASK_REGS(tsk, elf_gregs) dump_task_regs(tsk, elf_gregs)
-#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
-
 #define GATE_EHDR	((const struct elfhdr *) GATE_ADDR)
 
 /* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
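
These offsets pin down the elf_gregset_t layout consumed by the new regset-based core dump path: r0-r31 occupy slots 0-31 (each slot is sizeof(elf_greg_t) == 8 bytes), so for example ELF_GR_OFFSET(12) == 96, with the NaT bits, predicates, branch registers and application registers following from slot 32 upward.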
diff --git a/include/asm-ia64/ide.h b/include/asm-ia64/ide.h
index 1ccf238..8fa3f8c 100644
--- a/include/asm-ia64/ide.h
+++ b/include/asm-ia64/ide.h
@@ -16,8 +16,6 @@
 
 #include <linux/irq.h>
 
-#define IDE_ARCH_OBSOLETE_DEFAULTS
-
 static inline int ide_default_irq(unsigned long base)
 {
 	switch (base) {
@@ -46,14 +44,6 @@
 	}
 }
 
-#define ide_default_io_ctl(base)	((base) + 0x206) /* obsolete */
-
-#ifdef CONFIG_PCI
-#define ide_init_default_irq(base)	(0)
-#else
-#define ide_init_default_irq(base)	ide_default_irq(base)
-#endif
-
 #include <asm-generic/ide_iops.h>
 
 #endif /* __KERNEL__ */
diff --git a/include/asm-ia64/kprobes.h b/include/asm-ia64/kprobes.h
index d03bf9f..ef71b57 100644
--- a/include/asm-ia64/kprobes.h
+++ b/include/asm-ia64/kprobes.h
@@ -30,8 +30,12 @@
 #include <asm/break.h>
 
 #define __ARCH_WANT_KPROBES_INSN_SLOT
-#define MAX_INSN_SIZE   1
+#define MAX_INSN_SIZE   2	/* last half is for kprobe-booster */
 #define BREAK_INST	(long)(__IA64_BREAK_KPROBE << 6)
+#define NOP_M_INST	(long)(1<<27)
+#define BRL_INST(i1, i2) ((long)((0xcL << 37) |	/* brl */ \
+				(0x1L << 12) |	/* many */ \
+				(((i1) & 1) << 36) | ((i2) << 13))) /* imm */
 
 typedef union cmp_inst {
 	struct {
@@ -112,6 +116,7 @@
  #define INST_FLAG_FIX_RELATIVE_IP_ADDR		1
  #define INST_FLAG_FIX_BRANCH_REG		2
  #define INST_FLAG_BREAK_INST			4
+ #define INST_FLAG_BOOSTABLE			8
  	unsigned long inst_flag;
  	unsigned short target_br_reg;
 	unsigned short slot;
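
BRL_INST() above packs a long-branch (brl) instruction slot by hand: the 0xc major opcode lands in bits 37-40, the .many hint in bit 12, the immediate's sign bit i1 in bit 36 and the 20 remaining immediate bits i2 in bits 13-32. As an illustrative evaluation, BRL_INST(0, 1) == (0xcL << 37) | (1L << 13) | (1L << 12).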
diff --git a/include/asm-ia64/kregs.h b/include/asm-ia64/kregs.h
index 7e55a58..aefcdfe 100644
--- a/include/asm-ia64/kregs.h
+++ b/include/asm-ia64/kregs.h
@@ -31,6 +31,9 @@
 #define IA64_TR_PALCODE		1	/* itr1: maps PALcode as required by EFI */
 #define IA64_TR_CURRENT_STACK	1	/* dtr1: maps kernel's memory- & register-stacks */
 
+#define IA64_TR_ALLOC_BASE	2	/* itr&dtr: base of the dynamic TR resource */
+#define IA64_TR_ALLOC_MAX	32	/* max number of TRs available for dynamic use */
+
 /* Processor status register bits: */
 #define IA64_PSR_BE_BIT		1
 #define IA64_PSR_UP_BIT		2
diff --git a/include/asm-ia64/meminit.h b/include/asm-ia64/meminit.h
index f93308f..7245a57 100644
--- a/include/asm-ia64/meminit.h
+++ b/include/asm-ia64/meminit.h
@@ -35,6 +35,7 @@
 extern void reserve_memory (void);
 extern void find_initrd (void);
 extern int filter_rsvd_memory (unsigned long start, unsigned long end, void *arg);
+extern int filter_memory (unsigned long start, unsigned long end, void *arg);
 extern unsigned long efi_memmap_init(unsigned long *s, unsigned long *e);
 extern int find_max_min_low_pfn (unsigned long , unsigned long, void *);
 
@@ -56,7 +57,7 @@
 
 #define IGNORE_PFN0	1	/* XXX fix me: ignore pfn 0 until TLB miss handler is updated... */
 
-extern int register_active_ranges(u64 start, u64 end, void *arg);
+extern int register_active_ranges(u64 start, u64 len, int nid);
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 # define LARGE_GAP	0x40000000 /* Use virtual mem map if hole is > than this */
diff --git a/include/asm-ia64/numa.h b/include/asm-ia64/numa.h
index 6a8a27c..3499ff5 100644
--- a/include/asm-ia64/numa.h
+++ b/include/asm-ia64/numa.h
@@ -22,6 +22,8 @@
 
 #include <asm/mmzone.h>
 
+#define NUMA_NO_NODE	-1
+
 extern u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
 extern cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
 extern pg_data_t *pgdat_list[MAX_NUMNODES];
diff --git a/include/asm-ia64/pal.h b/include/asm-ia64/pal.h
index 8a695d3..67b0290 100644
--- a/include/asm-ia64/pal.h
+++ b/include/asm-ia64/pal.h
@@ -13,6 +13,7 @@
  * Copyright (C) 1999 VA Linux Systems
  * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
  * Copyright (C) 1999 Srinivasa Prasad Thirumalachar <sprasad@sprasad.engr.sgi.com>
+ * Copyright (C) 2008 Silicon Graphics, Inc. (SGI)
  *
  * 99/10/01	davidm	Make sure we pass zero for reserved parameters.
  * 00/03/07	davidm	Updated pal_cache_flush() to be in sync with PAL v2.6.
@@ -73,6 +74,8 @@
 #define PAL_CACHE_SHARED_INFO	43	/* returns information on caches shared by logical processor */
 #define PAL_GET_HW_POLICY	48	/* Get current hardware resource sharing policy */
 #define PAL_SET_HW_POLICY	49	/* Set current hardware resource sharing policy */
+#define PAL_VP_INFO		50	/* Information about virtual processor features */
+#define PAL_MC_HW_TRACKING	51	/* Hardware tracking status */
 
 #define PAL_COPY_PAL		256	/* relocate PAL procedures and PAL PMI */
 #define PAL_HALT_INFO		257	/* return the low power capabilities of processor */
@@ -504,7 +507,8 @@
 			wiv		: 1,	/* Way field valid */
 			reserved2	: 1,
 			dp		: 1,	/* Data poisoned on MBE */
-			reserved3	: 8,
+			reserved3	: 6,
+			hlth		: 2,	/* Health indicator */
 
 			index		: 20,	/* Cache line index */
 			reserved4	: 2,
@@ -542,7 +546,9 @@
 			dtc		: 1,	/* Fail in data TC */
 			itc		: 1,	/* Fail in inst. TC */
 			op		: 4,	/* Cache operation */
-			reserved3	: 30,
+			reserved3	: 6,
+			hlth		: 2,	/* Health indicator */
+			reserved4	: 22,
 
 			is		: 1,	/* instruction set (1 == ia32) */
 			iv		: 1,	/* instruction set field valid */
@@ -633,7 +639,8 @@
 			way		: 6,	/* Way of structure */
 			wv		: 1,	/* way valid */
 			xv		: 1,	/* index valid */
-			reserved1	: 8,
+			reserved1	: 6,
+			hlth		: 2,	/* Health indicator */
 			index		: 8,	/* Index or set of the uarch
 						 * structure that failed.
 						 */
@@ -1213,14 +1220,12 @@
 
 /* Return the machine check dynamic processor state */
 static inline s64
-ia64_pal_mc_dynamic_state (u64 offset, u64 *size, u64 *pds)
+ia64_pal_mc_dynamic_state (u64 info_type, u64 dy_buffer, u64 *size)
 {
 	struct ia64_pal_retval iprv;
-	PAL_CALL(iprv, PAL_MC_DYNAMIC_STATE, offset, 0, 0);
+	PAL_CALL(iprv, PAL_MC_DYNAMIC_STATE, info_type, dy_buffer, 0);
 	if (size)
 		*size = iprv.v0;
-	if (pds)
-		*pds = iprv.v1;
 	return iprv.status;
 }
 
@@ -1281,15 +1286,41 @@
 	return iprv.status;
 }
 
+typedef union pal_hw_tracking_u {
+	u64			pht_data;
+	struct {
+		u64		itc	:4,	/* Instruction cache tracking */
+				dct	:4,	/* Data cache tracking */
+				itt	:4,	/* Instruction TLB tracking */
+				ddt	:4,	/* Data TLB tracking */
+				reserved:48;
+	} pal_hw_tracking_s;
+} pal_hw_tracking_u_t;
+
+/*
+ * Hardware tracking status.
+ */
+static inline s64
+ia64_pal_mc_hw_tracking (u64 *status)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_MC_HW_TRACKING, 0, 0, 0);
+	if (status)
+		*status = iprv.v0;
+	return iprv.status;
+}
+
 /* Register a platform dependent location with PAL to which it can save
  * minimal processor state in the event of a machine check or initialization
  * event.
  */
 static inline s64
-ia64_pal_mc_register_mem (u64 physical_addr)
+ia64_pal_mc_register_mem (u64 physical_addr, u64 size, u64 *req_size)
 {
 	struct ia64_pal_retval iprv;
-	PAL_CALL(iprv, PAL_MC_REGISTER_MEM, physical_addr, 0, 0);
+	PAL_CALL(iprv, PAL_MC_REGISTER_MEM, physical_addr, size, 0);
+	if (req_size)
+		*req_size = iprv.v0;
 	return iprv.status;
 }
 
@@ -1631,6 +1662,29 @@
 	return iprv.status;
 }
 
+typedef union pal_vp_info_u {
+	u64			pvi_val;
+	struct {
+		u64		index:		48,	/* virtual feature set info */
+				vmm_id:		16;	/* feature set id */
+	} pal_vp_info_s;
+} pal_vp_info_u_t;
+
+/*
+ * Returns information about virtual processor features
+ */
+static inline s64
+ia64_pal_vp_info (u64 feature_set, u64 vp_buffer, u64 *vp_info, u64 *vmm_id)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_VP_INFO, feature_set, vp_buffer, 0);
+	if (vp_info)
+		*vp_info = iprv.v0;
+	if (vmm_id)
+		*vmm_id = iprv.v1;
+	return iprv.status;
+}
+
 typedef union pal_itr_valid_u {
 	u64			piv_val;
 	struct {
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
index e6204f1..ed70862 100644
--- a/include/asm-ia64/pgtable.h
+++ b/include/asm-ia64/pgtable.h
@@ -371,7 +371,7 @@
 /* The offset in the 1-level directory is given by the 3 region bits
    (61..63) and the level-1 bits.  */
 static inline pgd_t*
-pgd_offset (struct mm_struct *mm, unsigned long address)
+pgd_offset (const struct mm_struct *mm, unsigned long address)
 {
 	return mm->pgd + pgd_index(address);
 }
diff --git a/include/asm-ia64/sal.h b/include/asm-ia64/sal.h
index f4904db..89594b4 100644
--- a/include/asm-ia64/sal.h
+++ b/include/asm-ia64/sal.h
@@ -296,6 +296,9 @@
     EFI_GUID(0xe429faf8, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
 #define SAL_PLAT_BUS_ERR_SECT_GUID  \
     EFI_GUID(0xe429faf9, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define PROCESSOR_ABSTRACTION_LAYER_OVERWRITE_GUID \
+    EFI_GUID(0x6cb0a200, 0x893a, 0x11da, 0x96, 0xd2, 0x0, 0x10, 0x83, 0xff, \
+		0xca, 0x4d)
 
 #define MAX_CACHE_ERRORS	6
 #define MAX_TLB_ERRORS		6
@@ -879,6 +882,24 @@
 
 extern void ia64_sal_handler_init(void *entry_point, void *gpval);
 
+#define PALO_MAX_TLB_PURGES	0xFFFF
+#define PALO_SIG	"PALO"
+
+struct palo_table {
+	u8  signature[4];	/* Should be "PALO" */
+	u32 length;
+	u8  minor_revision;
+	u8  major_revision;
+	u8  checksum;
+	u8  reserved1[5];
+	u16 max_tlb_purges;
+	u8  reserved2[6];
+};
+
+#define NPTCG_FROM_PAL			0
+#define NPTCG_FROM_PALO			1
+#define NPTCG_FROM_KERNEL_PARAMETER	2
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_IA64_SAL_H */
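
The PALO table is located by its 4-byte signature and hands the kernel a platform cap on concurrent TLB purges. A minimal sketch of consuming it (the validation policy and the function name "example_check_palo" are assumptions; only the struct, PALO_SIG, PALO_MAX_TLB_PURGES, NPTCG_FROM_PALO and setup_ptcg_sem() — declared in the tlbflush.h hunk below — come from this patch):

	#include <linux/string.h>

	static void example_check_palo(struct palo_table *palo)
	{
		/* accept the table only if the signature and purge cap are sane */
		if (memcmp(palo->signature, PALO_SIG, 4) == 0 &&
		    palo->max_tlb_purges <= PALO_MAX_TLB_PURGES)
			setup_ptcg_sem(palo->max_tlb_purges, NPTCG_FROM_PALO);
	}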
diff --git a/include/asm-ia64/semaphore.h b/include/asm-ia64/semaphore.h
index d8393d1..d9b2034 100644
--- a/include/asm-ia64/semaphore.h
+++ b/include/asm-ia64/semaphore.h
@@ -1,99 +1 @@
-#ifndef _ASM_IA64_SEMAPHORE_H
-#define _ASM_IA64_SEMAPHORE_H
-
-/*
- * Copyright (C) 1998-2000 Hewlett-Packard Co
- * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
- */
-
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-#include <asm/atomic.h>
-
-struct semaphore {
-	atomic_t count;
-	int sleepers;
-	wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n)				\
-{									\
-	.count		= ATOMIC_INIT(n),				\
-	.sleepers	= 0,						\
-	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)	\
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count)					\
-	struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)
-
-#define DECLARE_MUTEX(name)		__DECLARE_SEMAPHORE_GENERIC(name, 1)
-
-static inline void
-sema_init (struct semaphore *sem, int val)
-{
-	*sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val);
-}
-
-static inline void
-init_MUTEX (struct semaphore *sem)
-{
-	sema_init(sem, 1);
-}
-
-static inline void
-init_MUTEX_LOCKED (struct semaphore *sem)
-{
-	sema_init(sem, 0);
-}
-
-extern void __down (struct semaphore * sem);
-extern int  __down_interruptible (struct semaphore * sem);
-extern int  __down_trylock (struct semaphore * sem);
-extern void __up (struct semaphore * sem);
-
-/*
- * Atomically decrement the semaphore's count.  If it goes negative,
- * block the calling thread in the TASK_UNINTERRUPTIBLE state.
- */
-static inline void
-down (struct semaphore *sem)
-{
-	might_sleep();
-	if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1)
-		__down(sem);
-}
-
-/*
- * Atomically decrement the semaphore's count.  If it goes negative,
- * block the calling thread in the TASK_INTERRUPTIBLE state.
- */
-static inline int
-down_interruptible (struct semaphore * sem)
-{
-	int ret = 0;
-
-	might_sleep();
-	if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1)
-		ret = __down_interruptible(sem);
-	return ret;
-}
-
-static inline int
-down_trylock (struct semaphore *sem)
-{
-	int ret = 0;
-
-	if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1)
-		ret = __down_trylock(sem);
-	return ret;
-}
-
-static inline void
-up (struct semaphore * sem)
-{
-	if (ia64_fetchadd(1, &sem->count.counter, rel) <= -1)
-		__up(sem);
-}
-
-#endif /* _ASM_IA64_SEMAPHORE_H */
+#include <linux/semaphore.h>
diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h
index 4fa733d..ec5f355 100644
--- a/include/asm-ia64/smp.h
+++ b/include/asm-ia64/smp.h
@@ -38,6 +38,9 @@
 	return lid.f.id << 8 | lid.f.eid;
 }
 
+extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
+				  void *info, int wait);
+
 #define hard_smp_processor_id()		ia64_get_lid()
 
 #ifdef CONFIG_SMP
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h
index 595112b..dff8128 100644
--- a/include/asm-ia64/system.h
+++ b/include/asm-ia64/system.h
@@ -210,6 +210,13 @@
 extern void ia64_save_extra (struct task_struct *task);
 extern void ia64_load_extra (struct task_struct *task);
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+extern void ia64_account_on_switch (struct task_struct *prev, struct task_struct *next);
+# define IA64_ACCOUNT_ON_SWITCH(p,n) ia64_account_on_switch(p,n)
+#else
+# define IA64_ACCOUNT_ON_SWITCH(p,n)
+#endif
+
 #ifdef CONFIG_PERFMON
   DECLARE_PER_CPU(unsigned long, pfm_syst_info);
 # define PERFMON_IS_SYSWIDE() (__get_cpu_var(pfm_syst_info) & 0x1)
@@ -222,6 +229,7 @@
 	 || IS_IA32_PROCESS(task_pt_regs(t)) || PERFMON_IS_SYSWIDE())
 
 #define __switch_to(prev,next,last) do {							 \
+	IA64_ACCOUNT_ON_SWITCH(prev, next);							 \
 	if (IA64_HAS_EXTRA_STATE(prev))								 \
 		ia64_save_extra(prev);								 \
 	if (IA64_HAS_EXTRA_STATE(next))								 \
@@ -266,6 +274,10 @@
 
 void default_idle(void);
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+extern void account_system_vtime(struct task_struct *);
+#endif
+
 #endif /* __KERNEL__ */
 
 #endif /* __ASSEMBLY__ */
diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h
index 93d83cb..6da8069 100644
--- a/include/asm-ia64/thread_info.h
+++ b/include/asm-ia64/thread_info.h
@@ -31,6 +31,12 @@
 	mm_segment_t addr_limit;	/* user-level address space limit */
 	int preempt_count;		/* 0=premptable, <0=BUG; will also serve as bh-counter */
 	struct restart_block restart_block;
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	__u64 ac_stamp;
+	__u64 ac_leave;
+	__u64 ac_stime;
+	__u64 ac_utime;
+#endif
 };
 
 #define THREAD_SIZE			KERNEL_STACK_SIZE
@@ -62,9 +68,17 @@
 #define task_stack_page(tsk)	((void *)(tsk))
 
 #define __HAVE_THREAD_FUNCTIONS
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#define setup_thread_stack(p, org)			\
+	*task_thread_info(p) = *task_thread_info(org);	\
+	task_thread_info(p)->ac_stime = 0;		\
+	task_thread_info(p)->ac_utime = 0;		\
+	task_thread_info(p)->task = (p);
+#else
 #define setup_thread_stack(p, org) \
 	*task_thread_info(p) = *task_thread_info(org); \
 	task_thread_info(p)->task = (p);
+#endif
 #define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET)
 
 #define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
diff --git a/include/asm-ia64/tlb.h b/include/asm-ia64/tlb.h
index 26edcb7..20d8a39 100644
--- a/include/asm-ia64/tlb.h
+++ b/include/asm-ia64/tlb.h
@@ -64,6 +64,32 @@
 	struct page 		*pages[FREE_PTE_NR];
 };
 
+struct ia64_tr_entry {
+	u64 ifa;
+	u64 itir;
+	u64 pte;
+	u64 rr;
+}; /* record of a TR entry */
+
+extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
+extern void ia64_ptr_entry(u64 target_mask, int slot);
+
+extern struct ia64_tr_entry __per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX];
+
+/*
+ * Region register macros.
+ */
+#define RR_TO_VE(val)   (((val) >> 0) & 0x0000000000000001)
+#define RR_VE(val)	(((val) & 0x0000000000000001) << 0)
+#define RR_VE_MASK	0x0000000000000001L
+#define RR_VE_SHIFT	0
+#define RR_TO_PS(val)	(((val) >> 2) & 0x000000000000003f)
+#define RR_PS(val)	(((val) & 0x000000000000003f) << 2)
+#define RR_PS_MASK	0x00000000000000fcL
+#define RR_PS_SHIFT	2
+#define RR_RID_MASK	0x00000000ffffff00L
+#define RR_TO_RID(val)	(((val) >> 8) & 0xffffff)
+
 /* Users of the generic TLB shootdown code must declare this storage space. */
 DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
 
diff --git a/include/asm-ia64/tlbflush.h b/include/asm-ia64/tlbflush.h
index 7774a1c..3be25df 100644
--- a/include/asm-ia64/tlbflush.h
+++ b/include/asm-ia64/tlbflush.h
@@ -17,6 +17,7 @@
  * Now for some TLB flushing routines.  This is the kind of stuff that
  * can be very expensive, so try to avoid them whenever possible.
  */
+extern void setup_ptcg_sem(int max_purges, int from_palo);
 
 /*
  * Flush everything (kernel mapping may also have changed due to
diff --git a/include/asm-m32r/ide.h b/include/asm-m32r/ide.h
index 5d2044e..1e7f647 100644
--- a/include/asm-m32r/ide.h
+++ b/include/asm-m32r/ide.h
@@ -23,8 +23,6 @@
 # endif
 #endif
 
-#define IDE_ARCH_OBSOLETE_DEFAULTS
-
 static __inline__ int ide_default_irq(unsigned long base)
 {
 	switch (base) {
@@ -65,14 +63,6 @@
 	}
 }
 
-#define ide_default_io_ctl(base)	((base) + 0x206) /* obsolete */
-
-#ifdef CONFIG_BLK_DEV_IDEPCI
-#define ide_init_default_irq(base)     (0)
-#else
-#define ide_init_default_irq(base)     ide_default_irq(base)
-#endif
-
 #include <asm-generic/ide_iops.h>
 
 #endif /* __KERNEL__ */
diff --git a/include/asm-m32r/semaphore.h b/include/asm-m32r/semaphore.h
index b5bf95a..d9b2034 100644
--- a/include/asm-m32r/semaphore.h
+++ b/include/asm-m32r/semaphore.h
@@ -1,144 +1 @@
-#ifndef _ASM_M32R_SEMAPHORE_H
-#define _ASM_M32R_SEMAPHORE_H
-
-#include <linux/linkage.h>
-
-#ifdef __KERNEL__
-
-/*
- * SMP- and interrupt-safe semaphores..
- *
- * Copyright (C) 1996  Linus Torvalds
- * Copyright (C) 2004, 2006  Hirokazu Takata <takata at linux-m32r.org>
- */
-
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-#include <asm/assembler.h>
-#include <asm/system.h>
-#include <asm/atomic.h>
-
-struct semaphore {
-	atomic_t count;
-	int sleepers;
-	wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n)				\
-{									\
-	.count		= ATOMIC_INIT(n),				\
-	.sleepers	= 0,						\
-	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)	\
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
-	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
-/*
- *	*sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
- *
- * i'd rather use the more flexible initialization above, but sadly
- * GCC 2.7.2.3 emits a bogus warning. EGCS doesnt. Oh well.
- */
-	atomic_set(&sem->count, val);
-	sem->sleepers = 0;
-	init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
-	sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
-	sema_init(sem, 0);
-}
-
-asmlinkage void __down_failed(void /* special register calling convention */);
-asmlinkage int  __down_failed_interruptible(void  /* params in registers */);
-asmlinkage int  __down_failed_trylock(void  /* params in registers */);
-asmlinkage void __up_wakeup(void /* special register calling convention */);
-
-asmlinkage void __down(struct semaphore * sem);
-asmlinkage int  __down_interruptible(struct semaphore * sem);
-asmlinkage int  __down_trylock(struct semaphore * sem);
-asmlinkage void __up(struct semaphore * sem);
-
-/*
- * Atomically decrement the semaphore's count.  If it goes negative,
- * block the calling thread in the TASK_UNINTERRUPTIBLE state.
- */
-static inline void down(struct semaphore * sem)
-{
-	might_sleep();
-	if (unlikely(atomic_dec_return(&sem->count) < 0))
-		__down(sem);
-}
-
-/*
- * Interruptible try to acquire a semaphore.  If we obtained
- * it, return zero.  If we were interrupted, returns -EINTR
- */
-static inline int down_interruptible(struct semaphore * sem)
-{
-	int result = 0;
-
-	might_sleep();
-	if (unlikely(atomic_dec_return(&sem->count) < 0))
-		result = __down_interruptible(sem);
-
-	return result;
-}
-
-/*
- * Non-blockingly attempt to down() a semaphore.
- * Returns zero if we acquired it
- */
-static inline int down_trylock(struct semaphore * sem)
-{
-	unsigned long flags;
-	long count;
-	int result = 0;
-
-	local_irq_save(flags);
-	__asm__ __volatile__ (
-		"# down_trylock			\n\t"
-		DCACHE_CLEAR("%0", "r4", "%1")
-		M32R_LOCK" %0, @%1;		\n\t"
-		"addi	%0, #-1;		\n\t"
-		M32R_UNLOCK" %0, @%1;		\n\t"
-		: "=&r" (count)
-		: "r" (&sem->count)
-		: "memory"
-#ifdef CONFIG_CHIP_M32700_TS1
-		, "r4"
-#endif	/* CONFIG_CHIP_M32700_TS1 */
-	);
-	local_irq_restore(flags);
-
-	if (unlikely(count < 0))
-		result = __down_trylock(sem);
-
-	return result;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-static inline void up(struct semaphore * sem)
-{
-	if (unlikely(atomic_inc_return(&sem->count) <= 0))
-		__up(sem);
-}
-
-#endif  /* __KERNEL__ */
-
-#endif  /* _ASM_M32R_SEMAPHORE_H */
+#include <linux/semaphore.h>
diff --git a/include/asm-m68k/semaphore-helper.h b/include/asm-m68k/semaphore-helper.h
deleted file mode 100644
index eef30ba..0000000
--- a/include/asm-m68k/semaphore-helper.h
+++ /dev/null
@@ -1,142 +0,0 @@
-#ifndef _M68K_SEMAPHORE_HELPER_H
-#define _M68K_SEMAPHORE_HELPER_H
-
-/*
- * SMP- and interrupt-safe semaphores helper functions.
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- * m68k version by Andreas Schwab
- */
-
-#include <linux/errno.h>
-
-/*
- * These two _must_ execute atomically wrt each other.
- */
-static inline void wake_one_more(struct semaphore * sem)
-{
-	atomic_inc(&sem->waking);
-}
-
-#ifndef CONFIG_RMW_INSNS
-extern spinlock_t semaphore_wake_lock;
-#endif
-
-static inline int waking_non_zero(struct semaphore *sem)
-{
-	int ret;
-#ifndef CONFIG_RMW_INSNS
-	unsigned long flags;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	ret = 0;
-	if (atomic_read(&sem->waking) > 0) {
-		atomic_dec(&sem->waking);
-		ret = 1;
-	}
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-#else
-	int tmp1, tmp2;
-
-	__asm__ __volatile__
-	  ("1:	movel	%1,%2\n"
-	   "    jle	2f\n"
-	   "	subql	#1,%2\n"
-	   "	casl	%1,%2,%3\n"
-	   "	jne	1b\n"
-	   "	moveq	#1,%0\n"
-	   "2:"
-	   : "=d" (ret), "=d" (tmp1), "=d" (tmp2)
-	   : "m" (sem->waking), "0" (0), "1" (sem->waking));
-#endif
-
-	return ret;
-}
-
-/*
- * waking_non_zero_interruptible:
- *	1	got the lock
- *	0	go to sleep
- *	-EINTR	interrupted
- */
-static inline int waking_non_zero_interruptible(struct semaphore *sem,
-						struct task_struct *tsk)
-{
-	int ret;
-#ifndef CONFIG_RMW_INSNS
-	unsigned long flags;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	ret = 0;
-	if (atomic_read(&sem->waking) > 0) {
-		atomic_dec(&sem->waking);
-		ret = 1;
-	} else if (signal_pending(tsk)) {
-		atomic_inc(&sem->count);
-		ret = -EINTR;
-	}
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-#else
-	int tmp1, tmp2;
-
-	__asm__ __volatile__
-	  ("1:	movel	%1,%2\n"
-	   "	jle	2f\n"
-	   "	subql	#1,%2\n"
-	   "	casl	%1,%2,%3\n"
-	   "	jne	1b\n"
-	   "	moveq	#1,%0\n"
-	   "	jra	%a4\n"
-	   "2:"
-	   : "=d" (ret), "=d" (tmp1), "=d" (tmp2)
-	   : "m" (sem->waking), "i" (&&next), "0" (0), "1" (sem->waking));
-	if (signal_pending(tsk)) {
-		atomic_inc(&sem->count);
-		ret = -EINTR;
-	}
-next:
-#endif
-
-	return ret;
-}
-
-/*
- * waking_non_zero_trylock:
- *	1	failed to lock
- *	0	got the lock
- */
-static inline int waking_non_zero_trylock(struct semaphore *sem)
-{
-	int ret;
-#ifndef CONFIG_RMW_INSNS
-	unsigned long flags;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	ret = 1;
-	if (atomic_read(&sem->waking) > 0) {
-		atomic_dec(&sem->waking);
-		ret = 0;
-	} else
-		atomic_inc(&sem->count);
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-#else
-	int tmp1, tmp2;
-
-	__asm__ __volatile__
-	  ("1:	movel	%1,%2\n"
-	   "    jle	2f\n"
-	   "	subql	#1,%2\n"
-	   "	casl	%1,%2,%3\n"
-	   "	jne	1b\n"
-	   "	moveq	#0,%0\n"
-	   "2:"
-	   : "=d" (ret), "=d" (tmp1), "=d" (tmp2)
-	   : "m" (sem->waking), "0" (1), "1" (sem->waking));
-	if (ret)
-		atomic_inc(&sem->count);
-#endif
-	return ret;
-}
-
-#endif
diff --git a/include/asm-m68k/semaphore.h b/include/asm-m68k/semaphore.h
index 64d6b11..d9b2034 100644
--- a/include/asm-m68k/semaphore.h
+++ b/include/asm-m68k/semaphore.h
@@ -1,163 +1 @@
-#ifndef _M68K_SEMAPHORE_H
-#define _M68K_SEMAPHORE_H
-
-#define RW_LOCK_BIAS		 0x01000000
-
-#ifndef __ASSEMBLY__
-
-#include <linux/linkage.h>
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-#include <linux/rwsem.h>
-#include <linux/stringify.h>
-
-#include <asm/system.h>
-#include <asm/atomic.h>
-
-/*
- * Interrupt-safe semaphores..
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- * m68k version by Andreas Schwab
- */
-
-
-struct semaphore {
-	atomic_t count;
-	atomic_t waking;
-	wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n)				\
-{									\
-	.count		= ATOMIC_INIT(n),				\
-	.waking		= ATOMIC_INIT(0),				\
-	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)	\
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
-	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init(struct semaphore *sem, int val)
-{
-	*sem = (struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
-	sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
-	sema_init(sem, 0);
-}
-
-asmlinkage void __down_failed(void /* special register calling convention */);
-asmlinkage int  __down_failed_interruptible(void  /* params in registers */);
-asmlinkage int  __down_failed_trylock(void  /* params in registers */);
-asmlinkage void __up_wakeup(void /* special register calling convention */);
-
-asmlinkage void __down(struct semaphore * sem);
-asmlinkage int  __down_interruptible(struct semaphore * sem);
-asmlinkage int  __down_trylock(struct semaphore * sem);
-asmlinkage void __up(struct semaphore * sem);
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "down_failed" is a special asm handler that calls the C
- * routine that actually waits. See arch/m68k/lib/semaphore.S
- */
-static inline void down(struct semaphore *sem)
-{
-	register struct semaphore *sem1 __asm__ ("%a1") = sem;
-
-	might_sleep();
-	__asm__ __volatile__(
-		"| atomic down operation\n\t"
-		"subql #1,%0@\n\t"
-		"jmi 2f\n\t"
-		"1:\n"
-		LOCK_SECTION_START(".even\n\t")
-		"2:\tpea 1b\n\t"
-		"jbra __down_failed\n"
-		LOCK_SECTION_END
-		: /* no outputs */
-		: "a" (sem1)
-		: "memory");
-}
-
-static inline int down_interruptible(struct semaphore *sem)
-{
-	register struct semaphore *sem1 __asm__ ("%a1") = sem;
-	register int result __asm__ ("%d0");
-
-	might_sleep();
-	__asm__ __volatile__(
-		"| atomic interruptible down operation\n\t"
-		"subql #1,%1@\n\t"
-		"jmi 2f\n\t"
-		"clrl %0\n"
-		"1:\n"
-		LOCK_SECTION_START(".even\n\t")
-		"2:\tpea 1b\n\t"
-		"jbra __down_failed_interruptible\n"
-		LOCK_SECTION_END
-		: "=d" (result)
-		: "a" (sem1)
-		: "memory");
-	return result;
-}
-
-static inline int down_trylock(struct semaphore *sem)
-{
-	register struct semaphore *sem1 __asm__ ("%a1") = sem;
-	register int result __asm__ ("%d0");
-
-	__asm__ __volatile__(
-		"| atomic down trylock operation\n\t"
-		"subql #1,%1@\n\t"
-		"jmi 2f\n\t"
-		"clrl %0\n"
-		"1:\n"
-		LOCK_SECTION_START(".even\n\t")
-		"2:\tpea 1b\n\t"
-		"jbra __down_failed_trylock\n"
-		LOCK_SECTION_END
-		: "=d" (result)
-		: "a" (sem1)
-		: "memory");
-	return result;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-static inline void up(struct semaphore *sem)
-{
-	register struct semaphore *sem1 __asm__ ("%a1") = sem;
-
-	__asm__ __volatile__(
-		"| atomic up operation\n\t"
-		"addql #1,%0@\n\t"
-		"jle 2f\n"
-		"1:\n"
-		LOCK_SECTION_START(".even\n\t")
-		"2:\t"
-		"pea 1b\n\t"
-		"jbra __up_wakeup\n"
-		LOCK_SECTION_END
-		: /* no outputs */
-		: "a" (sem1)
-		: "memory");
-}
-
-#endif /* __ASSEMBLY__ */
-
-#endif
+#include <linux/semaphore.h>
diff --git a/include/asm-m68knommu/semaphore-helper.h b/include/asm-m68knommu/semaphore-helper.h
deleted file mode 100644
index 43da7bc..0000000
--- a/include/asm-m68knommu/semaphore-helper.h
+++ /dev/null
@@ -1,82 +0,0 @@
-#ifndef _M68K_SEMAPHORE_HELPER_H
-#define _M68K_SEMAPHORE_HELPER_H
-
-/*
- * SMP- and interrupt-safe semaphores helper functions.
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- * m68k version by Andreas Schwab
- */
-
-
-/*
- * These two _must_ execute atomically wrt each other.
- */
-static inline void wake_one_more(struct semaphore * sem)
-{
-	atomic_inc(&sem->waking);
-}
-
-static inline int waking_non_zero(struct semaphore *sem)
-{
-	int ret;
-	unsigned long flags;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	ret = 0;
-	if (atomic_read(&sem->waking) > 0) {
-		atomic_dec(&sem->waking);
-		ret = 1;
-	}
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-	return ret;
-}
-
-/*
- * waking_non_zero_interruptible:
- *	1	got the lock
- *	0	go to sleep
- *	-EINTR	interrupted
- */
-static inline int waking_non_zero_interruptible(struct semaphore *sem,
-						struct task_struct *tsk)
-{
-	int ret;
-	unsigned long flags;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	ret = 0;
-	if (atomic_read(&sem->waking) > 0) {
-		atomic_dec(&sem->waking);
-		ret = 1;
-	} else if (signal_pending(tsk)) {
-		atomic_inc(&sem->count);
-		ret = -EINTR;
-	}
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-	return ret;
-}
-
-/*
- * waking_non_zero_trylock:
- *	1	failed to lock
- *	0	got the lock
- */
-static inline int waking_non_zero_trylock(struct semaphore *sem)
-{
-	int ret;
-	unsigned long flags;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	ret = 1;
-	if (atomic_read(&sem->waking) > 0) {
-		atomic_dec(&sem->waking);
-		ret = 0;
-	} else
-		atomic_inc(&sem->count);
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-	return ret;
-}
-
-#endif
diff --git a/include/asm-m68knommu/semaphore.h b/include/asm-m68knommu/semaphore.h
index 5779eb6..d9b2034 100644
--- a/include/asm-m68knommu/semaphore.h
+++ b/include/asm-m68knommu/semaphore.h
@@ -1,153 +1 @@
-#ifndef _M68K_SEMAPHORE_H
-#define _M68K_SEMAPHORE_H
-
-#define RW_LOCK_BIAS		 0x01000000
-
-#ifndef __ASSEMBLY__
-
-#include <linux/linkage.h>
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-#include <linux/rwsem.h>
-
-#include <asm/system.h>
-#include <asm/atomic.h>
-
-/*
- * Interrupt-safe semaphores..
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- * m68k version by Andreas Schwab
- */
-
-
-struct semaphore {
-	atomic_t count;
-	atomic_t waking;
-	wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n)				\
-{									\
-	.count		= ATOMIC_INIT(n),				\
-	.waking		= ATOMIC_INIT(0),				\
-	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)	\
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
-	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
-	*sem = (struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
-	sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
-	sema_init(sem, 0);
-}
-
-asmlinkage void __down_failed(void /* special register calling convention */);
-asmlinkage int  __down_failed_interruptible(void  /* params in registers */);
-asmlinkage int  __down_failed_trylock(void  /* params in registers */);
-asmlinkage void __up_wakeup(void /* special register calling convention */);
-
-asmlinkage void __down(struct semaphore * sem);
-asmlinkage int  __down_interruptible(struct semaphore * sem);
-asmlinkage int  __down_trylock(struct semaphore * sem);
-asmlinkage void __up(struct semaphore * sem);
-
-extern spinlock_t semaphore_wake_lock;
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "down_failed" is a special asm handler that calls the C
- * routine that actually waits. See arch/m68k/lib/semaphore.S
- */
-static inline void down(struct semaphore * sem)
-{
-	might_sleep();
-	__asm__ __volatile__(
-		"| atomic down operation\n\t"
-		"movel	%0, %%a1\n\t"
-		"lea	%%pc@(1f), %%a0\n\t"
-		"subql	#1, %%a1@\n\t"
-		"jmi __down_failed\n"
-		"1:"
-		: /* no outputs */
-		: "g" (sem)
-		: "cc", "%a0", "%a1", "memory");
-}
-
-static inline int down_interruptible(struct semaphore * sem)
-{
-	int ret;
-
-	might_sleep();
-	__asm__ __volatile__(
-		"| atomic down operation\n\t"
-		"movel	%1, %%a1\n\t"
-		"lea	%%pc@(1f), %%a0\n\t"
-		"subql	#1, %%a1@\n\t"
-		"jmi __down_failed_interruptible\n\t"
-		"clrl	%%d0\n"
-		"1: movel	%%d0, %0\n"
-		: "=d" (ret)
-		: "g" (sem)
-		: "cc", "%d0", "%a0", "%a1", "memory");
-	return(ret);
-}
-
-static inline int down_trylock(struct semaphore * sem)
-{
-	register struct semaphore *sem1 __asm__ ("%a1") = sem;
-	register int result __asm__ ("%d0");
-
-	__asm__ __volatile__(
-		"| atomic down trylock operation\n\t"
-		"subql #1,%1@\n\t"
-		"jmi 2f\n\t"
-		"clrl %0\n"
-		"1:\n"
-		".section .text.lock,\"ax\"\n"
-		".even\n"
-		"2:\tpea 1b\n\t"
-		"jbra __down_failed_trylock\n"
-		".previous"
-		: "=d" (result)
-		: "a" (sem1)
-		: "memory");
-	return result;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-static inline void up(struct semaphore * sem)
-{
-	__asm__ __volatile__(
-		"| atomic up operation\n\t"
-		"movel	%0, %%a1\n\t"
-		"lea	%%pc@(1f), %%a0\n\t"
-		"addql	#1, %%a1@\n\t"
-		"jle __up_wakeup\n"
-		"1:"
-		: /* no outputs */
-		: "g" (sem)
-		: "cc", "%a0", "%a1", "memory");
-}
-
-#endif /* __ASSEMBLY__ */
-
-#endif
+#include <linux/semaphore.h>
diff --git a/include/asm-mips/mach-generic/ide.h b/include/asm-mips/mach-generic/ide.h
index 4ec2b93..0f6c251 100644
--- a/include/asm-mips/mach-generic/ide.h
+++ b/include/asm-mips/mach-generic/ide.h
@@ -27,8 +27,6 @@
 # endif
 #endif
 
-#define IDE_ARCH_OBSOLETE_DEFAULTS
-
 static __inline__ int ide_probe_legacy(void)
 {
 #ifdef CONFIG_PCI
@@ -98,14 +96,6 @@
 	}
 }
 
-#define ide_default_io_ctl(base)	((base) + 0x206) /* obsolete */
-
-#ifdef CONFIG_BLK_DEV_IDEPCI
-#define ide_init_default_irq(base)	(0)
-#else
-#define ide_init_default_irq(base)	ide_default_irq(base)
-#endif
-
 /* MIPS port and memory-mapped I/O string operations.  */
 static inline void __ide_flush_prologue(void)
 {
diff --git a/include/asm-mips/semaphore.h b/include/asm-mips/semaphore.h
index fdf8042..d9b2034 100644
--- a/include/asm-mips/semaphore.h
+++ b/include/asm-mips/semaphore.h
@@ -1,108 +1 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1996  Linus Torvalds
- * Copyright (C) 1998, 99, 2000, 01, 04  Ralf Baechle
- * Copyright (C) 1999, 2000, 01  Silicon Graphics, Inc.
- * Copyright (C) 2000, 01 MIPS Technologies, Inc.
- *
- * In all honesty, little of the old MIPS code is left - the PPC64 variant was
- * just looking nice and portable so I ripped it.  Credits to whoever wrote
- * it.
- */
-#ifndef __ASM_SEMAPHORE_H
-#define __ASM_SEMAPHORE_H
-
-/*
- * Remove spinlock-based RW semaphores; RW semaphore definitions are
- * now in rwsem.h and we use the generic lib/rwsem.c implementation.
- * Rework semaphores to use atomic_dec_if_positive.
- * -- Paul Mackerras (paulus@samba.org)
- */
-
-#ifdef __KERNEL__
-
-#include <asm/atomic.h>
-#include <asm/system.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-struct semaphore {
-	/*
-	 * Note that any negative value of count is equivalent to 0,
-	 * but additionally indicates that some process(es) might be
-	 * sleeping on `wait'.
-	 */
-	atomic_t count;
-	wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n)				\
-{									\
-	.count		= ATOMIC_INIT(n),				\
-	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)	\
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name, count) \
-	struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)
-
-#define DECLARE_MUTEX(name)		__DECLARE_SEMAPHORE_GENERIC(name, 1)
-
-static inline void sema_init(struct semaphore *sem, int val)
-{
-	atomic_set(&sem->count, val);
-	init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX(struct semaphore *sem)
-{
-	sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED(struct semaphore *sem)
-{
-	sema_init(sem, 0);
-}
-
-extern void __down(struct semaphore * sem);
-extern int  __down_interruptible(struct semaphore * sem);
-extern void __up(struct semaphore * sem);
-
-static inline void down(struct semaphore * sem)
-{
-	might_sleep();
-
-	/*
-	 * Try to get the semaphore, take the slow path if we fail.
-	 */
-	if (unlikely(atomic_dec_return(&sem->count) < 0))
-		__down(sem);
-}
-
-static inline int down_interruptible(struct semaphore * sem)
-{
-	int ret = 0;
-
-	might_sleep();
-
-	if (unlikely(atomic_dec_return(&sem->count) < 0))
-		ret = __down_interruptible(sem);
-	return ret;
-}
-
-static inline int down_trylock(struct semaphore * sem)
-{
-	return atomic_dec_if_positive(&sem->count) < 0;
-}
-
-static inline void up(struct semaphore * sem)
-{
-	if (unlikely(atomic_inc_return(&sem->count) <= 0))
-		__up(sem);
-}
-
-#endif /* __KERNEL__ */
-
-#endif /* __ASM_SEMAPHORE_H */
+#include <linux/semaphore.h>
diff --git a/include/asm-mn10300/semaphore.h b/include/asm-mn10300/semaphore.h
index 5a9e1ad..d9b2034 100644
--- a/include/asm-mn10300/semaphore.h
+++ b/include/asm-mn10300/semaphore.h
@@ -1,169 +1 @@
-/* MN10300 Semaphores
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-#ifndef _ASM_SEMAPHORE_H
-#define _ASM_SEMAPHORE_H
-
-#ifndef __ASSEMBLY__
-
-#include <linux/linkage.h>
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-#include <linux/rwsem.h>
-
-#define SEMAPHORE_DEBUG		0
-
-/*
- * the semaphore definition
- * - if count is >0 then there are tokens available on the semaphore for down
- *   to collect
- * - if count is <=0 then there are no spare tokens, and anyone that wants one
- *   must wait
- * - if wait_list is not empty, then there are processes waiting for the
- *   semaphore
- */
-struct semaphore {
-	atomic_t		count;		/* it's not really atomic, it's
-						 * just that certain modules
-						 * expect to be able to access
-						 * it directly */
-	spinlock_t		wait_lock;
-	struct list_head	wait_list;
-#if SEMAPHORE_DEBUG
-	unsigned		__magic;
-#endif
-};
-
-#if SEMAPHORE_DEBUG
-# define __SEM_DEBUG_INIT(name) , (long)&(name).__magic
-#else
-# define __SEM_DEBUG_INIT(name)
-#endif
-
-
-#define __SEMAPHORE_INITIALIZER(name, init_count)			\
-{									\
-	.count		= ATOMIC_INIT(init_count),			\
-	.wait_lock	= __SPIN_LOCK_UNLOCKED((name).wait_lock),	\
-	.wait_list	= LIST_HEAD_INIT((name).wait_list)		\
-	__SEM_DEBUG_INIT(name)						\
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
-	struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
-#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name, 0)
-
-static inline void sema_init(struct semaphore *sem, int val)
-{
-	*sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val);
-}
-
-static inline void init_MUTEX(struct semaphore *sem)
-{
-	sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED(struct semaphore *sem)
-{
-	sema_init(sem, 0);
-}
-
-extern void __down(struct semaphore *sem, unsigned long flags);
-extern int  __down_interruptible(struct semaphore *sem, unsigned long flags);
-extern void __up(struct semaphore *sem);
-
-static inline void down(struct semaphore *sem)
-{
-	unsigned long flags;
-	int count;
-
-#if SEMAPHORE_DEBUG
-	CHECK_MAGIC(sem->__magic);
-#endif
-
-	spin_lock_irqsave(&sem->wait_lock, flags);
-	count = atomic_read(&sem->count);
-	if (likely(count > 0)) {
-		atomic_set(&sem->count, count - 1);
-		spin_unlock_irqrestore(&sem->wait_lock, flags);
-	} else {
-		__down(sem, flags);
-	}
-}
-
-static inline int down_interruptible(struct semaphore *sem)
-{
-	unsigned long flags;
-	int count, ret = 0;
-
-#if SEMAPHORE_DEBUG
-	CHECK_MAGIC(sem->__magic);
-#endif
-
-	spin_lock_irqsave(&sem->wait_lock, flags);
-	count = atomic_read(&sem->count);
-	if (likely(count > 0)) {
-		atomic_set(&sem->count, count - 1);
-		spin_unlock_irqrestore(&sem->wait_lock, flags);
-	} else {
-		ret = __down_interruptible(sem, flags);
-	}
-	return ret;
-}
-
-/*
- * non-blockingly attempt to down() a semaphore.
- * - returns zero if we acquired it
- */
-static inline int down_trylock(struct semaphore *sem)
-{
-	unsigned long flags;
-	int count, success = 0;
-
-#if SEMAPHORE_DEBUG
-	CHECK_MAGIC(sem->__magic);
-#endif
-
-	spin_lock_irqsave(&sem->wait_lock, flags);
-	count = atomic_read(&sem->count);
-	if (likely(count > 0)) {
-		atomic_set(&sem->count, count - 1);
-		success = 1;
-	}
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
-	return !success;
-}
-
-static inline void up(struct semaphore *sem)
-{
-	unsigned long flags;
-
-#if SEMAPHORE_DEBUG
-	CHECK_MAGIC(sem->__magic);
-#endif
-
-	spin_lock_irqsave(&sem->wait_lock, flags);
-	if (!list_empty(&sem->wait_list))
-		__up(sem);
-	else
-		atomic_set(&sem->count, atomic_read(&sem->count) + 1);
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
-}
-
-static inline int sem_getcount(struct semaphore *sem)
-{
-	return atomic_read(&sem->count);
-}
-
-#endif /* __ASSEMBLY__ */
-
-#endif
+#include <linux/semaphore.h>
diff --git a/include/asm-parisc/ide.h b/include/asm-parisc/ide.h
index be8760f..db0c944 100644
--- a/include/asm-parisc/ide.h
+++ b/include/asm-parisc/ide.h
@@ -17,8 +17,6 @@
 #define MAX_HWIFS	2
 #endif
 
-#define ide_default_io_ctl(base)	((base) + 0x206) /* obsolete */
-
 #define ide_request_irq(irq,hand,flg,dev,id)	request_irq((irq),(hand),(flg),(dev),(id))
 #define ide_free_irq(irq,dev_id)		free_irq((irq), (dev_id))
 #define ide_request_region(from,extent,name)	request_region((from), (extent), (name))
diff --git a/include/asm-parisc/semaphore-helper.h b/include/asm-parisc/semaphore-helper.h
deleted file mode 100644
index 387f7c1..0000000
--- a/include/asm-parisc/semaphore-helper.h
+++ /dev/null
@@ -1,89 +0,0 @@
-#ifndef _ASM_PARISC_SEMAPHORE_HELPER_H
-#define _ASM_PARISC_SEMAPHORE_HELPER_H
-
-/*
- * SMP- and interrupt-safe semaphores helper functions.
- *
- * (C) Copyright 1996 Linus Torvalds
- * (C) Copyright 1999 Andrea Arcangeli
- */
-
-/*
- * These two _must_ execute atomically wrt each other.
- *
- * This is trivially done with load_locked/store_cond,
- * which we have.  Let the rest of the losers suck eggs.
- */
-static __inline__ void wake_one_more(struct semaphore * sem)
-{
-	atomic_inc((atomic_t *)&sem->waking);
-}
-
-static __inline__ int waking_non_zero(struct semaphore *sem)
-{
-	unsigned long flags;
-	int ret = 0;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	if (sem->waking > 0) {
-		sem->waking--;
-		ret = 1;
-	}
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-	return ret;
-}
-
-/*
- * waking_non_zero_interruptible:
- *	1	got the lock
- *	0	go to sleep
- *	-EINTR	interrupted
- *
- * We must undo the sem->count down_interruptible() increment while we are
- * protected by the spinlock in order to make atomic this atomic_inc() with the
- * atomic_read() in wake_one_more(), otherwise we can race. -arca
- */
-static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
-						struct task_struct *tsk)
-{
-	unsigned long flags;
-	int ret = 0;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	if (sem->waking > 0) {
-		sem->waking--;
-		ret = 1;
-	} else if (signal_pending(tsk)) {
-		atomic_inc(&sem->count);
-		ret = -EINTR;
-	}
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-	return ret;
-}
-
-/*
- * waking_non_zero_trylock:
- *	1	failed to lock
- *	0	got the lock
- *
- * We must undo the sem->count down_trylock() increment while we are
- * protected by the spinlock in order to make atomic this atomic_inc() with the
- * atomic_read() in wake_one_more(), otherwise we can race. -arca
- */
-static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
-{
-	unsigned long flags;
-	int ret = 1;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	if (sem->waking <= 0)
-		atomic_inc(&sem->count);
-	else {
-		sem->waking--;
-		ret = 0;
-	}
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-	return ret;
-}
-
-#endif /* _ASM_PARISC_SEMAPHORE_HELPER_H */
diff --git a/include/asm-parisc/semaphore.h b/include/asm-parisc/semaphore.h
index a16271c..d9b2034 100644
--- a/include/asm-parisc/semaphore.h
+++ b/include/asm-parisc/semaphore.h
@@ -1,145 +1 @@
-/*    SMP- and interrupt-safe semaphores.
- *    PA-RISC version by Matthew Wilcox
- *
- *    Linux/PA-RISC Project (http://www.parisc-linux.org/)
- *    Copyright (C) 1996 Linus Torvalds
- *    Copyright (C) 1999-2001 Matthew Wilcox < willy at debian d0T org >
- *    Copyright (C) 2000 Grant Grundler < grundler a debian org >
- *
- *    This program is free software; you can redistribute it and/or modify
- *    it under the terms of the GNU General Public License as published by
- *    the Free Software Foundation; either version 2 of the License, or
- *    (at your option) any later version.
- *
- *    This program is distributed in the hope that it will be useful,
- *    but WITHOUT ANY WARRANTY; without even the implied warranty of
- *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *    GNU General Public License for more details.
- *
- *    You should have received a copy of the GNU General Public License
- *    along with this program; if not, write to the Free Software
- *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-
-#ifndef _ASM_PARISC_SEMAPHORE_H
-#define _ASM_PARISC_SEMAPHORE_H
-
-#include <linux/spinlock.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-#include <asm/system.h>
-
-/*
- * The `count' is initialised to the number of people who are allowed to
- * take the lock.  (Normally we want a mutex, so this is `1').  if
- * `count' is positive, the lock can be taken.  if it's 0, no-one is
- * waiting on it.  if it's -1, at least one task is waiting.
- */
-struct semaphore {
-	spinlock_t	sentry;
-	int		count;
-	wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n)				\
-{									\
-	.sentry		= SPIN_LOCK_UNLOCKED,				\
-	.count		= n,						\
-	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)	\
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
-	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
-	*sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
-	sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
-	sema_init(sem, 0);
-}
-
-static inline int sem_getcount(struct semaphore *sem)
-{
-	return sem->count;
-}
-
-asmlinkage void __down(struct semaphore * sem);
-asmlinkage int  __down_interruptible(struct semaphore * sem);
-asmlinkage void __up(struct semaphore * sem);
-
-/* Semaphores can be `tried' from irq context.  So we have to disable
- * interrupts while we're messing with the semaphore.  Sorry.
- */
-
-static inline void down(struct semaphore * sem)
-{
-	might_sleep();
-	spin_lock_irq(&sem->sentry);
-	if (sem->count > 0) {
-		sem->count--;
-	} else {
-		__down(sem);
-	}
-	spin_unlock_irq(&sem->sentry);
-}
-
-static inline int down_interruptible(struct semaphore * sem)
-{
-	int ret = 0;
-	might_sleep();
-	spin_lock_irq(&sem->sentry);
-	if (sem->count > 0) {
-		sem->count--;
-	} else {
-		ret = __down_interruptible(sem);
-	}
-	spin_unlock_irq(&sem->sentry);
-	return ret;
-}
-
-/*
- * down_trylock returns 0 on success, 1 if we failed to get the lock.
- * May not sleep, but must preserve irq state
- */
-static inline int down_trylock(struct semaphore * sem)
-{
-	unsigned long flags;
-	int count;
-
-	spin_lock_irqsave(&sem->sentry, flags);
-	count = sem->count - 1;
-	if (count >= 0)
-		sem->count = count;
-	spin_unlock_irqrestore(&sem->sentry, flags);
-	return (count < 0);
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- */
-static inline void up(struct semaphore * sem)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&sem->sentry, flags);
-	if (sem->count < 0) {
-		__up(sem);
-	} else {
-		sem->count++;
-	}
-	spin_unlock_irqrestore(&sem->sentry, flags);
-}
-
-#endif /* _ASM_PARISC_SEMAPHORE_H */
+#include <linux/semaphore.h>
diff --git a/include/asm-powerpc/ide.h b/include/asm-powerpc/ide.h
index 6d50310..3d90bf7 100644
--- a/include/asm-powerpc/ide.h
+++ b/include/asm-powerpc/ide.h
@@ -31,39 +31,48 @@
 #include <linux/hdreg.h>
 #include <linux/ioport.h>
 
-struct ide_machdep_calls {
-        int         (*default_irq)(unsigned long base);
-        unsigned long (*default_io_base)(int index);
-        void        (*ide_init_hwif)(hw_regs_t *hw,
-                                     unsigned long data_port,
-                                     unsigned long ctrl_port,
-                                     int *irq);
-};
-
-extern struct ide_machdep_calls ppc_ide_md;
-
-#define IDE_ARCH_OBSOLETE_DEFAULTS
-
+/* FIXME: use ide_platform host driver */
 static __inline__ int ide_default_irq(unsigned long base)
 {
-	if (ppc_ide_md.default_irq)
-		return ppc_ide_md.default_irq(base);
+#ifdef CONFIG_PPLUS
+	switch (base) {
+	case 0x1f0:	return 14;
+	case 0x170:	return 15;
+	}
+#endif
+#ifdef CONFIG_PPC_PREP
+	switch (base) {
+	case 0x1f0:	return 13;
+	case 0x170:	return 13;
+	case 0x1e8:	return 11;
+	case 0x168:	return 10;
+	case 0xfff0:	return 14;	/* MCP(N)750 ide0 */
+	case 0xffe0:	return 15;	/* MCP(N)750 ide1 */
+	}
+#endif
 	return 0;
 }
 
+/* FIXME: use ide_platform host driver */
 static __inline__ unsigned long ide_default_io_base(int index)
 {
-	if (ppc_ide_md.default_io_base)
-		return ppc_ide_md.default_io_base(index);
+#ifdef CONFIG_PPLUS
+	switch (index) {
+	case 0:		return 0x1f0;
+	case 1:		return 0x170;
+	}
+#endif
+#ifdef CONFIG_PPC_PREP
+	switch (index) {
+	case 0:		return 0x1f0;
+	case 1:		return 0x170;
+	case 2:		return 0x1e8;
+	case 3:		return 0x168;
+	}
+#endif
 	return 0;
 }
 
-#ifdef CONFIG_PCI
-#define ide_init_default_irq(base)	(0)
-#else
-#define ide_init_default_irq(base)	ide_default_irq(base)
-#endif
-
 #ifdef CONFIG_BLK_DEV_MPC8xx_IDE
 #define IDE_ARCH_ACK_INTR  1
 #define ide_ack_intr(hwif) ((hwif)->ack_intr ? (hwif)->ack_intr(hwif) : 1)
@@ -71,8 +80,6 @@
 
 #endif /* __powerpc64__ */
 
-#define ide_default_io_ctl(base)	((base) + 0x206) /* obsolete */
-
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_POWERPC_IDE_H */
diff --git a/include/asm-powerpc/mediabay.h b/include/asm-powerpc/mediabay.h
index de83fe1..df111c3 100644
--- a/include/asm-powerpc/mediabay.h
+++ b/include/asm-powerpc/mediabay.h
@@ -22,10 +22,14 @@
 /* Number of bays in the machine or 0 */
 extern int media_bay_count;
 
+#ifdef CONFIG_BLK_DEV_IDE_PMAC
+#include <linux/ide.h>
+
 int check_media_bay_by_base(unsigned long base, int what);
 /* called by IDE PMAC host driver to register IDE controller for media bay */
 int media_bay_set_ide_infos(struct device_node *which_bay, unsigned long base,
-			    int irq, int index);
+			    int irq, ide_hwif_t *hwif);
+#endif
 
 #endif /* __KERNEL__ */
 #endif /* _PPC_MEDIABAY_H */
diff --git a/include/asm-powerpc/semaphore.h b/include/asm-powerpc/semaphore.h
index 48dd32e..d9b2034 100644
--- a/include/asm-powerpc/semaphore.h
+++ b/include/asm-powerpc/semaphore.h
@@ -1,94 +1 @@
-#ifndef _ASM_POWERPC_SEMAPHORE_H
-#define _ASM_POWERPC_SEMAPHORE_H
-
-/*
- * Remove spinlock-based RW semaphores; RW semaphore definitions are
- * now in rwsem.h and we use the generic lib/rwsem.c implementation.
- * Rework semaphores to use atomic_dec_if_positive.
- * -- Paul Mackerras (paulus@samba.org)
- */
-
-#ifdef __KERNEL__
-
-#include <asm/atomic.h>
-#include <asm/system.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-struct semaphore {
-	/*
-	 * Note that any negative value of count is equivalent to 0,
-	 * but additionally indicates that some process(es) might be
-	 * sleeping on `wait'.
-	 */
-	atomic_t count;
-	wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n)				\
-{									\
-	.count		= ATOMIC_INIT(n),				\
-	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)	\
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name, count) \
-	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name)		__DECLARE_SEMAPHORE_GENERIC(name, 1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
-	atomic_set(&sem->count, val);
-	init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
-	sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
-	sema_init(sem, 0);
-}
-
-extern void __down(struct semaphore * sem);
-extern int  __down_interruptible(struct semaphore * sem);
-extern void __up(struct semaphore * sem);
-
-static inline void down(struct semaphore * sem)
-{
-	might_sleep();
-
-	/*
-	 * Try to get the semaphore, take the slow path if we fail.
-	 */
-	if (unlikely(atomic_dec_return(&sem->count) < 0))
-		__down(sem);
-}
-
-static inline int down_interruptible(struct semaphore * sem)
-{
-	int ret = 0;
-
-	might_sleep();
-
-	if (unlikely(atomic_dec_return(&sem->count) < 0))
-		ret = __down_interruptible(sem);
-	return ret;
-}
-
-static inline int down_trylock(struct semaphore * sem)
-{
-	return atomic_dec_if_positive(&sem->count) < 0;
-}
-
-static inline void up(struct semaphore * sem)
-{
-	if (unlikely(atomic_inc_return(&sem->count) <= 0))
-		__up(sem);
-}
-
-#endif /* __KERNEL__ */
-
-#endif /* _ASM_POWERPC_SEMAPHORE_H */
+#include <linux/semaphore.h>
diff --git a/include/asm-s390/cio.h b/include/asm-s390/cio.h
index 123b557..0818ecd 100644
--- a/include/asm-s390/cio.h
+++ b/include/asm-s390/cio.h
@@ -397,6 +397,10 @@
 
 extern int cio_get_iplinfo(struct cio_iplinfo *iplinfo);
 
+/* Functions from drivers/s390/cio/chsc.c */
+int chsc_sstpc(void *page, unsigned int op, u16 ctrl);
+int chsc_sstpi(void *page, void *result, size_t size);
+
 #endif
 
 #endif
diff --git a/include/asm-s390/cpu.h b/include/asm-s390/cpu.h
index 352dde1..e5a6a9b 100644
--- a/include/asm-s390/cpu.h
+++ b/include/asm-s390/cpu.h
@@ -22,4 +22,12 @@
 
 DECLARE_PER_CPU(struct s390_idle_data, s390_idle);
 
+void s390_idle_leave(void);
+
+static inline void s390_idle_check(void)
+{
+	if ((&__get_cpu_var(s390_idle))->in_idle)
+		s390_idle_leave();
+}
+
 #endif /* _ASM_S390_CPU_H_ */
diff --git a/include/asm-s390/debug.h b/include/asm-s390/debug.h
index c00dd2b..335baf4 100644
--- a/include/asm-s390/debug.h
+++ b/include/asm-s390/debug.h
@@ -73,6 +73,7 @@
 	struct dentry* debugfs_entries[DEBUG_MAX_VIEWS];
 	struct debug_view* views[DEBUG_MAX_VIEWS];	
 	char name[DEBUG_MAX_NAME_LEN];
+	mode_t mode;
 } debug_info_t;
 
 typedef int (debug_header_proc_t) (debug_info_t* id,
@@ -122,6 +123,10 @@
 debug_info_t* debug_register(char* name, int pages, int nr_areas,
                              int buf_size);
 
+debug_info_t *debug_register_mode(char *name, int pages, int nr_areas,
+				  int buf_size, mode_t mode, uid_t uid,
+				  gid_t gid);
+
 void debug_unregister(debug_info_t* id);
 
 void debug_set_level(debug_info_t* id, int new_level);
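debug_register_mode() extends debug_register() with explicit permissions and ownership for the debug entries. A hedged caller sketch, using the signature declared above; the "mydrv" name and sizing are hypothetical, as is the assumption that mode/uid/gid apply to the created debugfs files:

	#include <linux/stat.h>

	static debug_info_t *dbf;	/* hypothetical driver debug log */

	static int example_setup_debug(void)
	{
		/* 4 pages, 1 area, 16-byte entries; readable by root only */
		dbf = debug_register_mode("mydrv", 4, 1, 16, S_IRUSR, 0, 0);
		return dbf ? 0 : -ENOMEM;
	}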
diff --git a/include/asm-s390/extmem.h b/include/asm-s390/extmem.h
index c8802c9..33837d7 100644
--- a/include/asm-s390/extmem.h
+++ b/include/asm-s390/extmem.h
@@ -22,11 +22,12 @@
 #define SEGMENT_SHARED 0
 #define SEGMENT_EXCLUSIVE 1
 
-extern int segment_load (char *name,int segtype,unsigned long *addr,unsigned long *length);
-extern void segment_unload(char *name);
-extern void segment_save(char *name);
-extern int segment_type (char* name);
-extern int segment_modify_shared (char *name, int do_nonshared);
+int segment_load (char *name, int segtype, unsigned long *addr, unsigned long *length);
+void segment_unload(char *name);
+void segment_save(char *name);
+int segment_type (char* name);
+int segment_modify_shared (char *name, int do_nonshared);
+void segment_warning(int rc, char *seg_name);
 
 #endif
 #endif
diff --git a/include/asm-s390/hardirq.h b/include/asm-s390/hardirq.h
index 31beb18..4b7cb96 100644
--- a/include/asm-s390/hardirq.h
+++ b/include/asm-s390/hardirq.h
@@ -32,6 +32,6 @@
 
 #define HARDIRQ_BITS	8
 
-extern void account_ticks(u64 time);
+void clock_comparator_work(void);
 
 #endif /* __ASM_HARDIRQ_H */
diff --git a/include/asm-s390/lowcore.h b/include/asm-s390/lowcore.h
index 801a6fd..5de3efb 100644
--- a/include/asm-s390/lowcore.h
+++ b/include/asm-s390/lowcore.h
@@ -56,6 +56,8 @@
 #define __LC_IO_INT_WORD                0x0C0
 #define __LC_MCCK_CODE                  0x0E8
 
+#define __LC_LAST_BREAK 		0x110
+
 #define __LC_RETURN_PSW                 0x200
 
 #define __LC_SAVE_AREA                  0xC00
@@ -80,7 +82,6 @@
 #define __LC_CPUID                      0xC60
 #define __LC_CPUADDR                    0xC68
 #define __LC_IPLDEV                     0xC7C
-#define __LC_JIFFY_TIMER		0xC80
 #define __LC_CURRENT			0xC90
 #define __LC_INT_CLOCK			0xC98
 #else /* __s390x__ */
@@ -103,7 +104,6 @@
 #define __LC_CPUID			0xD80
 #define __LC_CPUADDR			0xD88
 #define __LC_IPLDEV                     0xDB8
-#define __LC_JIFFY_TIMER		0xDC0
 #define __LC_CURRENT			0xDD8
 #define __LC_INT_CLOCK			0xDE8
 #endif /* __s390x__ */
@@ -276,7 +276,7 @@
 	/* entry.S sensitive area end */
 
         /* SMP info area: defined by DJB */
-        __u64        jiffy_timer;              /* 0xc80 */
+	__u64	     clock_comparator;	       /* 0xc80 */
 	__u32        ext_call_fast;            /* 0xc88 */
 	__u32        percpu_offset;            /* 0xc8c */
 	__u32        current_task;	       /* 0xc90 */
@@ -368,11 +368,12 @@
 	/* entry.S sensitive area end */
 
         /* SMP info area: defined by DJB */
-        __u64        jiffy_timer;              /* 0xdc0 */
+	__u64	     clock_comparator;	       /* 0xdc0 */
 	__u64        ext_call_fast;            /* 0xdc8 */
 	__u64        percpu_offset;            /* 0xdd0 */
 	__u64        current_task;	       /* 0xdd8 */
-	__u64        softirq_pending;	       /* 0xde0 */
+	__u32	     softirq_pending;	       /* 0xde0 */
+	__u32	     pad_0x0de4;	       /* 0xde4 */
 	__u64        int_clock;                /* 0xde8 */
         __u8         pad12[0xe00-0xdf0];       /* 0xdf0 */
 
diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h
index 51d8891..8eaf343 100644
--- a/include/asm-s390/processor.h
+++ b/include/asm-s390/processor.h
@@ -175,6 +175,13 @@
 extern void show_registers(struct pt_regs *regs);
 extern void show_code(struct pt_regs *regs);
 extern void show_trace(struct task_struct *task, unsigned long *sp);
+#ifdef CONFIG_64BIT
+extern void show_last_breaking_event(struct pt_regs *regs);
+#else
+static inline void show_last_breaking_event(struct pt_regs *regs)
+{
+}
+#endif
 
 unsigned long get_wchan(struct task_struct *p);
 #define task_pt_regs(tsk) ((struct pt_regs *) \
diff --git a/include/asm-s390/semaphore.h b/include/asm-s390/semaphore.h
index 0e7001a..d9b2034 100644
--- a/include/asm-s390/semaphore.h
+++ b/include/asm-s390/semaphore.h
@@ -1,107 +1 @@
-/*
- *  include/asm-s390/semaphore.h
- *
- *  S390 version
- *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
- *
- *  Derived from "include/asm-i386/semaphore.h"
- *    (C) Copyright 1996 Linus Torvalds
- */
-
-#ifndef _S390_SEMAPHORE_H
-#define _S390_SEMAPHORE_H
-
-#include <asm/system.h>
-#include <asm/atomic.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-struct semaphore {
-	/*
-	 * Note that any negative value of count is equivalent to 0,
-	 * but additionally indicates that some process(es) might be
-	 * sleeping on `wait'.
-	 */
-	atomic_t count;
-	wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name,count) \
-	{ ATOMIC_INIT(count), __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) }
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
-	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
-	atomic_set(&sem->count, val);
-	init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
-	sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
-	sema_init(sem, 0);
-}
-
-asmlinkage void __down(struct semaphore * sem);
-asmlinkage int  __down_interruptible(struct semaphore * sem);
-asmlinkage int  __down_trylock(struct semaphore * sem);
-asmlinkage void __up(struct semaphore * sem);
-
-static inline void down(struct semaphore * sem)
-{
-	might_sleep();
-	if (atomic_dec_return(&sem->count) < 0)
-		__down(sem);
-}
-
-static inline int down_interruptible(struct semaphore * sem)
-{
-	int ret = 0;
-
-	might_sleep();
-	if (atomic_dec_return(&sem->count) < 0)
-		ret = __down_interruptible(sem);
-	return ret;
-}
-
-static inline int down_trylock(struct semaphore * sem)
-{
-	int old_val, new_val;
-
-	/*
-	 * This inline assembly atomically implements the equivalent
-	 * to the following C code:
-	 *   old_val = sem->count.counter;
-	 *   if ((new_val = old_val) > 0)
-	 *       sem->count.counter = --new_val;
-	 * In the ppc code this is called atomic_dec_if_positive.
-	 */
-	asm volatile(
-		"	l	%0,0(%3)\n"
-		"0:	ltr	%1,%0\n"
-		"	jle	1f\n"
-		"	ahi	%1,-1\n"
-		"	cs	%0,%1,0(%3)\n"
-		"	jl	0b\n"
-		"1:"
-		: "=&d" (old_val), "=&d" (new_val), "=m" (sem->count.counter)
-		: "a" (&sem->count.counter), "m" (sem->count.counter)
-		: "cc", "memory");
-	return old_val <= 0;
-}
-
-static inline void up(struct semaphore * sem)
-{
-	if (atomic_inc_return(&sem->count) <= 0)
-		__up(sem);
-}
-
-#endif
+#include <linux/semaphore.h>
diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h
index c7b7432..6f3821a 100644
--- a/include/asm-s390/smp.h
+++ b/include/asm-s390/smp.h
@@ -90,6 +90,9 @@
 extern void cpu_die (void) __attribute__ ((noreturn));
 extern int __cpu_up (unsigned int cpu);
 
+extern struct mutex smp_cpu_state_mutex;
+extern int smp_cpu_polarization[];
+
 extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
 	void *info, int wait);
 #endif
diff --git a/include/asm-s390/sysinfo.h b/include/asm-s390/sysinfo.h
new file mode 100644
index 0000000..abe10ae
--- /dev/null
+++ b/include/asm-s390/sysinfo.h
@@ -0,0 +1,116 @@
+/*
+ * Definitions for the store system information (stsi) instruction
+ *
+ * Copyright IBM Corp. 2001,2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ *    Author(s): Ulrich Weigand <weigand@de.ibm.com>
+ *		 Christian Borntraeger <borntraeger@de.ibm.com>
+ */
+
+struct sysinfo_1_1_1 {
+	char reserved_0[32];
+	char manufacturer[16];
+	char type[4];
+	char reserved_1[12];
+	char model_capacity[16];
+	char sequence[16];
+	char plant[4];
+	char model[16];
+	char model_perm_cap[16];
+	char model_temp_cap[16];
+	char model_cap_rating[4];
+	char model_perm_cap_rating[4];
+	char model_temp_cap_rating[4];
+};
+
+struct sysinfo_1_2_1 {
+	char reserved_0[80];
+	char sequence[16];
+	char plant[4];
+	char reserved_1[2];
+	unsigned short cpu_address;
+};
+
+struct sysinfo_1_2_2 {
+	char format;
+	char reserved_0[1];
+	unsigned short acc_offset;
+	char reserved_1[24];
+	unsigned int secondary_capability;
+	unsigned int capability;
+	unsigned short cpus_total;
+	unsigned short cpus_configured;
+	unsigned short cpus_standby;
+	unsigned short cpus_reserved;
+	unsigned short adjustment[0];
+};
+
+struct sysinfo_1_2_2_extension {
+	unsigned int alt_capability;
+	unsigned short alt_adjustment[0];
+};
+
+struct sysinfo_2_2_1 {
+	char reserved_0[80];
+	char sequence[16];
+	char plant[4];
+	unsigned short cpu_id;
+	unsigned short cpu_address;
+};
+
+struct sysinfo_2_2_2 {
+	char reserved_0[32];
+	unsigned short lpar_number;
+	char reserved_1;
+	unsigned char characteristics;
+	unsigned short cpus_total;
+	unsigned short cpus_configured;
+	unsigned short cpus_standby;
+	unsigned short cpus_reserved;
+	char name[8];
+	unsigned int caf;
+	char reserved_2[16];
+	unsigned short cpus_dedicated;
+	unsigned short cpus_shared;
+};
+
+#define LPAR_CHAR_DEDICATED	(1 << 7)
+#define LPAR_CHAR_SHARED	(1 << 6)
+#define LPAR_CHAR_LIMITED	(1 << 5)
+
+struct sysinfo_3_2_2 {
+	char reserved_0[31];
+	unsigned char count;
+	struct {
+		char reserved_0[4];
+		unsigned short cpus_total;
+		unsigned short cpus_configured;
+		unsigned short cpus_standby;
+		unsigned short cpus_reserved;
+		char name[8];
+		unsigned int caf;
+		char cpi[16];
+		char reserved_1[24];
+
+	} vm[8];
+};
+
+static inline int stsi(void *sysinfo, int fc, int sel1, int sel2)
+{
+	register int r0 asm("0") = (fc << 28) | sel1;
+	register int r1 asm("1") = sel2;
+
+	asm volatile(
+		"   stsi 0(%2)\n"
+		"0: jz   2f\n"
+		"1: lhi  %0,%3\n"
+		"2:\n"
+		EX_TABLE(0b, 1b)
+		: "+d" (r0) : "d" (r1), "a" (sysinfo), "K" (-ENOSYS)
+		: "cc", "memory");
+	return r0;
+}
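stsi() above issues the STSI instruction with the function code and first selector in register 0 and the second selector in register 1, and returns -ENOSYS when the requested level is unavailable. A hypothetical caller for the basic-machine block (fc=1, sel1=1, sel2=1); the helper name, page allocation, and error handling are illustrative only:

	static int example_read_machine_info(void)
	{
		struct sysinfo_1_1_1 *info;

		info = (struct sysinfo_1_1_1 *)get_zeroed_page(GFP_KERNEL);
		if (!info)
			return -ENOMEM;
		if (stsi(info, 1, 1, 1) != -ENOSYS) {
			/* raw, unconverted character fields are now valid:
			 * info->manufacturer, info->type, info->model_capacity, ... */
		}
		free_page((unsigned long)info);
		return 0;
	}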
diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h
index 15aba30..92098df 100644
--- a/include/asm-s390/system.h
+++ b/include/asm-s390/system.h
@@ -406,6 +406,8 @@
 #define local_mcck_enable()  __set_psw_mask(psw_kernel_bits)
 #define local_mcck_disable() __set_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK)
 
+int stfle(unsigned long long *list, int doublewords);
+
 #ifdef CONFIG_SMP
 
 extern void smp_ctl_set_bit(int cr, int bit);
diff --git a/include/asm-s390/timex.h b/include/asm-s390/timex.h
index 98229db..d744c3d 100644
--- a/include/asm-s390/timex.h
+++ b/include/asm-s390/timex.h
@@ -62,16 +62,18 @@
 	return clk;
 }
 
-static inline void get_clock_extended(void *dest)
+static inline unsigned long long get_clock_xt(void)
 {
-	typedef struct { unsigned long long clk[2]; } __clock_t;
+	unsigned char clk[16];
 
 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
-	asm volatile("stcke %0" : "=Q" (*((__clock_t *)dest)) : : "cc");
+	asm volatile("stcke %0" : "=Q" (clk) : : "cc");
 #else /* __GNUC__ */
-	asm volatile("stcke 0(%1)" : "=m" (*((__clock_t *)dest))
-				   : "a" ((__clock_t *)dest) : "cc");
+	asm volatile("stcke 0(%1)" : "=m" (clk)
+				   : "a" (clk) : "cc");
 #endif /* __GNUC__ */
+
+	return *((unsigned long long *)&clk[1]);
 }
 
 static inline cycles_t get_cycles(void)
@@ -81,5 +83,6 @@
 
 int get_sync_clock(unsigned long long *clock);
 void init_cpu_timer(void);
+unsigned long long monotonic_clock(void);
 
 #endif
diff --git a/include/asm-s390/tlbflush.h b/include/asm-s390/tlbflush.h
index 35fb4f9..9e57a93 100644
--- a/include/asm-s390/tlbflush.h
+++ b/include/asm-s390/tlbflush.h
@@ -13,12 +13,14 @@
 	asm volatile("ptlb" : : : "memory");
 }
 
+#ifdef CONFIG_SMP
 /*
  * Flush all tlb entries on all cpus.
  */
+void smp_ptlb_all(void);
+
 static inline void __tlb_flush_global(void)
 {
-	extern void smp_ptlb_all(void);
 	register unsigned long reg2 asm("2");
 	register unsigned long reg3 asm("3");
 	register unsigned long reg4 asm("4");
@@ -39,6 +41,25 @@
 		: : "d" (reg2), "d" (reg3), "d" (reg4), "m" (dummy) : "cc" );
 }
 
+static inline void __tlb_flush_full(struct mm_struct *mm)
+{
+	cpumask_t local_cpumask;
+
+	preempt_disable();
+	/*
+	 * If the process only ran on the local cpu, do a local flush.
+	 */
+	local_cpumask = cpumask_of_cpu(smp_processor_id());
+	if (cpus_equal(mm->cpu_vm_mask, local_cpumask))
+		__tlb_flush_local();
+	else
+		__tlb_flush_global();
+	preempt_enable();
+}
+#else
+#define __tlb_flush_full(mm)	__tlb_flush_local()
+#endif
+
 /*
  * Flush all tlb entries of a page table on all cpus.
  */
@@ -51,8 +72,6 @@
 
 static inline void __tlb_flush_mm(struct mm_struct * mm)
 {
-	cpumask_t local_cpumask;
-
 	if (unlikely(cpus_empty(mm->cpu_vm_mask)))
 		return;
 	/*
@@ -69,16 +88,7 @@
 				 mm->context.asce_bits);
 		return;
 	}
-	preempt_disable();
-	/*
-	 * If the process only ran on the local cpu, do a local flush.
-	 */
-	local_cpumask = cpumask_of_cpu(smp_processor_id());
-	if (cpus_equal(mm->cpu_vm_mask, local_cpumask))
-		__tlb_flush_local();
-	else
-		__tlb_flush_global();
-	preempt_enable();
+	__tlb_flush_full(mm);
 }
 
 static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
diff --git a/include/asm-s390/topology.h b/include/asm-s390/topology.h
index 613aa64..8e97b06 100644
--- a/include/asm-s390/topology.h
+++ b/include/asm-s390/topology.h
@@ -1,6 +1,29 @@
 #ifndef _ASM_S390_TOPOLOGY_H
 #define _ASM_S390_TOPOLOGY_H
 
+#include <linux/cpumask.h>
+
+#define mc_capable()	(1)
+
+cpumask_t cpu_coregroup_map(unsigned int cpu);
+
+int topology_set_cpu_management(int fc);
+void topology_schedule_update(void);
+
+#define POLARIZATION_UNKNWN	(-1)
+#define POLARIZATION_HRZ	(0)
+#define POLARIZATION_VL		(1)
+#define POLARIZATION_VM		(2)
+#define POLARIZATION_VH		(3)
+
+#ifdef CONFIG_SMP
+void s390_init_cpu_topology(void);
+#else
+static inline void s390_init_cpu_topology(void)
+{
+}
+#endif
+
 #include <asm-generic/topology.h>
 
 #endif /* _ASM_S390_TOPOLOGY_H */
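topology_set_cpu_management() selects the polarization scheme reported through the POLARIZATION_* values above. A hedged sketch, under the assumption that fc=1 requests vertical and fc=0 horizontal polarization, with 0 returned on success:

	/* Assumption: 1 = vertical polarization, 0 = horizontal. */
	if (topology_set_cpu_management(1) == 0)
		topology_schedule_update();	/* pick up the new layout */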
diff --git a/include/asm-sh/ide.h b/include/asm-sh/ide.h
index 9f8e914..58e0bdd 100644
--- a/include/asm-sh/ide.h
+++ b/include/asm-sh/ide.h
@@ -14,9 +14,6 @@
 
 #ifdef __KERNEL__
 
-
-#define ide_default_io_ctl(base)	(0)
-
 #include <asm-generic/ide_iops.h>
 
 #endif /* __KERNEL__ */
diff --git a/include/asm-sh/semaphore-helper.h b/include/asm-sh/semaphore-helper.h
deleted file mode 100644
index bd8230c..0000000
--- a/include/asm-sh/semaphore-helper.h
+++ /dev/null
@@ -1,89 +0,0 @@
-#ifndef __ASM_SH_SEMAPHORE_HELPER_H
-#define __ASM_SH_SEMAPHORE_HELPER_H
-
-/*
- * SMP- and interrupt-safe semaphores helper functions.
- *
- * (C) Copyright 1996 Linus Torvalds
- * (C) Copyright 1999 Andrea Arcangeli
- */
-
-/*
- * These two _must_ execute atomically wrt each other.
- *
- * This is trivially done with load_locked/store_cond,
- * which we have.  Let the rest of the losers suck eggs.
- */
-static __inline__ void wake_one_more(struct semaphore * sem)
-{
-	atomic_inc((atomic_t *)&sem->sleepers);
-}
-
-static __inline__ int waking_non_zero(struct semaphore *sem)
-{
-	unsigned long flags;
-	int ret = 0;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	if (sem->sleepers > 0) {
-		sem->sleepers--;
-		ret = 1;
-	}
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-	return ret;
-}
-
-/*
- * waking_non_zero_interruptible:
- *	1	got the lock
- *	0	go to sleep
- *	-EINTR	interrupted
- *
- * We must undo the sem->count down_interruptible() increment while we are
- * protected by the spinlock in order to make atomic this atomic_inc() with the
- * atomic_read() in wake_one_more(), otherwise we can race. -arca
- */
-static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
-						struct task_struct *tsk)
-{
-	unsigned long flags;
-	int ret = 0;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	if (sem->sleepers > 0) {
-		sem->sleepers--;
-		ret = 1;
-	} else if (signal_pending(tsk)) {
-		atomic_inc(&sem->count);
-		ret = -EINTR;
-	}
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-	return ret;
-}
-
-/*
- * waking_non_zero_trylock:
- *	1	failed to lock
- *	0	got the lock
- *
- * We must undo the sem->count down_trylock() increment while we are
- * protected by the spinlock in order to make atomic this atomic_inc() with the
- * atomic_read() in wake_one_more(), otherwise we can race. -arca
- */
-static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
-{
-	unsigned long flags;
-	int ret = 1;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	if (sem->sleepers <= 0)
-		atomic_inc(&sem->count);
-	else {
-		sem->sleepers--;
-		ret = 0;
-	}
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-	return ret;
-}
-
-#endif /* __ASM_SH_SEMAPHORE_HELPER_H */
diff --git a/include/asm-sh/semaphore.h b/include/asm-sh/semaphore.h
index 9e5a37c..d9b2034 100644
--- a/include/asm-sh/semaphore.h
+++ b/include/asm-sh/semaphore.h
@@ -1,115 +1 @@
-#ifndef __ASM_SH_SEMAPHORE_H
-#define __ASM_SH_SEMAPHORE_H
-
-#include <linux/linkage.h>
-
-#ifdef __KERNEL__
-/*
- * SMP- and interrupt-safe semaphores.
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- * SuperH version by Niibe Yutaka
- *  (Currently no asm implementation but generic C code...)
- */
-
-#include <linux/spinlock.h>
-#include <linux/rwsem.h>
-#include <linux/wait.h>
-
-#include <asm/system.h>
-#include <asm/atomic.h>
-
-struct semaphore {
-	atomic_t count;
-	int sleepers;
-	wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n)				\
-{									\
-	.count		= ATOMIC_INIT(n),				\
-	.sleepers	= 0,						\
-	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)	\
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
-	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
-/*
- *	*sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
- *
- * i'd rather use the more flexible initialization above, but sadly
- * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well.
- */
-	atomic_set(&sem->count, val);
-	sem->sleepers = 0;
-	init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
-	sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
-	sema_init(sem, 0);
-}
-
-#if 0
-asmlinkage void __down_failed(void /* special register calling convention */);
-asmlinkage int  __down_failed_interruptible(void  /* params in registers */);
-asmlinkage int  __down_failed_trylock(void  /* params in registers */);
-asmlinkage void __up_wakeup(void /* special register calling convention */);
-#endif
-
-asmlinkage void __down(struct semaphore * sem);
-asmlinkage int  __down_interruptible(struct semaphore * sem);
-asmlinkage int  __down_trylock(struct semaphore * sem);
-asmlinkage void __up(struct semaphore * sem);
-
-extern spinlock_t semaphore_wake_lock;
-
-static inline void down(struct semaphore * sem)
-{
-	might_sleep();
-	if (atomic_dec_return(&sem->count) < 0)
-		__down(sem);
-}
-
-static inline int down_interruptible(struct semaphore * sem)
-{
-	int ret = 0;
-
-	might_sleep();
-	if (atomic_dec_return(&sem->count) < 0)
-		ret = __down_interruptible(sem);
-	return ret;
-}
-
-static inline int down_trylock(struct semaphore * sem)
-{
-	int ret = 0;
-
-	if (atomic_dec_return(&sem->count) < 0)
-		ret = __down_trylock(sem);
-	return ret;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- */
-static inline void up(struct semaphore * sem)
-{
-	if (atomic_inc_return(&sem->count) <= 0)
-		__up(sem);
-}
-
-#endif
-#endif /* __ASM_SH_SEMAPHORE_H */
+#include <linux/semaphore.h>
diff --git a/include/asm-sparc/ide.h b/include/asm-sparc/ide.h
index 4076cb5..afd1736 100644
--- a/include/asm-sparc/ide.h
+++ b/include/asm-sparc/ide.h
@@ -17,8 +17,6 @@
 #undef  MAX_HWIFS
 #define MAX_HWIFS	2
 
-#define ide_default_io_ctl(base)	((base) + 0x206) /* obsolete */
-
 #define __ide_insl(data_reg, buffer, wcount) \
 	__ide_insw(data_reg, buffer, (wcount)<<1)
 #define __ide_outsl(data_reg, buffer, wcount) \
diff --git a/include/asm-sparc/semaphore.h b/include/asm-sparc/semaphore.h
index 8018f9f..d9b2034 100644
--- a/include/asm-sparc/semaphore.h
+++ b/include/asm-sparc/semaphore.h
@@ -1,192 +1 @@
-#ifndef _SPARC_SEMAPHORE_H
-#define _SPARC_SEMAPHORE_H
-
-/* Dinky, good for nothing, just barely irq safe, Sparc semaphores. */
-
-#ifdef __KERNEL__
-
-#include <asm/atomic.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-struct semaphore {
-	atomic24_t count;
-	int sleepers;
-	wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n)				\
-{									\
-	.count		= ATOMIC24_INIT(n),				\
-	.sleepers	= 0,						\
-	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)	\
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
-	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
-	atomic24_set(&sem->count, val);
-	sem->sleepers = 0;
-	init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
-	sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
-	sema_init(sem, 0);
-}
-
-extern void __down(struct semaphore * sem);
-extern int __down_interruptible(struct semaphore * sem);
-extern int __down_trylock(struct semaphore * sem);
-extern void __up(struct semaphore * sem);
-
-static inline void down(struct semaphore * sem)
-{
-	register volatile int *ptr asm("g1");
-	register int increment asm("g2");
-
-	might_sleep();
-
-	ptr = &(sem->count.counter);
-	increment = 1;
-
-	__asm__ __volatile__(
-	"mov	%%o7, %%g4\n\t"
-	"call	___atomic24_sub\n\t"
-	" add	%%o7, 8, %%o7\n\t"
-	"tst	%%g2\n\t"
-	"bl	2f\n\t"
-	" nop\n"
-	"1:\n\t"
-	".subsection 2\n"
-	"2:\n\t"
-	"save	%%sp, -64, %%sp\n\t"
-	"mov	%%g1, %%l1\n\t"
-	"mov	%%g5, %%l5\n\t"
-	"call	%3\n\t"
-	" mov	%%g1, %%o0\n\t"
-	"mov	%%l1, %%g1\n\t"
-	"ba	1b\n\t"
-	" restore %%l5, %%g0, %%g5\n\t"
-	".previous\n"
-	: "=&r" (increment)
-	: "0" (increment), "r" (ptr), "i" (__down)
-	: "g3", "g4", "g7", "memory", "cc");
-}
-
-static inline int down_interruptible(struct semaphore * sem)
-{
-	register volatile int *ptr asm("g1");
-	register int increment asm("g2");
-
-	might_sleep();
-
-	ptr = &(sem->count.counter);
-	increment = 1;
-
-	__asm__ __volatile__(
-	"mov	%%o7, %%g4\n\t"
-	"call	___atomic24_sub\n\t"
-	" add	%%o7, 8, %%o7\n\t"
-	"tst	%%g2\n\t"
-	"bl	2f\n\t"
-	" clr	%%g2\n"
-	"1:\n\t"
-	".subsection 2\n"
-	"2:\n\t"
-	"save	%%sp, -64, %%sp\n\t"
-	"mov	%%g1, %%l1\n\t"
-	"mov	%%g5, %%l5\n\t"
-	"call	%3\n\t"
-	" mov	%%g1, %%o0\n\t"
-	"mov	%%l1, %%g1\n\t"
-	"mov	%%l5, %%g5\n\t"
-	"ba	1b\n\t"
-	" restore %%o0, %%g0, %%g2\n\t"
-	".previous\n"
-	: "=&r" (increment)
-	: "0" (increment), "r" (ptr), "i" (__down_interruptible)
-	: "g3", "g4", "g7", "memory", "cc");
-
-	return increment;
-}
-
-static inline int down_trylock(struct semaphore * sem)
-{
-	register volatile int *ptr asm("g1");
-	register int increment asm("g2");
-
-	ptr = &(sem->count.counter);
-	increment = 1;
-
-	__asm__ __volatile__(
-	"mov	%%o7, %%g4\n\t"
-	"call	___atomic24_sub\n\t"
-	" add	%%o7, 8, %%o7\n\t"
-	"tst	%%g2\n\t"
-	"bl	2f\n\t"
-	" clr	%%g2\n"
-	"1:\n\t"
-	".subsection 2\n"
-	"2:\n\t"
-	"save	%%sp, -64, %%sp\n\t"
-	"mov	%%g1, %%l1\n\t"
-	"mov	%%g5, %%l5\n\t"
-	"call	%3\n\t"
-	" mov	%%g1, %%o0\n\t"
-	"mov	%%l1, %%g1\n\t"
-	"mov	%%l5, %%g5\n\t"
-	"ba	1b\n\t"
-	" restore %%o0, %%g0, %%g2\n\t"
-	".previous\n"
-	: "=&r" (increment)
-	: "0" (increment), "r" (ptr), "i" (__down_trylock)
-	: "g3", "g4", "g7", "memory", "cc");
-
-	return increment;
-}
-
-static inline void up(struct semaphore * sem)
-{
-	register volatile int *ptr asm("g1");
-	register int increment asm("g2");
-
-	ptr = &(sem->count.counter);
-	increment = 1;
-
-	__asm__ __volatile__(
-	"mov	%%o7, %%g4\n\t"
-	"call	___atomic24_add\n\t"
-	" add	%%o7, 8, %%o7\n\t"
-	"tst	%%g2\n\t"
-	"ble	2f\n\t"
-	" nop\n"
-	"1:\n\t"
-	".subsection 2\n"
-	"2:\n\t"
-	"save	%%sp, -64, %%sp\n\t"
-	"mov	%%g1, %%l1\n\t"
-	"mov	%%g5, %%l5\n\t"
-	"call	%3\n\t"
-	" mov	%%g1, %%o0\n\t"
-	"mov	%%l1, %%g1\n\t"
-	"ba	1b\n\t"
-	" restore %%l5, %%g0, %%g5\n\t"
-	".previous\n"
-	: "=&r" (increment)
-	: "0" (increment), "r" (ptr), "i" (__up)
-	: "g3", "g4", "g7", "memory", "cc");
-}	
-
-#endif /* __KERNEL__ */
-
-#endif /* !(_SPARC_SEMAPHORE_H) */
+#include <linux/semaphore.h>
diff --git a/include/asm-sparc64/ide.h b/include/asm-sparc64/ide.h
index ac7eb21..c5fdabe 100644
--- a/include/asm-sparc64/ide.h
+++ b/include/asm-sparc64/ide.h
@@ -24,8 +24,6 @@
 # endif
 #endif
 
-#define ide_default_io_ctl(base)	((base) + 0x206) /* obsolete */
-
 #define __ide_insl(data_reg, buffer, wcount) \
 	__ide_insw(data_reg, buffer, (wcount)<<1)
 #define __ide_outsl(data_reg, buffer, wcount) \
diff --git a/include/asm-sparc64/semaphore.h b/include/asm-sparc64/semaphore.h
index 7f7c0c4..d9b2034 100644
--- a/include/asm-sparc64/semaphore.h
+++ b/include/asm-sparc64/semaphore.h
@@ -1,53 +1 @@
-#ifndef _SPARC64_SEMAPHORE_H
-#define _SPARC64_SEMAPHORE_H
-
-/* These are actually reasonable on the V9.
- *
- * See asm-ppc/semaphore.h for implementation commentary,
- * only sparc64 specific issues are commented here.
- */
-#ifdef __KERNEL__
-
-#include <asm/atomic.h>
-#include <asm/system.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-struct semaphore {
-	atomic_t count;
-	wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, count) \
-	{ ATOMIC_INIT(count), \
-	  __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) }
-
-#define __DECLARE_SEMAPHORE_GENERIC(name, count) \
-	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name)		__DECLARE_SEMAPHORE_GENERIC(name, 1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
-	atomic_set(&sem->count, val);
-	init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
-	sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
-	sema_init(sem, 0);
-}
-
-extern void up(struct semaphore *sem);
-extern void down(struct semaphore *sem);
-extern int down_trylock(struct semaphore *sem);
-extern int down_interruptible(struct semaphore *sem);
-
-#endif /* __KERNEL__ */
-
-#endif /* !(_SPARC64_SEMAPHORE_H) */
+#include <linux/semaphore.h>
diff --git a/include/asm-um/semaphore.h b/include/asm-um/semaphore.h
index ff13c34..d9b2034 100644
--- a/include/asm-um/semaphore.h
+++ b/include/asm-um/semaphore.h
@@ -1,6 +1 @@
-#ifndef __UM_SEMAPHORE_H
-#define __UM_SEMAPHORE_H
-
-#include "asm/arch/semaphore.h"
-
-#endif
+#include <linux/semaphore.h>
diff --git a/include/asm-v850/semaphore.h b/include/asm-v850/semaphore.h
index 10ed0cc..d9b2034 100644
--- a/include/asm-v850/semaphore.h
+++ b/include/asm-v850/semaphore.h
@@ -1,84 +1 @@
-#ifndef __V850_SEMAPHORE_H__
-#define __V850_SEMAPHORE_H__
-
-#include <linux/linkage.h>
-#include <linux/spinlock.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-#include <asm/atomic.h>
-
-struct semaphore {
-	atomic_t count;
-	int sleepers;
-	wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name,count)				      \
-	{ ATOMIC_INIT (count), 0,					      \
-	  __WAIT_QUEUE_HEAD_INITIALIZER ((name).wait) }
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count)	\
-	struct semaphore name = __SEMAPHORE_INITIALIZER (name,count)
-
-#define DECLARE_MUTEX(name)		__DECLARE_SEMAPHORE_GENERIC (name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
-	*sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
-	sema_init (sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
-	sema_init (sem, 0);
-}
-
-/*
- * special register calling convention
- */
-asmlinkage void __down_failed (void);
-asmlinkage int  __down_interruptible_failed (void);
-asmlinkage int  __down_trylock_failed (void);
-asmlinkage void __up_wakeup (void);
-
-extern void __down (struct semaphore * sem);
-extern int  __down_interruptible (struct semaphore * sem);
-extern int  __down_trylock (struct semaphore * sem);
-extern void __up (struct semaphore * sem);
-
-static inline void down (struct semaphore * sem)
-{
-	might_sleep();
-	if (atomic_dec_return (&sem->count) < 0)
-		__down (sem);
-}
-
-static inline int down_interruptible (struct semaphore * sem)
-{
-	int ret = 0;
-	might_sleep();
-	if (atomic_dec_return (&sem->count) < 0)
-		ret = __down_interruptible (sem);
-	return ret;
-}
-
-static inline int down_trylock (struct semaphore *sem)
-{
-	int ret = 0;
-	if (atomic_dec_return (&sem->count) < 0)
-		ret = __down_trylock (sem);
-	return ret;
-}
-
-static inline void up (struct semaphore * sem)
-{
-	if (atomic_inc_return (&sem->count) <= 0)
-		__up (sem);
-}
-
-#endif /* __V850_SEMAPHORE_H__ */
+#include <linux/semaphore.h>
diff --git a/include/asm-x86/Kbuild b/include/asm-x86/Kbuild
index 3b8160a..1e35545 100644
--- a/include/asm-x86/Kbuild
+++ b/include/asm-x86/Kbuild
@@ -10,6 +10,7 @@
 header-y += ptrace-abi.h
 header-y += sigcontext32.h
 header-y += ucontext.h
+header-y += processor-flags.h
 
 unifdef-y += e820.h
 unifdef-y += ist.h
diff --git a/include/asm-x86/a.out-core.h b/include/asm-x86/a.out-core.h
index d2b6e11..714207a 100644
--- a/include/asm-x86/a.out-core.h
+++ b/include/asm-x86/a.out-core.h
@@ -29,8 +29,9 @@
 	dump->magic = CMAGIC;
 	dump->start_code = 0;
 	dump->start_stack = regs->sp & ~(PAGE_SIZE - 1);
-	dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
-	dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
+	dump->u_tsize = ((unsigned long)current->mm->end_code) >> PAGE_SHIFT;
+	dump->u_dsize = ((unsigned long)(current->mm->brk + (PAGE_SIZE - 1)))
+			>> PAGE_SHIFT;
 	dump->u_dsize -= dump->u_tsize;
 	dump->u_ssize = 0;
 	dump->u_debugreg[0] = current->thread.debugreg0;
@@ -43,7 +44,8 @@
 	dump->u_debugreg[7] = current->thread.debugreg7;
 
 	if (dump->start_stack < TASK_SIZE)
-		dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;
+		dump->u_ssize = ((unsigned long)(TASK_SIZE - dump->start_stack))
+				>> PAGE_SHIFT;
 
 	dump->regs.bx = regs->bx;
 	dump->regs.cx = regs->cx;
@@ -55,7 +57,7 @@
 	dump->regs.ds = (u16)regs->ds;
 	dump->regs.es = (u16)regs->es;
 	dump->regs.fs = (u16)regs->fs;
-	savesegment(gs,gs);
+	savesegment(gs, gs);
 	dump->regs.orig_ax = regs->orig_ax;
 	dump->regs.ip = regs->ip;
 	dump->regs.cs = (u16)regs->cs;
@@ -63,7 +65,7 @@
 	dump->regs.sp = regs->sp;
 	dump->regs.ss = (u16)regs->ss;
 
-	dump->u_fpvalid = dump_fpu (regs, &dump->i387);
+	dump->u_fpvalid = dump_fpu(regs, &dump->i387);
 }
 
 #endif /* CONFIG_X86_32 */
diff --git a/include/asm-x86/acpi.h b/include/asm-x86/acpi.h
index 7a72d6a..14411c9 100644
--- a/include/asm-x86/acpi.h
+++ b/include/asm-x86/acpi.h
@@ -67,16 +67,16 @@
  */
 #define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \
 	asm("divl %2;"				     \
-	    :"=a"(q32), "=d"(r32)		     \
-	    :"r"(d32),				     \
+	    : "=a"(q32), "=d"(r32)		     \
+	    : "r"(d32),				     \
 	     "0"(n_lo), "1"(n_hi))
 
 
 #define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \
 	asm("shrl   $1,%2	;"	\
 	    "rcrl   $1,%3;"		\
-	    :"=r"(n_hi), "=r"(n_lo)	\
-	    :"0"(n_hi), "1"(n_lo))
+	    : "=r"(n_hi), "=r"(n_lo)	\
+	    : "0"(n_hi), "1"(n_lo))
 
 #ifdef CONFIG_ACPI
 extern int acpi_lapic;
diff --git a/include/asm-x86/alternative.h b/include/asm-x86/alternative.h
index d8bacf3..1f6a9ca 100644
--- a/include/asm-x86/alternative.h
+++ b/include/asm-x86/alternative.h
@@ -66,8 +66,8 @@
 extern void alternatives_smp_switch(int smp);
 #else
 static inline void alternatives_smp_module_add(struct module *mod, char *name,
-					void *locks, void *locks_end,
-					void *text, void *text_end) {}
+					       void *locks, void *locks_end,
+					       void *text, void *text_end) {}
 static inline void alternatives_smp_module_del(struct module *mod) {}
 static inline void alternatives_smp_switch(int smp) {}
 #endif	/* CONFIG_SMP */
@@ -148,14 +148,34 @@
 void apply_paravirt(struct paravirt_patch_site *start,
 		    struct paravirt_patch_site *end);
 #else
-static inline void
-apply_paravirt(struct paravirt_patch_site *start,
-	       struct paravirt_patch_site *end)
+static inline void apply_paravirt(struct paravirt_patch_site *start,
+				  struct paravirt_patch_site *end)
 {}
 #define __parainstructions	NULL
 #define __parainstructions_end	NULL
 #endif
 
-extern void text_poke(void *addr, unsigned char *opcode, int len);
+extern void add_nops(void *insns, unsigned int len);
+
+/*
+ * Clear and restore the kernel write-protection flag on the local CPU.
+ * Allows the kernel to edit read-only pages.
+ * Side-effect: any interrupt handler running between save and restore will have
+ * the ability to write to read-only pages.
+ *
+ * Warning:
+ * Code patching in the UP case is safe if NMIs and MCE handlers are stopped and
+ * no thread can be preempted in the instructions being modified (no iret to an
+ * invalid instruction possible) or if the instructions are changed from a
+ * consistent state to another consistent state atomically.
+ * More care must be taken when modifying code in the SMP case because of
+ * Intel's errata.
+ * On the local CPU you need to be protected against NMI or MCE handlers seeing an
+ * inconsistent instruction while you patch.
+ * The _early version expects the memory to already be RW.
+ */
+
+extern void *text_poke(void *addr, const void *opcode, size_t len);
+extern void *text_poke_early(void *addr, const void *opcode, size_t len);
 
 #endif /* _ASM_X86_ALTERNATIVE_H */
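As a concrete illustration of the new text_poke() signature (const void *opcode, size_t len, replacing unsigned char *opcode, int len), a single-byte patch might look like the sketch below; the helper and its patch_addr argument are hypothetical, and the safety rules quoted in the comment above must hold:

	static void example_patch_nop(void *patch_addr)
	{
		static const unsigned char nop = 0x90;	/* one-byte x86 NOP */

		text_poke(patch_addr, &nop, 1);
	}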
diff --git a/include/asm-x86/apic.h b/include/asm-x86/apic.h
index bcfc07f..be9639a 100644
--- a/include/asm-x86/apic.h
+++ b/include/asm-x86/apic.h
@@ -44,7 +44,6 @@
 extern int ioapic_force;
 extern int disable_apic;
 extern int disable_apic_timer;
-extern unsigned boot_cpu_id;
 
 /*
  * Basic functions accessing APICs.
@@ -59,6 +58,8 @@
 #define setup_secondary_clock setup_secondary_APIC_clock
 #endif
 
+extern int is_vsmp_box(void);
+
 static inline void native_apic_write(unsigned long reg, u32 v)
 {
 	*((volatile u32 *)(APIC_BASE + reg)) = v;
@@ -66,7 +67,7 @@
 
 static inline void native_apic_write_atomic(unsigned long reg, u32 v)
 {
-	(void) xchg((u32*)(APIC_BASE + reg), v);
+	(void)xchg((u32 *)(APIC_BASE + reg), v);
 }
 
 static inline u32 native_apic_read(unsigned long reg)
@@ -123,7 +124,7 @@
  * On 32bit this is mach-xxx local
  */
 #ifdef CONFIG_X86_64
-extern void setup_apic_routing(void);
+extern void early_init_lapic_mapping(void);
 #endif
 
 extern u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask);
diff --git a/include/asm-x86/apicdef.h b/include/asm-x86/apicdef.h
index 550af7a..6b9008c 100644
--- a/include/asm-x86/apicdef.h
+++ b/include/asm-x86/apicdef.h
@@ -12,17 +12,15 @@
 
 #define	APIC_ID		0x20
 
-#ifdef CONFIG_X86_64
-# define	APIC_ID_MASK		(0xFFu<<24)
-# define	GET_APIC_ID(x)		(((x)>>24)&0xFFu)
-# define	SET_APIC_ID(x)		(((x)<<24))
-#endif
-
 #define	APIC_LVR	0x30
 #define		APIC_LVR_MASK		0xFF00FF
-#define		GET_APIC_VERSION(x)	((x)&0xFFu)
-#define		GET_APIC_MAXLVT(x)	(((x)>>16)&0xFFu)
-#define		APIC_INTEGRATED(x)	((x)&0xF0u)
+#define		GET_APIC_VERSION(x)	((x) & 0xFFu)
+#define		GET_APIC_MAXLVT(x)	(((x) >> 16) & 0xFFu)
+#ifdef CONFIG_X86_32
+#  define	APIC_INTEGRATED(x)	((x) & 0xF0u)
+#else
+#  define	APIC_INTEGRATED(x)	(1)
+#endif
 #define		APIC_XAPIC(x)		((x) >= 0x14)
 #define	APIC_TASKPRI	0x80
 #define		APIC_TPRI_MASK		0xFFu
@@ -33,16 +31,16 @@
 #define		APIC_EIO_ACK		0x0
 #define	APIC_RRR	0xC0
 #define	APIC_LDR	0xD0
-#define		APIC_LDR_MASK		(0xFFu<<24)
-#define		GET_APIC_LOGICAL_ID(x)	(((x)>>24)&0xFFu)
-#define		SET_APIC_LOGICAL_ID(x)	(((x)<<24))
+#define		APIC_LDR_MASK		(0xFFu << 24)
+#define		GET_APIC_LOGICAL_ID(x)	(((x) >> 24) & 0xFFu)
+#define		SET_APIC_LOGICAL_ID(x)	(((x) << 24))
 #define		APIC_ALL_CPUS		0xFFu
 #define	APIC_DFR	0xE0
 #define		APIC_DFR_CLUSTER		0x0FFFFFFFul
 #define		APIC_DFR_FLAT			0xFFFFFFFFul
 #define	APIC_SPIV	0xF0
-#define		APIC_SPIV_FOCUS_DISABLED	(1<<9)
-#define		APIC_SPIV_APIC_ENABLED		(1<<8)
+#define		APIC_SPIV_FOCUS_DISABLED	(1 << 9)
+#define		APIC_SPIV_APIC_ENABLED		(1 << 8)
 #define	APIC_ISR	0x100
 #define	APIC_ISR_NR     0x8     /* Number of 32 bit ISR registers. */
 #define	APIC_TMR	0x180
@@ -78,27 +76,27 @@
 #define		APIC_DM_EXTINT		0x00700
 #define		APIC_VECTOR_MASK	0x000FF
 #define	APIC_ICR2	0x310
-#define		GET_APIC_DEST_FIELD(x)	(((x)>>24)&0xFF)
-#define		SET_APIC_DEST_FIELD(x)	((x)<<24)
+#define		GET_APIC_DEST_FIELD(x)	(((x) >> 24) & 0xFF)
+#define		SET_APIC_DEST_FIELD(x)	((x) << 24)
 #define	APIC_LVTT	0x320
 #define	APIC_LVTTHMR	0x330
 #define	APIC_LVTPC	0x340
 #define	APIC_LVT0	0x350
-#define		APIC_LVT_TIMER_BASE_MASK	(0x3<<18)
-#define		GET_APIC_TIMER_BASE(x)		(((x)>>18)&0x3)
-#define		SET_APIC_TIMER_BASE(x)		(((x)<<18))
+#define		APIC_LVT_TIMER_BASE_MASK	(0x3 << 18)
+#define		GET_APIC_TIMER_BASE(x)		(((x) >> 18) & 0x3)
+#define		SET_APIC_TIMER_BASE(x)		(((x) << 18))
 #define		APIC_TIMER_BASE_CLKIN		0x0
 #define		APIC_TIMER_BASE_TMBASE		0x1
 #define		APIC_TIMER_BASE_DIV		0x2
-#define		APIC_LVT_TIMER_PERIODIC		(1<<17)
-#define		APIC_LVT_MASKED			(1<<16)
-#define		APIC_LVT_LEVEL_TRIGGER		(1<<15)
-#define		APIC_LVT_REMOTE_IRR		(1<<14)
-#define		APIC_INPUT_POLARITY		(1<<13)
-#define		APIC_SEND_PENDING		(1<<12)
+#define		APIC_LVT_TIMER_PERIODIC		(1 << 17)
+#define		APIC_LVT_MASKED			(1 << 16)
+#define		APIC_LVT_LEVEL_TRIGGER		(1 << 15)
+#define		APIC_LVT_REMOTE_IRR		(1 << 14)
+#define		APIC_INPUT_POLARITY		(1 << 13)
+#define		APIC_SEND_PENDING		(1 << 12)
 #define		APIC_MODE_MASK			0x700
-#define		GET_APIC_DELIVERY_MODE(x)	(((x)>>8)&0x7)
-#define		SET_APIC_DELIVERY_MODE(x, y)	(((x)&~0x700)|((y)<<8))
+#define		GET_APIC_DELIVERY_MODE(x)	(((x) >> 8) & 0x7)
+#define		SET_APIC_DELIVERY_MODE(x, y)	(((x) & ~0x700) | ((y) << 8))
 #define			APIC_MODE_FIXED		0x0
 #define			APIC_MODE_NMI		0x4
 #define			APIC_MODE_EXTINT	0x7
@@ -107,7 +105,7 @@
 #define	APIC_TMICT	0x380
 #define	APIC_TMCCT	0x390
 #define	APIC_TDCR	0x3E0
-#define		APIC_TDR_DIV_TMBASE	(1<<2)
+#define		APIC_TDR_DIV_TMBASE	(1 << 2)
 #define		APIC_TDR_DIV_1		0xB
 #define		APIC_TDR_DIV_2		0x0
 #define		APIC_TDR_DIV_4		0x1
@@ -117,14 +115,14 @@
 #define		APIC_TDR_DIV_64		0x9
 #define		APIC_TDR_DIV_128	0xA
 #define	APIC_EILVT0     0x500
-#define		APIC_EILVT_NR_AMD_K8	1	/* Number of extended interrupts */
+#define		APIC_EILVT_NR_AMD_K8	1	/* # of extended interrupts */
 #define		APIC_EILVT_NR_AMD_10H	4
-#define		APIC_EILVT_LVTOFF(x)	(((x)>>4)&0xF)
+#define		APIC_EILVT_LVTOFF(x)	(((x) >> 4) & 0xF)
 #define		APIC_EILVT_MSG_FIX	0x0
 #define		APIC_EILVT_MSG_SMI	0x2
 #define		APIC_EILVT_MSG_NMI	0x4
 #define		APIC_EILVT_MSG_EXT	0x7
-#define		APIC_EILVT_MASKED	(1<<16)
+#define		APIC_EILVT_MASKED	(1 << 16)
 #define	APIC_EILVT1     0x510
 #define	APIC_EILVT2     0x520
 #define	APIC_EILVT3     0x530
@@ -135,7 +133,7 @@
 # define MAX_IO_APICS 64
 #else
 # define MAX_IO_APICS 128
-# define MAX_LOCAL_APIC 256
+# define MAX_LOCAL_APIC 32768
 #endif
 
 /*
@@ -408,6 +406,9 @@
 
 #undef u32
 
-#define BAD_APICID 0xFFu
-
+#ifdef CONFIG_X86_32
+ #define BAD_APICID 0xFFu
+#else
+ #define BAD_APICID 0xFFFFu
+#endif
 #endif
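
The apicdef.h accessors being reformatted here are plain shift-and-mask
pairs; a minimal standalone illustration using the SET/GET_APIC_DEST_FIELD
definitions from the hunk above (the APIC ID value is hypothetical):

#include <stdio.h>

#define SET_APIC_DEST_FIELD(x)	((x) << 24)
#define GET_APIC_DEST_FIELD(x)	(((x) >> 24) & 0xFF)

int main(void)
{
	unsigned int icr2 = SET_APIC_DEST_FIELD(0x2Au);	/* hypothetical ID */

	/* prints ICR2 = 0x2a000000, dest = 0x2a */
	printf("ICR2 = %#x, dest = %#x\n", icr2, GET_APIC_DEST_FIELD(icr2));
	return 0;
}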
diff --git a/include/asm-x86/atomic_32.h b/include/asm-x86/atomic_32.h
index 437aac8..21a4825 100644
--- a/include/asm-x86/atomic_32.h
+++ b/include/asm-x86/atomic_32.h
@@ -15,138 +15,133 @@
  * on us. We need to use _exactly_ the address the user gave us,
  * not some alias that contains the same information.
  */
-typedef struct { int counter; } atomic_t;
+typedef struct {
+	int counter;
+} atomic_t;
 
 #define ATOMIC_INIT(i)	{ (i) }
 
 /**
  * atomic_read - read atomic variable
  * @v: pointer of type atomic_t
- * 
+ *
  * Atomically reads the value of @v.
- */ 
+ */
 #define atomic_read(v)		((v)->counter)
 
 /**
  * atomic_set - set atomic variable
  * @v: pointer of type atomic_t
  * @i: required value
- * 
+ *
  * Atomically sets the value of @v to @i.
- */ 
-#define atomic_set(v,i)		(((v)->counter) = (i))
+ */
+#define atomic_set(v, i)	(((v)->counter) = (i))
 
 /**
  * atomic_add - add integer to atomic variable
  * @i: integer value to add
  * @v: pointer of type atomic_t
- * 
+ *
  * Atomically adds @i to @v.
  */
-static __inline__ void atomic_add(int i, atomic_t *v)
+static inline void atomic_add(int i, atomic_t *v)
 {
-	__asm__ __volatile__(
-		LOCK_PREFIX "addl %1,%0"
-		:"+m" (v->counter)
-		:"ir" (i));
+	asm volatile(LOCK_PREFIX "addl %1,%0"
+		     : "+m" (v->counter)
+		     : "ir" (i));
 }
 
 /**
  * atomic_sub - subtract integer from atomic variable
  * @i: integer value to subtract
  * @v: pointer of type atomic_t
- * 
+ *
  * Atomically subtracts @i from @v.
  */
-static __inline__ void atomic_sub(int i, atomic_t *v)
+static inline void atomic_sub(int i, atomic_t *v)
 {
-	__asm__ __volatile__(
-		LOCK_PREFIX "subl %1,%0"
-		:"+m" (v->counter)
-		:"ir" (i));
+	asm volatile(LOCK_PREFIX "subl %1,%0"
+		     : "+m" (v->counter)
+		     : "ir" (i));
 }
 
 /**
  * atomic_sub_and_test - subtract value from variable and test result
  * @i: integer value to subtract
  * @v: pointer of type atomic_t
- * 
+ *
  * Atomically subtracts @i from @v and returns
  * true if the result is zero, or false for all
  * other cases.
  */
-static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
+static inline int atomic_sub_and_test(int i, atomic_t *v)
 {
 	unsigned char c;
 
-	__asm__ __volatile__(
-		LOCK_PREFIX "subl %2,%0; sete %1"
-		:"+m" (v->counter), "=qm" (c)
-		:"ir" (i) : "memory");
+	asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
+		     : "+m" (v->counter), "=qm" (c)
+		     : "ir" (i) : "memory");
 	return c;
 }
 
 /**
  * atomic_inc - increment atomic variable
  * @v: pointer of type atomic_t
- * 
+ *
  * Atomically increments @v by 1.
- */ 
-static __inline__ void atomic_inc(atomic_t *v)
+ */
+static inline void atomic_inc(atomic_t *v)
 {
-	__asm__ __volatile__(
-		LOCK_PREFIX "incl %0"
-		:"+m" (v->counter));
+	asm volatile(LOCK_PREFIX "incl %0"
+		     : "+m" (v->counter));
 }
 
 /**
  * atomic_dec - decrement atomic variable
  * @v: pointer of type atomic_t
- * 
+ *
  * Atomically decrements @v by 1.
- */ 
-static __inline__ void atomic_dec(atomic_t *v)
+ */
+static inline void atomic_dec(atomic_t *v)
 {
-	__asm__ __volatile__(
-		LOCK_PREFIX "decl %0"
-		:"+m" (v->counter));
+	asm volatile(LOCK_PREFIX "decl %0"
+		     : "+m" (v->counter));
 }
 
 /**
  * atomic_dec_and_test - decrement and test
  * @v: pointer of type atomic_t
- * 
+ *
  * Atomically decrements @v by 1 and
  * returns true if the result is 0, or false for all other
  * cases.
- */ 
-static __inline__ int atomic_dec_and_test(atomic_t *v)
+ */
+static inline int atomic_dec_and_test(atomic_t *v)
 {
 	unsigned char c;
 
-	__asm__ __volatile__(
-		LOCK_PREFIX "decl %0; sete %1"
-		:"+m" (v->counter), "=qm" (c)
-		: : "memory");
+	asm volatile(LOCK_PREFIX "decl %0; sete %1"
+		     : "+m" (v->counter), "=qm" (c)
+		     : : "memory");
 	return c != 0;
 }
 
 /**
- * atomic_inc_and_test - increment and test 
+ * atomic_inc_and_test - increment and test
  * @v: pointer of type atomic_t
- * 
+ *
  * Atomically increments @v by 1
  * and returns true if the result is zero, or false for all
  * other cases.
- */ 
-static __inline__ int atomic_inc_and_test(atomic_t *v)
+ */
+static inline int atomic_inc_and_test(atomic_t *v)
 {
 	unsigned char c;
 
-	__asm__ __volatile__(
-		LOCK_PREFIX "incl %0; sete %1"
-		:"+m" (v->counter), "=qm" (c)
-		: : "memory");
+	asm volatile(LOCK_PREFIX "incl %0; sete %1"
+		     : "+m" (v->counter), "=qm" (c)
+		     : : "memory");
 	return c != 0;
 }
 
@@ -154,19 +149,18 @@
  * atomic_add_negative - add and test if negative
  * @v: pointer of type atomic_t
  * @i: integer value to add
- * 
+ *
  * Atomically adds @i to @v and returns true
  * if the result is negative, or false when
  * result is greater than or equal to zero.
- */ 
-static __inline__ int atomic_add_negative(int i, atomic_t *v)
+ */
+static inline int atomic_add_negative(int i, atomic_t *v)
 {
 	unsigned char c;
 
-	__asm__ __volatile__(
-		LOCK_PREFIX "addl %2,%0; sets %1"
-		:"+m" (v->counter), "=qm" (c)
-		:"ir" (i) : "memory");
+	asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
+		     : "+m" (v->counter), "=qm" (c)
+		     : "ir" (i) : "memory");
 	return c;
 }
 
@@ -177,20 +171,19 @@
  *
  * Atomically adds @i to @v and returns @i + @v
  */
-static __inline__ int atomic_add_return(int i, atomic_t *v)
+static inline int atomic_add_return(int i, atomic_t *v)
 {
 	int __i;
 #ifdef CONFIG_M386
 	unsigned long flags;
-	if(unlikely(boot_cpu_data.x86 <= 3))
+	if (unlikely(boot_cpu_data.x86 <= 3))
 		goto no_xadd;
 #endif
 	/* Modern 486+ processor */
 	__i = i;
-	__asm__ __volatile__(
-		LOCK_PREFIX "xaddl %0, %1"
-		:"+r" (i), "+m" (v->counter)
-		: : "memory");
+	asm volatile(LOCK_PREFIX "xaddl %0, %1"
+		     : "+r" (i), "+m" (v->counter)
+		     : : "memory");
 	return i + __i;
 
 #ifdef CONFIG_M386
@@ -210,9 +203,9 @@
  *
  * Atomically subtracts @i from @v and returns @v - @i
  */
-static __inline__ int atomic_sub_return(int i, atomic_t *v)
+static inline int atomic_sub_return(int i, atomic_t *v)
 {
-	return atomic_add_return(-i,v);
+	return atomic_add_return(-i, v);
 }
 
 #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
@@ -227,7 +220,7 @@
  * Atomically adds @a to @v, so long as @v was not already @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
@@ -244,17 +237,17 @@
 
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
-#define atomic_inc_return(v)  (atomic_add_return(1,v))
-#define atomic_dec_return(v)  (atomic_sub_return(1,v))
+#define atomic_inc_return(v)  (atomic_add_return(1, v))
+#define atomic_dec_return(v)  (atomic_sub_return(1, v))
 
 /* These are x86-specific, used by some header files */
-#define atomic_clear_mask(mask, addr) \
-__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
-: : "r" (~(mask)),"m" (*addr) : "memory")
+#define atomic_clear_mask(mask, addr)				\
+	asm volatile(LOCK_PREFIX "andl %0,%1"			\
+		     : : "r" (~(mask)), "m" (*(addr)) : "memory")
 
-#define atomic_set_mask(mask, addr) \
-__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
-: : "r" (mask),"m" (*(addr)) : "memory")
+#define atomic_set_mask(mask, addr)				\
+	asm volatile(LOCK_PREFIX "orl %0,%1"				\
+		     : : "r" (mask), "m" (*(addr)) : "memory")
 
 /* Atomic operations are already serializing on x86 */
 #define smp_mb__before_atomic_dec()	barrier()
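
For reference, atomic_add_unless() above is the classic compare-and-swap
retry loop. A userspace analogue built on GCC's __atomic builtins (an
assumption here; the kernel uses its own cmpxchg macro):

#include <stdio.h>

/* Retry CAS until it succeeds or the observed value equals @u. */
static int atomic_add_unless(int *v, int a, int u)
{
	int c = __atomic_load_n(v, __ATOMIC_RELAXED);

	for (;;) {
		if (c == u)
			return 0;	/* value was @u: do nothing */
		/* On failure, c is reloaded with the current value. */
		if (__atomic_compare_exchange_n(v, &c, c + a, 0,
						__ATOMIC_SEQ_CST,
						__ATOMIC_SEQ_CST))
			return 1;
	}
}

int main(void)
{
	int v = 0;

	printf("%d %d\n", atomic_add_unless(&v, 1, 0), v);	/* 0 0 */
	v = 5;
	printf("%d %d\n", atomic_add_unless(&v, 1, 0), v);	/* 1 6 */
	return 0;
}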
diff --git a/include/asm-x86/atomic_64.h b/include/asm-x86/atomic_64.h
index 2d20a7a..3e0cd7d 100644
--- a/include/asm-x86/atomic_64.h
+++ b/include/asm-x86/atomic_64.h
@@ -22,140 +22,135 @@
  * on us. We need to use _exactly_ the address the user gave us,
  * not some alias that contains the same information.
  */
-typedef struct { int counter; } atomic_t;
+typedef struct {
+	int counter;
+} atomic_t;
 
 #define ATOMIC_INIT(i)	{ (i) }
 
 /**
  * atomic_read - read atomic variable
  * @v: pointer of type atomic_t
- * 
+ *
  * Atomically reads the value of @v.
- */ 
+ */
 #define atomic_read(v)		((v)->counter)
 
 /**
  * atomic_set - set atomic variable
  * @v: pointer of type atomic_t
  * @i: required value
- * 
+ *
  * Atomically sets the value of @v to @i.
- */ 
-#define atomic_set(v,i)		(((v)->counter) = (i))
+ */
+#define atomic_set(v, i)		(((v)->counter) = (i))
 
 /**
  * atomic_add - add integer to atomic variable
  * @i: integer value to add
  * @v: pointer of type atomic_t
- * 
+ *
  * Atomically adds @i to @v.
  */
-static __inline__ void atomic_add(int i, atomic_t *v)
+static inline void atomic_add(int i, atomic_t *v)
 {
-	__asm__ __volatile__(
-		LOCK_PREFIX "addl %1,%0"
-		:"=m" (v->counter)
-		:"ir" (i), "m" (v->counter));
+	asm volatile(LOCK_PREFIX "addl %1,%0"
+		     : "=m" (v->counter)
+		     : "ir" (i), "m" (v->counter));
 }
 
 /**
  * atomic_sub - subtract the atomic variable
  * @i: integer value to subtract
  * @v: pointer of type atomic_t
- * 
+ *
  * Atomically subtracts @i from @v.
  */
-static __inline__ void atomic_sub(int i, atomic_t *v)
+static inline void atomic_sub(int i, atomic_t *v)
 {
-	__asm__ __volatile__(
-		LOCK_PREFIX "subl %1,%0"
-		:"=m" (v->counter)
-		:"ir" (i), "m" (v->counter));
+	asm volatile(LOCK_PREFIX "subl %1,%0"
+		     : "=m" (v->counter)
+		     : "ir" (i), "m" (v->counter));
 }
 
 /**
  * atomic_sub_and_test - subtract value from variable and test result
  * @i: integer value to subtract
  * @v: pointer of type atomic_t
- * 
+ *
  * Atomically subtracts @i from @v and returns
  * true if the result is zero, or false for all
  * other cases.
  */
-static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
+static inline int atomic_sub_and_test(int i, atomic_t *v)
 {
 	unsigned char c;
 
-	__asm__ __volatile__(
-		LOCK_PREFIX "subl %2,%0; sete %1"
-		:"=m" (v->counter), "=qm" (c)
-		:"ir" (i), "m" (v->counter) : "memory");
+	asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
+		     : "=m" (v->counter), "=qm" (c)
+		     : "ir" (i), "m" (v->counter) : "memory");
 	return c;
 }
 
 /**
  * atomic_inc - increment atomic variable
  * @v: pointer of type atomic_t
- * 
+ *
  * Atomically increments @v by 1.
- */ 
-static __inline__ void atomic_inc(atomic_t *v)
+ */
+static inline void atomic_inc(atomic_t *v)
 {
-	__asm__ __volatile__(
-		LOCK_PREFIX "incl %0"
-		:"=m" (v->counter)
-		:"m" (v->counter));
+	asm volatile(LOCK_PREFIX "incl %0"
+		     : "=m" (v->counter)
+		     : "m" (v->counter));
 }
 
 /**
  * atomic_dec - decrement atomic variable
  * @v: pointer of type atomic_t
- * 
+ *
  * Atomically decrements @v by 1.
- */ 
-static __inline__ void atomic_dec(atomic_t *v)
+ */
+static inline void atomic_dec(atomic_t *v)
 {
-	__asm__ __volatile__(
-		LOCK_PREFIX "decl %0"
-		:"=m" (v->counter)
-		:"m" (v->counter));
+	asm volatile(LOCK_PREFIX "decl %0"
+		     : "=m" (v->counter)
+		     : "m" (v->counter));
 }
 
 /**
  * atomic_dec_and_test - decrement and test
  * @v: pointer of type atomic_t
- * 
+ *
  * Atomically decrements @v by 1 and
  * returns true if the result is 0, or false for all other
  * cases.
- */ 
-static __inline__ int atomic_dec_and_test(atomic_t *v)
+ */
+static inline int atomic_dec_and_test(atomic_t *v)
 {
 	unsigned char c;
 
-	__asm__ __volatile__(
-		LOCK_PREFIX "decl %0; sete %1"
-		:"=m" (v->counter), "=qm" (c)
-		:"m" (v->counter) : "memory");
+	asm volatile(LOCK_PREFIX "decl %0; sete %1"
+		     : "=m" (v->counter), "=qm" (c)
+		     : "m" (v->counter) : "memory");
 	return c != 0;
 }
 
 /**
- * atomic_inc_and_test - increment and test 
+ * atomic_inc_and_test - increment and test
  * @v: pointer of type atomic_t
- * 
+ *
  * Atomically increments @v by 1
  * and returns true if the result is zero, or false for all
  * other cases.
- */ 
-static __inline__ int atomic_inc_and_test(atomic_t *v)
+ */
+static inline int atomic_inc_and_test(atomic_t *v)
 {
 	unsigned char c;
 
-	__asm__ __volatile__(
-		LOCK_PREFIX "incl %0; sete %1"
-		:"=m" (v->counter), "=qm" (c)
-		:"m" (v->counter) : "memory");
+	asm volatile(LOCK_PREFIX "incl %0; sete %1"
+		     : "=m" (v->counter), "=qm" (c)
+		     : "m" (v->counter) : "memory");
 	return c != 0;
 }
 
@@ -163,19 +158,18 @@
  * atomic_add_negative - add and test if negative
  * @i: integer value to add
  * @v: pointer of type atomic_t
- * 
+ *
  * Atomically adds @i to @v and returns true
  * if the result is negative, or false when
  * result is greater than or equal to zero.
- */ 
-static __inline__ int atomic_add_negative(int i, atomic_t *v)
+ */
+static inline int atomic_add_negative(int i, atomic_t *v)
 {
 	unsigned char c;
 
-	__asm__ __volatile__(
-		LOCK_PREFIX "addl %2,%0; sets %1"
-		:"=m" (v->counter), "=qm" (c)
-		:"ir" (i), "m" (v->counter) : "memory");
+	asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
+		     : "=m" (v->counter), "=qm" (c)
+		     : "ir" (i), "m" (v->counter) : "memory");
 	return c;
 }
 
@@ -186,27 +180,28 @@
  *
  * Atomically adds @i to @v and returns @i + @v
  */
-static __inline__ int atomic_add_return(int i, atomic_t *v)
+static inline int atomic_add_return(int i, atomic_t *v)
 {
 	int __i = i;
-	__asm__ __volatile__(
-		LOCK_PREFIX "xaddl %0, %1"
-		:"+r" (i), "+m" (v->counter)
-		: : "memory");
+	asm volatile(LOCK_PREFIX "xaddl %0, %1"
+		     : "+r" (i), "+m" (v->counter)
+		     : : "memory");
 	return i + __i;
 }
 
-static __inline__ int atomic_sub_return(int i, atomic_t *v)
+static inline int atomic_sub_return(int i, atomic_t *v)
 {
-	return atomic_add_return(-i,v);
+	return atomic_add_return(-i, v);
 }
 
-#define atomic_inc_return(v)  (atomic_add_return(1,v))
-#define atomic_dec_return(v)  (atomic_sub_return(1,v))
+#define atomic_inc_return(v)  (atomic_add_return(1, v))
+#define atomic_dec_return(v)  (atomic_sub_return(1, v))
 
 /* An 64bit atomic type */
 
-typedef struct { long counter; } atomic64_t;
+typedef struct {
+	long counter;
+} atomic64_t;
 
 #define ATOMIC64_INIT(i)	{ (i) }
 
@@ -226,7 +221,7 @@
  *
  * Atomically sets the value of @v to @i.
  */
-#define atomic64_set(v,i)		(((v)->counter) = (i))
+#define atomic64_set(v, i)		(((v)->counter) = (i))
 
 /**
  * atomic64_add - add integer to atomic64 variable
@@ -235,12 +230,11 @@
  *
  * Atomically adds @i to @v.
  */
-static __inline__ void atomic64_add(long i, atomic64_t *v)
+static inline void atomic64_add(long i, atomic64_t *v)
 {
-	__asm__ __volatile__(
-		LOCK_PREFIX "addq %1,%0"
-		:"=m" (v->counter)
-		:"ir" (i), "m" (v->counter));
+	asm volatile(LOCK_PREFIX "addq %1,%0"
+		     : "=m" (v->counter)
+		     : "ir" (i), "m" (v->counter));
 }
 
 /**
@@ -250,12 +244,11 @@
  *
  * Atomically subtracts @i from @v.
  */
-static __inline__ void atomic64_sub(long i, atomic64_t *v)
+static inline void atomic64_sub(long i, atomic64_t *v)
 {
-	__asm__ __volatile__(
-		LOCK_PREFIX "subq %1,%0"
-		:"=m" (v->counter)
-		:"ir" (i), "m" (v->counter));
+	asm volatile(LOCK_PREFIX "subq %1,%0"
+		     : "=m" (v->counter)
+		     : "ir" (i), "m" (v->counter));
 }
 
 /**
@@ -267,14 +260,13 @@
  * true if the result is zero, or false for all
  * other cases.
  */
-static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v)
+static inline int atomic64_sub_and_test(long i, atomic64_t *v)
 {
 	unsigned char c;
 
-	__asm__ __volatile__(
-		LOCK_PREFIX "subq %2,%0; sete %1"
-		:"=m" (v->counter), "=qm" (c)
-		:"ir" (i), "m" (v->counter) : "memory");
+	asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
+		     : "=m" (v->counter), "=qm" (c)
+		     : "ir" (i), "m" (v->counter) : "memory");
 	return c;
 }
 
@@ -284,12 +276,11 @@
  *
  * Atomically increments @v by 1.
  */
-static __inline__ void atomic64_inc(atomic64_t *v)
+static inline void atomic64_inc(atomic64_t *v)
 {
-	__asm__ __volatile__(
-		LOCK_PREFIX "incq %0"
-		:"=m" (v->counter)
-		:"m" (v->counter));
+	asm volatile(LOCK_PREFIX "incq %0"
+		     : "=m" (v->counter)
+		     : "m" (v->counter));
 }
 
 /**
@@ -298,12 +289,11 @@
  *
  * Atomically decrements @v by 1.
  */
-static __inline__ void atomic64_dec(atomic64_t *v)
+static inline void atomic64_dec(atomic64_t *v)
 {
-	__asm__ __volatile__(
-		LOCK_PREFIX "decq %0"
-		:"=m" (v->counter)
-		:"m" (v->counter));
+	asm volatile(LOCK_PREFIX "decq %0"
+		     : "=m" (v->counter)
+		     : "m" (v->counter));
 }
 
 /**
@@ -314,14 +304,13 @@
  * returns true if the result is 0, or false for all other
  * cases.
  */
-static __inline__ int atomic64_dec_and_test(atomic64_t *v)
+static inline int atomic64_dec_and_test(atomic64_t *v)
 {
 	unsigned char c;
 
-	__asm__ __volatile__(
-		LOCK_PREFIX "decq %0; sete %1"
-		:"=m" (v->counter), "=qm" (c)
-		:"m" (v->counter) : "memory");
+	asm volatile(LOCK_PREFIX "decq %0; sete %1"
+		     : "=m" (v->counter), "=qm" (c)
+		     : "m" (v->counter) : "memory");
 	return c != 0;
 }
 
@@ -333,14 +322,13 @@
  * and returns true if the result is zero, or false for all
  * other cases.
  */
-static __inline__ int atomic64_inc_and_test(atomic64_t *v)
+static inline int atomic64_inc_and_test(atomic64_t *v)
 {
 	unsigned char c;
 
-	__asm__ __volatile__(
-		LOCK_PREFIX "incq %0; sete %1"
-		:"=m" (v->counter), "=qm" (c)
-		:"m" (v->counter) : "memory");
+	asm volatile(LOCK_PREFIX "incq %0; sete %1"
+		     : "=m" (v->counter), "=qm" (c)
+		     : "m" (v->counter) : "memory");
 	return c != 0;
 }
 
@@ -353,14 +341,13 @@
  * if the result is negative, or false when
  * result is greater than or equal to zero.
  */
-static __inline__ int atomic64_add_negative(long i, atomic64_t *v)
+static inline int atomic64_add_negative(long i, atomic64_t *v)
 {
 	unsigned char c;
 
-	__asm__ __volatile__(
-		LOCK_PREFIX "addq %2,%0; sets %1"
-		:"=m" (v->counter), "=qm" (c)
-		:"ir" (i), "m" (v->counter) : "memory");
+	asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
+		     : "=m" (v->counter), "=qm" (c)
+		     : "ir" (i), "m" (v->counter) : "memory");
 	return c;
 }
 
@@ -371,29 +358,28 @@
  *
  * Atomically adds @i to @v and returns @i + @v
  */
-static __inline__ long atomic64_add_return(long i, atomic64_t *v)
+static inline long atomic64_add_return(long i, atomic64_t *v)
 {
 	long __i = i;
-	__asm__ __volatile__(
-		LOCK_PREFIX "xaddq %0, %1;"
-		:"+r" (i), "+m" (v->counter)
-		: : "memory");
+	asm volatile(LOCK_PREFIX "xaddq %0, %1;"
+		     : "+r" (i), "+m" (v->counter)
+		     : : "memory");
 	return i + __i;
 }
 
-static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
+static inline long atomic64_sub_return(long i, atomic64_t *v)
 {
-	return atomic64_add_return(-i,v);
+	return atomic64_add_return(-i, v);
 }
 
-#define atomic64_inc_return(v)  (atomic64_add_return(1,v))
-#define atomic64_dec_return(v)  (atomic64_sub_return(1,v))
+#define atomic64_inc_return(v)  (atomic64_add_return(1, (v)))
+#define atomic64_dec_return(v)  (atomic64_sub_return(1, (v)))
 
-#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
+#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
 
-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
 
 /**
  * atomic_add_unless - add unless the number is a given value
@@ -404,7 +390,7 @@
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
@@ -430,7 +416,7 @@
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
 {
 	long c, old;
 	c = atomic64_read(v);
@@ -448,13 +434,14 @@
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
 /* These are x86-specific, used by some header files */
-#define atomic_clear_mask(mask, addr) \
-__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
-: : "r" (~(mask)),"m" (*addr) : "memory")
+#define atomic_clear_mask(mask, addr)					\
+	asm volatile(LOCK_PREFIX "andl %0,%1"				\
+		     : : "r" (~(mask)), "m" (*(addr)) : "memory")
 
-#define atomic_set_mask(mask, addr) \
-__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
-: : "r" ((unsigned)mask),"m" (*(addr)) : "memory")
+#define atomic_set_mask(mask, addr)					\
+	asm volatile(LOCK_PREFIX "orl %0,%1"				\
+		     : : "r" ((unsigned)(mask)), "m" (*(addr))		\
+		     : "memory")
 
 /* Atomic operations are already serializing on x86 */
 #define smp_mb__before_atomic_dec()	barrier()
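
atomic64_add_return() above relies on xadd leaving the *old* value in the
source operand, which is why the header adds the increment back ("i + __i").
A portable sketch of the same arithmetic using __atomic_fetch_add (assumed
available; not the kernel's asm):

#include <stdio.h>

static long atomic64_add_return(long i, long *v)
{
	/* fetch-and-add returns the old value; add @i back for the new one */
	long old = __atomic_fetch_add(v, i, __ATOMIC_SEQ_CST);

	return old + i;
}

int main(void)
{
	long v = 40;

	printf("%ld\n", atomic64_add_return(2, &v));	/* 42 */
	printf("%ld\n", v);				/* 42 */
	return 0;
}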
diff --git a/include/asm-x86/mach-default/bios_ebda.h b/include/asm-x86/bios_ebda.h
similarity index 100%
rename from include/asm-x86/mach-default/bios_ebda.h
rename to include/asm-x86/bios_ebda.h
diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h
index 1a23ce1..1ae7b27 100644
--- a/include/asm-x86/bitops.h
+++ b/include/asm-x86/bitops.h
@@ -23,10 +23,13 @@
 #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
 /* Technically wrong, but this avoids compilation errors on some gcc
    versions. */
-#define ADDR "=m" (*(volatile long *) addr)
+#define ADDR "=m" (*(volatile long *)addr)
+#define BIT_ADDR "=m" (((volatile int *)addr)[nr >> 5])
 #else
 #define ADDR "+m" (*(volatile long *) addr)
+#define BIT_ADDR "+m" (((volatile int *)addr)[nr >> 5])
 #endif
+#define BASE_ADDR "m" (*(volatile int *)addr)
 
 /**
  * set_bit - Atomically set a bit in memory
@@ -45,9 +48,7 @@
  */
 static inline void set_bit(int nr, volatile void *addr)
 {
-	asm volatile(LOCK_PREFIX "bts %1,%0"
-		     : ADDR
-		     : "Ir" (nr) : "memory");
+	asm volatile(LOCK_PREFIX "bts %1,%0" : ADDR : "Ir" (nr) : "memory");
 }
 
 /**
@@ -79,9 +80,7 @@
  */
 static inline void clear_bit(int nr, volatile void *addr)
 {
-	asm volatile(LOCK_PREFIX "btr %1,%0"
-		     : ADDR
-		     : "Ir" (nr));
+	asm volatile(LOCK_PREFIX "btr %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR);
 }
 
 /*
@@ -100,7 +99,7 @@
 
 static inline void __clear_bit(int nr, volatile void *addr)
 {
-	asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
+	asm volatile("btr %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR);
 }
 
 /*
@@ -135,7 +134,7 @@
  */
 static inline void __change_bit(int nr, volatile void *addr)
 {
-	asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
+	asm volatile("btc %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR);
 }
 
 /**
@@ -149,8 +148,7 @@
  */
 static inline void change_bit(int nr, volatile void *addr)
 {
-	asm volatile(LOCK_PREFIX "btc %1,%0"
-		     : ADDR : "Ir" (nr));
+	asm volatile(LOCK_PREFIX "btc %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR);
 }
 
 /**
@@ -166,9 +164,7 @@
 	int oldbit;
 
 	asm volatile(LOCK_PREFIX "bts %2,%1\n\t"
-		     "sbb %0,%0"
-		     : "=r" (oldbit), ADDR
-		     : "Ir" (nr) : "memory");
+		     "sbb %0,%0" : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
 
 	return oldbit;
 }
@@ -198,10 +194,9 @@
 {
 	int oldbit;
 
-	asm("bts %2,%1\n\t"
-	    "sbb %0,%0"
-	    : "=r" (oldbit), ADDR
-	    : "Ir" (nr));
+	asm volatile("bts %2,%3\n\t"
+		     "sbb %0,%0"
+		     : "=r" (oldbit), BIT_ADDR : "Ir" (nr), BASE_ADDR);
 	return oldbit;
 }
 
@@ -219,8 +214,7 @@
 
 	asm volatile(LOCK_PREFIX "btr %2,%1\n\t"
 		     "sbb %0,%0"
-		     : "=r" (oldbit), ADDR
-		     : "Ir" (nr) : "memory");
+		     : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
 
 	return oldbit;
 }
@@ -238,10 +232,9 @@
 {
 	int oldbit;
 
-	asm volatile("btr %2,%1\n\t"
+	asm volatile("btr %2,%3\n\t"
 		     "sbb %0,%0"
-		     : "=r" (oldbit), ADDR
-		     : "Ir" (nr));
+		     : "=r" (oldbit), BIT_ADDR : "Ir" (nr), BASE_ADDR);
 	return oldbit;
 }
 
@@ -250,10 +243,9 @@
 {
 	int oldbit;
 
-	asm volatile("btc %2,%1\n\t"
+	asm volatile("btc %2,%3\n\t"
 		     "sbb %0,%0"
-		     : "=r" (oldbit), ADDR
-		     : "Ir" (nr) : "memory");
+		     : "=r" (oldbit), BIT_ADDR : "Ir" (nr), BASE_ADDR);
 
 	return oldbit;
 }
@@ -272,8 +264,7 @@
 
 	asm volatile(LOCK_PREFIX "btc %2,%1\n\t"
 		     "sbb %0,%0"
-		     : "=r" (oldbit), ADDR
-		     : "Ir" (nr) : "memory");
+		     : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
 
 	return oldbit;
 }
@@ -288,10 +279,11 @@
 {
 	int oldbit;
 
-	asm volatile("bt %2,%1\n\t"
+	asm volatile("bt %2,%3\n\t"
 		     "sbb %0,%0"
 		     : "=r" (oldbit)
-		     : "m" (*(unsigned long *)addr), "Ir" (nr));
+		     : "m" (((volatile const int *)addr)[nr >> 5]),
+		       "Ir" (nr), BASE_ADDR);
 
 	return oldbit;
 }
@@ -310,6 +302,8 @@
 	 constant_test_bit((nr),(addr)) :	\
 	 variable_test_bit((nr),(addr)))
 
+#undef BASE_ADDR
+#undef BIT_ADDR
 #undef ADDR
 
 #ifdef CONFIG_X86_32
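
The bt/bts/btr family above treats @nr as an index into an array of words,
not just the first long - that is what the new BIT_ADDR "[nr >> 5]" indexing
makes explicit. A non-atomic sketch of that word/mask selection (like
__test_and_set_bit; the locked variant wraps the same math in LOCK bts):

#include <stdio.h>

#define BITS_PER_LONG	(8 * (int)sizeof(unsigned long))

static int test_and_set_bit(int nr, unsigned long *addr)
{
	unsigned long mask = 1UL << (nr % BITS_PER_LONG);
	unsigned long *word = addr + nr / BITS_PER_LONG;
	int oldbit = (*word & mask) != 0;

	*word |= mask;
	return oldbit;
}

int main(void)
{
	unsigned long map[2] = { 0, 0 };

	printf("%d\n", test_and_set_bit(70, map));	/* 0: was clear */
	printf("%d\n", test_and_set_bit(70, map));	/* 1: already set */
	return 0;
}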
diff --git a/include/asm-x86/bitops_32.h b/include/asm-x86/bitops_32.h
index e4d75fc..2513a81 100644
--- a/include/asm-x86/bitops_32.h
+++ b/include/asm-x86/bitops_32.h
@@ -20,20 +20,22 @@
 
 	if (!size)
 		return 0;
-	/* This looks at memory. Mark it volatile to tell gcc not to move it around */
-	__asm__ __volatile__(
-		"movl $-1,%%eax\n\t"
-		"xorl %%edx,%%edx\n\t"
-		"repe; scasl\n\t"
-		"je 1f\n\t"
-		"xorl -4(%%edi),%%eax\n\t"
-		"subl $4,%%edi\n\t"
-		"bsfl %%eax,%%edx\n"
-		"1:\tsubl %%ebx,%%edi\n\t"
-		"shll $3,%%edi\n\t"
-		"addl %%edi,%%edx"
-		:"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
-		:"1" ((size + 31) >> 5), "2" (addr), "b" (addr) : "memory");
+	/* This looks at memory.
+	 * Mark it volatile to tell gcc not to move it around
+	 */
+	asm volatile("movl $-1,%%eax\n\t"
+		     "xorl %%edx,%%edx\n\t"
+		     "repe; scasl\n\t"
+		     "je 1f\n\t"
+		     "xorl -4(%%edi),%%eax\n\t"
+		     "subl $4,%%edi\n\t"
+		     "bsfl %%eax,%%edx\n"
+		     "1:\tsubl %%ebx,%%edi\n\t"
+		     "shll $3,%%edi\n\t"
+		     "addl %%edi,%%edx"
+		     : "=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
+		     : "1" ((size + 31) >> 5), "2" (addr),
+		       "b" (addr) : "memory");
 	return res;
 }
 
@@ -75,7 +77,7 @@
 		unsigned long val = *addr++;
 		if (val)
 			return __ffs(val) + x;
-		x += (sizeof(*addr)<<3);
+		x += sizeof(*addr) << 3;
 	}
 	return x;
 }
@@ -152,10 +154,10 @@
 
 #include <asm-generic/bitops/ext2-non-atomic.h>
 
-#define ext2_set_bit_atomic(lock, nr, addr) \
-		test_and_set_bit((nr), (unsigned long *)addr)
-#define ext2_clear_bit_atomic(lock, nr, addr) \
-		test_and_clear_bit((nr), (unsigned long *)addr)
+#define ext2_set_bit_atomic(lock, nr, addr)			\
+	test_and_set_bit((nr), (unsigned long *)(addr))
+#define ext2_clear_bit_atomic(lock, nr, addr)			\
+	test_and_clear_bit((nr), (unsigned long *)(addr))
 
 #include <asm-generic/bitops/minix.h>
 
diff --git a/include/asm-x86/bitops_64.h b/include/asm-x86/bitops_64.h
index aaf1519..365f820 100644
--- a/include/asm-x86/bitops_64.h
+++ b/include/asm-x86/bitops_64.h
@@ -17,35 +17,35 @@
 	return val;
 }
 
-#define find_first_bit(addr,size) \
-((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
-  (__scanbit(*(unsigned long *)addr,(size))) : \
-  find_first_bit(addr,size)))
-
 #define find_next_bit(addr,size,off) \
 ((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? 	  \
   ((off) + (__scanbit((*(unsigned long *)addr) >> (off),(size)-(off)))) : \
 	find_next_bit(addr,size,off)))
 
-#define find_first_zero_bit(addr,size) \
-((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
-  (__scanbit(~*(unsigned long *)addr,(size))) : \
-  	find_first_zero_bit(addr,size)))
-	
 #define find_next_zero_bit(addr,size,off) \
 ((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? 	  \
   ((off)+(__scanbit(~(((*(unsigned long *)addr)) >> (off)),(size)-(off)))) : \
 	find_next_zero_bit(addr,size,off)))
 
-static inline void set_bit_string(unsigned long *bitmap, unsigned long i, 
-				  int len) 
-{ 
-	unsigned long end = i + len; 
+#define find_first_bit(addr, size)					\
+	((__builtin_constant_p((size)) && (size) <= BITS_PER_LONG	\
+	  ? (__scanbit(*(unsigned long *)(addr), (size)))		\
+	  : find_first_bit((addr), (size))))
+
+#define find_first_zero_bit(addr, size)					\
+	((__builtin_constant_p((size)) && (size) <= BITS_PER_LONG	\
+	  ? (__scanbit(~*(unsigned long *)(addr), (size)))		\
+	  : find_first_zero_bit((addr), (size))))
+
+static inline void set_bit_string(unsigned long *bitmap, unsigned long i,
+				  int len)
+{
+	unsigned long end = i + len;
 	while (i < end) {
-		__set_bit(i, bitmap); 
+		__set_bit(i, bitmap);
 		i++;
 	}
-} 
+}
 
 /**
  * ffz - find first zero in word.
@@ -150,10 +150,10 @@
 
 #include <asm-generic/bitops/ext2-non-atomic.h>
 
-#define ext2_set_bit_atomic(lock,nr,addr) \
-	        test_and_set_bit((nr),(unsigned long*)addr)
-#define ext2_clear_bit_atomic(lock,nr,addr) \
-	        test_and_clear_bit((nr),(unsigned long*)addr)
+#define ext2_set_bit_atomic(lock, nr, addr)			\
+	test_and_set_bit((nr), (unsigned long *)(addr))
+#define ext2_clear_bit_atomic(lock, nr, addr)			\
+	test_and_clear_bit((nr), (unsigned long *)(addr))
 
 #include <asm-generic/bitops/minix.h>
 
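When the bitmap size is a compile-time constant no larger than
BITS_PER_LONG, the find_first_zero_bit() macro above collapses to a single
scan of the inverted word. A portable sketch of that fast path, with
__builtin_ctzl standing in for the bsf instruction __scanbit() uses:

#include <stdio.h>

/* First zero bit in a single-word bitmap, or @max if none. */
static unsigned long scan_first_zero(unsigned long word, unsigned long max)
{
	unsigned long inv = ~word;
	unsigned long bit;

	if (!inv)				/* every bit set */
		return max;
	bit = __builtin_ctzl(inv);
	return bit < max ? bit : max;
}

int main(void)
{
	printf("%lu\n", scan_first_zero(0x0fUL, 32));	/* 4 */
	printf("%lu\n", scan_first_zero(~0UL, 32));	/* 32: none free */
	return 0;
}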
diff --git a/include/asm-x86/bug.h b/include/asm-x86/bug.h
index 8d477a2..b69aa64 100644
--- a/include/asm-x86/bug.h
+++ b/include/asm-x86/bug.h
@@ -12,25 +12,25 @@
 # define __BUG_C0	"2:\t.quad 1b, %c0\n"
 #endif
 
-#define BUG()								\
-	do {								\
-		asm volatile("1:\tud2\n"				\
-			     ".pushsection __bug_table,\"a\"\n"		\
-			     __BUG_C0					\
-			     "\t.word %c1, 0\n"				\
-			     "\t.org 2b+%c2\n"				\
-			     ".popsection"				\
-			     : : "i" (__FILE__), "i" (__LINE__),	\
-			     "i" (sizeof(struct bug_entry)));		\
-		for(;;) ;						\
-	} while(0)
+#define BUG()							\
+do {								\
+	asm volatile("1:\tud2\n"				\
+		     ".pushsection __bug_table,\"a\"\n"		\
+		     __BUG_C0					\
+		     "\t.word %c1, 0\n"				\
+		     "\t.org 2b+%c2\n"				\
+		     ".popsection"				\
+		     : : "i" (__FILE__), "i" (__LINE__),	\
+		     "i" (sizeof(struct bug_entry)));		\
+	for (;;) ;						\
+} while (0)
 
 #else
-#define BUG()								\
-	do {								\
-		asm volatile("ud2");					\
-		for(;;) ;						\
-	} while(0)
+#define BUG()							\
+do {								\
+	asm volatile("ud2");					\
+	for (;;) ;						\
+} while (0)
 #endif
 
 #endif /* !CONFIG_BUG */
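
The BUG() rewrite keeps the do { ... } while (0) wrapper. A small
standalone example of why multi-statement macros need it (the LOG_TWICE*
names are hypothetical, for illustration only):

#include <stdio.h>

/* Without the wrapper, only the first statement binds to an if. */
#define LOG_TWICE_BAD(msg)	puts(msg); puts(msg)
#define LOG_TWICE(msg)		do { puts(msg); puts(msg); } while (0)

int main(void)
{
	int err = 0;

	if (err)
		LOG_TWICE("error");	/* expands to one statement: ok */
	else
		puts("ok");

	/* LOG_TWICE_BAD() here would attach only its first puts() to the
	 * if, and the stray second puts() would orphan the else. */
	return 0;
}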
diff --git a/include/asm-x86/byteorder.h b/include/asm-x86/byteorder.h
index fe2f2e5..e02ae2d 100644
--- a/include/asm-x86/byteorder.h
+++ b/include/asm-x86/byteorder.h
@@ -8,50 +8,59 @@
 
 #ifdef __i386__
 
-static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
+static inline __attribute_const__ __u32 ___arch__swab32(__u32 x)
 {
 #ifdef CONFIG_X86_BSWAP
-	__asm__("bswap %0" : "=r" (x) : "0" (x));
+	asm("bswap %0" : "=r" (x) : "0" (x));
 #else
-	__asm__("xchgb %b0,%h0\n\t"	/* swap lower bytes	*/
-		"rorl $16,%0\n\t"	/* swap words		*/
-		"xchgb %b0,%h0"		/* swap higher bytes	*/
-		:"=q" (x)
-		: "0" (x));
+	asm("xchgb %b0,%h0\n\t"	/* swap lower bytes	*/
+	    "rorl $16,%0\n\t"	/* swap words		*/
+	    "xchgb %b0,%h0"	/* swap higher bytes	*/
+	    : "=q" (x)
+	    : "0" (x));
 #endif
 	return x;
 }
 
-static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 val)
+static inline __attribute_const__ __u64 ___arch__swab64(__u64 val)
 {
 	union {
-		struct { __u32 a,b; } s;
+		struct {
+			__u32 a;
+			__u32 b;
+		} s;
 		__u64 u;
 	} v;
 	v.u = val;
 #ifdef CONFIG_X86_BSWAP
-	__asm__("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
+	asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
 	    : "=r" (v.s.a), "=r" (v.s.b)
 	    : "0" (v.s.a), "1" (v.s.b));
 #else
 	v.s.a = ___arch__swab32(v.s.a);
 	v.s.b = ___arch__swab32(v.s.b);
-	__asm__("xchgl %0,%1" : "=r" (v.s.a), "=r" (v.s.b) : "0" (v.s.a), "1" (v.s.b));
+	asm("xchgl %0,%1"
+	    : "=r" (v.s.a), "=r" (v.s.b)
+	    : "0" (v.s.a), "1" (v.s.b));
 #endif
 	return v.u;
 }
 
 #else /* __i386__ */
 
-static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x)
+static inline __attribute_const__ __u64 ___arch__swab64(__u64 x)
 {
-	__asm__("bswapq %0" : "=r" (x) : "0" (x));
+	asm("bswapq %0"
+	    : "=r" (x)
+	    : "0" (x));
 	return x;
 }
 
-static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
+static inline __attribute_const__ __u32 ___arch__swab32(__u32 x)
 {
-	__asm__("bswapl %0" : "=r" (x) : "0" (x));
+	asm("bswapl %0"
+	    : "=r" (x)
+	    : "0" (x));
 	return x;
 }
 
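___arch__swab32() is a byte reversal whether done by bswap or by the
xchgb/rorl fallback for pre-bswap CPUs; a portable shift-and-mask
equivalent of what either path computes:

#include <stdio.h>
#include <stdint.h>

static uint32_t swab32(uint32_t x)
{
	return (x >> 24) |
	       ((x >> 8) & 0x0000ff00u) |
	       ((x << 8) & 0x00ff0000u) |
	       (x << 24);
}

int main(void)
{
	printf("%#x\n", swab32(0x12345678u));	/* 0x78563412 */
	return 0;
}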
diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h
index 5396c21..f4c0ab5 100644
--- a/include/asm-x86/cacheflush.h
+++ b/include/asm-x86/cacheflush.h
@@ -14,18 +14,77 @@
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
 #define flush_icache_range(start, end)		do { } while (0)
-#define flush_icache_page(vma,pg)		do { } while (0)
-#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
+#define flush_icache_page(vma, pg)		do { } while (0)
+#define flush_icache_user_range(vma, pg, adr, len)	do { } while (0)
 #define flush_cache_vmap(start, end)		do { } while (0)
 #define flush_cache_vunmap(start, end)		do { } while (0)
 
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-	memcpy(dst, src, len)
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
-	memcpy(dst, src, len)
+#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
+	memcpy((dst), (src), (len))
+#define copy_from_user_page(vma, page, vaddr, dst, src, len)	\
+	memcpy((dst), (src), (len))
 
-int __deprecated_for_modules change_page_attr(struct page *page, int numpages,
-								pgprot_t prot);
+
+/*
+ * The set_memory_* API can be used to change various attributes of a virtual
+ * address range. The attributes include:
+ * Cacheability  : UnCached, WriteCombining, WriteBack
+ * Executability : eXecutable, NoteXecutable
+ * Read/Write    : ReadOnly, ReadWrite
+ * Presence      : NotPresent
+ *
+ * Within a category, the attributes are mutually exclusive.
+ *
+ * The implementation of this API will take care of various aspects that
+ * are associated with changing such attributes, such as:
+ * - Flushing TLBs
+ * - Flushing CPU caches
+ * - Making sure aliases of the memory behind the mapping don't violate
+ *   coherency rules as defined by the CPU in the system.
+ *
+ * What this API does not do:
+ * - Provide exclusion between various callers - including callers that
+ *   operate on other mappings of the same physical page
+ * - Restore default attributes when a page is freed
+ * - Guarantee that mappings other than the requested one are
+ *   in any state, other than that these do not violate rules for
+ *   the CPU you have. Do not depend on any effects on other mappings;
+ *   CPUs other than the one you have may have more relaxed rules.
+ * The caller is required to take care of these.
+ */
+
+int _set_memory_uc(unsigned long addr, int numpages);
+int _set_memory_wc(unsigned long addr, int numpages);
+int _set_memory_wb(unsigned long addr, int numpages);
+int set_memory_uc(unsigned long addr, int numpages);
+int set_memory_wc(unsigned long addr, int numpages);
+int set_memory_wb(unsigned long addr, int numpages);
+int set_memory_x(unsigned long addr, int numpages);
+int set_memory_nx(unsigned long addr, int numpages);
+int set_memory_ro(unsigned long addr, int numpages);
+int set_memory_rw(unsigned long addr, int numpages);
+int set_memory_np(unsigned long addr, int numpages);
+int set_memory_4k(unsigned long addr, int numpages);
+
+/*
+ * For legacy compatibility with the old APIs, a few functions
+ * are provided that work on a "struct page".
+ * These functions operate ONLY on the 1:1 kernel mapping of the
+ * memory that the struct page represents, and internally just
+ * call the set_memory_* function. See the description of the
+ * set_memory_* function for more details on conventions.
+ *
+ * These APIs should be considered *deprecated* and are likely going to
+ * be removed in the future.
+ * The reason for this is the implicit operation on the 1:1 mapping only,
+ * making this not a generally useful API.
+ *
+ * Specifically, many users of the old APIs had a virtual address and
+ * called virt_to_page() or vmalloc_to_page() on that address to
+ * get the struct page * that the old API required.
+ * To convert these cases, use set_memory_*() on the original
+ * virtual address, do not use these functions.
+ */
 
 int set_pages_uc(struct page *page, int numpages);
 int set_pages_wb(struct page *page, int numpages);
@@ -34,13 +93,6 @@
 int set_pages_ro(struct page *page, int numpages);
 int set_pages_rw(struct page *page, int numpages);
 
-int set_memory_uc(unsigned long addr, int numpages);
-int set_memory_wb(unsigned long addr, int numpages);
-int set_memory_x(unsigned long addr, int numpages);
-int set_memory_nx(unsigned long addr, int numpages);
-int set_memory_ro(unsigned long addr, int numpages);
-int set_memory_rw(unsigned long addr, int numpages);
-int set_memory_np(unsigned long addr, int numpages);
 
 void clflush_cache_range(void *addr, unsigned int size);
 
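clflush_cache_range(), declared above, walks a range in cache-line strides.
A userspace sketch using the _mm_clflush intrinsic, assuming an x86 build
and a 64-byte line (the kernel reads the real line size from CPUID):

#include <stdio.h>
#include <stdint.h>
#include <immintrin.h>

/* Flush each cache line covering [addr, addr + size). */
static void clflush_range(void *addr, unsigned int size)
{
	const uintptr_t line = 64;	/* assumed line size */
	char *p = (char *)((uintptr_t)addr & ~(line - 1));
	char *end = (char *)addr + size;

	for (; p < end; p += line)
		_mm_clflush(p);
	_mm_sfence();			/* order the flushes */
}

int main(void)
{
	static char buf[256];

	clflush_range(buf, sizeof(buf));
	puts("flushed");
	return 0;
}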
diff --git a/include/asm-x86/checksum_32.h b/include/asm-x86/checksum_32.h
index 75194ab..52bbb0d 100644
--- a/include/asm-x86/checksum_32.h
+++ b/include/asm-x86/checksum_32.h
@@ -28,7 +28,8 @@
  */
 
 asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
-						  int len, __wsum sum, int *src_err_ptr, int *dst_err_ptr);
+					    int len, __wsum sum,
+					    int *src_err_ptr, int *dst_err_ptr);
 
 /*
  *	Note: when you get a NULL pointer exception here this means someone
@@ -37,20 +38,20 @@
  *	If you use these functions directly please don't forget the
  *	access_ok().
  */
-static __inline__
-__wsum csum_partial_copy_nocheck (const void *src, void *dst,
-					int len, __wsum sum)
+static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst,
+					       int len, __wsum sum)
 {
-	return csum_partial_copy_generic ( src, dst, len, sum, NULL, NULL);
+	return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
 }
 
-static __inline__
-__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
-						int len, __wsum sum, int *err_ptr)
+static inline __wsum csum_partial_copy_from_user(const void __user *src,
+						 void *dst,
+						 int len, __wsum sum,
+						 int *err_ptr)
 {
 	might_sleep();
 	return csum_partial_copy_generic((__force void *)src, dst,
-					len, sum, err_ptr, NULL);
+					 len, sum, err_ptr, NULL);
 }
 
 /*
@@ -64,30 +65,29 @@
 {
 	unsigned int sum;
 
-	__asm__ __volatile__(
-	    "movl (%1), %0	;\n"
-	    "subl $4, %2	;\n"
-	    "jbe 2f		;\n"
-	    "addl 4(%1), %0	;\n"
-	    "adcl 8(%1), %0	;\n"
-	    "adcl 12(%1), %0	;\n"
-"1:	    adcl 16(%1), %0	;\n"
-	    "lea 4(%1), %1	;\n"
-	    "decl %2		;\n"
-	    "jne 1b		;\n"
-	    "adcl $0, %0	;\n"
-	    "movl %0, %2	;\n"
-	    "shrl $16, %0	;\n"
-	    "addw %w2, %w0	;\n"
-	    "adcl $0, %0	;\n"
-	    "notl %0		;\n"
-"2:				;\n"
+	asm volatile("movl (%1), %0	;\n"
+		     "subl $4, %2	;\n"
+		     "jbe 2f		;\n"
+		     "addl 4(%1), %0	;\n"
+		     "adcl 8(%1), %0	;\n"
+		     "adcl 12(%1), %0;\n"
+		     "1:	adcl 16(%1), %0	;\n"
+		     "lea 4(%1), %1	;\n"
+		     "decl %2	;\n"
+		     "jne 1b		;\n"
+		     "adcl $0, %0	;\n"
+		     "movl %0, %2	;\n"
+		     "shrl $16, %0	;\n"
+		     "addw %w2, %w0	;\n"
+		     "adcl $0, %0	;\n"
+		     "notl %0	;\n"
+		     "2:		;\n"
 	/* Since the input registers which are loaded with iph and ihl
 	   are modified, we must also specify them as outputs, or gcc
 	   will assume they contain their original values. */
-	: "=r" (sum), "=r" (iph), "=r" (ihl)
-	: "1" (iph), "2" (ihl)
-	: "memory");
+		     : "=r" (sum), "=r" (iph), "=r" (ihl)
+		     : "1" (iph), "2" (ihl)
+		     : "memory");
 	return (__force __sum16)sum;
 }
 
@@ -97,29 +97,27 @@
 
 static inline __sum16 csum_fold(__wsum sum)
 {
-	__asm__(
-		"addl %1, %0		;\n"
-		"adcl $0xffff, %0	;\n"
-		: "=r" (sum)
-		: "r" ((__force u32)sum << 16),
-		  "0" ((__force u32)sum & 0xffff0000)
-	);
+	asm("addl %1, %0		;\n"
+	    "adcl $0xffff, %0	;\n"
+	    : "=r" (sum)
+	    : "r" ((__force u32)sum << 16),
+	      "0" ((__force u32)sum & 0xffff0000));
 	return (__force __sum16)(~(__force u32)sum >> 16);
 }
 
 static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
-					   unsigned short len,
-					   unsigned short proto,
-					   __wsum sum)
+					unsigned short len,
+					unsigned short proto,
+					__wsum sum)
 {
-    __asm__(
-	"addl %1, %0	;\n"
-	"adcl %2, %0	;\n"
-	"adcl %3, %0	;\n"
-	"adcl $0, %0	;\n"
-	: "=r" (sum)
-	: "g" (daddr), "g"(saddr), "g"((len + proto) << 8), "0"(sum));
-    return sum;
+	asm("addl %1, %0	;\n"
+	    "adcl %2, %0	;\n"
+	    "adcl %3, %0	;\n"
+	    "adcl $0, %0	;\n"
+	    : "=r" (sum)
+	    : "g" (daddr), "g"(saddr),
+	      "g" ((len + proto) << 8), "0" (sum));
+	return sum;
 }
 
 /*
@@ -127,11 +125,11 @@
  * returns a 16-bit checksum, already complemented
  */
 static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
-						   unsigned short len,
-						   unsigned short proto,
-						   __wsum sum)
+					unsigned short len,
+					unsigned short proto,
+					__wsum sum)
 {
-	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
+	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
 }
 
 /*
@@ -141,30 +139,29 @@
 
 static inline __sum16 ip_compute_csum(const void *buff, int len)
 {
-    return csum_fold (csum_partial(buff, len, 0));
+    return csum_fold(csum_partial(buff, len, 0));
 }
 
 #define _HAVE_ARCH_IPV6_CSUM
-static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
-					  const struct in6_addr *daddr,
-					  __u32 len, unsigned short proto,
-					  __wsum sum)
+static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+				      const struct in6_addr *daddr,
+				      __u32 len, unsigned short proto,
+				      __wsum sum)
 {
-	__asm__(
-		"addl 0(%1), %0		;\n"
-		"adcl 4(%1), %0		;\n"
-		"adcl 8(%1), %0		;\n"
-		"adcl 12(%1), %0	;\n"
-		"adcl 0(%2), %0		;\n"
-		"adcl 4(%2), %0		;\n"
-		"adcl 8(%2), %0		;\n"
-		"adcl 12(%2), %0	;\n"
-		"adcl %3, %0		;\n"
-		"adcl %4, %0		;\n"
-		"adcl $0, %0		;\n"
-		: "=&r" (sum)
-		: "r" (saddr), "r" (daddr),
-		  "r"(htonl(len)), "r"(htonl(proto)), "0"(sum));
+	asm("addl 0(%1), %0	;\n"
+	    "adcl 4(%1), %0	;\n"
+	    "adcl 8(%1), %0	;\n"
+	    "adcl 12(%1), %0	;\n"
+	    "adcl 0(%2), %0	;\n"
+	    "adcl 4(%2), %0	;\n"
+	    "adcl 8(%2), %0	;\n"
+	    "adcl 12(%2), %0	;\n"
+	    "adcl %3, %0	;\n"
+	    "adcl %4, %0	;\n"
+	    "adcl $0, %0	;\n"
+	    : "=&r" (sum)
+	    : "r" (saddr), "r" (daddr),
+	      "r" (htonl(len)), "r" (htonl(proto)), "0" (sum));
 
 	return csum_fold(sum);
 }
@@ -173,14 +170,15 @@
  *	Copy and checksum to user
  */
 #define HAVE_CSUM_COPY_USER
-static __inline__ __wsum csum_and_copy_to_user(const void *src,
-						     void __user *dst,
-						     int len, __wsum sum,
-						     int *err_ptr)
+static inline __wsum csum_and_copy_to_user(const void *src,
+					   void __user *dst,
+					   int len, __wsum sum,
+					   int *err_ptr)
 {
 	might_sleep();
 	if (access_ok(VERIFY_WRITE, dst, len))
-		return csum_partial_copy_generic(src, (__force void *)dst, len, sum, NULL, err_ptr);
+		return csum_partial_copy_generic(src, (__force void *)dst,
+						 len, sum, NULL, err_ptr);
 
 	if (len)
 		*err_ptr = -EFAULT;
diff --git a/include/asm-x86/checksum_64.h b/include/asm-x86/checksum_64.h
index e5f7999..8bd861c 100644
--- a/include/asm-x86/checksum_64.h
+++ b/include/asm-x86/checksum_64.h
@@ -1,33 +1,31 @@
 #ifndef _X86_64_CHECKSUM_H
 #define _X86_64_CHECKSUM_H
 
-/* 
- * Checksums for x86-64 
- * Copyright 2002 by Andi Kleen, SuSE Labs 
+/*
+ * Checksums for x86-64
+ * Copyright 2002 by Andi Kleen, SuSE Labs
  * with some code from asm-x86/checksum.h
- */ 
+ */
 
 #include <linux/compiler.h>
 #include <asm/uaccess.h>
 #include <asm/byteorder.h>
 
-/** 
+/**
  * csum_fold - Fold and invert a 32bit checksum.
  * sum: 32bit unfolded sum
- * 
+ *
  * Fold a 32bit running checksum to 16bit and invert it. This is usually
  * the last step before putting a checksum into a packet.
  * Make sure not to mix with 64bit checksums.
  */
 static inline __sum16 csum_fold(__wsum sum)
 {
-	__asm__(
-		"  addl %1,%0\n"
-		"  adcl $0xffff,%0"
-		: "=r" (sum)
-		: "r" ((__force u32)sum << 16),
-		  "0" ((__force u32)sum & 0xffff0000)
-	);
+	asm("  addl %1,%0\n"
+	    "  adcl $0xffff,%0"
+	    : "=r" (sum)
+	    : "r" ((__force u32)sum << 16),
+	      "0" ((__force u32)sum & 0xffff0000));
 	return (__force __sum16)(~(__force u32)sum >> 16);
 }
 
@@ -43,46 +41,46 @@
  * ip_fast_csum - Compute the IPv4 header checksum efficiently.
  * iph: ipv4 header
  * ihl: length of header / 4
- */ 
+ */
 static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 {
 	unsigned int sum;
 
-	asm(	"  movl (%1), %0\n"
-		"  subl $4, %2\n"
-		"  jbe 2f\n"
-		"  addl 4(%1), %0\n"
-		"  adcl 8(%1), %0\n"
-		"  adcl 12(%1), %0\n"
-		"1: adcl 16(%1), %0\n"
-		"  lea 4(%1), %1\n"
-		"  decl %2\n"
-		"  jne	1b\n"
-		"  adcl $0, %0\n"
-		"  movl %0, %2\n"
-		"  shrl $16, %0\n"
-		"  addw %w2, %w0\n"
-		"  adcl $0, %0\n"
-		"  notl %0\n"
-		"2:"
+	asm("  movl (%1), %0\n"
+	    "  subl $4, %2\n"
+	    "  jbe 2f\n"
+	    "  addl 4(%1), %0\n"
+	    "  adcl 8(%1), %0\n"
+	    "  adcl 12(%1), %0\n"
+	    "1: adcl 16(%1), %0\n"
+	    "  lea 4(%1), %1\n"
+	    "  decl %2\n"
+	    "  jne	1b\n"
+	    "  adcl $0, %0\n"
+	    "  movl %0, %2\n"
+	    "  shrl $16, %0\n"
+	    "  addw %w2, %w0\n"
+	    "  adcl $0, %0\n"
+	    "  notl %0\n"
+	    "2:"
 	/* Since the input registers which are loaded with iph and ihl
 	   are modified, we must also specify them as outputs, or gcc
 	   will assume they contain their original values. */
-	: "=r" (sum), "=r" (iph), "=r" (ihl)
-	: "1" (iph), "2" (ihl)
-	: "memory");
+	    : "=r" (sum), "=r" (iph), "=r" (ihl)
+	    : "1" (iph), "2" (ihl)
+	    : "memory");
 	return (__force __sum16)sum;
 }
 
-/** 
+/**
  * csum_tcpup_nofold - Compute an IPv4 pseudo header checksum.
  * @saddr: source address
  * @daddr: destination address
  * @len: length of packet
  * @proto: ip protocol of packet
- * @sum: initial sum to be added in (32bit unfolded) 
- * 
- * Returns the pseudo header checksum the input data. Result is 
+ * @sum: initial sum to be added in (32bit unfolded)
+ *
+ * Returns the pseudo header checksum of the input data. Result is
  * 32bit unfolded.
  */
 static inline __wsum
@@ -93,32 +91,32 @@
 	    "  adcl %2, %0\n"
 	    "  adcl %3, %0\n"
 	    "  adcl $0, %0\n"
-		: "=r" (sum)
+	    : "=r" (sum)
 	    : "g" (daddr), "g" (saddr),
 	      "g" ((len + proto)<<8), "0" (sum));
-    return sum;
+	return sum;
 }
 
 
-/** 
+/**
  * csum_tcpup_magic - Compute an IPv4 pseudo header checksum.
  * @saddr: source address
  * @daddr: destination address
  * @len: length of packet
  * @proto: ip protocol of packet
- * @sum: initial sum to be added in (32bit unfolded) 
- * 
+ * @sum: initial sum to be added in (32bit unfolded)
+ *
  * Returns the 16bit pseudo header checksum the input data already
  * complemented and ready to be filled in.
  */
-static inline __sum16
-csum_tcpudp_magic(__be32 saddr, __be32 daddr,
-		  unsigned short len, unsigned short proto, __wsum sum)
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
+					unsigned short len,
+					unsigned short proto, __wsum sum)
 {
-	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
+	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
 }
 
-/** 
+/**
  * csum_partial - Compute an internet checksum.
  * @buff: buffer to be checksummed
  * @len: length of buffer.
@@ -127,7 +125,7 @@
  * Returns the 32bit unfolded internet checksum of the buffer.
  * Before filling it in it needs to be csum_fold()'ed.
  * buff should be aligned to a 64bit boundary if possible.
- */ 
+ */
 extern __wsum csum_partial(const void *buff, int len, __wsum sum);
 
 #define  _HAVE_ARCH_COPY_AND_CSUM_FROM_USER 1
@@ -136,23 +134,22 @@
 
 /* Do not call this directly. Use the wrappers below */
 extern __wsum csum_partial_copy_generic(const void *src, const void *dst,
-					       int len,
-					       __wsum sum,
-					       int *src_err_ptr, int *dst_err_ptr);
+					int len, __wsum sum,
+					int *src_err_ptr, int *dst_err_ptr);
 
 
 extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
-				       int len, __wsum isum, int *errp);
+					  int len, __wsum isum, int *errp);
 extern __wsum csum_partial_copy_to_user(const void *src, void __user *dst,
-				      int len, __wsum isum, int *errp);
-extern __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len,
-					      __wsum sum);
+					int len, __wsum isum, int *errp);
+extern __wsum csum_partial_copy_nocheck(const void *src, void *dst,
+					int len, __wsum sum);
 
 /* Old names. To be removed. */
 #define csum_and_copy_to_user csum_partial_copy_to_user
 #define csum_and_copy_from_user csum_partial_copy_from_user
 
-/** 
+/**
  * ip_compute_csum - Compute an 16bit IP checksum.
  * @buff: buffer address.
  * @len: length of buffer.
@@ -170,7 +167,7 @@
  * @proto: protocol of packet
  * @sum: initial sum (32bit unfolded) to be added in
  *
- * Computes an IPv6 pseudo header checksum. This sum is added the checksum 
+ * Computes an IPv6 pseudo header checksum. This sum is added to the checksum
  * of UDP/TCP packets and contains some link layer information.
  * Returns the unfolded 32bit checksum.
  */
@@ -185,11 +182,10 @@
 static inline unsigned add32_with_carry(unsigned a, unsigned b)
 {
 	asm("addl %2,%0\n\t"
-	    "adcl $0,%0" 
-	    : "=r" (a) 
+	    "adcl $0,%0"
+	    : "=r" (a)
 	    : "0" (a), "r" (b));
 	return a;
 }
 
 #endif
-
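
csum_fold() reduces a 32-bit running checksum to the final 16-bit header
value with an end-around carry and inversion; a portable restatement of
the asm above:

#include <stdio.h>
#include <stdint.h>

static uint16_t csum_fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* fold high into low */
	sum += sum >> 16;			/* absorb the carry */
	return (uint16_t)~sum;
}

int main(void)
{
	printf("%#x\n", csum_fold(0x00012345u));	/* 0xdcb9 */
	return 0;
}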
diff --git a/include/asm-x86/cmpxchg_32.h b/include/asm-x86/cmpxchg_32.h
index 959fad0..bf5a69d 100644
--- a/include/asm-x86/cmpxchg_32.h
+++ b/include/asm-x86/cmpxchg_32.h
@@ -8,9 +8,12 @@
  *       you need to test for the feature in boot_cpu_data.
  */
 
-#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
+#define xchg(ptr, v)							\
+	((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))
 
-struct __xchg_dummy { unsigned long a[100]; };
+struct __xchg_dummy {
+	unsigned long a[100];
+};
 #define __xg(x) ((struct __xchg_dummy *)(x))
 
 /*
@@ -27,72 +30,74 @@
  * of the instruction set reference 24319102.pdf. We need
  * the reader side to see the coherent 64bit value.
  */
-static inline void __set_64bit (unsigned long long * ptr,
-		unsigned int low, unsigned int high)
+static inline void __set_64bit(unsigned long long *ptr,
+			       unsigned int low, unsigned int high)
 {
-	__asm__ __volatile__ (
-		"\n1:\t"
-		"movl (%0), %%eax\n\t"
-		"movl 4(%0), %%edx\n\t"
-		LOCK_PREFIX "cmpxchg8b (%0)\n\t"
-		"jnz 1b"
-		: /* no outputs */
-		:	"D"(ptr),
-			"b"(low),
-			"c"(high)
-		:	"ax","dx","memory");
+	asm volatile("\n1:\t"
+		     "movl (%0), %%eax\n\t"
+		     "movl 4(%0), %%edx\n\t"
+		     LOCK_PREFIX "cmpxchg8b (%0)\n\t"
+		     "jnz 1b"
+		     : /* no outputs */
+		     : "D"(ptr),
+		       "b"(low),
+		       "c"(high)
+		     : "ax", "dx", "memory");
 }
 
-static inline void __set_64bit_constant (unsigned long long *ptr,
-						 unsigned long long value)
+static inline void __set_64bit_constant(unsigned long long *ptr,
+					unsigned long long value)
 {
-	__set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
-}
-#define ll_low(x)	*(((unsigned int*)&(x))+0)
-#define ll_high(x)	*(((unsigned int*)&(x))+1)
-
-static inline void __set_64bit_var (unsigned long long *ptr,
-			 unsigned long long value)
-{
-	__set_64bit(ptr,ll_low(value), ll_high(value));
+	__set_64bit(ptr, (unsigned int)value, (unsigned int)(value >> 32));
 }
 
-#define set_64bit(ptr,value) \
-(__builtin_constant_p(value) ? \
- __set_64bit_constant(ptr, value) : \
- __set_64bit_var(ptr, value) )
+#define ll_low(x)	*(((unsigned int *)&(x)) + 0)
+#define ll_high(x)	*(((unsigned int *)&(x)) + 1)
 
-#define _set_64bit(ptr,value) \
-(__builtin_constant_p(value) ? \
- __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
- __set_64bit(ptr, ll_low(value), ll_high(value)) )
+static inline void __set_64bit_var(unsigned long long *ptr,
+				   unsigned long long value)
+{
+	__set_64bit(ptr, ll_low(value), ll_high(value));
+}
+
+#define set_64bit(ptr, value)			\
+	(__builtin_constant_p((value))		\
+	 ? __set_64bit_constant((ptr), (value))	\
+	 : __set_64bit_var((ptr), (value)))
+
+#define _set_64bit(ptr, value)						\
+	(__builtin_constant_p(value)					\
+	 ? __set_64bit(ptr, (unsigned int)(value),			\
+		       (unsigned int)((value) >> 32))			\
+	 : __set_64bit(ptr, ll_low((value)), ll_high((value))))
 
 /*
  * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
  * Note 2: xchg has side effect, so that attribute volatile is necessary,
  *	  but generally the primitive is invalid, *ptr is output argument. --ANK
  */
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
+				   int size)
 {
 	switch (size) {
-		case 1:
-			__asm__ __volatile__("xchgb %b0,%1"
-				:"=q" (x)
-				:"m" (*__xg(ptr)), "0" (x)
-				:"memory");
-			break;
-		case 2:
-			__asm__ __volatile__("xchgw %w0,%1"
-				:"=r" (x)
-				:"m" (*__xg(ptr)), "0" (x)
-				:"memory");
-			break;
-		case 4:
-			__asm__ __volatile__("xchgl %0,%1"
-				:"=r" (x)
-				:"m" (*__xg(ptr)), "0" (x)
-				:"memory");
-			break;
+	case 1:
+		asm volatile("xchgb %b0,%1"
+			     : "=q" (x)
+			     : "m" (*__xg(ptr)), "0" (x)
+			     : "memory");
+		break;
+	case 2:
+		asm volatile("xchgw %w0,%1"
+			     : "=r" (x)
+			     : "m" (*__xg(ptr)), "0" (x)
+			     : "memory");
+		break;
+	case 4:
+		asm volatile("xchgl %0,%1"
+			     : "=r" (x)
+			     : "m" (*__xg(ptr)), "0" (x)
+			     : "memory");
+		break;
 	}
 	return x;
 }
@@ -105,24 +110,27 @@
 
 #ifdef CONFIG_X86_CMPXCHG
 #define __HAVE_ARCH_CMPXCHG 1
-#define cmpxchg(ptr, o, n)						     \
-	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),	     \
-					(unsigned long)(n), sizeof(*(ptr))))
-#define sync_cmpxchg(ptr, o, n)						     \
-	((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o),	     \
-					(unsigned long)(n), sizeof(*(ptr))))
-#define cmpxchg_local(ptr, o, n)					     \
-	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	     \
-					(unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg(ptr, o, n)						\
+	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),	\
+				       (unsigned long)(n),		\
+				       sizeof(*(ptr))))
+#define sync_cmpxchg(ptr, o, n)						\
+	((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o),	\
+					    (unsigned long)(n),		\
+					    sizeof(*(ptr))))
+#define cmpxchg_local(ptr, o, n)					\
+	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
+					     (unsigned long)(n),	\
+					     sizeof(*(ptr))))
 #endif
 
 #ifdef CONFIG_X86_CMPXCHG64
-#define cmpxchg64(ptr, o, n)						      \
-	((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o),      \
-					(unsigned long long)(n)))
-#define cmpxchg64_local(ptr, o, n)					      \
-	((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o),\
-					(unsigned long long)(n)))
+#define cmpxchg64(ptr, o, n)						\
+	((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
+					 (unsigned long long)(n)))
+#define cmpxchg64_local(ptr, o, n)					\
+	((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
+					       (unsigned long long)(n)))
 #endif
 
 static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
@@ -131,22 +139,22 @@
 	unsigned long prev;
 	switch (size) {
 	case 1:
-		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
-				     : "=a"(prev)
-				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
+		asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2"
+			     : "=a"(prev)
+			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
 		return prev;
 	case 2:
-		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
+		asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2"
+			     : "=a"(prev)
+			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
 		return prev;
 	case 4:
-		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
+		asm volatile(LOCK_PREFIX "cmpxchgl %1,%2"
+			     : "=a"(prev)
+			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
 		return prev;
 	}
 	return old;
@@ -158,85 +166,88 @@
  * isn't.
  */
 static inline unsigned long __sync_cmpxchg(volatile void *ptr,
-					    unsigned long old,
-					    unsigned long new, int size)
+					   unsigned long old,
+					   unsigned long new, int size)
 {
 	unsigned long prev;
 	switch (size) {
 	case 1:
-		__asm__ __volatile__("lock; cmpxchgb %b1,%2"
-				     : "=a"(prev)
-				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
+		asm volatile("lock; cmpxchgb %b1,%2"
+			     : "=a"(prev)
+			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
 		return prev;
 	case 2:
-		__asm__ __volatile__("lock; cmpxchgw %w1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
+		asm volatile("lock; cmpxchgw %w1,%2"
+			     : "=a"(prev)
+			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
 		return prev;
 	case 4:
-		__asm__ __volatile__("lock; cmpxchgl %1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
+		asm volatile("lock; cmpxchgl %1,%2"
+			     : "=a"(prev)
+			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
 		return prev;
 	}
 	return old;
 }
 
 static inline unsigned long __cmpxchg_local(volatile void *ptr,
-			unsigned long old, unsigned long new, int size)
+					    unsigned long old,
+					    unsigned long new, int size)
 {
 	unsigned long prev;
 	switch (size) {
 	case 1:
-		__asm__ __volatile__("cmpxchgb %b1,%2"
-				     : "=a"(prev)
-				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
+		asm volatile("cmpxchgb %b1,%2"
+			     : "=a"(prev)
+			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
 		return prev;
 	case 2:
-		__asm__ __volatile__("cmpxchgw %w1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
+		asm volatile("cmpxchgw %w1,%2"
+			     : "=a"(prev)
+			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
 		return prev;
 	case 4:
-		__asm__ __volatile__("cmpxchgl %1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
+		asm volatile("cmpxchgl %1,%2"
+			     : "=a"(prev)
+			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
 		return prev;
 	}
 	return old;
 }
 
 static inline unsigned long long __cmpxchg64(volatile void *ptr,
-			unsigned long long old, unsigned long long new)
+					     unsigned long long old,
+					     unsigned long long new)
 {
 	unsigned long long prev;
-	__asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
-			     : "=A"(prev)
-			     : "b"((unsigned long)new),
-			       "c"((unsigned long)(new >> 32)),
-			       "m"(*__xg(ptr)),
-			       "0"(old)
-			     : "memory");
+	asm volatile(LOCK_PREFIX "cmpxchg8b %3"
+		     : "=A"(prev)
+		     : "b"((unsigned long)new),
+		       "c"((unsigned long)(new >> 32)),
+		       "m"(*__xg(ptr)),
+		       "0"(old)
+		     : "memory");
 	return prev;
 }
 
 static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
-			unsigned long long old, unsigned long long new)
+						   unsigned long long old,
+						   unsigned long long new)
 {
 	unsigned long long prev;
-	__asm__ __volatile__("cmpxchg8b %3"
-			     : "=A"(prev)
-			     : "b"((unsigned long)new),
-			       "c"((unsigned long)(new >> 32)),
-			       "m"(*__xg(ptr)),
-			       "0"(old)
-			     : "memory");
+	asm volatile("cmpxchg8b %3"
+		     : "=A"(prev)
+		     : "b"((unsigned long)new),
+		       "c"((unsigned long)(new >> 32)),
+		       "m"(*__xg(ptr)),
+		       "0"(old)
+		     : "memory");
 	return prev;
 }
 
@@ -252,7 +263,7 @@
 extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);
 
 static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
-				      unsigned long new, int size)
+					unsigned long new, int size)
 {
 	switch (size) {
 	case 1:
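
The typed cmpxchg() above is what open-coded compare-and-swap retry loops
are built from. A minimal sketch of the usual pattern (hypothetical caller,
not part of this patch):

	static inline void cas_inc(unsigned int *counter)
	{
		unsigned int old;

		do {
			old = *counter;		/* snapshot the current value */
		} while (cmpxchg(counter, old, old + 1) != old);
	}

The loop retries until cmpxchg() reports that the value it swapped out is
the one that was snapshotted, i.e. no other CPU raced in between.
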
diff --git a/include/asm-x86/cmpxchg_64.h b/include/asm-x86/cmpxchg_64.h
index 56f5b41..d9b26b9 100644
--- a/include/asm-x86/cmpxchg_64.h
+++ b/include/asm-x86/cmpxchg_64.h
@@ -3,7 +3,8 @@
 
 #include <asm/alternative.h> /* Provides LOCK_PREFIX */
 
-#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
+#define xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), \
+						 (ptr), sizeof(*(ptr))))
 
 #define __xg(x) ((volatile long *)(x))
 
@@ -19,33 +20,34 @@
 * Note 2: xchg has a side effect, so the volatile attribute is necessary,
 *	  but generally the primitive is invalid; *ptr is an output argument. --ANK
  */
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
+				   int size)
 {
 	switch (size) {
-		case 1:
-			__asm__ __volatile__("xchgb %b0,%1"
-				:"=q" (x)
-				:"m" (*__xg(ptr)), "0" (x)
-				:"memory");
-			break;
-		case 2:
-			__asm__ __volatile__("xchgw %w0,%1"
-				:"=r" (x)
-				:"m" (*__xg(ptr)), "0" (x)
-				:"memory");
-			break;
-		case 4:
-			__asm__ __volatile__("xchgl %k0,%1"
-				:"=r" (x)
-				:"m" (*__xg(ptr)), "0" (x)
-				:"memory");
-			break;
-		case 8:
-			__asm__ __volatile__("xchgq %0,%1"
-				:"=r" (x)
-				:"m" (*__xg(ptr)), "0" (x)
-				:"memory");
-			break;
+	case 1:
+		asm volatile("xchgb %b0,%1"
+			     : "=q" (x)
+			     : "m" (*__xg(ptr)), "0" (x)
+			     : "memory");
+		break;
+	case 2:
+		asm volatile("xchgw %w0,%1"
+			     : "=r" (x)
+			     : "m" (*__xg(ptr)), "0" (x)
+			     : "memory");
+		break;
+	case 4:
+		asm volatile("xchgl %k0,%1"
+			     : "=r" (x)
+			     : "m" (*__xg(ptr)), "0" (x)
+			     : "memory");
+		break;
+	case 8:
+		asm volatile("xchgq %0,%1"
+			     : "=r" (x)
+			     : "m" (*__xg(ptr)), "0" (x)
+			     : "memory");
+		break;
 	}
 	return x;
 }
@@ -64,61 +66,62 @@
 	unsigned long prev;
 	switch (size) {
 	case 1:
-		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
-				     : "=a"(prev)
-				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
+		asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2"
+			     : "=a"(prev)
+			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
 		return prev;
 	case 2:
-		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
+		asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2"
+			     : "=a"(prev)
+			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
 		return prev;
 	case 4:
-		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
+		asm volatile(LOCK_PREFIX "cmpxchgl %k1,%2"
+			     : "=a"(prev)
+			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
 		return prev;
 	case 8:
-		__asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
+		asm volatile(LOCK_PREFIX "cmpxchgq %1,%2"
+			     : "=a"(prev)
+			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
 		return prev;
 	}
 	return old;
 }
 
 static inline unsigned long __cmpxchg_local(volatile void *ptr,
-			unsigned long old, unsigned long new, int size)
+					    unsigned long old,
+					    unsigned long new, int size)
 {
 	unsigned long prev;
 	switch (size) {
 	case 1:
-		__asm__ __volatile__("cmpxchgb %b1,%2"
-				     : "=a"(prev)
-				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
+		asm volatile("cmpxchgb %b1,%2"
+			     : "=a"(prev)
+			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
 		return prev;
 	case 2:
-		__asm__ __volatile__("cmpxchgw %w1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
+		asm volatile("cmpxchgw %w1,%2"
+			     : "=a"(prev)
+			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
 		return prev;
 	case 4:
-		__asm__ __volatile__("cmpxchgl %k1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
+		asm volatile("cmpxchgl %k1,%2"
+			     : "=a"(prev)
+			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
 		return prev;
 	case 8:
-		__asm__ __volatile__("cmpxchgq %1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
+		asm volatile("cmpxchgq %1,%2"
+			     : "=a"(prev)
+			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
 		return prev;
 	}
 	return old;
@@ -126,19 +129,20 @@
 
 #define cmpxchg(ptr, o, n)						\
 	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),	\
-					(unsigned long)(n), sizeof(*(ptr))))
+				       (unsigned long)(n), sizeof(*(ptr))))
 #define cmpxchg64(ptr, o, n)						\
-  ({									\
+({									\
 	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
 	cmpxchg((ptr), (o), (n));					\
-  })
+})
 #define cmpxchg_local(ptr, o, n)					\
 	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
-					(unsigned long)(n), sizeof(*(ptr))))
+					     (unsigned long)(n),	\
+					     sizeof(*(ptr))))
 #define cmpxchg64_local(ptr, o, n)					\
-  ({									\
+({									\
 	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
 	cmpxchg_local((ptr), (o), (n));					\
-  })
+})
 
 #endif
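
On 64-bit, cmpxchg64() is just cmpxchg() plus a compile-time width check, so
a mis-sized operand fails at build time instead of silently truncating. A
short sketch:

	u64 seq = 0;

	(void)cmpxchg64(&seq, 0ULL, 1ULL);	/* OK: sizeof(*ptr) == 8 */
	/* u32 w; cmpxchg64(&w, 0, 1);	   would trip the BUILD_BUG_ON() */
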
diff --git a/include/asm-x86/compat.h b/include/asm-x86/compat.h
index d3e8f3e..1793ac3 100644
--- a/include/asm-x86/compat.h
+++ b/include/asm-x86/compat.h
@@ -204,7 +204,7 @@
 	return (u32)(unsigned long)uptr;
 }
 
-static __inline__ void __user *compat_alloc_user_space(long len)
+static inline void __user *compat_alloc_user_space(long len)
 {
 	struct pt_regs *regs = task_pt_regs(current);
 	return (void __user *)regs->sp - len;
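
compat_alloc_user_space() carves scratch space out of the user stack just
below the saved stack pointer, which a compat syscall can then hand to a
native 64-bit interface. A hedged sketch (struct foo32 and native_foo are
hypothetical):

	struct foo32 __user *scratch;

	scratch = compat_alloc_user_space(sizeof(*scratch));
	if (copy_to_user(scratch, &native_foo, sizeof(*scratch)))
		return -EFAULT;
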
diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h
index 065e929..0d609c8 100644
--- a/include/asm-x86/cpufeature.h
+++ b/include/asm-x86/cpufeature.h
@@ -120,6 +120,9 @@
 extern const char * const x86_cap_flags[NCAPINTS*32];
 extern const char * const x86_power_flags[32];
 
+#define test_cpu_cap(c, bit)						\
+	 test_bit(bit, (unsigned long *)((c)->x86_capability))
+
 #define cpu_has(c, bit)							\
 	(__builtin_constant_p(bit) &&					\
 	 ( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) ||	\
@@ -131,7 +134,8 @@
 	   (((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) ||	\
 	   (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) )	\
 	  ? 1 :								\
-	 test_bit(bit, (unsigned long *)((c)->x86_capability)))
+	 test_cpu_cap(c, bit))
+
 #define boot_cpu_has(bit)	cpu_has(&boot_cpu_data, bit)
 
 #define set_cpu_cap(c, bit)	set_bit(bit, (unsigned long *)((c)->x86_capability))
@@ -181,6 +185,8 @@
 #define cpu_has_clflush		boot_cpu_has(X86_FEATURE_CLFLSH)
 #define cpu_has_bts		boot_cpu_has(X86_FEATURE_BTS)
 #define cpu_has_gbpages		boot_cpu_has(X86_FEATURE_GBPAGES)
+#define cpu_has_arch_perfmon	boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
+#define cpu_has_pat		boot_cpu_has(X86_FEATURE_PAT)
 
 #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
 # define cpu_has_invlpg		1
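
The new test_cpu_cap() names the raw bit test that cpu_has() falls back to
when the feature is not already guaranteed at compile time via the
REQUIRED_MASK* words. A sketch of the difference (pat_init() used purely as
an illustrative callee):

	if (cpu_has(c, X86_FEATURE_PAT))	/* may fold to a constant 1 */
		pat_init();

	if (test_cpu_cap(c, X86_FEATURE_PAT))	/* always reads the bit */
		pat_init();
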
diff --git a/include/asm-x86/current_32.h b/include/asm-x86/current_32.h
index d352485..5af9bdb 100644
--- a/include/asm-x86/current_32.h
+++ b/include/asm-x86/current_32.h
@@ -11,7 +11,7 @@
 {
 	return x86_read_percpu(current_task);
 }
- 
+
 #define current get_current()
 
 #endif /* !(_I386_CURRENT_H) */
diff --git a/include/asm-x86/current_64.h b/include/asm-x86/current_64.h
index bc8adec..2d368ed 100644
--- a/include/asm-x86/current_64.h
+++ b/include/asm-x86/current_64.h
@@ -1,23 +1,23 @@
 #ifndef _X86_64_CURRENT_H
 #define _X86_64_CURRENT_H
 
-#if !defined(__ASSEMBLY__) 
+#if !defined(__ASSEMBLY__)
 struct task_struct;
 
 #include <asm/pda.h>
 
-static inline struct task_struct *get_current(void) 
-{ 
-	struct task_struct *t = read_pda(pcurrent); 
+static inline struct task_struct *get_current(void)
+{
+	struct task_struct *t = read_pda(pcurrent);
 	return t;
-} 
+}
 
 #define current get_current()
 
 #else
 
 #ifndef ASM_OFFSET_H
-#include <asm/asm-offsets.h> 
+#include <asm/asm-offsets.h>
 #endif
 
 #define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg
diff --git a/include/asm-x86/desc.h b/include/asm-x86/desc.h
index 5b6a05d..268a012 100644
--- a/include/asm-x86/desc.h
+++ b/include/asm-x86/desc.h
@@ -62,8 +62,8 @@
 }
 
 static inline void pack_gate(gate_desc *gate, unsigned char type,
-       unsigned long base, unsigned dpl, unsigned flags, unsigned short seg)
-
+			     unsigned long base, unsigned dpl, unsigned flags,
+			     unsigned short seg)
 {
 	gate->a = (seg << 16) | (base & 0xffff);
 	gate->b = (base & 0xffff0000) |
@@ -84,22 +84,23 @@
 #define load_TR_desc() native_load_tr_desc()
 #define load_gdt(dtr) native_load_gdt(dtr)
 #define load_idt(dtr) native_load_idt(dtr)
-#define load_tr(tr) __asm__ __volatile("ltr %0"::"m" (tr))
-#define load_ldt(ldt) __asm__ __volatile("lldt %0"::"m" (ldt))
+#define load_tr(tr) asm volatile("ltr %0"::"m" (tr))
+#define load_ldt(ldt) asm volatile("lldt %0"::"m" (ldt))
 
 #define store_gdt(dtr) native_store_gdt(dtr)
 #define store_idt(dtr) native_store_idt(dtr)
 #define store_tr(tr) (tr = native_store_tr())
-#define store_ldt(ldt) __asm__ ("sldt %0":"=m" (ldt))
+#define store_ldt(ldt) asm("sldt %0":"=m" (ldt))
 
 #define load_TLS(t, cpu) native_load_tls(t, cpu)
 #define set_ldt native_set_ldt
 
-#define write_ldt_entry(dt, entry, desc) \
-				native_write_ldt_entry(dt, entry, desc)
-#define write_gdt_entry(dt, entry, desc, type) \
-				native_write_gdt_entry(dt, entry, desc, type)
-#define write_idt_entry(dt, entry, g) native_write_idt_entry(dt, entry, g)
+#define write_ldt_entry(dt, entry, desc)	\
+	native_write_ldt_entry(dt, entry, desc)
+#define write_gdt_entry(dt, entry, desc, type)		\
+	native_write_gdt_entry(dt, entry, desc, type)
+#define write_idt_entry(dt, entry, g)		\
+	native_write_idt_entry(dt, entry, g)
 #endif
 
 static inline void native_write_idt_entry(gate_desc *idt, int entry,
@@ -138,8 +139,8 @@
 {
 	desc->a = ((base & 0xffff) << 16) | (limit & 0xffff);
 	desc->b = (base & 0xff000000) | ((base & 0xff0000) >> 16) |
-		  (limit & 0x000f0000) | ((type & 0xff) << 8) |
-		  ((flags & 0xf) << 20);
+		(limit & 0x000f0000) | ((type & 0xff) << 8) |
+		((flags & 0xf) << 20);
 	desc->p = 1;
 }
 
@@ -159,7 +160,6 @@
 	desc->base2 = (PTR_MIDDLE(addr) >> 8) & 0xFF;
 	desc->base3 = PTR_HIGH(addr);
 #else
-
 	pack_descriptor((struct desc_struct *)d, addr, size, 0x80 | type, 0);
 #endif
 }
@@ -177,7 +177,8 @@
 	 * last valid byte
 	 */
 	set_tssldt_descriptor(&tss, (unsigned long)addr, DESC_TSS,
-		IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1);
+			      IO_BITMAP_OFFSET + IO_BITMAP_BYTES +
+			      sizeof(unsigned long) - 1);
 	write_gdt_entry(d, entry, &tss, DESC_TSS);
 }
 
@@ -186,7 +187,7 @@
 static inline void native_set_ldt(const void *addr, unsigned int entries)
 {
 	if (likely(entries == 0))
-		__asm__ __volatile__("lldt %w0"::"q" (0));
+		asm volatile("lldt %w0"::"q" (0));
 	else {
 		unsigned cpu = smp_processor_id();
 		ldt_desc ldt;
@@ -195,7 +196,7 @@
 				      DESC_LDT, entries * sizeof(ldt) - 1);
 		write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT,
 				&ldt, DESC_LDT);
-		__asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8));
+		asm volatile("lldt %w0"::"q" (GDT_ENTRY_LDT*8));
 	}
 }
 
@@ -240,15 +241,15 @@
 		gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
 }
 
-#define _LDT_empty(info) (\
-	(info)->base_addr	== 0	&& \
-	(info)->limit		== 0	&& \
-	(info)->contents	== 0	&& \
-	(info)->read_exec_only	== 1	&& \
-	(info)->seg_32bit	== 0	&& \
-	(info)->limit_in_pages	== 0	&& \
-	(info)->seg_not_present	== 1	&& \
-	(info)->useable		== 0)
+#define _LDT_empty(info)				\
+	((info)->base_addr		== 0	&&	\
+	 (info)->limit			== 0	&&	\
+	 (info)->contents		== 0	&&	\
+	 (info)->read_exec_only		== 1	&&	\
+	 (info)->seg_32bit		== 0	&&	\
+	 (info)->limit_in_pages		== 0	&&	\
+	 (info)->seg_not_present	== 1	&&	\
+	 (info)->useable		== 0)
 
 #ifdef CONFIG_X86_64
 #define LDT_empty(info) (_LDT_empty(info) && ((info)->lm == 0))
@@ -287,7 +288,7 @@
 }
 
 static inline void _set_gate(int gate, unsigned type, void *addr,
-			      unsigned dpl, unsigned ist, unsigned seg)
+			     unsigned dpl, unsigned ist, unsigned seg)
 {
 	gate_desc s;
 	pack_gate(&s, type, (unsigned long)addr, dpl, ist, seg);
@@ -370,10 +371,10 @@
  *    Will read the base address of GDT_ENTRY_ESPFIX_SS and put it into %eax.
  */
 #define GET_DESC_BASE(idx, gdt, base, lo_w, lo_b, hi_b) \
-	movb idx*8+4(gdt), lo_b; \
-	movb idx*8+7(gdt), hi_b; \
-	shll $16, base; \
-	movw idx*8+2(gdt), lo_w;
+	movb idx * 8 + 4(gdt), lo_b;			\
+	movb idx * 8 + 7(gdt), hi_b;			\
+	shll $16, base;					\
+	movw idx * 8 + 2(gdt), lo_w;
 
 
 #endif /* __ASSEMBLY__ */
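
_LDT_empty() (wrapped by LDT_empty()) recognizes the canonical "clear this
TLS/LDT slot" request from userspace. A sketch of the typical guard,
assuming the usual struct user_desc from <asm/ldt.h> (the helper itself is
hypothetical):

	static void fill_ldt_slot(struct desc_struct *desc,
				  const struct user_desc *info)
	{
		if (LDT_empty(info)) {		/* userspace asked to clear it */
			desc->a = 0;
			desc->b = 0;
			return;
		}
		/* ...otherwise pack base/limit/flags from *info... */
	}
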
diff --git a/include/asm-x86/desc_defs.h b/include/asm-x86/desc_defs.h
index e33f078..eccb4ea 100644
--- a/include/asm-x86/desc_defs.h
+++ b/include/asm-x86/desc_defs.h
@@ -18,17 +18,19 @@
  * incrementally. We keep the signature as a struct, rather than a union,
  * so we can get rid of it transparently in the future -- glommer
  */
-// 8 byte segment descriptor
+/* 8 byte segment descriptor */
 struct desc_struct {
 	union {
-		struct { unsigned int a, b; };
+		struct {
+			unsigned int a;
+			unsigned int b;
+		};
 		struct {
 			u16 limit0;
 			u16 base0;
 			unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
 			unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
 		};
-
 	};
 } __attribute__((packed));
 
@@ -39,7 +41,7 @@
 	GATE_TASK = 0x5,
 };
 
-// 16byte gate
+/* 16-byte gate */
 struct gate_struct64 {
 	u16 offset_low;
 	u16 segment;
@@ -56,10 +58,10 @@
 enum {
 	DESC_TSS = 0x9,
 	DESC_LDT = 0x2,
-	DESCTYPE_S =	0x10,	/* !system */
+	DESCTYPE_S = 0x10,	/* !system */
 };
 
-// LDT or TSS descriptor in the GDT. 16 bytes.
+/* LDT or TSS descriptor in the GDT. 16 bytes. */
 struct ldttss_desc64 {
 	u16 limit0;
 	u16 base0;
@@ -84,7 +86,6 @@
 	unsigned long address;
 } __attribute__((packed)) ;
 
-
 #endif /* !__ASSEMBLY__ */
 
 #endif
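
The anonymous union above gives two coexisting views of the same descriptor:
the legacy a/b word pair and the named bitfields. A hedged sketch using both
views (hypothetical helper; BUILD_BUG_ON() from <linux/kernel.h>):

	static void mark_present(struct desc_struct *d)
	{
		BUILD_BUG_ON(sizeof(*d) != 8);	/* both views must stay 8 bytes */

		d->a = 0;		/* clear via the word view... */
		d->b = 0;
		d->type = 0xb;		/* ...set bits via the bitfield view */
		d->p = 1;		/* present bit (bit 47 of the descriptor) */
	}
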
diff --git a/include/asm-x86/div64.h b/include/asm-x86/div64.h
index e98d16e..0dbf8bf 100644
--- a/include/asm-x86/div64.h
+++ b/include/asm-x86/div64.h
@@ -17,18 +17,20 @@
  * This ends up being the most efficient "calling
  * convention" on x86.
  */
-#define do_div(n,base) ({ \
-	unsigned long __upper, __low, __high, __mod, __base; \
-	__base = (base); \
-	asm("":"=a" (__low), "=d" (__high):"A" (n)); \
-	__upper = __high; \
-	if (__high) { \
-		__upper = __high % (__base); \
-		__high = __high / (__base); \
-	} \
-	asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (__base), "0" (__low), "1" (__upper)); \
-	asm("":"=A" (n):"a" (__low),"d" (__high)); \
-	__mod; \
+#define do_div(n, base)						\
+({								\
+	unsigned long __upper, __low, __high, __mod, __base;	\
+	__base = (base);					\
+	asm("":"=a" (__low), "=d" (__high) : "A" (n));		\
+	__upper = __high;					\
+	if (__high) {						\
+		__upper = __high % (__base);			\
+		__high = __high / (__base);			\
+	}							\
+	asm("divl %2":"=a" (__low), "=d" (__mod)		\
+	    : "rm" (__base), "0" (__low), "1" (__upper));	\
+	asm("":"=A" (n) : "a" (__low), "d" (__high));		\
+	__mod;							\
 })
 
 /*
@@ -37,14 +39,13 @@
  *
  * Warning, this will raise an exception if X overflows.
  */
-#define div_long_long_rem(a,b,c) div_ll_X_l_rem(a,b,c)
+#define div_long_long_rem(a, b, c) div_ll_X_l_rem(a, b, c)
 
-static inline long
-div_ll_X_l_rem(long long divs, long div, long *rem)
+static inline long div_ll_X_l_rem(long long divs, long div, long *rem)
 {
 	long dum2;
-      __asm__("divl %2":"=a"(dum2), "=d"(*rem)
-      :	"rm"(div), "A"(divs));
+	asm("divl %2":"=a"(dum2), "=d"(*rem)
+	    : "rm"(div), "A"(divs));
 
 	return dum2;
 
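
do_div() divides the 64-bit dividend in place and evaluates to the 32-bit
remainder, which is why it has to stay a macro. The canonical call pattern:

	u64 ns = 1234567890ULL;
	u32 rem;

	rem = do_div(ns, 1000000);	/* now ns == 1234, rem == 567890 */
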
diff --git a/include/asm-x86/dma.h b/include/asm-x86/dma.h
index e9733ce..ca1098a 100644
--- a/include/asm-x86/dma.h
+++ b/include/asm-x86/dma.h
@@ -12,7 +12,6 @@
 #include <asm/io.h>		/* need byte IO */
 #include <linux/delay.h>
 
-
 #ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
 #define dma_outb	outb_p
 #else
@@ -74,15 +73,15 @@
 #ifdef CONFIG_X86_32
 
 /* The maximum address that we can perform a DMA transfer to on this platform */
-#define MAX_DMA_ADDRESS      (PAGE_OFFSET+0x1000000)
+#define MAX_DMA_ADDRESS      (PAGE_OFFSET + 0x1000000)
 
 #else
 
 /* 16MB ISA DMA zone */
-#define MAX_DMA_PFN   ((16*1024*1024) >> PAGE_SHIFT)
+#define MAX_DMA_PFN   ((16 * 1024 * 1024) >> PAGE_SHIFT)
 
 /* 4GB broken PCI/AGP hardware bus master zone */
-#define MAX_DMA32_PFN ((4UL*1024*1024*1024) >> PAGE_SHIFT)
+#define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT)
 
 /* Compat define for old dma zone */
 #define MAX_DMA_ADDRESS ((unsigned long)__va(MAX_DMA_PFN << PAGE_SHIFT))
@@ -154,20 +153,20 @@
 
 extern spinlock_t  dma_spin_lock;
 
-static __inline__ unsigned long claim_dma_lock(void)
+static inline unsigned long claim_dma_lock(void)
 {
 	unsigned long flags;
 	spin_lock_irqsave(&dma_spin_lock, flags);
 	return flags;
 }
 
-static __inline__ void release_dma_lock(unsigned long flags)
+static inline void release_dma_lock(unsigned long flags)
 {
 	spin_unlock_irqrestore(&dma_spin_lock, flags);
 }
 
 /* enable/disable a specific DMA channel */
-static __inline__ void enable_dma(unsigned int dmanr)
+static inline void enable_dma(unsigned int dmanr)
 {
 	if (dmanr <= 3)
 		dma_outb(dmanr, DMA1_MASK_REG);
@@ -175,7 +174,7 @@
 		dma_outb(dmanr & 3, DMA2_MASK_REG);
 }
 
-static __inline__ void disable_dma(unsigned int dmanr)
+static inline void disable_dma(unsigned int dmanr)
 {
 	if (dmanr <= 3)
 		dma_outb(dmanr | 4, DMA1_MASK_REG);
@@ -190,7 +189,7 @@
  * --- In order to do that, the DMA routines below should ---
  * --- only be used while holding the DMA lock ! ---
  */
-static __inline__ void clear_dma_ff(unsigned int dmanr)
+static inline void clear_dma_ff(unsigned int dmanr)
 {
 	if (dmanr <= 3)
 		dma_outb(0, DMA1_CLEAR_FF_REG);
@@ -199,7 +198,7 @@
 }
 
 /* set mode (above) for a specific DMA channel */
-static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
+static inline void set_dma_mode(unsigned int dmanr, char mode)
 {
 	if (dmanr <= 3)
 		dma_outb(mode | dmanr, DMA1_MODE_REG);
@@ -212,7 +211,7 @@
  * the lower 16 bits of the DMA current address register, but a 64k boundary
  * may have been crossed.
  */
-static __inline__ void set_dma_page(unsigned int dmanr, char pagenr)
+static inline void set_dma_page(unsigned int dmanr, char pagenr)
 {
 	switch (dmanr) {
 	case 0:
@@ -243,15 +242,15 @@
 /* Set transfer address & page bits for specific DMA channel.
  * Assumes dma flipflop is clear.
  */
-static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
+static inline void set_dma_addr(unsigned int dmanr, unsigned int a)
 {
 	set_dma_page(dmanr, a>>16);
 	if (dmanr <= 3)  {
 		dma_outb(a & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE);
 		dma_outb((a >> 8) & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE);
 	}  else  {
-	    dma_outb((a >> 1) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE);
-	    dma_outb((a >> 9) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE);
+		dma_outb((a >> 1) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE);
+		dma_outb((a >> 9) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE);
 	}
 }
 
@@ -264,18 +263,18 @@
  * Assumes dma flip-flop is clear.
  * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
  */
-static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
+static inline void set_dma_count(unsigned int dmanr, unsigned int count)
 {
 	count--;
 	if (dmanr <= 3)  {
-	    dma_outb(count & 0xff, ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
-	    dma_outb((count >> 8) & 0xff,
-		     ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
+		dma_outb(count & 0xff, ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
+		dma_outb((count >> 8) & 0xff,
+			 ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
 	} else {
-	    dma_outb((count >> 1) & 0xff,
-		     ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
-	    dma_outb((count >> 9) & 0xff,
-		     ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
+		dma_outb((count >> 1) & 0xff,
+			 ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
+		dma_outb((count >> 9) & 0xff,
+			 ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
 	}
 }
 
@@ -288,7 +287,7 @@
  *
  * Assumes DMA flip-flop is clear.
  */
-static __inline__ int get_dma_residue(unsigned int dmanr)
+static inline int get_dma_residue(unsigned int dmanr)
 {
 	unsigned int io_port;
 	/* using short to get 16-bit wrap around */
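
As the comment block above insists, the 8237 registers may only be touched
under the DMA lock. A sketch of the usual programming sequence (chan, buf
and len are hypothetical; the same shape appears in floppy.h's
hard_dma_setup() below):

	unsigned long flags;

	flags = claim_dma_lock();		/* serialize the 8237 registers */
	clear_dma_ff(chan);			/* reset the address flip-flop */
	set_dma_mode(chan, DMA_MODE_READ);
	set_dma_addr(chan, isa_virt_to_bus(buf));
	set_dma_count(chan, len);
	enable_dma(chan);
	release_dma_lock(flags);
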
diff --git a/include/asm-x86/dwarf2_64.h b/include/asm-x86/dwarf2_64.h
index eedc085..c950519 100644
--- a/include/asm-x86/dwarf2_64.h
+++ b/include/asm-x86/dwarf2_64.h
@@ -1,16 +1,15 @@
 #ifndef _DWARF2_H
 #define _DWARF2_H 1
 
-
 #ifndef __ASSEMBLY__
 #warning "asm/dwarf2.h should be only included in pure assembly files"
 #endif
 
-/* 
+/*
    Macros for dwarf2 CFI unwind table entries.
-   See "as.info" for details on these pseudo ops. Unfortunately 
-   they are only supported in very new binutils, so define them 
-   away for older version. 
+   See "as.info" for details on these pseudo ops. Unfortunately
+   they are only supported in very new binutils, so define them
+   away for older versions.
  */
 
 #ifdef CONFIG_AS_CFI
diff --git a/include/asm-x86/e820_32.h b/include/asm-x86/e820_32.h
index e7207a6..43b1a8b 100644
--- a/include/asm-x86/e820_32.h
+++ b/include/asm-x86/e820_32.h
@@ -34,8 +34,8 @@
 extern void limit_regions(unsigned long long size);
 extern void print_memory_map(char *who);
 extern void init_iomem_resources(struct resource *code_resource,
-			    struct resource *data_resource,
-			    struct resource *bss_resource);
+				 struct resource *data_resource,
+				 struct resource *bss_resource);
 
 #if defined(CONFIG_PM) && defined(CONFIG_HIBERNATION)
 extern void e820_mark_nosave_regions(void);
diff --git a/include/asm-x86/e820_64.h b/include/asm-x86/e820_64.h
index 22ede73..f478c57 100644
--- a/include/asm-x86/e820_64.h
+++ b/include/asm-x86/e820_64.h
@@ -14,20 +14,24 @@
 #include <linux/ioport.h>
 
 #ifndef __ASSEMBLY__
-extern unsigned long find_e820_area(unsigned long start, unsigned long end, 
-				    unsigned size, unsigned long align);
-extern void add_memory_region(unsigned long start, unsigned long size, 
+extern unsigned long find_e820_area(unsigned long start, unsigned long end,
+				    unsigned long size, unsigned long align);
+extern unsigned long find_e820_area_size(unsigned long start,
+					 unsigned long *sizep,
+					 unsigned long align);
+extern void add_memory_region(unsigned long start, unsigned long size,
 			      int type);
 extern void update_memory_range(u64 start, u64 size, unsigned old_type,
 				unsigned new_type);
 extern void setup_memory_region(void);
-extern void contig_e820_setup(void); 
+extern void contig_e820_setup(void);
 extern unsigned long e820_end_of_ram(void);
-extern void e820_reserve_resources(struct resource *code_resource,
-		struct resource *data_resource, struct resource *bss_resource);
+extern void e820_reserve_resources(void);
 extern void e820_mark_nosave_regions(void);
-extern int e820_any_mapped(unsigned long start, unsigned long end, unsigned type);
-extern int e820_all_mapped(unsigned long start, unsigned long end, unsigned type);
+extern int e820_any_mapped(unsigned long start, unsigned long end,
+			   unsigned type);
+extern int e820_all_mapped(unsigned long start, unsigned long end,
+			   unsigned type);
 extern int e820_any_non_reserved(unsigned long start, unsigned long end);
 extern int is_memory_any_valid(unsigned long start, unsigned long end);
 extern int e820_all_non_reserved(unsigned long start, unsigned long end);
@@ -35,8 +39,8 @@
 extern unsigned long e820_hole_size(unsigned long start, unsigned long end);
 
 extern void e820_setup_gap(void);
-extern void e820_register_active_regions(int nid,
-				unsigned long start_pfn, unsigned long end_pfn);
+extern void e820_register_active_regions(int nid, unsigned long start_pfn,
+					 unsigned long end_pfn);
 
 extern void finish_e820_parsing(void);
 
diff --git a/include/asm-x86/edac.h b/include/asm-x86/edac.h
index cf3200a..a8088f6 100644
--- a/include/asm-x86/edac.h
+++ b/include/asm-x86/edac.h
@@ -3,7 +3,7 @@
 
 /* ECC atomic, DMA, SMP and interrupt safe scrub function */
 
-static __inline__ void atomic_scrub(void *va, u32 size)
+static inline void atomic_scrub(void *va, u32 size)
 {
 	u32 i, *virt_addr = va;
 
@@ -12,7 +12,7 @@
 	 * are interrupt, DMA and SMP safe.
 	 */
 	for (i = 0; i < size / 4; i++, virt_addr++)
-		__asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr));
+		asm volatile("lock; addl $0, %0"::"m" (*virt_addr));
 }
 
 #endif
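
atomic_scrub() relies on "lock; addl $0" being a full read-modify-write:
adding zero leaves the value unchanged but forces each word to be written
back, regenerating its ECC bits. A hedged call sketch (pg is a hypothetical
struct page * for a page that reported a corrected error):

	atomic_scrub(page_address(pg), PAGE_SIZE);	/* va must be 4-byte aligned */
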
diff --git a/include/asm-x86/efi.h b/include/asm-x86/efi.h
index ea9734b..d53004b 100644
--- a/include/asm-x86/efi.h
+++ b/include/asm-x86/efi.h
@@ -20,7 +20,7 @@
  */
 
 #define efi_call_virt(f, args...) \
-     ((efi_##f##_t __attribute__((regparm(0)))*)efi.systab->runtime->f)(args)
+	((efi_##f##_t __attribute__((regparm(0)))*)efi.systab->runtime->f)(args)
 
 #define efi_call_virt0(f)		efi_call_virt(f)
 #define efi_call_virt1(f, a1)		efi_call_virt(f, a1)
diff --git a/include/asm-x86/elf.h b/include/asm-x86/elf.h
index fb62f99..8f232dc 100644
--- a/include/asm-x86/elf.h
+++ b/include/asm-x86/elf.h
@@ -11,7 +11,7 @@
 
 typedef unsigned long elf_greg_t;
 
-#define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t))
+#define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t))
 typedef elf_greg_t elf_gregset_t[ELF_NGREG];
 
 typedef struct user_i387_struct elf_fpregset_t;
@@ -82,8 +82,9 @@
 #define elf_check_arch_ia32(x) \
 	(((x)->e_machine == EM_386) || ((x)->e_machine == EM_486))
 
-#ifdef CONFIG_X86_32
 #include <asm/processor.h>
+
+#ifdef CONFIG_X86_32
 #include <asm/system.h>		/* for savesegment */
 #include <asm/desc.h>
 
@@ -99,10 +100,11 @@
    We might as well make sure everything else is cleared too (except for %esp),
    just to make things more deterministic.
  */
-#define ELF_PLAT_INIT(_r, load_addr)	do { \
-	_r->bx = 0; _r->cx = 0; _r->dx = 0; \
-	_r->si = 0; _r->di = 0; _r->bp = 0; \
-	_r->ax = 0; \
+#define ELF_PLAT_INIT(_r, load_addr)		\
+	do {					\
+	_r->bx = 0; _r->cx = 0; _r->dx = 0;	\
+	_r->si = 0; _r->di = 0; _r->bp = 0;	\
+	_r->ax = 0;				\
 } while (0)
 
 /*
@@ -110,24 +112,25 @@
  * now struct_user_regs, they are different)
  */
 
-#define ELF_CORE_COPY_REGS(pr_reg, regs) do {		\
-	pr_reg[0] = regs->bx;				\
-	pr_reg[1] = regs->cx;				\
-	pr_reg[2] = regs->dx;				\
-	pr_reg[3] = regs->si;				\
-	pr_reg[4] = regs->di;				\
-	pr_reg[5] = regs->bp;				\
-	pr_reg[6] = regs->ax;				\
-	pr_reg[7] = regs->ds & 0xffff;			\
-	pr_reg[8] = regs->es & 0xffff;			\
-	pr_reg[9] = regs->fs & 0xffff;			\
-	savesegment(gs, pr_reg[10]);			\
-	pr_reg[11] = regs->orig_ax;			\
-	pr_reg[12] = regs->ip;				\
-	pr_reg[13] = regs->cs & 0xffff;			\
-	pr_reg[14] = regs->flags;			\
-	pr_reg[15] = regs->sp;				\
-	pr_reg[16] = regs->ss & 0xffff;			\
+#define ELF_CORE_COPY_REGS(pr_reg, regs)	\
+do {						\
+	pr_reg[0] = regs->bx;			\
+	pr_reg[1] = regs->cx;			\
+	pr_reg[2] = regs->dx;			\
+	pr_reg[3] = regs->si;			\
+	pr_reg[4] = regs->di;			\
+	pr_reg[5] = regs->bp;			\
+	pr_reg[6] = regs->ax;			\
+	pr_reg[7] = regs->ds & 0xffff;		\
+	pr_reg[8] = regs->es & 0xffff;		\
+	pr_reg[9] = regs->fs & 0xffff;		\
+	savesegment(gs, pr_reg[10]);		\
+	pr_reg[11] = regs->orig_ax;		\
+	pr_reg[12] = regs->ip;			\
+	pr_reg[13] = regs->cs & 0xffff;		\
+	pr_reg[14] = regs->flags;		\
+	pr_reg[15] = regs->sp;			\
+	pr_reg[16] = regs->ss & 0xffff;		\
 } while (0);
 
 #define ELF_PLATFORM	(utsname()->machine)
@@ -135,12 +138,10 @@
 
 #else /* CONFIG_X86_32 */
 
-#include <asm/processor.h>
-
 /*
  * This is used to ensure we don't load something for the wrong architecture.
  */
-#define elf_check_arch(x) \
+#define elf_check_arch(x)			\
 	((x)->e_machine == EM_X86_64)
 
 #define compat_elf_check_arch(x)	elf_check_arch_ia32(x)
@@ -169,24 +170,30 @@
 	t->ds = t->es = ds;
 }
 
-#define ELF_PLAT_INIT(_r, load_addr)	do {		  \
-	elf_common_init(&current->thread, _r, 0);	  \
-	clear_thread_flag(TIF_IA32);			  \
+#define ELF_PLAT_INIT(_r, load_addr)			\
+do {							\
+	elf_common_init(&current->thread, _r, 0);	\
+	clear_thread_flag(TIF_IA32);			\
 } while (0)
 
-#define	COMPAT_ELF_PLAT_INIT(regs, load_addr)	\
+#define	COMPAT_ELF_PLAT_INIT(regs, load_addr)		\
 	elf_common_init(&current->thread, regs, __USER_DS)
-#define	compat_start_thread(regs, ip, sp)	do {		\
-		start_ia32_thread(regs, ip, sp);		\
-		set_fs(USER_DS);				\
-	} while (0)
-#define COMPAT_SET_PERSONALITY(ex, ibcs2)	do {		\
-		if (test_thread_flag(TIF_IA32))			\
-			clear_thread_flag(TIF_ABI_PENDING);	\
-		else						\
-			set_thread_flag(TIF_ABI_PENDING);	\
-		current->personality |= force_personality32;	\
-	} while (0)
+
+#define	compat_start_thread(regs, ip, sp)		\
+do {							\
+	start_ia32_thread(regs, ip, sp);		\
+	set_fs(USER_DS);				\
+} while (0)
+
+#define COMPAT_SET_PERSONALITY(ex, ibcs2)		\
+do {							\
+	if (test_thread_flag(TIF_IA32))			\
+		clear_thread_flag(TIF_ABI_PENDING);	\
+	else						\
+		set_thread_flag(TIF_ABI_PENDING);	\
+	current->personality |= force_personality32;	\
+} while (0)
+
 #define COMPAT_ELF_PLATFORM			("i686")
 
 /*
@@ -195,7 +202,8 @@
  * getting dumped.
  */
 
-#define ELF_CORE_COPY_REGS(pr_reg, regs)  do {			\
+#define ELF_CORE_COPY_REGS(pr_reg, regs)			\
+do {								\
 	unsigned v;						\
 	(pr_reg)[0] = (regs)->r15;				\
 	(pr_reg)[1] = (regs)->r14;				\
@@ -269,10 +277,12 @@
 
 struct task_struct;
 
-#define	ARCH_DLINFO_IA32(vdso_enabled) \
-do if (vdso_enabled) {							\
+#define	ARCH_DLINFO_IA32(vdso_enabled)					\
+do {									\
+	if (vdso_enabled) {						\
 		NEW_AUX_ENT(AT_SYSINFO,	VDSO_ENTRY);			\
 		NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE);	\
+	}								\
 } while (0)
 
 #ifdef CONFIG_X86_32
@@ -290,9 +300,11 @@
 /* 1GB for 64bit, 8MB for 32bit */
 #define STACK_RND_MASK (test_thread_flag(TIF_IA32) ? 0x7ff : 0x3fffff)
 
-#define ARCH_DLINFO						\
-do if (vdso_enabled) {						\
-	NEW_AUX_ENT(AT_SYSINFO_EHDR,(unsigned long)current->mm->context.vdso);\
+#define ARCH_DLINFO							\
+do {									\
+	if (vdso_enabled)						\
+		NEW_AUX_ENT(AT_SYSINFO_EHDR,				\
+			    (unsigned long)current->mm->context.vdso);	\
 } while (0)
 
 #define AT_SYSINFO		32
@@ -305,8 +317,8 @@
 
 #define VDSO_CURRENT_BASE	((unsigned long)current->mm->context.vdso)
 
-#define VDSO_ENTRY \
-	((unsigned long) VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
+#define VDSO_ENTRY							\
+	((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
 
 struct linux_binprm;
 
diff --git a/include/asm-x86/fixmap_32.h b/include/asm-x86/fixmap_32.h
index a7404d5..eb16651 100644
--- a/include/asm-x86/fixmap_32.h
+++ b/include/asm-x86/fixmap_32.h
@@ -99,8 +99,7 @@
 	 */
 #define NR_FIX_BTMAPS		64
 #define FIX_BTMAPS_NESTING	4
-	FIX_BTMAP_END =
-		__end_of_permanent_fixed_addresses + 512 -
+	FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 512 -
 			(__end_of_permanent_fixed_addresses & 511),
 	FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_NESTING - 1,
 	FIX_WP_TEST,
@@ -110,20 +109,20 @@
 	__end_of_fixed_addresses
 };
 
-extern void __set_fixmap (enum fixed_addresses idx,
-					unsigned long phys, pgprot_t flags);
+extern void __set_fixmap(enum fixed_addresses idx,
+			 unsigned long phys, pgprot_t flags);
 extern void reserve_top_address(unsigned long reserve);
 
-#define set_fixmap(idx, phys) \
-		__set_fixmap(idx, phys, PAGE_KERNEL)
+#define set_fixmap(idx, phys)				\
+	__set_fixmap(idx, phys, PAGE_KERNEL)
 /*
  * Some hardware wants to get fixmapped without caching.
  */
-#define set_fixmap_nocache(idx, phys) \
-		__set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
+#define set_fixmap_nocache(idx, phys)			\
+	__set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
 
-#define clear_fixmap(idx) \
-		__set_fixmap(idx, 0, __pgprot(0))
+#define clear_fixmap(idx)			\
+	__set_fixmap(idx, 0, __pgprot(0))
 
 #define FIXADDR_TOP	((unsigned long)__FIXADDR_TOP)
 
@@ -156,7 +155,7 @@
 	if (idx >= __end_of_fixed_addresses)
 		__this_fixmap_does_not_exist();
 
-        return __fix_to_virt(idx);
+	return __fix_to_virt(idx);
 }
 
 static inline unsigned long virt_to_fix(const unsigned long vaddr)
diff --git a/include/asm-x86/fixmap_64.h b/include/asm-x86/fixmap_64.h
index 70ddb21..f3d7685 100644
--- a/include/asm-x86/fixmap_64.h
+++ b/include/asm-x86/fixmap_64.h
@@ -34,32 +34,34 @@
 
 enum fixed_addresses {
 	VSYSCALL_LAST_PAGE,
-	VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
+	VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE
+			    + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
 	VSYSCALL_HPET,
 	FIX_DBGP_BASE,
 	FIX_EARLYCON_MEM_BASE,
 	FIX_HPET_BASE,
 	FIX_APIC_BASE,	/* local (CPU) APIC -- required for SMP or not */
 	FIX_IO_APIC_BASE_0,
-	FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
+	FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1,
 	FIX_EFI_IO_MAP_LAST_PAGE,
-	FIX_EFI_IO_MAP_FIRST_PAGE = FIX_EFI_IO_MAP_LAST_PAGE+MAX_EFI_IO_PAGES-1,
+	FIX_EFI_IO_MAP_FIRST_PAGE = FIX_EFI_IO_MAP_LAST_PAGE
+				  + MAX_EFI_IO_PAGES - 1,
 #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
 	FIX_OHCI1394_BASE,
 #endif
 	__end_of_fixed_addresses
 };
 
-extern void __set_fixmap (enum fixed_addresses idx,
-					unsigned long phys, pgprot_t flags);
+extern void __set_fixmap(enum fixed_addresses idx,
+			 unsigned long phys, pgprot_t flags);
 
-#define set_fixmap(idx, phys) \
-		__set_fixmap(idx, phys, PAGE_KERNEL)
+#define set_fixmap(idx, phys)			\
+	__set_fixmap(idx, phys, PAGE_KERNEL)
 /*
  * Some hardware wants to get fixmapped without caching.
  */
-#define set_fixmap_nocache(idx, phys) \
-		__set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
+#define set_fixmap_nocache(idx, phys)			\
+	__set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
 
 #define FIXADDR_TOP	(VSYSCALL_END-PAGE_SIZE)
 #define FIXADDR_SIZE	(__end_of_fixed_addresses << PAGE_SHIFT)
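
Fixmaps hand out compile-time-constant virtual addresses;
set_fixmap_nocache() is the uncached flavor used for device registers. A
sketch mirroring how the local APIC base gets mapped (apic_regs is a
hypothetical local; the real call site is not part of this hunk):

	set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
	apic_regs = (void __iomem *)fix_to_virt(FIX_APIC_BASE);
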
diff --git a/include/asm-x86/floppy.h b/include/asm-x86/floppy.h
index a48d715..dbe82a5 100644
--- a/include/asm-x86/floppy.h
+++ b/include/asm-x86/floppy.h
@@ -20,20 +20,21 @@
  * driver otherwise. It doesn't matter much for performance anyway, as most
  * floppy accesses go through the track buffer.
  */
-#define _CROSS_64KB(a,s,vdma) \
-(!(vdma) && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64))
+#define _CROSS_64KB(a, s, vdma)						\
+	(!(vdma) &&							\
+	 ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64))
 
-#define CROSS_64KB(a,s) _CROSS_64KB(a,s,use_virtual_dma & 1)
+#define CROSS_64KB(a, s) _CROSS_64KB(a, s, use_virtual_dma & 1)
 
 
-#define SW fd_routine[use_virtual_dma&1]
+#define SW fd_routine[use_virtual_dma & 1]
 #define CSW fd_routine[can_use_virtual_dma & 1]
 
 
 #define fd_inb(port)		inb_p(port)
-#define fd_outb(value,port)	outb_p(value,port)
+#define fd_outb(value, port)	outb_p(value, port)
 
-#define fd_request_dma()	CSW._request_dma(FLOPPY_DMA,"floppy")
+#define fd_request_dma()	CSW._request_dma(FLOPPY_DMA, "floppy")
 #define fd_free_dma()		CSW._free_dma(FLOPPY_DMA)
 #define fd_enable_irq()		enable_irq(FLOPPY_IRQ)
 #define fd_disable_irq()	disable_irq(FLOPPY_IRQ)
@@ -52,64 +53,64 @@
 
 static irqreturn_t floppy_hardint(int irq, void *dev_id)
 {
-	register unsigned char st;
+	unsigned char st;
 
 #undef TRACE_FLPY_INT
 
 #ifdef TRACE_FLPY_INT
-	static int calls=0;
-	static int bytes=0;
-	static int dma_wait=0;
+	static int calls;
+	static int bytes;
+	static int dma_wait;
 #endif
 	if (!doing_pdma)
 		return floppy_interrupt(irq, dev_id);
 
 #ifdef TRACE_FLPY_INT
-	if(!calls)
+	if (!calls)
 		bytes = virtual_dma_count;
 #endif
 
 	{
-		register int lcount;
-		register char *lptr;
+		int lcount;
+		char *lptr;
 
 		st = 1;
-		for(lcount=virtual_dma_count, lptr=virtual_dma_addr;
-		    lcount; lcount--, lptr++) {
-			st=inb(virtual_dma_port+4) & 0xa0 ;
-			if(st != 0xa0)
+		for (lcount = virtual_dma_count, lptr = virtual_dma_addr;
+		     lcount; lcount--, lptr++) {
+			st = inb(virtual_dma_port + 4) & 0xa0;
+			if (st != 0xa0)
 				break;
-			if(virtual_dma_mode)
-				outb_p(*lptr, virtual_dma_port+5);
+			if (virtual_dma_mode)
+				outb_p(*lptr, virtual_dma_port + 5);
 			else
-				*lptr = inb_p(virtual_dma_port+5);
+				*lptr = inb_p(virtual_dma_port + 5);
 		}
 		virtual_dma_count = lcount;
 		virtual_dma_addr = lptr;
-		st = inb(virtual_dma_port+4);
+		st = inb(virtual_dma_port + 4);
 	}
 
 #ifdef TRACE_FLPY_INT
 	calls++;
 #endif
-	if(st == 0x20)
+	if (st == 0x20)
 		return IRQ_HANDLED;
-	if(!(st & 0x20)) {
+	if (!(st & 0x20)) {
 		virtual_dma_residue += virtual_dma_count;
-		virtual_dma_count=0;
+		virtual_dma_count = 0;
 #ifdef TRACE_FLPY_INT
 		printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n",
 		       virtual_dma_count, virtual_dma_residue, calls, bytes,
 		       dma_wait);
 		calls = 0;
-		dma_wait=0;
+		dma_wait = 0;
 #endif
 		doing_pdma = 0;
 		floppy_interrupt(irq, dev_id);
 		return IRQ_HANDLED;
 	}
 #ifdef TRACE_FLPY_INT
-	if(!virtual_dma_count)
+	if (!virtual_dma_count)
 		dma_wait++;
 #endif
 	return IRQ_HANDLED;
@@ -117,14 +118,14 @@
 
 static void fd_disable_dma(void)
 {
-	if(! (can_use_virtual_dma & 1))
+	if (!(can_use_virtual_dma & 1))
 		disable_dma(FLOPPY_DMA);
 	doing_pdma = 0;
 	virtual_dma_residue += virtual_dma_count;
-	virtual_dma_count=0;
+	virtual_dma_count = 0;
 }
 
-static int vdma_request_dma(unsigned int dmanr, const char * device_id)
+static int vdma_request_dma(unsigned int dmanr, const char *device_id)
 {
 	return 0;
 }
@@ -142,7 +143,7 @@
 
 static int fd_request_irq(void)
 {
-	if(can_use_virtual_dma)
+	if (can_use_virtual_dma)
 		return request_irq(FLOPPY_IRQ, floppy_hardint,
 				   IRQF_DISABLED, "floppy", NULL);
 	else
@@ -152,13 +153,13 @@
 
 static unsigned long dma_mem_alloc(unsigned long size)
 {
-	return __get_dma_pages(GFP_KERNEL|__GFP_NORETRY,get_order(size));
+	return __get_dma_pages(GFP_KERNEL|__GFP_NORETRY, get_order(size));
 }
 
 
 static unsigned long vdma_mem_alloc(unsigned long size)
 {
-	return (unsigned long) vmalloc(size);
+	return (unsigned long)vmalloc(size);
 
 }
 
@@ -166,7 +167,7 @@
 
 static void _fd_dma_mem_free(unsigned long addr, unsigned long size)
 {
-	if((unsigned long) addr >= (unsigned long) high_memory)
+	if ((unsigned long)addr >= (unsigned long)high_memory)
 		vfree((void *)addr);
 	else
 		free_pages(addr, get_order(size));
@@ -176,10 +177,10 @@
 
 static void _fd_chose_dma_mode(char *addr, unsigned long size)
 {
-	if(can_use_virtual_dma == 2) {
-		if((unsigned long) addr >= (unsigned long) high_memory ||
-		   isa_virt_to_bus(addr) >= 0x1000000 ||
-		   _CROSS_64KB(addr, size, 0))
+	if (can_use_virtual_dma == 2) {
+		if ((unsigned long)addr >= (unsigned long)high_memory ||
+		    isa_virt_to_bus(addr) >= 0x1000000 ||
+		    _CROSS_64KB(addr, size, 0))
 			use_virtual_dma = 1;
 		else
 			use_virtual_dma = 0;
@@ -195,7 +196,7 @@
 {
 	doing_pdma = 1;
 	virtual_dma_port = io;
-	virtual_dma_mode = (mode  == DMA_MODE_WRITE);
+	virtual_dma_mode = (mode == DMA_MODE_WRITE);
 	virtual_dma_addr = addr;
 	virtual_dma_count = size;
 	virtual_dma_residue = 0;
@@ -213,18 +214,18 @@
 	/* actual, physical DMA */
 	doing_pdma = 0;
 	clear_dma_ff(FLOPPY_DMA);
-	set_dma_mode(FLOPPY_DMA,mode);
-	set_dma_addr(FLOPPY_DMA,isa_virt_to_bus(addr));
-	set_dma_count(FLOPPY_DMA,size);
+	set_dma_mode(FLOPPY_DMA, mode);
+	set_dma_addr(FLOPPY_DMA, isa_virt_to_bus(addr));
+	set_dma_count(FLOPPY_DMA, size);
 	enable_dma(FLOPPY_DMA);
 	return 0;
 }
 
 static struct fd_routine_l {
-	int (*_request_dma)(unsigned int dmanr, const char * device_id);
+	int (*_request_dma)(unsigned int dmanr, const char *device_id);
 	void (*_free_dma)(unsigned int dmanr);
 	int (*_get_dma_residue)(unsigned int dummy);
-	unsigned long (*_dma_mem_alloc) (unsigned long size);
+	unsigned long (*_dma_mem_alloc)(unsigned long size);
 	int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
 } fd_routine[] = {
 	{
@@ -252,7 +253,8 @@
  * is needed to prevent corrupted CMOS RAM in case "insmod floppy"
  * coincides with another rtc CMOS user.		Paul G.
  */
-#define FLOPPY0_TYPE	({				\
+#define FLOPPY0_TYPE					\
+({							\
 	unsigned long flags;				\
 	unsigned char val;				\
 	spin_lock_irqsave(&rtc_lock, flags);		\
@@ -261,7 +263,8 @@
 	val;						\
 })
 
-#define FLOPPY1_TYPE	({				\
+#define FLOPPY1_TYPE					\
+({							\
 	unsigned long flags;				\
 	unsigned char val;				\
 	spin_lock_irqsave(&rtc_lock, flags);		\
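
_CROSS_64KB() is pure address arithmetic: with K_64 being 64 KiB, a buffer
crosses iff its first and last byte land in different 64K pages (and virtual
DMA is not in use). Two worked cases with illustrative addresses:

	/* first byte in 64 KiB page 0, last byte (0x1000f) in page 1: */
	_CROSS_64KB((void *)0xfff0, 0x20, 0)	/* -> true: fall back to PIO */

	/* 0x10000..0x10fff lies entirely inside the second 64 KiB page: */
	_CROSS_64KB((void *)0x10000, 0x1000, 0)	/* -> false: DMA is safe */
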
diff --git a/include/asm-x86/futex.h b/include/asm-x86/futex.h
index c9952ea..ac0fbf2 100644
--- a/include/asm-x86/futex.h
+++ b/include/asm-x86/futex.h
@@ -12,35 +12,32 @@
 #include <asm/uaccess.h>
 
 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg)	\
-  __asm__ __volatile(						\
-"1:	" insn "\n"						\
-"2:	.section .fixup,\"ax\"\n				\
-3:	mov	%3, %1\n					\
-	jmp	2b\n						\
-	.previous\n"						\
-	_ASM_EXTABLE(1b,3b)					\
-	: "=r" (oldval), "=r" (ret), "+m" (*uaddr)		\
-	: "i" (-EFAULT), "0" (oparg), "1" (0))
+	asm volatile("1:\t" insn "\n"				\
+		     "2:\t.section .fixup,\"ax\"\n"		\
+		     "3:\tmov\t%3, %1\n"			\
+		     "\tjmp\t2b\n"				\
+		     "\t.previous\n"				\
+		     _ASM_EXTABLE(1b, 3b)			\
+		     : "=r" (oldval), "=r" (ret), "+m" (*uaddr)	\
+		     : "i" (-EFAULT), "0" (oparg), "1" (0))
 
 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg)	\
-  __asm__ __volatile(						\
-"1:	movl	%2, %0\n					\
-	movl	%0, %3\n"					\
-	insn "\n"						\
-"2:	lock; cmpxchgl %3, %2\n					\
-	jnz	1b\n						\
-3:	.section .fixup,\"ax\"\n				\
-4:	mov	%5, %1\n					\
-	jmp	3b\n						\
-	.previous\n"						\
-	_ASM_EXTABLE(1b,4b)					\
-	_ASM_EXTABLE(2b,4b)					\
-	: "=&a" (oldval), "=&r" (ret), "+m" (*uaddr),		\
-	  "=&r" (tem)						\
-	: "r" (oparg), "i" (-EFAULT), "1" (0))
+	asm volatile("1:\tmovl	%2, %0\n"			\
+		     "\tmovl\t%0, %3\n"				\
+		     "\t" insn "\n"				\
+		     "2:\tlock; cmpxchgl %3, %2\n"		\
+		     "\tjnz\t1b\n"				\
+		     "3:\t.section .fixup,\"ax\"\n"		\
+		     "4:\tmov\t%5, %1\n"			\
+		     "\tjmp\t3b\n"				\
+		     "\t.previous\n"				\
+		     _ASM_EXTABLE(1b, 4b)			\
+		     _ASM_EXTABLE(2b, 4b)			\
+		     : "=&a" (oldval), "=&r" (ret),		\
+		       "+m" (*uaddr), "=&r" (tem)		\
+		     : "r" (oparg), "i" (-EFAULT), "1" (0))
 
-static inline int
-futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
 {
 	int op = (encoded_op >> 28) & 7;
 	int cmp = (encoded_op >> 24) & 15;
@@ -87,20 +84,33 @@
 
 	if (!ret) {
 		switch (cmp) {
-		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
-		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
-		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
-		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
-		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
-		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
-		default: ret = -ENOSYS;
+		case FUTEX_OP_CMP_EQ:
+			ret = (oldval == cmparg);
+			break;
+		case FUTEX_OP_CMP_NE:
+			ret = (oldval != cmparg);
+			break;
+		case FUTEX_OP_CMP_LT:
+			ret = (oldval < cmparg);
+			break;
+		case FUTEX_OP_CMP_GE:
+			ret = (oldval >= cmparg);
+			break;
+		case FUTEX_OP_CMP_LE:
+			ret = (oldval <= cmparg);
+			break;
+		case FUTEX_OP_CMP_GT:
+			ret = (oldval > cmparg);
+			break;
+		default:
+			ret = -ENOSYS;
 		}
 	}
 	return ret;
 }
 
-static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
+						int newval)
 {
 
 #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP)
@@ -112,16 +122,15 @@
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
 		return -EFAULT;
 
-	__asm__ __volatile__(
-		"1:	lock; cmpxchgl %3, %1			\n"
-		"2:	.section .fixup, \"ax\"			\n"
-		"3:	mov     %2, %0				\n"
-		"	jmp     2b				\n"
-		"	.previous				\n"
-		_ASM_EXTABLE(1b,3b)
-		: "=a" (oldval), "+m" (*uaddr)
-		: "i" (-EFAULT), "r" (newval), "0" (oldval)
-		: "memory"
+	asm volatile("1:\tlock; cmpxchgl %3, %1\n"
+		     "2:\t.section .fixup, \"ax\"\n"
+		     "3:\tmov     %2, %0\n"
+		     "\tjmp     2b\n"
+		     "\t.previous\n"
+		     _ASM_EXTABLE(1b, 3b)
+		     : "=a" (oldval), "+m" (*uaddr)
+		     : "i" (-EFAULT), "r" (newval), "0" (oldval)
+		     : "memory"
 	);
 
 	return oldval;
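
encoded_op packs four fields, decoded by the shifts at the top of
futex_atomic_op_inuser(). Working through the illustrative value 0x12003004:
op = 1 (FUTEX_OP_ADD), cmp = 2 (FUTEX_OP_CMP_LT), oparg = 3, cmparg = 4,
i.e. "atomically add 3 to *uaddr, then report whether the old value was < 4":

	int enc    = 0x12003004;
	int op     = (enc >> 28) & 7;	/* 1 == FUTEX_OP_ADD    */
	int cmp    = (enc >> 24) & 15;	/* 2 == FUTEX_OP_CMP_LT */
	int oparg  = (enc << 8) >> 20;	/* 3                    */
	int cmparg = (enc << 20) >> 20;	/* 4 (the arithmetic right
					   shift sign-extends)  */
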
diff --git a/include/asm-x86/genapic_32.h b/include/asm-x86/genapic_32.h
index 33e3ffe..f1b9693 100644
--- a/include/asm-x86/genapic_32.h
+++ b/include/asm-x86/genapic_32.h
@@ -14,23 +14,22 @@
  * Copyright 2003 Andi Kleen, SuSE Labs.
  */
 
-struct mpc_config_translation;
 struct mpc_config_bus;
 struct mp_config_table;
 struct mpc_config_processor;
 
-struct genapic { 
-	char *name; 
-	int (*probe)(void); 
+struct genapic {
+	char *name;
+	int (*probe)(void);
 
 	int (*apic_id_registered)(void);
 	cpumask_t (*target_cpus)(void);
 	int int_delivery_mode;
-	int int_dest_mode; 
+	int int_dest_mode;
 	int ESR_DISABLE;
 	int apic_destination_logical;
 	unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid);
-	unsigned long (*check_apicid_present)(int apicid); 
+	unsigned long (*check_apicid_present)(int apicid);
 	int no_balance_irq;
 	int no_ioapic_check;
 	void (*init_apic_ldr)(void);
@@ -38,28 +37,21 @@
 
 	void (*setup_apic_routing)(void);
 	int (*multi_timer_check)(int apic, int irq);
-	int (*apicid_to_node)(int logical_apicid); 
+	int (*apicid_to_node)(int logical_apicid);
 	int (*cpu_to_logical_apicid)(int cpu);
 	int (*cpu_present_to_apicid)(int mps_cpu);
 	physid_mask_t (*apicid_to_cpu_present)(int phys_apicid);
-	int (*mpc_apic_id)(struct mpc_config_processor *m, 
-			   struct mpc_config_translation *t); 
-	void (*setup_portio_remap)(void); 
+	void (*setup_portio_remap)(void);
 	int (*check_phys_apicid_present)(int boot_cpu_physical_apicid);
 	void (*enable_apic_mode)(void);
 	u32 (*phys_pkg_id)(u32 cpuid_apic, int index_msb);
 
 	/* mpparse */
-	void (*mpc_oem_bus_info)(struct mpc_config_bus *, char *, 
-				 struct mpc_config_translation *);
-	void (*mpc_oem_pci_bus)(struct mpc_config_bus *, 
-				struct mpc_config_translation *); 
-
 	/* When one of the next two hooks returns 1 the genapic
-	   is switched to this. Essentially they are additional probe 
+	   is switched to this. Essentially they are additional probe
 	   functions. */
-	int (*mps_oem_check)(struct mp_config_table *mpc, char *oem, 
-			      char *productid);
+	int (*mps_oem_check)(struct mp_config_table *mpc, char *oem,
+			     char *productid);
 	int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
 
 	unsigned (*get_apic_id)(unsigned long x);
@@ -72,7 +64,7 @@
 	void (*send_IPI_allbutself)(int vector);
 	void (*send_IPI_all)(int vector);
 #endif
-}; 
+};
 
 #define APICFUNC(x) .x = x,
 
@@ -85,43 +77,46 @@
 #define IPIFUNC(x)
 #endif
 
-#define APIC_INIT(aname, aprobe) { \
-	.name = aname, \
-	.probe = aprobe, \
-	.int_delivery_mode = INT_DELIVERY_MODE, \
-	.int_dest_mode = INT_DEST_MODE, \
-	.no_balance_irq = NO_BALANCE_IRQ, \
-	.ESR_DISABLE = esr_disable, \
-	.apic_destination_logical = APIC_DEST_LOGICAL, \
-	APICFUNC(apic_id_registered) \
-	APICFUNC(target_cpus) \
-	APICFUNC(check_apicid_used) \
-	APICFUNC(check_apicid_present) \
-	APICFUNC(init_apic_ldr) \
-	APICFUNC(ioapic_phys_id_map) \
-	APICFUNC(setup_apic_routing) \
-	APICFUNC(multi_timer_check) \
-	APICFUNC(apicid_to_node) \
-	APICFUNC(cpu_to_logical_apicid) \
-	APICFUNC(cpu_present_to_apicid) \
-	APICFUNC(apicid_to_cpu_present) \
-	APICFUNC(mpc_apic_id) \
-	APICFUNC(setup_portio_remap) \
-	APICFUNC(check_phys_apicid_present) \
-	APICFUNC(mpc_oem_bus_info) \
-	APICFUNC(mpc_oem_pci_bus) \
-	APICFUNC(mps_oem_check) \
-	APICFUNC(get_apic_id) \
-	.apic_id_mask = APIC_ID_MASK, \
-	APICFUNC(cpu_mask_to_apicid) \
-	APICFUNC(acpi_madt_oem_check) \
-	IPIFUNC(send_IPI_mask) \
-	IPIFUNC(send_IPI_allbutself) \
-	IPIFUNC(send_IPI_all) \
-	APICFUNC(enable_apic_mode) \
-	APICFUNC(phys_pkg_id) \
-	}
+#define APIC_INIT(aname, aprobe)			\
+{							\
+	.name = aname,					\
+	.probe = aprobe,				\
+	.int_delivery_mode = INT_DELIVERY_MODE,		\
+	.int_dest_mode = INT_DEST_MODE,			\
+	.no_balance_irq = NO_BALANCE_IRQ,		\
+	.ESR_DISABLE = esr_disable,			\
+	.apic_destination_logical = APIC_DEST_LOGICAL,	\
+	APICFUNC(apic_id_registered)			\
+	APICFUNC(target_cpus)				\
+	APICFUNC(check_apicid_used)			\
+	APICFUNC(check_apicid_present)			\
+	APICFUNC(init_apic_ldr)				\
+	APICFUNC(ioapic_phys_id_map)			\
+	APICFUNC(setup_apic_routing)			\
+	APICFUNC(multi_timer_check)			\
+	APICFUNC(apicid_to_node)			\
+	APICFUNC(cpu_to_logical_apicid)			\
+	APICFUNC(cpu_present_to_apicid)			\
+	APICFUNC(apicid_to_cpu_present)			\
+	APICFUNC(setup_portio_remap)			\
+	APICFUNC(check_phys_apicid_present)		\
+	APICFUNC(mps_oem_check)				\
+	APICFUNC(get_apic_id)				\
+	.apic_id_mask = APIC_ID_MASK,			\
+	APICFUNC(cpu_mask_to_apicid)			\
+	APICFUNC(acpi_madt_oem_check)			\
+	IPIFUNC(send_IPI_mask)				\
+	IPIFUNC(send_IPI_allbutself)			\
+	IPIFUNC(send_IPI_all)				\
+	APICFUNC(enable_apic_mode)			\
+	APICFUNC(phys_pkg_id)				\
+}
 
 extern struct genapic *genapic;
 
+enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
+#define get_uv_system_type()		UV_NONE
+#define is_uv_system()			0
+
+
 #endif
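
APIC_INIT() now expands to a designated initializer. A subarchitecture
declares its genapic along these lines (name and probe hook hypothetical;
INT_DELIVERY_MODE and friends must already be defined by the subarch
headers before this point):

	static int probe_mysubarch(void) { return 0; }

	struct genapic apic_mysubarch = APIC_INIT("mysubarch", probe_mysubarch);
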
diff --git a/include/asm-x86/genapic_64.h b/include/asm-x86/genapic_64.h
index d7e516c..1de931b 100644
--- a/include/asm-x86/genapic_64.h
+++ b/include/asm-x86/genapic_64.h
@@ -33,5 +33,15 @@
 
 extern struct genapic apic_flat;
 extern struct genapic apic_physflat;
+extern int acpi_madt_oem_check(char *, char *);
+
+enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
+extern enum uv_system_type get_uv_system_type(void);
+extern int is_uv_system(void);
+
+extern struct genapic apic_x2apic_uv_x;
+DECLARE_PER_CPU(int, x2apic_extra_bits);
+extern void uv_cpu_init(void);
+extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip);
 
 #endif
diff --git a/include/asm-x86/geode.h b/include/asm-x86/geode.h
index 9e72800..9870cc1 100644
--- a/include/asm-x86/geode.h
+++ b/include/asm-x86/geode.h
@@ -167,7 +167,7 @@
 /* MFGPTs */
 
 #define MFGPT_MAX_TIMERS	8
-#define MFGPT_TIMER_ANY		-1
+#define MFGPT_TIMER_ANY		(-1)
 
 #define MFGPT_DOMAIN_WORKING	1
 #define MFGPT_DOMAIN_STANDBY	2
diff --git a/include/asm-x86/highmem.h b/include/asm-x86/highmem.h
index 479767c..e153f3b 100644
--- a/include/asm-x86/highmem.h
+++ b/include/asm-x86/highmem.h
@@ -8,7 +8,7 @@
  *		      Gerhard.Wichert@pdb.siemens.de
  *
  *
- * Redesigned the x86 32-bit VM architecture to deal with 
+ * Redesigned the x86 32-bit VM architecture to deal with
  * up to 16 Terabyte physical memory. With current x86 CPUs
  * we now support up to 64 Gigabytes physical RAM.
  *
diff --git a/include/asm-x86/hw_irq_64.h b/include/asm-x86/hw_irq_64.h
index 312a58d..0062ef3 100644
--- a/include/asm-x86/hw_irq_64.h
+++ b/include/asm-x86/hw_irq_64.h
@@ -36,7 +36,7 @@
  * cleanup after irq migration.
  */
 #define IRQ_MOVE_CLEANUP_VECTOR	FIRST_EXTERNAL_VECTOR
- 
+
 /*
  * Vectors 0x30-0x3f are used for ISA interrupts.
  */
@@ -159,13 +159,12 @@
  *	SMP has a few special interrupts for IPI messages
  */
 
-#define BUILD_IRQ(nr) \
-asmlinkage void IRQ_NAME(nr); \
-__asm__( \
-"\n.p2align\n" \
-"IRQ" #nr "_interrupt:\n\t" \
-	"push $~(" #nr ") ; " \
-	"jmp common_interrupt");
+#define BUILD_IRQ(nr)				\
+	asmlinkage void IRQ_NAME(nr);		\
+	asm("\n.p2align\n"			\
+	    "IRQ" #nr "_interrupt:\n\t"		\
+	    "push $~(" #nr ") ; "		\
+	    "jmp common_interrupt");
 
 #define platform_legacy_irq(irq)	((irq) < 16)
 
diff --git a/include/asm-x86/hypertransport.h b/include/asm-x86/hypertransport.h
index c16c6ff..d2bbd23 100644
--- a/include/asm-x86/hypertransport.h
+++ b/include/asm-x86/hypertransport.h
@@ -8,12 +8,14 @@
 #define HT_IRQ_LOW_BASE			0xf8000000
 
 #define HT_IRQ_LOW_VECTOR_SHIFT		16
-#define  HT_IRQ_LOW_VECTOR_MASK		0x00ff0000
-#define  HT_IRQ_LOW_VECTOR(v)		(((v) << HT_IRQ_LOW_VECTOR_SHIFT) & HT_IRQ_LOW_VECTOR_MASK)
+#define HT_IRQ_LOW_VECTOR_MASK		0x00ff0000
+#define HT_IRQ_LOW_VECTOR(v)						\
+	(((v) << HT_IRQ_LOW_VECTOR_SHIFT) & HT_IRQ_LOW_VECTOR_MASK)
 
 #define HT_IRQ_LOW_DEST_ID_SHIFT	8
-#define  HT_IRQ_LOW_DEST_ID_MASK	0x0000ff00
-#define  HT_IRQ_LOW_DEST_ID(v)		(((v) << HT_IRQ_LOW_DEST_ID_SHIFT) & HT_IRQ_LOW_DEST_ID_MASK)
+#define HT_IRQ_LOW_DEST_ID_MASK		0x0000ff00
+#define HT_IRQ_LOW_DEST_ID(v)						\
+	(((v) << HT_IRQ_LOW_DEST_ID_SHIFT) & HT_IRQ_LOW_DEST_ID_MASK)
 
 #define HT_IRQ_LOW_DM_PHYSICAL		0x0000000
 #define HT_IRQ_LOW_DM_LOGICAL		0x0000040
@@ -36,7 +38,8 @@
 
 
 #define HT_IRQ_HIGH_DEST_ID_SHIFT	0
-#define  HT_IRQ_HIGH_DEST_ID_MASK	0x00ffffff
-#define  HT_IRQ_HIGH_DEST_ID(v)		((((v) >> 8) << HT_IRQ_HIGH_DEST_ID_SHIFT) & HT_IRQ_HIGH_DEST_ID_MASK)
+#define HT_IRQ_HIGH_DEST_ID_MASK	0x00ffffff
+#define HT_IRQ_HIGH_DEST_ID(v)						\
+	((((v) >> 8) << HT_IRQ_HIGH_DEST_ID_SHIFT) & HT_IRQ_HIGH_DEST_ID_MASK)
 
 #endif /* ASM_HYPERTRANSPORT_H */
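
The reindented HT_IRQ_* macros are plain shift-and-mask encoders. A stand-alone
check of the HT_IRQ_LOW_VECTOR() behavior, with the constants restated from the
header (the test values themselves are illustrative):

#include <assert.h>

#define HT_IRQ_LOW_VECTOR_SHIFT	16
#define HT_IRQ_LOW_VECTOR_MASK	0x00ff0000
#define HT_IRQ_LOW_VECTOR(v) \
	(((v) << HT_IRQ_LOW_VECTOR_SHIFT) & HT_IRQ_LOW_VECTOR_MASK)

int main(void)
{
	/* vector 0x31 lands in bits 16-23 of the low IRQ word ... */
	assert(HT_IRQ_LOW_VECTOR(0x31) == 0x00310000);
	/* ... and bits that fall outside the mask are discarded */
	assert(HT_IRQ_LOW_VECTOR(0x131) == 0x00310000);
	return 0;
}
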
diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h
index f377b76..54522b8 100644
--- a/include/asm-x86/i387.h
+++ b/include/asm-x86/i387.h
@@ -41,7 +41,7 @@
 {
 	asm volatile("1: fwait\n"
 		     "2:\n"
-		     _ASM_EXTABLE(1b,2b));
+		     _ASM_EXTABLE(1b, 2b));
 }
 
 static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
@@ -54,7 +54,7 @@
 		     "3:  movl $-1,%[err]\n"
 		     "    jmp  2b\n"
 		     ".previous\n"
-		     _ASM_EXTABLE(1b,3b)
+		     _ASM_EXTABLE(1b, 3b)
 		     : [err] "=r" (err)
 #if 0 /* See comment in __save_init_fpu() below. */
 		     : [fx] "r" (fx), "m" (*fx), "0" (0));
@@ -76,11 +76,11 @@
 static inline void clear_fpu_state(struct i387_fxsave_struct *fx)
 {
 	if (unlikely(fx->swd & X87_FSW_ES))
-		 asm volatile("fnclex");
+		asm volatile("fnclex");
 	alternative_input(ASM_NOP8 ASM_NOP2,
-		     "    emms\n"		/* clear stack tags */
-		     "    fildl %%gs:0",	/* load to clear state */
-		     X86_FEATURE_FXSAVE_LEAK);
+			  "    emms\n"		/* clear stack tags */
+			  "    fildl %%gs:0",	/* load to clear state */
+			  X86_FEATURE_FXSAVE_LEAK);
 }
 
 static inline int save_i387_checking(struct i387_fxsave_struct __user *fx)
@@ -93,14 +93,15 @@
 		     "3:  movl $-1,%[err]\n"
 		     "    jmp  2b\n"
 		     ".previous\n"
-		     _ASM_EXTABLE(1b,3b)
+		     _ASM_EXTABLE(1b, 3b)
 		     : [err] "=r" (err), "=m" (*fx)
 #if 0 /* See comment in __fxsave_clear() below. */
 		     : [fx] "r" (fx), "0" (0));
 #else
 		     : [fx] "cdaSDb" (fx), "0" (0));
 #endif
-	if (unlikely(err) && __clear_user(fx, sizeof(struct i387_fxsave_struct)))
+	if (unlikely(err) &&
+	    __clear_user(fx, sizeof(struct i387_fxsave_struct)))
 		err = -EFAULT;
 	/* No need to clear here because the caller clears USED_MATH */
 	return err;
@@ -156,8 +157,10 @@
 		return 0;
 	clear_used_math(); /* trigger finit */
 	if (task_thread_info(tsk)->status & TS_USEDFPU) {
-		err = save_i387_checking((struct i387_fxsave_struct __user *)buf);
-		if (err) return err;
+		err = save_i387_checking((struct i387_fxsave_struct __user *)
+					 buf);
+		if (err)
+			return err;
 		task_thread_info(tsk)->status &= ~TS_USEDFPU;
 		stts();
 	} else {
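
The i387 helpers above all share one exception-table idiom: label 1 is the
instruction that may fault, label 3 is an out-of-line fixup that records the
error and resumes at label 2. A reduced sketch of that idiom, assuming only
_ASM_EXTABLE from <asm/asm.h>; the helper name and operand choices are
illustrative, not part of this patch:

static inline int get_user_u32_checking(u32 __user *p, u32 *val)
{
	int err = 0;
	u32 v = 0;

	asm volatile("1:	movl (%[ptr]), %[v]\n"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3:	movl $-1, %[err]\n"
		     "	jmp 2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "+r" (err), [v] "=r" (v)
		     : [ptr] "r" (p));
	*val = v;
	return err;
}
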
diff --git a/include/asm-x86/i8259.h b/include/asm-x86/i8259.h
index 67c319e..45d4df3 100644
--- a/include/asm-x86/i8259.h
+++ b/include/asm-x86/i8259.h
@@ -1,9 +1,11 @@
 #ifndef __ASM_I8259_H__
 #define __ASM_I8259_H__
 
+#include <linux/delay.h>
+
 extern unsigned int cached_irq_mask;
 
-#define __byte(x,y)		(((unsigned char *) &(y))[x])
+#define __byte(x, y)		(((unsigned char *)&(y))[x])
 #define cached_master_mask	(__byte(0, cached_irq_mask))
 #define cached_slave_mask	(__byte(1, cached_irq_mask))
 
@@ -29,7 +31,28 @@
 extern void disable_8259A_irq(unsigned int irq);
 extern unsigned int startup_8259A_irq(unsigned int irq);
 
-#define inb_pic		inb_p
-#define outb_pic	outb_p
+/* the PIC may need a careful delay on some platforms, hence specific calls */
+static inline unsigned char inb_pic(unsigned int port)
+{
+	unsigned char value = inb(port);
+
+	/*
+	 * delay for some accesses to PIC on motherboard or in chipset
+	 * must be at least one microsecond, so be safe here:
+	 */
+	udelay(2);
+
+	return value;
+}
+
+static inline void outb_pic(unsigned char value, unsigned int port)
+{
+	outb(value, port);
+	/*
+	 * delay for some accesses to PIC on motherboard or in chipset
+	 * must be at least one microsecond, so be safe here:
+	 */
+	udelay(2);
+}
 
 #endif	/* __ASM_I8259_H__ */
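
Turning inb_pic/outb_pic from bare aliases into inline functions guarantees the
2us settling delay on every PIC access. A hedged usage sketch, assuming the
conventional master-8259 data port at 0x21 (the helper below is illustrative,
not from this patch):

#define PIC_MASTER_IMR	0x21	/* assumed: master 8259 interrupt mask port */

static void mask_irq3(void)
{
	unsigned char mask = inb_pic(PIC_MASTER_IMR);	/* read, then udelay */

	mask |= 1 << 3;			/* bit 3 set = IRQ 3 masked */
	outb_pic(mask, PIC_MASTER_IMR);	/* write back, then udelay */
}
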
diff --git a/include/asm-x86/ia32.h b/include/asm-x86/ia32.h
index aa97332..55d3abe 100644
--- a/include/asm-x86/ia32.h
+++ b/include/asm-x86/ia32.h
@@ -14,19 +14,19 @@
 
 /* signal.h */
 struct sigaction32 {
-       unsigned int  sa_handler;	/* Really a pointer, but need to deal 
-					     with 32 bits */
-       unsigned int sa_flags;
-       unsigned int sa_restorer;	/* Another 32 bit pointer */
-       compat_sigset_t sa_mask;		/* A 32 bit mask */
+	unsigned int  sa_handler;	/* Really a pointer, but need to deal
+					   with 32 bits */
+	unsigned int sa_flags;
+	unsigned int sa_restorer;	/* Another 32 bit pointer */
+	compat_sigset_t sa_mask;	/* A 32 bit mask */
 };
 
 struct old_sigaction32 {
-       unsigned int  sa_handler;	/* Really a pointer, but need to deal 
-					     with 32 bits */
-       compat_old_sigset_t sa_mask;		/* A 32 bit mask */
-       unsigned int sa_flags;
-       unsigned int sa_restorer;	/* Another 32 bit pointer */
+	unsigned int  sa_handler;	/* Really a pointer, but need to deal
+					   with 32 bits */
+	compat_old_sigset_t sa_mask;	/* A 32 bit mask */
+	unsigned int sa_flags;
+	unsigned int sa_restorer;	/* Another 32 bit pointer */
 };
 
 typedef struct sigaltstack_ia32 {
@@ -65,7 +65,7 @@
 	long long		st_size;
 	unsigned int		st_blksize;
 
-	long long		st_blocks;/* Number 512-byte blocks allocated. */
+	long long		st_blocks;/* Number 512-byte blocks allocated */
 
 	unsigned 		st_atime;
 	unsigned 		st_atime_nsec;
@@ -77,13 +77,13 @@
 	unsigned long long	st_ino;
 } __attribute__((packed));
 
-typedef struct compat_siginfo{
+typedef struct compat_siginfo {
 	int si_signo;
 	int si_errno;
 	int si_code;
 
 	union {
-		int _pad[((128/sizeof(int)) - 3)];
+		int _pad[((128 / sizeof(int)) - 3)];
 
 		/* kill() */
 		struct {
@@ -129,28 +129,26 @@
 	} _sifields;
 } compat_siginfo_t;
 
-struct sigframe32
-{
-        u32 pretcode;
-        int sig;
-        struct sigcontext_ia32 sc;
-        struct _fpstate_ia32 fpstate;
-        unsigned int extramask[_COMPAT_NSIG_WORDS-1];
+struct sigframe32 {
+	u32 pretcode;
+	int sig;
+	struct sigcontext_ia32 sc;
+	struct _fpstate_ia32 fpstate;
+	unsigned int extramask[_COMPAT_NSIG_WORDS-1];
 };
 
-struct rt_sigframe32
-{
-        u32 pretcode;
-        int sig;
-        u32 pinfo;
-        u32 puc;
-        compat_siginfo_t info;
-        struct ucontext_ia32 uc;
-        struct _fpstate_ia32 fpstate;
+struct rt_sigframe32 {
+	u32 pretcode;
+	int sig;
+	u32 pinfo;
+	u32 puc;
+	compat_siginfo_t info;
+	struct ucontext_ia32 uc;
+	struct _fpstate_ia32 fpstate;
 };
 
 struct ustat32 {
-	__u32	f_tfree;
+	__u32			f_tfree;
 	compat_ino_t		f_tinode;
 	char			f_fname[6];
 	char			f_fpack[6];
@@ -168,5 +166,5 @@
 #endif
 
 #endif /* !CONFIG_IA32_SUPPORT */
- 
-#endif 
+
+#endif
diff --git a/include/asm-x86/ide.h b/include/asm-x86/ide.h
index c2552d8..cf9c98e 100644
--- a/include/asm-x86/ide.h
+++ b/include/asm-x86/ide.h
@@ -20,8 +20,6 @@
 # endif
 #endif
 
-#define IDE_ARCH_OBSOLETE_DEFAULTS
-
 static __inline__ int ide_default_irq(unsigned long base)
 {
 	switch (base) {
@@ -60,14 +58,6 @@
 	}
 }
 
-#define ide_default_io_ctl(base)	((base) + 0x206) /* obsolete */
-
-#ifdef CONFIG_BLK_DEV_IDEPCI
-#define ide_init_default_irq(base)	(0)
-#else
-#define ide_init_default_irq(base)	ide_default_irq(base)
-#endif
-
 #include <asm-generic/ide_iops.h>
 
 #endif /* __KERNEL__ */
diff --git a/include/asm-x86/io.h b/include/asm-x86/io.h
index 5a58b17..7b292d3 100644
--- a/include/asm-x86/io.h
+++ b/include/asm-x86/io.h
@@ -1,5 +1,11 @@
+#define ARCH_HAS_IOREMAP_WC
+
 #ifdef CONFIG_X86_32
 # include "io_32.h"
 #else
 # include "io_64.h"
 #endif
+extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
+				unsigned long prot_val);
+extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size);
+
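
The ioremap_wc() declared above returns a write-combining mapping, which suits
large linear targets such as framebuffers. A hedged driver-style sketch;
fb_phys, fb_len and the function name are assumptions for illustration:

static int map_and_clear_fb(unsigned long fb_phys, unsigned long fb_len)
{
	void __iomem *fb = ioremap_wc(fb_phys, fb_len);

	if (!fb)
		return -ENOMEM;
	memset_io(fb, 0, fb_len);	/* WC stores may be buffered and merged */
	iounmap(fb);
	return 0;
}
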
diff --git a/include/asm-x86/io_32.h b/include/asm-x86/io_32.h
index d4d8fbd..509045f 100644
--- a/include/asm-x86/io_32.h
+++ b/include/asm-x86/io_32.h
@@ -65,14 +65,14 @@
  *
  *	The returned physical address is the physical (CPU) mapping for
  *	the memory address given. It is only valid to use this function on
- *	addresses directly mapped or allocated via kmalloc. 
+ *	addresses directly mapped or allocated via kmalloc.
  *
  *	This function does not give bus mappings for DMA transfers. In
  *	almost all conceivable cases a device driver should not be using
  *	this function
  */
- 
-static inline unsigned long virt_to_phys(volatile void * address)
+
+static inline unsigned long virt_to_phys(volatile void *address)
 {
 	return __pa(address);
 }
@@ -90,7 +90,7 @@
  *	this function
  */
 
-static inline void * phys_to_virt(unsigned long address)
+static inline void *phys_to_virt(unsigned long address)
 {
 	return __va(address);
 }
@@ -169,16 +169,19 @@
 
 static inline unsigned char readb(const volatile void __iomem *addr)
 {
-	return *(volatile unsigned char __force *) addr;
+	return *(volatile unsigned char __force *)addr;
 }
+
 static inline unsigned short readw(const volatile void __iomem *addr)
 {
-	return *(volatile unsigned short __force *) addr;
+	return *(volatile unsigned short __force *)addr;
 }
+
 static inline unsigned int readl(const volatile void __iomem *addr)
 {
 	return *(volatile unsigned int __force *) addr;
 }
+
 #define readb_relaxed(addr) readb(addr)
 #define readw_relaxed(addr) readw(addr)
 #define readl_relaxed(addr) readl(addr)
@@ -188,15 +191,17 @@
 
 static inline void writeb(unsigned char b, volatile void __iomem *addr)
 {
-	*(volatile unsigned char __force *) addr = b;
+	*(volatile unsigned char __force *)addr = b;
 }
+
 static inline void writew(unsigned short b, volatile void __iomem *addr)
 {
-	*(volatile unsigned short __force *) addr = b;
+	*(volatile unsigned short __force *)addr = b;
 }
+
 static inline void writel(unsigned int b, volatile void __iomem *addr)
 {
-	*(volatile unsigned int __force *) addr = b;
+	*(volatile unsigned int __force *)addr = b;
 }
 #define __raw_writeb writeb
 #define __raw_writew writew
@@ -239,12 +244,12 @@
  *	1. Out of order aware processors
  *	2. Accidentally out of order processors (PPro errata #51)
  */
- 
+
 #if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
 
 static inline void flush_write_buffers(void)
 {
-	__asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory");
+	asm volatile("lock; addl $0,0(%%esp)": : :"memory");
 }
 
 #else
@@ -264,7 +269,8 @@
 #include <asm/paravirt.h>
 #else
 
-static inline void slow_down_io(void) {
+static inline void slow_down_io(void)
+{
 	native_io_delay();
 #ifdef REALLY_SLOW_IO
 	native_io_delay();
@@ -275,51 +281,74 @@
 
 #endif
 
-#define __BUILDIO(bwl,bw,type) \
-static inline void out##bwl(unsigned type value, int port) { \
-	out##bwl##_local(value, port); \
-} \
-static inline unsigned type in##bwl(int port) { \
-	return in##bwl##_local(port); \
+#define __BUILDIO(bwl, bw, type)				\
+static inline void out##bwl(unsigned type value, int port)	\
+{								\
+	out##bwl##_local(value, port);				\
+}								\
+								\
+static inline unsigned type in##bwl(int port)			\
+{								\
+	return in##bwl##_local(port);				\
 }
 
-#define BUILDIO(bwl,bw,type) \
-static inline void out##bwl##_local(unsigned type value, int port) { \
-	__asm__ __volatile__("out" #bwl " %" #bw "0, %w1" : : "a"(value), "Nd"(port)); \
-} \
-static inline unsigned type in##bwl##_local(int port) { \
-	unsigned type value; \
-	__asm__ __volatile__("in" #bwl " %w1, %" #bw "0" : "=a"(value) : "Nd"(port)); \
-	return value; \
-} \
-static inline void out##bwl##_local_p(unsigned type value, int port) { \
-	out##bwl##_local(value, port); \
-	slow_down_io(); \
-} \
-static inline unsigned type in##bwl##_local_p(int port) { \
-	unsigned type value = in##bwl##_local(port); \
-	slow_down_io(); \
-	return value; \
-} \
-__BUILDIO(bwl,bw,type) \
-static inline void out##bwl##_p(unsigned type value, int port) { \
-	out##bwl(value, port); \
-	slow_down_io(); \
-} \
-static inline unsigned type in##bwl##_p(int port) { \
-	unsigned type value = in##bwl(port); \
-	slow_down_io(); \
-	return value; \
-} \
-static inline void outs##bwl(int port, const void *addr, unsigned long count) { \
-	__asm__ __volatile__("rep; outs" #bwl : "+S"(addr), "+c"(count) : "d"(port)); \
-} \
-static inline void ins##bwl(int port, void *addr, unsigned long count) { \
-	__asm__ __volatile__("rep; ins" #bwl : "+D"(addr), "+c"(count) : "d"(port)); \
+#define BUILDIO(bwl, bw, type)						\
+static inline void out##bwl##_local(unsigned type value, int port)	\
+{									\
+	asm volatile("out" #bwl " %" #bw "0, %w1"		\
+		     : : "a"(value), "Nd"(port));			\
+}									\
+									\
+static inline unsigned type in##bwl##_local(int port)			\
+{									\
+	unsigned type value;						\
+	asm volatile("in" #bwl " %w1, %" #bw "0"		\
+		     : "=a"(value) : "Nd"(port));			\
+	return value;							\
+}									\
+									\
+static inline void out##bwl##_local_p(unsigned type value, int port)	\
+{									\
+	out##bwl##_local(value, port);					\
+	slow_down_io();							\
+}									\
+									\
+static inline unsigned type in##bwl##_local_p(int port)			\
+{									\
+	unsigned type value = in##bwl##_local(port);			\
+	slow_down_io();							\
+	return value;							\
+}									\
+									\
+__BUILDIO(bwl, bw, type)						\
+									\
+static inline void out##bwl##_p(unsigned type value, int port)		\
+{									\
+	out##bwl(value, port);						\
+	slow_down_io();							\
+}									\
+									\
+static inline unsigned type in##bwl##_p(int port)			\
+{									\
+	unsigned type value = in##bwl(port);				\
+	slow_down_io();							\
+	return value;							\
+}									\
+									\
+static inline void outs##bwl(int port, const void *addr, unsigned long count) \
+{									\
+	asm volatile("rep; outs" #bwl					\
+		     : "+S"(addr), "+c"(count) : "d"(port));		\
+}									\
+									\
+static inline void ins##bwl(int port, void *addr, unsigned long count)	\
+{									\
+	asm volatile("rep; ins" #bwl					\
+		     : "+D"(addr), "+c"(count) : "d"(port));		\
 }
 
-BUILDIO(b,b,char)
-BUILDIO(w,w,short)
-BUILDIO(l,,int)
+BUILDIO(b, b, char)
+BUILDIO(w, w, short)
+BUILDIO(l, , int)
 
 #endif
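
The BUILDIO() family above stamps out inb/inw/inl and their siblings by token
pasting. A stand-alone model of the technique, with a plain array standing in
for the port space (purely illustrative):

#include <stdio.h>

static unsigned char port_space[65536];

#define BUILDIO(bwl, type)					\
static inline void out##bwl(type value, int port)		\
{								\
	port_space[port] = (unsigned char)value;		\
}								\
								\
static inline type in##bwl(int port)				\
{								\
	return (type)port_space[port];				\
}

BUILDIO(b, unsigned char)	/* defines outb() and inb() */

int main(void)
{
	outb(0xfe, 0x64);
	printf("0x%02x\n", inb(0x64));	/* prints 0xfe */
	return 0;
}
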
diff --git a/include/asm-x86/io_64.h b/include/asm-x86/io_64.h
index db0be20..c2f5eef 100644
--- a/include/asm-x86/io_64.h
+++ b/include/asm-x86/io_64.h
@@ -58,60 +58,75 @@
 /*
  * Talk about misusing macros..
  */
-#define __OUT1(s,x) \
+#define __OUT1(s, x)							\
 static inline void out##s(unsigned x value, unsigned short port) {
 
-#define __OUT2(s,s1,s2) \
-__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"
+#define __OUT2(s, s1, s2)				\
+asm volatile ("out" #s " %" s1 "0,%" s2 "1"
 
 #ifndef REALLY_SLOW_IO
 #define REALLY_SLOW_IO
 #define UNSET_REALLY_SLOW_IO
 #endif
 
-#define __OUT(s,s1,x) \
-__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
-__OUT1(s##_p, x) __OUT2(s, s1, "w") : : "a" (value), "Nd" (port)); \
-		slow_down_io(); }
+#define __OUT(s, s1, x)							\
+	__OUT1(s, x) __OUT2(s, s1, "w") : : "a" (value), "Nd" (port));	\
+	}								\
+	__OUT1(s##_p, x) __OUT2(s, s1, "w") : : "a" (value), "Nd" (port)); \
+	slow_down_io();							\
+}
 
-#define __IN1(s) \
-static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v;
+#define __IN1(s)							\
+static inline RETURN_TYPE in##s(unsigned short port)			\
+{									\
+	RETURN_TYPE _v;
 
-#define __IN2(s,s1,s2) \
-__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"
+#define __IN2(s, s1, s2)						\
+	asm volatile ("in" #s " %" s2 "1,%" s1 "0"
 
-#define __IN(s,s1,i...) \
-__IN1(s) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i); return _v; } \
-__IN1(s##_p) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i);	  \
-				slow_down_io(); return _v; }
+#define __IN(s, s1, i...)						\
+	__IN1(s) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i);	\
+	return _v;							\
+	}								\
+	__IN1(s##_p) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i);	\
+	slow_down_io(); \
+	return _v; }
 
 #ifdef UNSET_REALLY_SLOW_IO
 #undef REALLY_SLOW_IO
 #endif
 
-#define __INS(s) \
-static inline void ins##s(unsigned short port, void * addr, unsigned long count) \
-{ __asm__ __volatile__ ("rep ; ins" #s \
-: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
+#define __INS(s)							\
+static inline void ins##s(unsigned short port, void *addr,		\
+			  unsigned long count)				\
+{									\
+	asm volatile ("rep ; ins" #s					\
+		      : "=D" (addr), "=c" (count)			\
+		      : "d" (port), "0" (addr), "1" (count));		\
+}
 
-#define __OUTS(s) \
-static inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
-{ __asm__ __volatile__ ("rep ; outs" #s \
-: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
+#define __OUTS(s)							\
+static inline void outs##s(unsigned short port, const void *addr,	\
+			   unsigned long count)				\
+{									\
+	asm volatile ("rep ; outs" #s					\
+		      : "=S" (addr), "=c" (count)			\
+		      : "d" (port), "0" (addr), "1" (count));		\
+}
 
 #define RETURN_TYPE unsigned char
-__IN(b,"")
+__IN(b, "")
 #undef RETURN_TYPE
 #define RETURN_TYPE unsigned short
-__IN(w,"")
+__IN(w, "")
 #undef RETURN_TYPE
 #define RETURN_TYPE unsigned int
-__IN(l,"")
+__IN(l, "")
 #undef RETURN_TYPE
 
-__OUT(b,"b",char)
-__OUT(w,"w",short)
-__OUT(l,,int)
+__OUT(b, "b", char)
+__OUT(w, "w", short)
+__OUT(l, , int)
 
 __INS(b)
 __INS(w)
@@ -132,12 +147,12 @@
  * Change virtual addresses to physical addresses and vv.
  * These are pretty trivial
  */
-static inline unsigned long virt_to_phys(volatile void * address)
+static inline unsigned long virt_to_phys(volatile void *address)
 {
 	return __pa(address);
 }
 
-static inline void * phys_to_virt(unsigned long address)
+static inline void *phys_to_virt(unsigned long address)
 {
 	return __va(address);
 }
@@ -200,18 +215,22 @@
 {
 	return *(__force volatile __u8 *)addr;
 }
+
 static inline __u16 __readw(const volatile void __iomem *addr)
 {
 	return *(__force volatile __u16 *)addr;
 }
+
 static __always_inline __u32 __readl(const volatile void __iomem *addr)
 {
 	return *(__force volatile __u32 *)addr;
 }
+
 static inline __u64 __readq(const volatile void __iomem *addr)
 {
 	return *(__force volatile __u64 *)addr;
 }
+
 #define readb(x) __readb(x)
 #define readw(x) __readw(x)
 #define readl(x) __readl(x)
@@ -231,37 +250,44 @@
 {
 	*(__force volatile __u32 *)addr = b;
 }
+
 static inline void __writeq(__u64 b, volatile void __iomem *addr)
 {
 	*(__force volatile __u64 *)addr = b;
 }
+
 static inline void __writeb(__u8 b, volatile void __iomem *addr)
 {
 	*(__force volatile __u8 *)addr = b;
 }
+
 static inline void __writew(__u16 b, volatile void __iomem *addr)
 {
 	*(__force volatile __u16 *)addr = b;
 }
-#define writeq(val,addr) __writeq((val),(addr))
-#define writel(val,addr) __writel((val),(addr))
-#define writew(val,addr) __writew((val),(addr))
-#define writeb(val,addr) __writeb((val),(addr))
+
+#define writeq(val, addr) __writeq((val), (addr))
+#define writel(val, addr) __writel((val), (addr))
+#define writew(val, addr) __writew((val), (addr))
+#define writeb(val, addr) __writeb((val), (addr))
 #define __raw_writeb writeb
 #define __raw_writew writew
 #define __raw_writel writel
 #define __raw_writeq writeq
 
-void __memcpy_fromio(void*,unsigned long,unsigned);
-void __memcpy_toio(unsigned long,const void*,unsigned);
+void __memcpy_fromio(void *, unsigned long, unsigned);
+void __memcpy_toio(unsigned long, const void *, unsigned);
 
-static inline void memcpy_fromio(void *to, const volatile void __iomem *from, unsigned len)
+static inline void memcpy_fromio(void *to, const volatile void __iomem *from,
+				 unsigned len)
 {
-	__memcpy_fromio(to,(unsigned long)from,len);
+	__memcpy_fromio(to, (unsigned long)from, len);
 }
-static inline void memcpy_toio(volatile void __iomem *to, const void *from, unsigned len)
+
+static inline void memcpy_toio(volatile void __iomem *to, const void *from,
+			       unsigned len)
 {
-	__memcpy_toio((unsigned long)to,from,len);
+	__memcpy_toio((unsigned long)to, from, len);
 }
 
 void memset_io(volatile void __iomem *a, int b, size_t c);
@@ -276,7 +302,7 @@
  */
 #define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
 
-#define flush_write_buffers() 
+#define flush_write_buffers()
 
 extern int iommu_bio_merge;
 #define BIO_VMERGE_BOUNDARY iommu_bio_merge
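
The __IN1/__IN2/__IN trio above is exactly the macro misuse the comment admits
to: one macro opens the function, the next supplies its middle, and the outer
one closes it. A compilable miniature of the same trick (no real port access;
the port value itself stands in for the data):

#include <stdio.h>

#define RETTYPE unsigned char

#define __IN1(s)						\
static inline RETTYPE in##s(unsigned short port)		\
{								\
	RETTYPE _v;

#define __IN2()							\
	_v = (RETTYPE)(port & 0xff);	/* stand-in for the asm */

#define __IN(s)							\
	__IN1(s) __IN2()					\
	return _v;						\
}

__IN(b)				/* defines inb() */

int main(void)
{
	printf("0x%02x\n", inb(0x164));	/* prints 0x64 */
	return 0;
}
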
diff --git a/include/asm-x86/io_apic.h b/include/asm-x86/io_apic.h
index 0f5b3fe..0c9e17c 100644
--- a/include/asm-x86/io_apic.h
+++ b/include/asm-x86/io_apic.h
@@ -110,6 +110,13 @@
  * MP-BIOS irq configuration table structures:
  */
 
+struct mp_ioapic_routing {
+	int apic_id;
+	int gsi_base;
+	int gsi_end;
+	u32 pin_programmed[4];
+};
+
 /* I/O APIC entries */
 extern struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
 
@@ -146,7 +153,6 @@
 extern int io_apic_get_redir_entries(int ioapic);
 extern int io_apic_set_pci_routing(int ioapic, int pin, int irq,
 				   int edge_level, int active_high_low);
-extern int timer_uses_ioapic_pin_0;
 #endif /* CONFIG_ACPI */
 
 extern int (*ioapic_renumber_irq)(int ioapic, int irq);
diff --git a/include/asm-x86/ioctls.h b/include/asm-x86/ioctls.h
index 93c894d..c0c338b 100644
--- a/include/asm-x86/ioctls.h
+++ b/include/asm-x86/ioctls.h
@@ -47,12 +47,13 @@
 #define TIOCSBRK	0x5427  /* BSD compatibility */
 #define TIOCCBRK	0x5428  /* BSD compatibility */
 #define TIOCGSID	0x5429  /* Return the session ID of FD */
-#define TCGETS2		_IOR('T',0x2A, struct termios2)
-#define TCSETS2		_IOW('T',0x2B, struct termios2)
-#define TCSETSW2	_IOW('T',0x2C, struct termios2)
-#define TCSETSF2	_IOW('T',0x2D, struct termios2)
-#define TIOCGPTN	_IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
-#define TIOCSPTLCK	_IOW('T',0x31, int)  /* Lock/unlock Pty */
+#define TCGETS2		_IOR('T', 0x2A, struct termios2)
+#define TCSETS2		_IOW('T', 0x2B, struct termios2)
+#define TCSETSW2	_IOW('T', 0x2C, struct termios2)
+#define TCSETSF2	_IOW('T', 0x2D, struct termios2)
+#define TIOCGPTN	_IOR('T', 0x30, unsigned int)
+				/* Get Pty Number (of pty-mux device) */
+#define TIOCSPTLCK	_IOW('T', 0x31, int)  /* Lock/unlock Pty */
 
 #define FIONCLEX	0x5450
 #define FIOCLEX		0x5451
diff --git a/include/asm-x86/ipcbuf.h b/include/asm-x86/ipcbuf.h
index 2adf8b3..ee678fd 100644
--- a/include/asm-x86/ipcbuf.h
+++ b/include/asm-x86/ipcbuf.h
@@ -11,8 +11,7 @@
  * - 2 miscellaneous 32-bit values
  */
 
-struct ipc64_perm
-{
+struct ipc64_perm {
 	__kernel_key_t		key;
 	__kernel_uid32_t	uid;
 	__kernel_gid32_t	gid;
diff --git a/include/asm-x86/ipi.h b/include/asm-x86/ipi.h
index 6d011bd..ecc80f3 100644
--- a/include/asm-x86/ipi.h
+++ b/include/asm-x86/ipi.h
@@ -27,7 +27,8 @@
  * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
  */
 
-static inline unsigned int __prepare_ICR (unsigned int shortcut, int vector, unsigned int dest)
+static inline unsigned int __prepare_ICR(unsigned int shortcut, int vector,
+					 unsigned int dest)
 {
 	unsigned int icr = shortcut | dest;
 
@@ -42,12 +43,13 @@
 	return icr;
 }
 
-static inline int __prepare_ICR2 (unsigned int mask)
+static inline int __prepare_ICR2(unsigned int mask)
 {
 	return SET_APIC_DEST_FIELD(mask);
 }
 
-static inline void __send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
+static inline void __send_IPI_shortcut(unsigned int shortcut, int vector,
+				       unsigned int dest)
 {
 	/*
 	 * Subtle. In the case of the 'never do double writes' workaround
@@ -78,7 +80,8 @@
  * This is used to send an IPI with no shorthand notation (the destination is
  * specified in bits 56 to 63 of the ICR).
  */
-static inline void __send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
+static inline void __send_IPI_dest_field(unsigned int mask, int vector,
+					 unsigned int dest)
 {
 	unsigned long cfg;
 
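
__prepare_ICR() above simply ORs the shorthand, the destination mode, the
delivery mode and the vector into one ICR word. A stand-alone check of that
composition; the constants are the usual local-APIC encodings, restated here as
assumptions:

#include <assert.h>

#define APIC_DEST_ALLBUT	0x80000u	/* shorthand: all-but-self */
#define APIC_DEST_LOGICAL	0x00800u	/* logical destination mode */
#define APIC_DM_FIXED		0x00000u	/* fixed delivery mode */

static unsigned int prepare_icr(unsigned int shortcut, int vector,
				unsigned int dest)
{
	return shortcut | dest | APIC_DM_FIXED | vector;
}

int main(void)
{
	/* vector 0xfb, broadcast to all-but-self, logical mode */
	assert(prepare_icr(APIC_DEST_ALLBUT, 0xfb, APIC_DEST_LOGICAL)
	       == 0x808fbu);
	return 0;
}
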
diff --git a/include/asm-x86/irq_32.h b/include/asm-x86/irq_32.h
index aca9c96..0b79f31 100644
--- a/include/asm-x86/irq_32.h
+++ b/include/asm-x86/irq_32.h
@@ -15,7 +15,7 @@
 #include "irq_vectors.h"
 #include <asm/thread_info.h>
 
-static __inline__ int irq_canonicalize(int irq)
+static inline int irq_canonicalize(int irq)
 {
 	return ((irq == 2) ? 9 : irq);
 }
diff --git a/include/asm-x86/irq_64.h b/include/asm-x86/irq_64.h
index 5006c6e..083d35a 100644
--- a/include/asm-x86/irq_64.h
+++ b/include/asm-x86/irq_64.h
@@ -31,10 +31,10 @@
 
 #define FIRST_SYSTEM_VECTOR	0xef   /* duplicated in hw_irq.h */
 
-#define NR_IRQS (NR_VECTORS + (32 *NR_CPUS))
+#define NR_IRQS (NR_VECTORS + (32 * NR_CPUS))
 #define NR_IRQ_VECTORS NR_IRQS
 
-static __inline__ int irq_canonicalize(int irq)
+static inline int irq_canonicalize(int irq)
 {
 	return ((irq == 2) ? 9 : irq);
 }
diff --git a/include/asm-x86/irqflags.h b/include/asm-x86/irqflags.h
index 0e22924..c242527 100644
--- a/include/asm-x86/irqflags.h
+++ b/include/asm-x86/irqflags.h
@@ -12,25 +12,21 @@
 {
 	unsigned long flags;
 
-	__asm__ __volatile__(
-		"# __raw_save_flags\n\t"
-		"pushf ; pop %0"
-		: "=g" (flags)
-		: /* no input */
-		: "memory"
-	);
+	asm volatile("# __raw_save_flags\n\t"
+		     "pushf ; pop %0"
+		     : "=g" (flags)
+		     : /* no input */
+		     : "memory");
 
 	return flags;
 }
 
 static inline void native_restore_fl(unsigned long flags)
 {
-	__asm__ __volatile__(
-		"push %0 ; popf"
-		: /* no output */
-		:"g" (flags)
-		:"memory", "cc"
-	);
+	asm volatile("push %0 ; popf"
+		     : /* no output */
+		     :"g" (flags)
+		     :"memory", "cc");
 }
 
 static inline void native_irq_disable(void)
@@ -70,26 +66,6 @@
 	native_restore_fl(flags);
 }
 
-#ifdef CONFIG_X86_VSMP
-
-/*
- * Interrupt control for the VSMP architecture:
- */
-
-static inline void raw_local_irq_disable(void)
-{
-	unsigned long flags = __raw_local_save_flags();
-	raw_local_irq_restore((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
-}
-
-static inline void raw_local_irq_enable(void)
-{
-	unsigned long flags = __raw_local_save_flags();
-	raw_local_irq_restore((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
-}
-
-#else
-
 static inline void raw_local_irq_disable(void)
 {
 	native_irq_disable();
@@ -100,8 +76,6 @@
 	native_irq_enable();
 }
 
-#endif
-
 /*
  * Used in the idle loop; sti takes one instruction cycle
  * to complete:
@@ -153,23 +127,16 @@
 #endif /* CONFIG_PARAVIRT */
 
 #ifndef __ASSEMBLY__
-#define raw_local_save_flags(flags) \
-		do { (flags) = __raw_local_save_flags(); } while (0)
+#define raw_local_save_flags(flags)				\
+	do { (flags) = __raw_local_save_flags(); } while (0)
 
-#define raw_local_irq_save(flags) \
-		do { (flags) = __raw_local_irq_save(); } while (0)
+#define raw_local_irq_save(flags)				\
+	do { (flags) = __raw_local_irq_save(); } while (0)
 
-#ifdef CONFIG_X86_VSMP
-static inline int raw_irqs_disabled_flags(unsigned long flags)
-{
-	return !(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC);
-}
-#else
 static inline int raw_irqs_disabled_flags(unsigned long flags)
 {
 	return !(flags & X86_EFLAGS_IF);
 }
-#endif
 
 static inline int raw_irqs_disabled(void)
 {
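
raw_local_irq_save()/raw_local_irq_restore() above form the canonical bracket
around short critical sections; a hedged usage sketch:

static unsigned long counter;

static void bump_counter(void)
{
	unsigned long flags;

	raw_local_irq_save(flags);	/* pushf; pop + cli underneath */
	counter++;			/* no interrupt can land here */
	raw_local_irq_restore(flags);	/* push; popf: old IF bit returns */
}
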
diff --git a/include/asm-x86/kdebug.h b/include/asm-x86/kdebug.h
index 99dcbaf..96651bb 100644
--- a/include/asm-x86/kdebug.h
+++ b/include/asm-x86/kdebug.h
@@ -20,15 +20,16 @@
 	DIE_CALL,
 	DIE_NMI_IPI,
 	DIE_PAGE_FAULT,
+	DIE_NMIUNKNOWN,
 };
 
 extern void printk_address(unsigned long address, int reliable);
-extern void die(const char *,struct pt_regs *,long);
+extern void die(const char *, struct pt_regs *,long);
 extern int __must_check __die(const char *, struct pt_regs *, long);
 extern void show_registers(struct pt_regs *regs);
 extern void __show_registers(struct pt_regs *, int all);
 extern void show_trace(struct task_struct *t, struct pt_regs *regs,
-			unsigned long *sp, unsigned long bp);
+		       unsigned long *sp, unsigned long bp);
 extern void __show_regs(struct pt_regs *regs);
 extern void show_regs(struct pt_regs *regs);
 extern unsigned long oops_begin(void);
diff --git a/include/asm-x86/kexec.h b/include/asm-x86/kexec.h
index c90d3c7..8f855a1 100644
--- a/include/asm-x86/kexec.h
+++ b/include/asm-x86/kexec.h
@@ -94,10 +94,9 @@
 {
 #ifdef CONFIG_X86_32
 	newregs->sp = (unsigned long)&(oldregs->sp);
-	__asm__ __volatile__(
-			"xorl %%eax, %%eax\n\t"
-			"movw %%ss, %%ax\n\t"
-			:"=a"(newregs->ss));
+	asm volatile("xorl %%eax, %%eax\n\t"
+		     "movw %%ss, %%ax\n\t"
+		     :"=a"(newregs->ss));
 #endif
 }
 
@@ -114,39 +113,39 @@
 		crash_fixup_ss_esp(newregs, oldregs);
 	} else {
 #ifdef CONFIG_X86_32
-		__asm__ __volatile__("movl %%ebx,%0" : "=m"(newregs->bx));
-		__asm__ __volatile__("movl %%ecx,%0" : "=m"(newregs->cx));
-		__asm__ __volatile__("movl %%edx,%0" : "=m"(newregs->dx));
-		__asm__ __volatile__("movl %%esi,%0" : "=m"(newregs->si));
-		__asm__ __volatile__("movl %%edi,%0" : "=m"(newregs->di));
-		__asm__ __volatile__("movl %%ebp,%0" : "=m"(newregs->bp));
-		__asm__ __volatile__("movl %%eax,%0" : "=m"(newregs->ax));
-		__asm__ __volatile__("movl %%esp,%0" : "=m"(newregs->sp));
-		__asm__ __volatile__("movl %%ss, %%eax;" :"=a"(newregs->ss));
-		__asm__ __volatile__("movl %%cs, %%eax;" :"=a"(newregs->cs));
-		__asm__ __volatile__("movl %%ds, %%eax;" :"=a"(newregs->ds));
-		__asm__ __volatile__("movl %%es, %%eax;" :"=a"(newregs->es));
-		__asm__ __volatile__("pushfl; popl %0" :"=m"(newregs->flags));
+		asm volatile("movl %%ebx,%0" : "=m"(newregs->bx));
+		asm volatile("movl %%ecx,%0" : "=m"(newregs->cx));
+		asm volatile("movl %%edx,%0" : "=m"(newregs->dx));
+		asm volatile("movl %%esi,%0" : "=m"(newregs->si));
+		asm volatile("movl %%edi,%0" : "=m"(newregs->di));
+		asm volatile("movl %%ebp,%0" : "=m"(newregs->bp));
+		asm volatile("movl %%eax,%0" : "=m"(newregs->ax));
+		asm volatile("movl %%esp,%0" : "=m"(newregs->sp));
+		asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss));
+		asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs));
+		asm volatile("movl %%ds, %%eax;" :"=a"(newregs->ds));
+		asm volatile("movl %%es, %%eax;" :"=a"(newregs->es));
+		asm volatile("pushfl; popl %0" :"=m"(newregs->flags));
 #else
-		__asm__ __volatile__("movq %%rbx,%0" : "=m"(newregs->bx));
-		__asm__ __volatile__("movq %%rcx,%0" : "=m"(newregs->cx));
-		__asm__ __volatile__("movq %%rdx,%0" : "=m"(newregs->dx));
-		__asm__ __volatile__("movq %%rsi,%0" : "=m"(newregs->si));
-		__asm__ __volatile__("movq %%rdi,%0" : "=m"(newregs->di));
-		__asm__ __volatile__("movq %%rbp,%0" : "=m"(newregs->bp));
-		__asm__ __volatile__("movq %%rax,%0" : "=m"(newregs->ax));
-		__asm__ __volatile__("movq %%rsp,%0" : "=m"(newregs->sp));
-		__asm__ __volatile__("movq %%r8,%0" : "=m"(newregs->r8));
-		__asm__ __volatile__("movq %%r9,%0" : "=m"(newregs->r9));
-		__asm__ __volatile__("movq %%r10,%0" : "=m"(newregs->r10));
-		__asm__ __volatile__("movq %%r11,%0" : "=m"(newregs->r11));
-		__asm__ __volatile__("movq %%r12,%0" : "=m"(newregs->r12));
-		__asm__ __volatile__("movq %%r13,%0" : "=m"(newregs->r13));
-		__asm__ __volatile__("movq %%r14,%0" : "=m"(newregs->r14));
-		__asm__ __volatile__("movq %%r15,%0" : "=m"(newregs->r15));
-		__asm__ __volatile__("movl %%ss, %%eax;" :"=a"(newregs->ss));
-		__asm__ __volatile__("movl %%cs, %%eax;" :"=a"(newregs->cs));
-		__asm__ __volatile__("pushfq; popq %0" :"=m"(newregs->flags));
+		asm volatile("movq %%rbx,%0" : "=m"(newregs->bx));
+		asm volatile("movq %%rcx,%0" : "=m"(newregs->cx));
+		asm volatile("movq %%rdx,%0" : "=m"(newregs->dx));
+		asm volatile("movq %%rsi,%0" : "=m"(newregs->si));
+		asm volatile("movq %%rdi,%0" : "=m"(newregs->di));
+		asm volatile("movq %%rbp,%0" : "=m"(newregs->bp));
+		asm volatile("movq %%rax,%0" : "=m"(newregs->ax));
+		asm volatile("movq %%rsp,%0" : "=m"(newregs->sp));
+		asm volatile("movq %%r8,%0" : "=m"(newregs->r8));
+		asm volatile("movq %%r9,%0" : "=m"(newregs->r9));
+		asm volatile("movq %%r10,%0" : "=m"(newregs->r10));
+		asm volatile("movq %%r11,%0" : "=m"(newregs->r11));
+		asm volatile("movq %%r12,%0" : "=m"(newregs->r12));
+		asm volatile("movq %%r13,%0" : "=m"(newregs->r13));
+		asm volatile("movq %%r14,%0" : "=m"(newregs->r14));
+		asm volatile("movq %%r15,%0" : "=m"(newregs->r15));
+		asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss));
+		asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs));
+		asm volatile("pushfq; popq %0" :"=m"(newregs->flags));
 #endif
 		newregs->ip = (unsigned long)current_text_addr();
 	}
diff --git a/include/asm-x86/kgdb.h b/include/asm-x86/kgdb.h
new file mode 100644
index 0000000..484c475
--- /dev/null
+++ b/include/asm-x86/kgdb.h
@@ -0,0 +1,81 @@
+#ifndef _ASM_KGDB_H_
+#define _ASM_KGDB_H_
+
+/*
+ * Copyright (C) 2001-2004 Amit S. Kale
+ * Copyright (C) 2008 Wind River Systems, Inc.
+ */
+
+/*
+ * BUFMAX defines the maximum number of characters in inbound/outbound
+ * buffers; at least NUMREGBYTES*2 are needed for register packets,
+ * and a longer buffer is needed to list all threads.
+ */
+#define BUFMAX			1024
+
+/*
+ *  Note that this register image is in a different order than
+ *  the register image that Linux produces at interrupt time.
+ *
+ *  Linux's register image is defined by struct pt_regs in ptrace.h.
+ *  Just why GDB uses a different order is a historical mystery.
+ */
+#ifdef CONFIG_X86_32
+enum regnames {
+	GDB_AX,			/* 0 */
+	GDB_CX,			/* 1 */
+	GDB_DX,			/* 2 */
+	GDB_BX,			/* 3 */
+	GDB_SP,			/* 4 */
+	GDB_BP,			/* 5 */
+	GDB_SI,			/* 6 */
+	GDB_DI,			/* 7 */
+	GDB_PC,			/* 8 also known as eip */
+	GDB_PS,			/* 9 also known as eflags */
+	GDB_CS,			/* 10 */
+	GDB_SS,			/* 11 */
+	GDB_DS,			/* 12 */
+	GDB_ES,			/* 13 */
+	GDB_FS,			/* 14 */
+	GDB_GS,			/* 15 */
+};
+#else /* ! CONFIG_X86_32 */
+enum regnames {
+	GDB_AX,			/* 0 */
+	GDB_DX,			/* 1 */
+	GDB_CX,			/* 2 */
+	GDB_BX,			/* 3 */
+	GDB_SI,			/* 4 */
+	GDB_DI,			/* 5 */
+	GDB_BP,			/* 6 */
+	GDB_SP,			/* 7 */
+	GDB_R8,			/* 8 */
+	GDB_R9,			/* 9 */
+	GDB_R10,		/* 10 */
+	GDB_R11,		/* 11 */
+	GDB_R12,		/* 12 */
+	GDB_R13,		/* 13 */
+	GDB_R14,		/* 14 */
+	GDB_R15,		/* 15 */
+	GDB_PC,			/* 16 */
+	GDB_PS,			/* 17 */
+};
+#endif /* CONFIG_X86_32 */
+
+/*
+ * Number of bytes of registers:
+ */
+#ifdef CONFIG_X86_32
+# define NUMREGBYTES		64
+#else
+# define NUMREGBYTES		((GDB_PS+1)*8)
+#endif
+
+static inline void arch_kgdb_breakpoint(void)
+{
+	asm("   int $3");
+}
+#define BREAK_INSTR_SIZE	1
+#define CACHE_FLUSH_IS_SAFE	1
+
+#endif				/* _ASM_KGDB_H_ */
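
Because the enum above deliberately mirrors GDB's packet order rather than
struct pt_regs, a stub has to shuffle registers field by field. An illustrative
32-bit sketch of that mapping (the function name follows kgdb convention but is
not part of this header):

static void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
	gdb_regs[GDB_AX] = regs->ax;
	gdb_regs[GDB_CX] = regs->cx;
	gdb_regs[GDB_DX] = regs->dx;
	gdb_regs[GDB_BX] = regs->bx;
	gdb_regs[GDB_SP] = regs->sp;
	gdb_regs[GDB_BP] = regs->bp;
	gdb_regs[GDB_SI] = regs->si;
	gdb_regs[GDB_DI] = regs->di;
	gdb_regs[GDB_PC] = regs->ip;
	gdb_regs[GDB_PS] = regs->flags;
	gdb_regs[GDB_CS] = regs->cs;
	gdb_regs[GDB_SS] = regs->ss;
}
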
diff --git a/include/asm-x86/kprobes.h b/include/asm-x86/kprobes.h
index 61ad7b5..54980b0 100644
--- a/include/asm-x86/kprobes.h
+++ b/include/asm-x86/kprobes.h
@@ -35,12 +35,12 @@
 #define RELATIVEJUMP_INSTRUCTION 0xe9
 #define MAX_INSN_SIZE 16
 #define MAX_STACK_SIZE 64
-#define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \
-	(((unsigned long)current_thread_info()) + THREAD_SIZE \
-	 - (unsigned long)(ADDR))) \
-	? (MAX_STACK_SIZE) \
-	: (((unsigned long)current_thread_info()) + THREAD_SIZE \
-	   - (unsigned long)(ADDR)))
+#define MIN_STACK_SIZE(ADDR)					       \
+	(((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
+			      THREAD_SIZE - (unsigned long)(ADDR)))    \
+	 ? (MAX_STACK_SIZE)					       \
+	 : (((unsigned long)current_thread_info()) +		       \
+	    THREAD_SIZE - (unsigned long)(ADDR)))
 
 #define flush_insn_slot(p)	do { } while (0)
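
MIN_STACK_SIZE() above caps MAX_STACK_SIZE by however many bytes remain between
ADDR and the top of the current thread stack. A stand-alone arithmetic check of
that logic, with made-up stack numbers:

#include <assert.h>

#define MAX_STACK_SIZE	64
#define THREAD_SIZE	8192

static unsigned long min_stack_size(unsigned long thread_base,
				    unsigned long addr)
{
	unsigned long room = thread_base + THREAD_SIZE - addr;

	return room < MAX_STACK_SIZE ? room : MAX_STACK_SIZE;
}

int main(void)
{
	/* plenty of stack left: capped at MAX_STACK_SIZE */
	assert(min_stack_size(0x10000, 0x10100) == 64);
	/* only 40 bytes left below the top of the stack */
	assert(min_stack_size(0x10000, 0x10000 + THREAD_SIZE - 40) == 40);
	return 0;
}
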
 
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 4702b04..68ee390 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -22,15 +22,16 @@
 
 #define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
 #define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
-#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS|0xFFFFFF0000000000ULL)
+#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS |	\
+				  0xFFFFFF0000000000ULL)
 
-#define KVM_GUEST_CR0_MASK \
+#define KVM_GUEST_CR0_MASK				   \
 	(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \
 	 | X86_CR0_NW | X86_CR0_CD)
-#define KVM_VM_CR0_ALWAYS_ON \
+#define KVM_VM_CR0_ALWAYS_ON						\
 	(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE | X86_CR0_TS \
 	 | X86_CR0_MP)
-#define KVM_GUEST_CR4_MASK \
+#define KVM_GUEST_CR4_MASK						\
 	(X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE)
 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
 #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
@@ -133,12 +134,12 @@
 union kvm_mmu_page_role {
 	unsigned word;
 	struct {
-		unsigned glevels : 4;
-		unsigned level : 4;
-		unsigned quadrant : 2;
-		unsigned pad_for_nice_hex_output : 6;
-		unsigned metaphysical : 1;
-		unsigned access : 3;
+		unsigned glevels:4;
+		unsigned level:4;
+		unsigned quadrant:2;
+		unsigned pad_for_nice_hex_output:6;
+		unsigned metaphysical:1;
+		unsigned access:3;
 	};
 };
 
@@ -606,6 +607,7 @@
 #define TSS_BASE_SIZE 0x68
 #define TSS_IOPB_SIZE (65536 / 8)
 #define TSS_REDIRECTION_SIZE (256 / 8)
-#define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
+#define RMODE_TSS_SIZE							\
+	(TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
 
 #endif
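
The kvm_mmu_page_role reflow above keeps the union's point intact: the
bit-fields and the word member alias the same storage, so a whole role can be
compared or hashed in one integer operation. A compilable sketch of the idea
(the field layout is illustrative and, as in the real structure,
implementation-defined):

#include <stdio.h>

union page_role {
	unsigned word;
	struct {
		unsigned glevels:4;
		unsigned level:4;
		unsigned quadrant:2;
		unsigned access:3;
	};
};

int main(void)
{
	union page_role a = { .word = 0 }, b = { .word = 0 };

	a.level = 3;
	a.access = 5;
	b.level = 3;
	b.access = 5;
	printf("equal: %d\n", a.word == b.word);	/* prints 1 */
	return 0;
}
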
diff --git a/include/asm-x86/kvm_x86_emulate.h b/include/asm-x86/kvm_x86_emulate.h
index 7db91b9..d6337f9 100644
--- a/include/asm-x86/kvm_x86_emulate.h
+++ b/include/asm-x86/kvm_x86_emulate.h
@@ -68,10 +68,10 @@
 	 *  @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
 	 *  @bytes: [IN ] Number of bytes to read from memory.
 	 */
-	int (*read_emulated) (unsigned long addr,
-			      void *val,
-			      unsigned int bytes,
-			      struct kvm_vcpu *vcpu);
+	int (*read_emulated)(unsigned long addr,
+			     void *val,
+			     unsigned int bytes,
+			     struct kvm_vcpu *vcpu);
 
 	/*
 	 * write_emulated: Write bytes to emulated/special memory area.
@@ -80,10 +80,10 @@
 	 *                required).
 	 *  @bytes: [IN ] Number of bytes to write to memory.
 	 */
-	int (*write_emulated) (unsigned long addr,
-			       const void *val,
-			       unsigned int bytes,
-			       struct kvm_vcpu *vcpu);
+	int (*write_emulated)(unsigned long addr,
+			      const void *val,
+			      unsigned int bytes,
+			      struct kvm_vcpu *vcpu);
 
 	/*
 	 * cmpxchg_emulated: Emulate an atomic (LOCKed) CMPXCHG operation on an
@@ -93,11 +93,11 @@
 	 *  @new:   [IN ] Value to write to @addr.
 	 *  @bytes: [IN ] Number of bytes to access using CMPXCHG.
 	 */
-	int (*cmpxchg_emulated) (unsigned long addr,
-				 const void *old,
-				 const void *new,
-				 unsigned int bytes,
-				 struct kvm_vcpu *vcpu);
+	int (*cmpxchg_emulated)(unsigned long addr,
+				const void *old,
+				const void *new,
+				unsigned int bytes,
+				struct kvm_vcpu *vcpu);
 
 };
 
@@ -143,7 +143,7 @@
 	/* Register state before/after emulation. */
 	struct kvm_vcpu *vcpu;
 
-	/* Linear faulting address (if emulating a page-faulting instruction). */
+	/* Linear faulting address (if emulating a page-faulting instruction) */
 	unsigned long eflags;
 
 	/* Emulated execution mode, represented by an X86EMUL_MODE value. */
diff --git a/include/asm-x86/lguest.h b/include/asm-x86/lguest.h
index 9b17571..be4a724 100644
--- a/include/asm-x86/lguest.h
+++ b/include/asm-x86/lguest.h
@@ -34,8 +34,7 @@
 extern void lguest_iret(void);
 extern void lguest_init(void);
 
-struct lguest_regs
-{
+struct lguest_regs {
 	/* Manually saved part. */
 	unsigned long eax, ebx, ecx, edx;
 	unsigned long esi, edi, ebp;
@@ -51,8 +50,7 @@
 };
 
 /* This is a guest-specific page (mapped ro) into the guest. */
-struct lguest_ro_state
-{
+struct lguest_ro_state {
 	/* Host information we need to restore when we switch back. */
 	u32 host_cr3;
 	struct desc_ptr host_idt_desc;
@@ -67,8 +65,7 @@
 	struct desc_struct guest_gdt[GDT_ENTRIES];
 };
 
-struct lg_cpu_arch
-{
+struct lg_cpu_arch {
 	/* The GDT entries copied into lguest_ro_state when running. */
 	struct desc_struct gdt[GDT_ENTRIES];
 
@@ -85,7 +82,7 @@
 
 	cr0 = read_cr0();
 	if (!(cr0 & 8))
-		write_cr0(cr0|8);
+		write_cr0(cr0 | 8);
 }
 
 /* Full 4G segment descriptors, suitable for CS and DS. */
diff --git a/include/asm-x86/lguest_hcall.h b/include/asm-x86/lguest_hcall.h
index f239e70..a3241f2 100644
--- a/include/asm-x86/lguest_hcall.h
+++ b/include/asm-x86/lguest_hcall.h
@@ -46,7 +46,7 @@
 {
 	/* "int" is the Intel instruction to trigger a trap. */
 	asm volatile("int $" __stringify(LGUEST_TRAP_ENTRY)
-		       /* The call in %eax (aka "a") might be overwritten */
+		     /* The call in %eax (aka "a") might be overwritten */
 		     : "=a"(call)
 		       /* The arguments are in %eax, %edx, %ebx & %ecx */
 		     : "a"(call), "d"(arg1), "b"(arg2), "c"(arg3)
@@ -62,8 +62,7 @@
 #define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS: 32)
 
 #define LHCALL_RING_SIZE 64
-struct hcall_args
-{
+struct hcall_args {
 	/* These map directly onto eax, ebx, ecx, edx in struct lguest_regs */
 	unsigned long arg0, arg2, arg3, arg1;
 };
diff --git a/include/asm-x86/linkage.h b/include/asm-x86/linkage.h
index c048353..64e444f 100644
--- a/include/asm-x86/linkage.h
+++ b/include/asm-x86/linkage.h
@@ -1,6 +1,9 @@
 #ifndef __ASM_LINKAGE_H
 #define __ASM_LINKAGE_H
 
+#undef notrace
+#define notrace __attribute__((no_instrument_function))
+
 #ifdef CONFIG_X86_64
 #define __ALIGN .p2align 4,,15
 #define __ALIGN_STR ".p2align 4,,15"
diff --git a/include/asm-x86/local.h b/include/asm-x86/local.h
index f852c62..330a724 100644
--- a/include/asm-x86/local.h
+++ b/include/asm-x86/local.h
@@ -18,32 +18,28 @@
 
 static inline void local_inc(local_t *l)
 {
-	__asm__ __volatile__(
-		_ASM_INC "%0"
-		:"+m" (l->a.counter));
+	asm volatile(_ASM_INC "%0"
+		     : "+m" (l->a.counter));
 }
 
 static inline void local_dec(local_t *l)
 {
-	__asm__ __volatile__(
-		_ASM_DEC "%0"
-		:"+m" (l->a.counter));
+	asm volatile(_ASM_DEC "%0"
+		     : "+m" (l->a.counter));
 }
 
 static inline void local_add(long i, local_t *l)
 {
-	__asm__ __volatile__(
-		_ASM_ADD "%1,%0"
-		:"+m" (l->a.counter)
-		:"ir" (i));
+	asm volatile(_ASM_ADD "%1,%0"
+		     : "+m" (l->a.counter)
+		     : "ir" (i));
 }
 
 static inline void local_sub(long i, local_t *l)
 {
-	__asm__ __volatile__(
-		_ASM_SUB "%1,%0"
-		:"+m" (l->a.counter)
-		:"ir" (i));
+	asm volatile(_ASM_SUB "%1,%0"
+		     : "+m" (l->a.counter)
+		     : "ir" (i));
 }
 
 /**
@@ -59,10 +55,9 @@
 {
 	unsigned char c;
 
-	__asm__ __volatile__(
-		_ASM_SUB "%2,%0; sete %1"
-		:"+m" (l->a.counter), "=qm" (c)
-		:"ir" (i) : "memory");
+	asm volatile(_ASM_SUB "%2,%0; sete %1"
+		     : "+m" (l->a.counter), "=qm" (c)
+		     : "ir" (i) : "memory");
 	return c;
 }
 
@@ -78,10 +73,9 @@
 {
 	unsigned char c;
 
-	__asm__ __volatile__(
-		_ASM_DEC "%0; sete %1"
-		:"+m" (l->a.counter), "=qm" (c)
-		: : "memory");
+	asm volatile(_ASM_DEC "%0; sete %1"
+		     : "+m" (l->a.counter), "=qm" (c)
+		     : : "memory");
 	return c != 0;
 }
 
@@ -97,10 +91,9 @@
 {
 	unsigned char c;
 
-	__asm__ __volatile__(
-		_ASM_INC "%0; sete %1"
-		:"+m" (l->a.counter), "=qm" (c)
-		: : "memory");
+	asm volatile(_ASM_INC "%0; sete %1"
+		     : "+m" (l->a.counter), "=qm" (c)
+		     : : "memory");
 	return c != 0;
 }
 
@@ -117,10 +110,9 @@
 {
 	unsigned char c;
 
-	__asm__ __volatile__(
-		_ASM_ADD "%2,%0; sets %1"
-		:"+m" (l->a.counter), "=qm" (c)
-		:"ir" (i) : "memory");
+	asm volatile(_ASM_ADD "%2,%0; sets %1"
+		     : "+m" (l->a.counter), "=qm" (c)
+		     : "ir" (i) : "memory");
 	return c;
 }
 
@@ -141,10 +133,9 @@
 #endif
 	/* Modern 486+ processor */
 	__i = i;
-	__asm__ __volatile__(
-		_ASM_XADD "%0, %1;"
-		:"+r" (i), "+m" (l->a.counter)
-		: : "memory");
+	asm volatile(_ASM_XADD "%0, %1;"
+		     : "+r" (i), "+m" (l->a.counter)
+		     : : "memory");
 	return i + __i;
 
 #ifdef CONFIG_M386
@@ -182,11 +173,11 @@
 #define local_add_unless(l, a, u)				\
 ({								\
 	long c, old;						\
-	c = local_read(l);					\
+	c = local_read((l));					\
 	for (;;) {						\
 		if (unlikely(c == (u)))				\
 			break;					\
-		old = local_cmpxchg((l), c, c + (a));	\
+		old = local_cmpxchg((l), c, c + (a));		\
 		if (likely(old == c))				\
 			break;					\
 		c = old;					\
@@ -214,26 +205,30 @@
 
 /* Need to disable preemption for the cpu local counters otherwise we could
    still access a variable of a previous CPU in a non atomic way. */
-#define cpu_local_wrap_v(l)	 	\
-	({ local_t res__;		\
-	   preempt_disable(); 		\
-	   res__ = (l);			\
-	   preempt_enable();		\
-	   res__; })
+#define cpu_local_wrap_v(l)		\
+({					\
+	local_t res__;			\
+	preempt_disable(); 		\
+	res__ = (l);			\
+	preempt_enable();		\
+	res__;				\
+})
 #define cpu_local_wrap(l)		\
-	({ preempt_disable();		\
-	   l;				\
-	   preempt_enable(); })		\
+({					\
+	preempt_disable();		\
+	(l);				\
+	preempt_enable();		\
+})					\
 
-#define cpu_local_read(l)    cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
-#define cpu_local_set(l, i)  cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
-#define cpu_local_inc(l)     cpu_local_wrap(local_inc(&__get_cpu_var(l)))
-#define cpu_local_dec(l)     cpu_local_wrap(local_dec(&__get_cpu_var(l)))
-#define cpu_local_add(i, l)  cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
-#define cpu_local_sub(i, l)  cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
+#define cpu_local_read(l)    cpu_local_wrap_v(local_read(&__get_cpu_var((l))))
+#define cpu_local_set(l, i)  cpu_local_wrap(local_set(&__get_cpu_var((l)), (i)))
+#define cpu_local_inc(l)     cpu_local_wrap(local_inc(&__get_cpu_var((l))))
+#define cpu_local_dec(l)     cpu_local_wrap(local_dec(&__get_cpu_var((l))))
+#define cpu_local_add(i, l)  cpu_local_wrap(local_add((i), &__get_cpu_var((l))))
+#define cpu_local_sub(i, l)  cpu_local_wrap(local_sub((i), &__get_cpu_var((l))))
 
-#define __cpu_local_inc(l)	cpu_local_inc(l)
-#define __cpu_local_dec(l)	cpu_local_dec(l)
+#define __cpu_local_inc(l)	cpu_local_inc((l))
+#define __cpu_local_dec(l)	cpu_local_dec((l))
 #define __cpu_local_add(i, l)	cpu_local_add((i), (l))
 #define __cpu_local_sub(i, l)	cpu_local_sub((i), (l))
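
local_add_unless() above is a classic cmpxchg retry loop: add a to the counter
unless it already equals u, re-reading after a lost race. A stand-alone model
of the same control flow, with a plain long and a simulated cmpxchg in place of
local_t:

#include <assert.h>

static long simulated_cmpxchg(long *p, long old, long new)
{
	long cur = *p;

	if (cur == old)
		*p = new;
	return cur;	/* caller retries when this != old */
}

static int add_unless(long *l, long a, long u)
{
	long c = *l, old;

	for (;;) {
		if (c == u)
			break;
		old = simulated_cmpxchg(l, c, c + a);
		if (old == c)
			break;
		c = old;
	}
	return c != u;
}

int main(void)
{
	long v = 3;

	assert(add_unless(&v, 2, 7) == 1 && v == 5);	/* added */
	assert(add_unless(&v, 2, 5) == 0 && v == 5);	/* refused */
	return 0;
}
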
 
diff --git a/include/asm-x86/mach-bigsmp/mach_apic.h b/include/asm-x86/mach-bigsmp/mach_apic.h
index 6df235e..8327907 100644
--- a/include/asm-x86/mach-bigsmp/mach_apic.h
+++ b/include/asm-x86/mach-bigsmp/mach_apic.h
@@ -1,10 +1,7 @@
 #ifndef __ASM_MACH_APIC_H
 #define __ASM_MACH_APIC_H
 
-
-extern u8 bios_cpu_apicid[];
-
-#define xapic_phys_to_log_apicid(cpu) (bios_cpu_apicid[cpu])
+#define xapic_phys_to_log_apicid(cpu) (per_cpu(x86_bios_cpu_apicid, cpu))
 #define esr_disable (1)
 
 static inline int apic_id_registered(void)
@@ -90,7 +87,7 @@
 static inline int cpu_present_to_apicid(int mps_cpu)
 {
 	if (mps_cpu < NR_CPUS)
-		return (int) bios_cpu_apicid[mps_cpu];
+		return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
 
 	return BAD_APICID;
 }
@@ -109,17 +106,6 @@
 	return cpu_physical_id(cpu);
 }
 
-static inline int mpc_apic_id(struct mpc_config_processor *m,
-			      struct mpc_config_translation *translation_record)
-{
-	printk("Processor #%d %u:%u APIC version %d\n",
-	       m->mpc_apicid,
-	       (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8,
-	       (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4,
-	       m->mpc_apicver);
-	return m->mpc_apicid;
-}
-
 static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
 {
 	/* For clustered we don't have a good way to do this yet - hack */
diff --git a/include/asm-x86/mach-default/mach_apic.h b/include/asm-x86/mach-default/mach_apic.h
index e3c2c10..0a6634f 100644
--- a/include/asm-x86/mach-default/mach_apic.h
+++ b/include/asm-x86/mach-default/mach_apic.h
@@ -1,6 +1,8 @@
 #ifndef __ASM_MACH_APIC_H
 #define __ASM_MACH_APIC_H
 
+#ifdef CONFIG_X86_LOCAL_APIC
+
 #include <mach_apicdef.h>
 #include <asm/smp.h>
 
@@ -14,24 +16,25 @@
 	return cpumask_of_cpu(0);
 #endif
 } 
-#define TARGET_CPUS (target_cpus())
 
 #define NO_BALANCE_IRQ (0)
 #define esr_disable (0)
 
+#ifdef CONFIG_X86_64
+#include <asm/genapic.h>
+#define INT_DELIVERY_MODE (genapic->int_delivery_mode)
+#define INT_DEST_MODE (genapic->int_dest_mode)
+#define TARGET_CPUS	  (genapic->target_cpus())
+#define apic_id_registered (genapic->apic_id_registered)
+#define init_apic_ldr (genapic->init_apic_ldr)
+#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
+#define phys_pkg_id	(genapic->phys_pkg_id)
+#define vector_allocation_domain    (genapic->vector_allocation_domain)
+extern void setup_apic_routing(void);
+#else
 #define INT_DELIVERY_MODE dest_LowestPrio
 #define INT_DEST_MODE 1     /* logical delivery broadcast to all procs */
-
-static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
-{
-	return physid_isset(apicid, bitmap);
-}
-
-static inline unsigned long check_apicid_present(int bit)
-{
-	return physid_isset(bit, phys_cpu_present_map);
-}
-
+#define TARGET_CPUS (target_cpus())
 /*
  * Set up the logical destination ID.
  *
@@ -49,27 +52,55 @@
 	apic_write_around(APIC_LDR, val);
 }
 
+static inline int apic_id_registered(void)
+{
+	return physid_isset(GET_APIC_ID(read_apic_id()), phys_cpu_present_map);
+}
+
+static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+{
+	return cpus_addr(cpumask)[0];
+}
+
+static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
+{
+	return cpuid_apic >> index_msb;
+}
+
+static inline void setup_apic_routing(void)
+{
+#ifdef CONFIG_X86_IO_APIC
+	printk("Enabling APIC mode:  %s.  Using %d I/O APICs\n",
+					"Flat", nr_ioapics);
+#endif
+}
+
+static inline int apicid_to_node(int logical_apicid)
+{
+	return 0;
+}
+#endif
+
+static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
+{
+	return physid_isset(apicid, bitmap);
+}
+
+static inline unsigned long check_apicid_present(int bit)
+{
+	return physid_isset(bit, phys_cpu_present_map);
+}
+
 static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
 {
 	return phys_map;
 }
 
-static inline void setup_apic_routing(void)
-{
-	printk("Enabling APIC mode:  %s.  Using %d I/O APICs\n",
-					"Flat", nr_ioapics);
-}
-
 static inline int multi_timer_check(int apic, int irq)
 {
 	return 0;
 }
 
-static inline int apicid_to_node(int logical_apicid)
-{
-	return 0;
-}
-
 /* Mapping from cpu number to logical apicid */
 static inline int cpu_to_logical_apicid(int cpu)
 {
@@ -78,8 +109,13 @@
 
 static inline int cpu_present_to_apicid(int mps_cpu)
 {
+#ifdef CONFIG_X86_64
+	if (cpu_present(mps_cpu))
+		return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
+#else
 	if (mps_cpu < get_physical_broadcast())
 		return  mps_cpu;
+#endif
 	else
 		return BAD_APICID;
 }
@@ -89,17 +125,6 @@
 	return physid_mask_of_physid(phys_apicid);
 }
 
-static inline int mpc_apic_id(struct mpc_config_processor *m,
-			      struct mpc_config_translation *translation_record)
-{
-	printk("Processor #%d %u:%u APIC version %d\n",
-	       m->mpc_apicid,
-	       (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8,
-	       (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4,
-	       m->mpc_apicver);
-	return m->mpc_apicid;
-}
-
 static inline void setup_portio_remap(void)
 {
 }
@@ -109,23 +134,9 @@
 	return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
 }
 
-static inline int apic_id_registered(void)
-{
-	return physid_isset(GET_APIC_ID(apic_read(APIC_ID)), phys_cpu_present_map);
-}
-
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
-{
-	return cpus_addr(cpumask)[0];
-}
-
 static inline void enable_apic_mode(void)
 {
 }
 
-static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
-{
-	return cpuid_apic >> index_msb;
-}
-
+#endif /* CONFIG_X86_LOCAL_APIC */
 #endif /* __ASM_MACH_APIC_H */
diff --git a/include/asm-x86/mach-default/mach_apicdef.h b/include/asm-x86/mach-default/mach_apicdef.h
index ae98413..e4b29ba 100644
--- a/include/asm-x86/mach-default/mach_apicdef.h
+++ b/include/asm-x86/mach-default/mach_apicdef.h
@@ -3,10 +3,14 @@
 
 #include <asm/apic.h>
 
+#ifdef CONFIG_X86_64
+#define	APIC_ID_MASK		(0xFFu<<24)
+#define GET_APIC_ID(x)          (((x)>>24)&0xFFu)
+#define	SET_APIC_ID(x)		(((x)<<24))
+#else
 #define		APIC_ID_MASK		(0xF<<24)
-
 static inline unsigned get_apic_id(unsigned long x) 
-{ 
+{
 	unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
 	if (APIC_XAPIC(ver))
 		return (((x)>>24)&0xFF);
@@ -15,5 +19,6 @@
 } 
 
 #define		GET_APIC_ID(x)	get_apic_id(x)
+#endif
 
 #endif
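
The new 64-bit GET_APIC_ID()/SET_APIC_ID() pair above round-trips the APIC ID
through bits 24-31 of the register image; a stand-alone check with the macros
restated:

#include <assert.h>

#define APIC_ID_MASK	(0xFFu << 24)
#define GET_APIC_ID(x)	(((x) >> 24) & 0xFFu)
#define SET_APIC_ID(x)	((x) << 24)

int main(void)
{
	assert(GET_APIC_ID(SET_APIC_ID(0x2au)) == 0x2au);
	assert((SET_APIC_ID(0x2au) & APIC_ID_MASK) == 0x2a000000u);
	return 0;
}
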
diff --git a/include/asm-x86/mach-default/mach_ipi.h b/include/asm-x86/mach-default/mach_ipi.h
index 0dba244..be32336 100644
--- a/include/asm-x86/mach-default/mach_ipi.h
+++ b/include/asm-x86/mach-default/mach_ipi.h
@@ -9,10 +9,15 @@
 
 extern int no_broadcast;
 
+#ifdef CONFIG_X86_64
+#include <asm/genapic.h>
+#define send_IPI_mask (genapic->send_IPI_mask)
+#else
 static inline void send_IPI_mask(cpumask_t mask, int vector)
 {
 	send_IPI_mask_bitmask(mask, vector);
 }
+#endif
 
 static inline void __local_send_IPI_allbutself(int vector)
 {
@@ -33,6 +38,10 @@
 		__send_IPI_shortcut(APIC_DEST_ALLINC, vector);
 }
 
+#ifdef CONFIG_X86_64
+#define send_IPI_allbutself (genapic->send_IPI_allbutself)
+#define send_IPI_all (genapic->send_IPI_all)
+#else
 static inline void send_IPI_allbutself(int vector)
 {
 	/*
@@ -50,5 +59,6 @@
 {
 	__local_send_IPI_all(vector);
 }
+#endif
 
 #endif /* __ASM_MACH_IPI_H */
diff --git a/include/asm-x86/mach-default/mach_mpparse.h b/include/asm-x86/mach-default/mach_mpparse.h
index 1d38324..d141085 100644
--- a/include/asm-x86/mach-default/mach_mpparse.h
+++ b/include/asm-x86/mach-default/mach_mpparse.h
@@ -1,17 +1,6 @@
 #ifndef __ASM_MACH_MPPARSE_H
 #define __ASM_MACH_MPPARSE_H
 
-static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, 
-				struct mpc_config_translation *translation)
-{
-//	Dprintk("Bus #%d is %s\n", m->mpc_busid, name);
-}
-
-static inline void mpc_oem_pci_bus(struct mpc_config_bus *m, 
-				struct mpc_config_translation *translation)
-{
-}
-
 static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, 
 		char *productid)
 {
diff --git a/include/asm-x86/mach-default/mach_reboot.h b/include/asm-x86/mach-default/mach_reboot.h
deleted file mode 100644
index 6adee6a..0000000
--- a/include/asm-x86/mach-default/mach_reboot.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- *  arch/i386/mach-generic/mach_reboot.h
- *
- *  Machine specific reboot functions for generic.
- *  Split out from reboot.c by Osamu Tomita <tomita@cinet.co.jp>
- */
-#ifndef _MACH_REBOOT_H
-#define _MACH_REBOOT_H
-
-static inline void kb_wait(void)
-{
-	int i;
-
-	for (i = 0; i < 0x10000; i++)
-		if ((inb_p(0x64) & 0x02) == 0)
-			break;
-}
-
-static inline void mach_reboot(void)
-{
-	int i;
-
-	/* old method, works on most machines */
-	for (i = 0; i < 10; i++) {
-		kb_wait();
-		udelay(50);
-		outb(0xfe, 0x64);	/* pulse reset low */
-		udelay(50);
-	}
-
-	/* New method: sets the "System flag" which, when set, indicates
-	 * successful completion of the keyboard controller self-test (Basic
-	 * Assurance Test, BAT).  This is needed for some machines with no
-	 * keyboard plugged in.  This read-modify-write sequence sets only the
-	 * system flag
-	 */
-	for (i = 0; i < 10; i++) {
-		int cmd;
-
-		outb(0x20, 0x64);	/* read Controller Command Byte */
-		udelay(50);
-		kb_wait();
-		udelay(50);
-		cmd = inb(0x60);
-		udelay(50);
-		kb_wait();
-		udelay(50);
-		outb(0x60, 0x64);	/* write Controller Command Byte */
-		udelay(50);
-		kb_wait();
-		udelay(50);
-		outb(cmd | 0x14, 0x60); /* set "System flag" and "Keyboard Disabled" */
-		udelay(50);
-		kb_wait();
-		udelay(50);
-		outb(0xfe, 0x64);	/* pulse reset low */
-		udelay(50);
-	}
-}
-
-#endif /* !_MACH_REBOOT_H */
diff --git a/include/asm-x86/mach-default/smpboot_hooks.h b/include/asm-x86/mach-default/smpboot_hooks.h
index 7f45f63..3ff2c5b 100644
--- a/include/asm-x86/mach-default/smpboot_hooks.h
+++ b/include/asm-x86/mach-default/smpboot_hooks.h
@@ -41,4 +41,11 @@
 	 */
 	if (!skip_ioapic_setup && nr_ioapics)
 		setup_IO_APIC();
+	else
+		nr_ioapics = 0;
+}
+
+static inline void smpboot_clear_io_apic(void)
+{
+	nr_ioapics = 0;
 }
diff --git a/include/asm-x86/mach-es7000/mach_apic.h b/include/asm-x86/mach-es7000/mach_apic.h
index d23011f..fbc8ad2 100644
--- a/include/asm-x86/mach-es7000/mach_apic.h
+++ b/include/asm-x86/mach-es7000/mach_apic.h
@@ -1,9 +1,7 @@
 #ifndef __ASM_MACH_APIC_H
 #define __ASM_MACH_APIC_H
 
-extern u8 bios_cpu_apicid[];
-
-#define xapic_phys_to_log_apicid(cpu) (bios_cpu_apicid[cpu])
+#define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu)
 #define esr_disable (1)
 
 static inline int apic_id_registered(void)
@@ -80,7 +78,7 @@
 extern int apic_version [MAX_APICS];
 static inline void setup_apic_routing(void)
 {
-	int apic = bios_cpu_apicid[smp_processor_id()];
+	int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
 	printk("Enabling APIC mode:  %s.  Using %d I/O APICs, target cpus %lx\n",
 		(apic_version[apic] == 0x14) ? 
 		"Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(TARGET_CPUS)[0]);
@@ -102,7 +100,7 @@
 	if (!mps_cpu)
 		return boot_cpu_physical_apicid;
 	else if (mps_cpu < NR_CPUS)
-		return (int) bios_cpu_apicid[mps_cpu];
+		return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
 	else
 		return BAD_APICID;
 }
@@ -129,16 +127,6 @@
 #endif
 }
 
-static inline int mpc_apic_id(struct mpc_config_processor *m, struct mpc_config_translation *unused)
-{
-	printk("Processor #%d %u:%u APIC version %d\n",
-	       m->mpc_apicid,
-	       (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8,
-	       (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4,
-	       m->mpc_apicver);
-	return (m->mpc_apicid);
-}
-
 static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
 {
 	/* For clustered we don't have a good way to do this yet - hack */
@@ -153,7 +141,7 @@
 extern unsigned int boot_cpu_physical_apicid;
 static inline int check_phys_apicid_present(int cpu_physical_apicid)
 {
-	boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
+	boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
 	return (1);
 }
 
diff --git a/include/asm-x86/mach-es7000/mach_mpparse.h b/include/asm-x86/mach-es7000/mach_mpparse.h
index 52ee75c..ef26d35 100644
--- a/include/asm-x86/mach-es7000/mach_mpparse.h
+++ b/include/asm-x86/mach-es7000/mach_mpparse.h
@@ -3,17 +3,6 @@
 
 #include <linux/acpi.h>
 
-static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name,
-				struct mpc_config_translation *translation)
-{
-	Dprintk("Bus #%d is %s\n", m->mpc_busid, name);
-}
-
-static inline void mpc_oem_pci_bus(struct mpc_config_bus *m,
-				struct mpc_config_translation *translation)
-{
-}
-
 extern int parse_unisys_oem (char *oemptr);
 extern int find_unisys_acpi_oem_table(unsigned long *oem_addr);
 extern void setup_unisys(void);
diff --git a/include/asm-x86/mach-generic/mach_apic.h b/include/asm-x86/mach-generic/mach_apic.h
index a236e70..6eff343 100644
--- a/include/asm-x86/mach-generic/mach_apic.h
+++ b/include/asm-x86/mach-generic/mach_apic.h
@@ -19,7 +19,6 @@
 #define cpu_to_logical_apicid (genapic->cpu_to_logical_apicid) 
 #define cpu_present_to_apicid (genapic->cpu_present_to_apicid)
 #define apicid_to_cpu_present (genapic->apicid_to_cpu_present)
-#define mpc_apic_id (genapic->mpc_apic_id) 
 #define setup_portio_remap (genapic->setup_portio_remap)
 #define check_apicid_present (genapic->check_apicid_present)
 #define check_phys_apicid_present (genapic->check_phys_apicid_present)
diff --git a/include/asm-x86/mach-generic/mach_mpparse.h b/include/asm-x86/mach-generic/mach_mpparse.h
index dbd9fce..0d0b5ba 100644
--- a/include/asm-x86/mach-generic/mach_mpparse.h
+++ b/include/asm-x86/mach-generic/mach_mpparse.h
@@ -1,11 +1,6 @@
 #ifndef _MACH_MPPARSE_H
 #define _MACH_MPPARSE_H 1
 
-#include <asm/genapic.h>
-
-#define mpc_oem_bus_info (genapic->mpc_oem_bus_info)
-#define mpc_oem_pci_bus (genapic->mpc_oem_pci_bus)
-
 int mps_oem_check(struct mp_config_table *mpc, char *oem, char *productid); 
 int acpi_madt_oem_check(char *oem_id, char *oem_table_id); 
 
diff --git a/include/asm-x86/mach-numaq/mach_apic.h b/include/asm-x86/mach-numaq/mach_apic.h
index 3b637fa..75a56e5 100644
--- a/include/asm-x86/mach-numaq/mach_apic.h
+++ b/include/asm-x86/mach-numaq/mach_apic.h
@@ -95,6 +95,16 @@
 	return physid_mask_of_physid(cpu + 4*node);
 }
 
+struct mpc_config_translation {
+	unsigned char mpc_type;
+	unsigned char trans_len;
+	unsigned char trans_type;
+	unsigned char trans_quad;
+	unsigned char trans_global;
+	unsigned char trans_local;
+	unsigned short trans_reserved;
+};
+
 static inline int mpc_apic_id(struct mpc_config_processor *m, 
 			struct mpc_config_translation *translation_record)
 {
diff --git a/include/asm-x86/mach-numaq/mach_mpparse.h b/include/asm-x86/mach-numaq/mach_mpparse.h
index 51bbac8..459b124 100644
--- a/include/asm-x86/mach-numaq/mach_mpparse.h
+++ b/include/asm-x86/mach-numaq/mach_mpparse.h
@@ -1,25 +1,10 @@
 #ifndef __ASM_MACH_MPPARSE_H
 #define __ASM_MACH_MPPARSE_H
 
-static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, 
-				struct mpc_config_translation *translation)
-{
-	int quad = translation->trans_quad;
-	int local = translation->trans_local;
-
-	mp_bus_id_to_node[m->mpc_busid] = quad;
-	mp_bus_id_to_local[m->mpc_busid] = local;
-	printk("Bus #%d is %s (node %d)\n", m->mpc_busid, name, quad);
-}
-
-static inline void mpc_oem_pci_bus(struct mpc_config_bus *m, 
-				struct mpc_config_translation *translation)
-{
-	int quad = translation->trans_quad;
-	int local = translation->trans_local;
-
-	quad_local_to_mp_bus_id[quad][local] = m->mpc_busid;
-}
+extern void mpc_oem_bus_info(struct mpc_config_bus *m, char *name,
+			     struct mpc_config_translation *translation);
+extern void mpc_oem_pci_bus(struct mpc_config_bus *m,
+	struct mpc_config_translation *translation);
 
 /* Hook from generic ACPI tables.c */
 static inline void acpi_madt_oem_check(char *oem_id, char *oem_table_id)
diff --git a/include/asm-x86/mach-summit/mach_apic.h b/include/asm-x86/mach-summit/mach_apic.h
index 062c97f..1f76c2e 100644
--- a/include/asm-x86/mach-summit/mach_apic.h
+++ b/include/asm-x86/mach-summit/mach_apic.h
@@ -40,7 +40,6 @@
 
 #define apicid_cluster(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK)
 
-extern u8 bios_cpu_apicid[];
 extern u8 cpu_2_logical_apicid[];
 
 static inline void init_apic_ldr(void)
@@ -110,7 +109,7 @@
 static inline int cpu_present_to_apicid(int mps_cpu)
 {
 	if (mps_cpu < NR_CPUS)
-		return (int)bios_cpu_apicid[mps_cpu];
+		return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
 	else
 		return BAD_APICID;
 }
@@ -126,17 +125,6 @@
 	return physid_mask_of_physid(0);
 }
 
-static inline int mpc_apic_id(struct mpc_config_processor *m,
-			      struct mpc_config_translation *translation_record)
-{
-	printk("Processor #%d %u:%u APIC version %d\n",
-	       m->mpc_apicid,
-	       (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8,
-	       (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4,
-	       m->mpc_apicver);
-	return m->mpc_apicid;
-}
-
 static inline void setup_portio_remap(void)
 {
 }
diff --git a/include/asm-x86/mach-summit/mach_mpparse.h b/include/asm-x86/mach-summit/mach_mpparse.h
index c252053..fdf5917 100644
--- a/include/asm-x86/mach-summit/mach_mpparse.h
+++ b/include/asm-x86/mach-summit/mach_mpparse.h
@@ -12,17 +12,6 @@
 #define setup_summit()	{}
 #endif
 
-static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, 
-				struct mpc_config_translation *translation)
-{
-	Dprintk("Bus #%d is %s\n", m->mpc_busid, name);
-}
-
-static inline void mpc_oem_pci_bus(struct mpc_config_bus *m, 
-				struct mpc_config_translation *translation)
-{
-}
-
 static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, 
 		char *productid)
 {
diff --git a/include/asm-x86/mach-visws/mach_apic.h b/include/asm-x86/mach-visws/mach_apic.h
index efac6f0..a9ef33a 100644
--- a/include/asm-x86/mach-visws/mach_apic.h
+++ b/include/asm-x86/mach-visws/mach_apic.h
@@ -23,7 +23,7 @@
 
 static inline int apic_id_registered(void)
 {
-	return physid_isset(GET_APIC_ID(apic_read(APIC_ID)), phys_cpu_present_map);
+	return physid_isset(GET_APIC_ID(read_apic_id()), phys_cpu_present_map);
 }
 
 /*
diff --git a/include/asm-x86/mach-visws/smpboot_hooks.h b/include/asm-x86/mach-visws/smpboot_hooks.h
index d926471..c9b83e3 100644
--- a/include/asm-x86/mach-visws/smpboot_hooks.h
+++ b/include/asm-x86/mach-visws/smpboot_hooks.h
@@ -22,3 +22,7 @@
 static inline void smpboot_setup_io_apic(void)
 {
 }
+
+static inline void smpboot_clear_io_apic(void)
+{
+}
diff --git a/include/asm-x86/mach_apic.h b/include/asm-x86/mach_apic.h
deleted file mode 100644
index 7b7115a..0000000
--- a/include/asm-x86/mach_apic.h
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef __ASM_MACH_APIC_H
-#define __ASM_MACH_APIC_H
-
-/*
- * Copyright 2004 James Cleverdon, IBM.
- * Subject to the GNU Public License, v.2
- *
- * Generic APIC sub-arch defines.
- *
- * Hacked for x86-64 by James Cleverdon from i386 architecture code by
- * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
- * James Cleverdon.
- */
-
-#include <asm/genapic.h>
-
-#define INT_DELIVERY_MODE (genapic->int_delivery_mode)
-#define INT_DEST_MODE (genapic->int_dest_mode)
-#define TARGET_CPUS	  (genapic->target_cpus())
-#define vector_allocation_domain	(genapic->vector_allocation_domain)
-#define apic_id_registered (genapic->apic_id_registered)
-#define init_apic_ldr (genapic->init_apic_ldr)
-#define send_IPI_mask (genapic->send_IPI_mask)
-#define send_IPI_allbutself (genapic->send_IPI_allbutself)
-#define send_IPI_all (genapic->send_IPI_all)
-#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
-#define phys_pkg_id	(genapic->phys_pkg_id)
-
-#endif /* __ASM_MACH_APIC_H */
diff --git a/include/asm-x86/mc146818rtc.h b/include/asm-x86/mc146818rtc.h
index cdd9f96..daf1ccd 100644
--- a/include/asm-x86/mc146818rtc.h
+++ b/include/asm-x86/mc146818rtc.h
@@ -42,7 +42,7 @@
 static inline void lock_cmos(unsigned char reg)
 {
 	unsigned long new;
-	new = ((smp_processor_id()+1) << 8) | reg;
+	new = ((smp_processor_id() + 1) << 8) | reg;
 	for (;;) {
 		if (cmos_lock) {
 			cpu_relax();
@@ -57,22 +57,26 @@
 {
 	cmos_lock = 0;
 }
+
 static inline int do_i_have_lock_cmos(void)
 {
-	return (cmos_lock >> 8) == (smp_processor_id()+1);
+	return (cmos_lock >> 8) == (smp_processor_id() + 1);
 }
+
 static inline unsigned char current_lock_cmos_reg(void)
 {
 	return cmos_lock & 0xff;
 }
-#define lock_cmos_prefix(reg) \
+
+#define lock_cmos_prefix(reg)			\
 	do {					\
 		unsigned long cmos_flags;	\
 		local_irq_save(cmos_flags);	\
 		lock_cmos(reg)
-#define lock_cmos_suffix(reg) \
-		unlock_cmos();			\
-		local_irq_restore(cmos_flags);	\
+
+#define lock_cmos_suffix(reg)			\
+	unlock_cmos();				\
+	local_irq_restore(cmos_flags);		\
 	} while (0)
 #else
 #define lock_cmos_prefix(reg) do {} while (0)
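
Note: lock_cmos_prefix()/lock_cmos_suffix() are deliberately unbalanced halves of one do/while
block: the prefix opens the block, saves IRQ flags and spins for the cmos_lock word, and the
suffix releases the lock and restores the flags. A hedged usage sketch (the 0x70/0x71 RTC
index/data ports are the conventional ones, not defined by this header):

	/* Illustrative: read one CMOS register under the cmos_lock. */
	static unsigned char cmos_read_sketch(unsigned char reg)
	{
		unsigned char val;

		lock_cmos_prefix(reg);
		outb(reg, 0x70);	/* select the register ... */
		val = inb(0x71);	/* ... and read its data port */
		lock_cmos_suffix(reg);
		return val;
	}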
diff --git a/include/asm-x86/mca_dma.h b/include/asm-x86/mca_dma.h
index fbb1f3b..c3dca6e 100644
--- a/include/asm-x86/mca_dma.h
+++ b/include/asm-x86/mca_dma.h
@@ -12,18 +12,18 @@
  *   count by 2 when using 16-bit dma; that is not handled by these functions.
  *
  * Ramen Noodles are yummy.
- * 
- *  1998 Tymm Twillman <tymm@computer.org>  
+ *
+ *  1998 Tymm Twillman <tymm@computer.org>
  */
 
 /*
- * Registers that are used by the DMA controller; FN is the function register 
+ * Registers that are used by the DMA controller; FN is the function register
  *   (tell the controller what to do) and EXE is the execution register (how
  *   to do it)
  */
 
 #define MCA_DMA_REG_FN  0x18
-#define MCA_DMA_REG_EXE 0x1A 
+#define MCA_DMA_REG_EXE 0x1A
 
 /*
  * Functions that the DMA controller can do
@@ -43,9 +43,9 @@
 
 /*
  * Modes (used by setting MCA_DMA_FN_MODE in the function register)
- * 
+ *
  * Note that the MODE_READ is read from memory (write to device), and
- *   MODE_WRITE is vice-versa.  
+ *   MODE_WRITE is vice-versa.
  */
 
 #define MCA_DMA_MODE_XFER  0x04  /* read by default */
@@ -63,7 +63,7 @@
  *	IRQ context.
  */
 
-static __inline__ void mca_enable_dma(unsigned int dmanr)
+static inline void mca_enable_dma(unsigned int dmanr)
 {
 	outb(MCA_DMA_FN_RESET_MASK | dmanr, MCA_DMA_REG_FN);
 }
@@ -76,7 +76,7 @@
  *	IRQ context.
  */
 
-static __inline__ void mca_disable_dma(unsigned int dmanr)
+static inline void mca_disable_dma(unsigned int dmanr)
 {
 	outb(MCA_DMA_FN_MASK | dmanr, MCA_DMA_REG_FN);
 }
@@ -87,10 +87,10 @@
  *	@a: 24bit bus address
  *
  *	Load the address register in the DMA controller. This has a 24bit
- *	limitation (16Mb). 
+ *	limitation (16Mb).
  */
 
-static __inline__ void mca_set_dma_addr(unsigned int dmanr, unsigned int a)
+static inline void mca_set_dma_addr(unsigned int dmanr, unsigned int a)
 {
 	outb(MCA_DMA_FN_SET_ADDR | dmanr, MCA_DMA_REG_FN);
 	outb(a & 0xff, MCA_DMA_REG_EXE);
@@ -106,14 +106,14 @@
  *	limitation (16Mb). The return is a bus address.
  */
 
-static __inline__ unsigned int mca_get_dma_addr(unsigned int dmanr)
+static inline unsigned int mca_get_dma_addr(unsigned int dmanr)
 {
 	unsigned int addr;
 
 	outb(MCA_DMA_FN_GET_ADDR | dmanr, MCA_DMA_REG_FN);
 	addr = inb(MCA_DMA_REG_EXE);
 	addr |= inb(MCA_DMA_REG_EXE) << 8;
-	addr |= inb(MCA_DMA_REG_EXE) << 16;  
+	addr |= inb(MCA_DMA_REG_EXE) << 16;
 
 	return addr;
 }
@@ -127,7 +127,7 @@
  *	Setting a count of zero will not do what you expect.
  */
 
-static __inline__ void mca_set_dma_count(unsigned int dmanr, unsigned int count)
+static inline void mca_set_dma_count(unsigned int dmanr, unsigned int count)
 {
 	count--;  /* transfers one more than count -- correct for this */
 
@@ -144,7 +144,7 @@
  *	on this DMA channel.
  */
 
-static __inline__ unsigned int mca_get_dma_residue(unsigned int dmanr)
+static inline unsigned int mca_get_dma_residue(unsigned int dmanr)
 {
 	unsigned short count;
 
@@ -164,12 +164,12 @@
  *	with an I/O port target.
  */
 
-static __inline__ void mca_set_dma_io(unsigned int dmanr, unsigned int io_addr)
+static inline void mca_set_dma_io(unsigned int dmanr, unsigned int io_addr)
 {
 	/*
 	 * DMA from a port address -- set the io address
 	 */
-	
+
 	outb(MCA_DMA_FN_SET_IO | dmanr, MCA_DMA_REG_FN);
 	outb(io_addr & 0xff, MCA_DMA_REG_EXE);
 	outb((io_addr >>  8) & 0xff, MCA_DMA_REG_EXE);
@@ -192,7 +192,7 @@
  *	%MCA_DMA_MODE_16 to do 16bit transfers.
  */
 
-static __inline__ void mca_set_dma_mode(unsigned int dmanr, unsigned int mode)
+static inline void mca_set_dma_mode(unsigned int dmanr, unsigned int mode)
 {
 	outb(MCA_DMA_FN_SET_MODE | dmanr, MCA_DMA_REG_FN);
 	outb(mode, MCA_DMA_REG_EXE);
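
Note: taken together, the accessors above program one transfer in the usual
disable/configure/enable order. A minimal sketch under that assumption (the helper name is
hypothetical; virt_to_bus() and the 24-bit address limit are as documented above):

	/* Illustrative: device-to-memory transfer on MCA DMA channel dmanr. */
	static void mca_dma_setup_sketch(unsigned int dmanr, void *buf,
					 unsigned int len)
	{
		mca_disable_dma(dmanr);
		mca_set_dma_mode(dmanr, MCA_DMA_MODE_XFER);	/* read by default */
		mca_set_dma_addr(dmanr, virt_to_bus(buf));	/* 24-bit bus address */
		mca_set_dma_count(dmanr, len);			/* hw transfers len */
		mca_enable_dma(dmanr);
	}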
diff --git a/include/asm-x86/mmu.h b/include/asm-x86/mmu.h
index efa962c..00e8867 100644
--- a/include/asm-x86/mmu.h
+++ b/include/asm-x86/mmu.h
@@ -10,10 +10,10 @@
  *
  * cpu_vm_mask is used to optimize ldt flushing.
  */
-typedef struct { 
+typedef struct {
 	void *ldt;
 #ifdef CONFIG_X86_64
-	rwlock_t ldtlock; 
+	rwlock_t ldtlock;
 #endif
 	int size;
 	struct mutex lock;
diff --git a/include/asm-x86/mmu_context_32.h b/include/asm-x86/mmu_context_32.h
index 8198d1c..9756ae0 100644
--- a/include/asm-x86/mmu_context_32.h
+++ b/include/asm-x86/mmu_context_32.h
@@ -62,7 +62,7 @@
 		BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);
 
 		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
-			/* We were in lazy tlb mode and leave_mm disabled 
+			/* We were in lazy tlb mode and leave_mm disabled
 			 * tlb flush IPI delivery. We must reload %cr3.
 			 */
 			load_cr3(next->pgd);
@@ -75,10 +75,10 @@
 #define deactivate_mm(tsk, mm)			\
 	asm("movl %0,%%gs": :"r" (0));
 
-#define activate_mm(prev, next)				\
-	do {						\
-		paravirt_activate_mm(prev, next);	\
-		switch_mm((prev),(next),NULL);		\
-	} while(0);
+#define activate_mm(prev, next)			\
+do {						\
+	paravirt_activate_mm((prev), (next));	\
+	switch_mm((prev), (next), NULL);	\
+} while (0);
 
 #endif
diff --git a/include/asm-x86/mmu_context_64.h b/include/asm-x86/mmu_context_64.h
index ad6dc82..ca44c71 100644
--- a/include/asm-x86/mmu_context_64.h
+++ b/include/asm-x86/mmu_context_64.h
@@ -20,12 +20,12 @@
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 #ifdef CONFIG_SMP
-	if (read_pda(mmu_state) == TLBSTATE_OK) 
+	if (read_pda(mmu_state) == TLBSTATE_OK)
 		write_pda(mmu_state, TLBSTATE_LAZY);
 #endif
 }
 
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, 
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			     struct task_struct *tsk)
 {
 	unsigned cpu = smp_processor_id();
@@ -39,7 +39,7 @@
 		cpu_set(cpu, next->cpu_vm_mask);
 		load_cr3(next->pgd);
 
-		if (unlikely(next->context.ldt != prev->context.ldt)) 
+		if (unlikely(next->context.ldt != prev->context.ldt))
 			load_LDT_nolock(&next->context);
 	}
 #ifdef CONFIG_SMP
@@ -48,7 +48,7 @@
 		if (read_pda(active_mm) != next)
 			BUG();
 		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
-			/* We were in lazy tlb mode and leave_mm disabled 
+			/* We were in lazy tlb mode and leave_mm disabled
 			 * tlb flush IPI delivery. We must reload CR3
 			 * to make sure to use no freed page tables.
 			 */
@@ -59,13 +59,14 @@
 #endif
 }
 
-#define deactivate_mm(tsk,mm)	do { \
-	load_gs_index(0); \
-	asm volatile("movl %0,%%fs"::"r"(0));  \
-} while(0)
+#define deactivate_mm(tsk, mm)			\
+do {						\
+	load_gs_index(0);			\
+	asm volatile("movl %0,%%fs"::"r"(0));	\
+} while (0)
 
-#define activate_mm(prev, next) \
-	switch_mm((prev),(next),NULL)
+#define activate_mm(prev, next)			\
+	switch_mm((prev), (next), NULL)
 
 
 #endif
diff --git a/include/asm-x86/mmx.h b/include/asm-x86/mmx.h
index 46b71da..9408812 100644
--- a/include/asm-x86/mmx.h
+++ b/include/asm-x86/mmx.h
@@ -6,7 +6,7 @@
  */
 
 #include <linux/types.h>
- 
+
 extern void *_mmx_memcpy(void *to, const void *from, size_t size);
 extern void mmx_clear_page(void *page);
 extern void mmx_copy_page(void *to, void *from);
diff --git a/include/asm-x86/mmzone_32.h b/include/asm-x86/mmzone_32.h
index 274a595..cb2cad0 100644
--- a/include/asm-x86/mmzone_32.h
+++ b/include/asm-x86/mmzone_32.h
@@ -18,7 +18,7 @@
 	#include <asm/srat.h>
 #endif
 
-extern int get_memcfg_numa_flat(void );
+extern int get_memcfg_numa_flat(void);
 /*
  * This allows any one NUMA architecture to be compiled
  * for, and still fall back to the flat function if it
@@ -129,7 +129,7 @@
 	struct pglist_data  __maybe_unused			\
 				*__alloc_bootmem_node__pgdat = (pgdat);	\
 	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE,		\
-						__pa(MAX_DMA_ADDRESS))	\
+						__pa(MAX_DMA_ADDRESS));	\
 })
 #define alloc_bootmem_low_pages_node(pgdat, x)				\
 ({									\
diff --git a/include/asm-x86/mmzone_64.h b/include/asm-x86/mmzone_64.h
index ebaf966..594bd0d 100644
--- a/include/asm-x86/mmzone_64.h
+++ b/include/asm-x86/mmzone_64.h
@@ -7,7 +7,7 @@
 
 #ifdef CONFIG_NUMA
 
-#define VIRTUAL_BUG_ON(x) 
+#define VIRTUAL_BUG_ON(x)
 
 #include <asm/smp.h>
 
@@ -16,7 +16,7 @@
 	int shift;
 	unsigned int mapsize;
 	s16 *map;
-	s16 embedded_map[64-8];
+	s16 embedded_map[64 - 8];
 } ____cacheline_aligned; /* total size = 128 bytes */
 extern struct memnode memnode;
 #define memnode_shift memnode.shift
@@ -25,27 +25,27 @@
 
 extern struct pglist_data *node_data[];
 
-static inline __attribute__((pure)) int phys_to_nid(unsigned long addr) 
-{ 
-	unsigned nid; 
+static inline __attribute__((pure)) int phys_to_nid(unsigned long addr)
+{
+	unsigned nid;
 	VIRTUAL_BUG_ON(!memnodemap);
 	VIRTUAL_BUG_ON((addr >> memnode_shift) >= memnodemapsize);
-	nid = memnodemap[addr >> memnode_shift]; 
-	VIRTUAL_BUG_ON(nid >= MAX_NUMNODES || !node_data[nid]); 
-	return nid; 
-} 
+	nid = memnodemap[addr >> memnode_shift];
+	VIRTUAL_BUG_ON(nid >= MAX_NUMNODES || !node_data[nid]);
+	return nid;
+}
 
 #define NODE_DATA(nid)		(node_data[nid])
 
 #define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid)       (NODE_DATA(nid)->node_start_pfn + \
+#define node_end_pfn(nid)       (NODE_DATA(nid)->node_start_pfn +	\
 				 NODE_DATA(nid)->node_spanned_pages)
 
 extern int early_pfn_to_nid(unsigned long pfn);
 
 #ifdef CONFIG_NUMA_EMU
-#define FAKE_NODE_MIN_SIZE	(64*1024*1024)
-#define FAKE_NODE_MIN_HASH_MASK	(~(FAKE_NODE_MIN_SIZE - 1uL))
+#define FAKE_NODE_MIN_SIZE	(64 * 1024 * 1024)
+#define FAKE_NODE_MIN_HASH_MASK	(~(FAKE_NODE_MIN_SIZE - 1UL))
 #endif
 
 #endif
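
Note: the memnode map is a flat array indexed by the physical address shifted right by
memnode_shift, so each slot covers 2^shift bytes. A worked example assuming shift == 24
(16 MB granules); this is exactly the lookup phys_to_nid() performs above:

	/* Illustrative: with memnode_shift == 24, address 0x50000000 (1280 MB)
	 * lands in map slot 0x50, the node owning that 16 MB granule. */
	int nid = memnodemap[0x50000000UL >> 24];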
diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h
index 781ad74..57a991b 100644
--- a/include/asm-x86/mpspec.h
+++ b/include/asm-x86/mpspec.h
@@ -1,16 +1,13 @@
 #ifndef _AM_X86_MPSPEC_H
 #define _AM_X86_MPSPEC_H
 
+#include <linux/init.h>
+
 #include <asm/mpspec_def.h>
 
 #ifdef CONFIG_X86_32
 #include <mach_mpspec.h>
 
-extern int mp_bus_id_to_type[MAX_MP_BUSSES];
-extern int mp_bus_id_to_node[MAX_MP_BUSSES];
-extern int mp_bus_id_to_local[MAX_MP_BUSSES];
-extern int quad_local_to_mp_bus_id[NR_CPUS/4][4];
-
 extern unsigned int def_to_bigsmp;
 extern int apic_version[MAX_APICS];
 extern u8 apicid_2_node[];
@@ -24,27 +21,30 @@
 /* Each PCI slot may be a combo card with its own bus.  4 IRQ pins per slot. */
 #define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4)
 
-extern DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
+extern void early_find_smp_config(void);
+extern void early_get_smp_config(void);
 
 #endif
 
+#if defined(CONFIG_MCA) || defined(CONFIG_EISA)
+extern int mp_bus_id_to_type[MAX_MP_BUSSES];
+#endif
+
+extern DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
+
 extern int mp_bus_id_to_pci_bus[MAX_MP_BUSSES];
 
 extern unsigned int boot_cpu_physical_apicid;
 extern int smp_found_config;
-extern int nr_ioapics;
-extern int mp_irq_entries;
-extern struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
 extern int mpc_default_type;
 extern unsigned long mp_lapic_addr;
 
 extern void find_smp_config(void);
 extern void get_smp_config(void);
 
+void __cpuinit generic_processor_info(int apicid, int version);
 #ifdef CONFIG_ACPI
-extern void mp_register_lapic(u8 id, u8 enabled);
-extern void mp_register_lapic_address(u64 address);
-extern void mp_register_ioapic(u8 id, u32 address, u32 gsi_base);
+extern void mp_register_ioapic(int id, u32 address, u32 gsi_base);
 extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
 				   u32 gsi);
 extern void mp_config_acpi_legacy_irqs(void);
@@ -53,8 +53,7 @@
 
 #define PHYSID_ARRAY_SIZE	BITS_TO_LONGS(MAX_APICS)
 
-struct physid_mask
-{
+struct physid_mask {
 	unsigned long mask[PHYSID_ARRAY_SIZE];
 };
 
@@ -63,34 +62,34 @@
 #define physid_set(physid, map)			set_bit(physid, (map).mask)
 #define physid_clear(physid, map)		clear_bit(physid, (map).mask)
 #define physid_isset(physid, map)		test_bit(physid, (map).mask)
-#define physid_test_and_set(physid, map) \
+#define physid_test_and_set(physid, map)			\
 	test_and_set_bit(physid, (map).mask)
 
-#define physids_and(dst, src1, src2) \
+#define physids_and(dst, src1, src2)					\
 	bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_APICS)
 
-#define physids_or(dst, src1, src2) \
+#define physids_or(dst, src1, src2)					\
 	bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_APICS)
 
-#define physids_clear(map) \
+#define physids_clear(map)					\
 	bitmap_zero((map).mask, MAX_APICS)
 
-#define physids_complement(dst, src) \
+#define physids_complement(dst, src)				\
 	bitmap_complement((dst).mask, (src).mask, MAX_APICS)
 
-#define physids_empty(map) \
+#define physids_empty(map)					\
 	bitmap_empty((map).mask, MAX_APICS)
 
-#define physids_equal(map1, map2) \
+#define physids_equal(map1, map2)				\
 	bitmap_equal((map1).mask, (map2).mask, MAX_APICS)
 
-#define physids_weight(map) \
+#define physids_weight(map)					\
 	bitmap_weight((map).mask, MAX_APICS)
 
-#define physids_shift_right(d, s, n) \
+#define physids_shift_right(d, s, n)				\
 	bitmap_shift_right((d).mask, (s).mask, n, MAX_APICS)
 
-#define physids_shift_left(d, s, n) \
+#define physids_shift_left(d, s, n)				\
 	bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS)
 
 #define physids_coerce(map)			((map).mask[0])
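
Note: the physid_* wrappers above are thin veneers over the generic bitmap API, sized to
MAX_APICS. A short usage sketch (physid_mask_t is the typedef for the struct physid_mask
defined earlier in this header):

	/* Illustrative: build a set of APIC ids and test membership. */
	physid_mask_t present;

	physids_clear(present);
	physid_set(3, present);
	physid_set(5, present);
	if (physid_isset(3, present) && physids_weight(present) == 2)
		/* both ids recorded */;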
diff --git a/include/asm-x86/mpspec_def.h b/include/asm-x86/mpspec_def.h
index 3504617..dc6ef85 100644
--- a/include/asm-x86/mpspec_def.h
+++ b/include/asm-x86/mpspec_def.h
@@ -11,7 +11,7 @@
  * information is.
  */
 
-#define SMP_MAGIC_IDENT	(('_'<<24)|('P'<<16)|('M'<<8)|'_')
+#define SMP_MAGIC_IDENT	(('_'<<24) | ('P'<<16) | ('M'<<8) | '_')
 
 #ifdef CONFIG_X86_32
 # define MAX_MPC_ENTRY 1024
@@ -23,8 +23,7 @@
 # define MAX_APICS 255
 #endif
 
-struct intel_mp_floating
-{
+struct intel_mp_floating {
 	char mpf_signature[4];		/* "_MP_"			*/
 	unsigned int mpf_physptr;	/* Configuration table address	*/
 	unsigned char mpf_length;	/* Our length (paragraphs)	*/
@@ -39,14 +38,13 @@
 
 #define MPC_SIGNATURE "PCMP"
 
-struct mp_config_table
-{
+struct mp_config_table {
 	char mpc_signature[4];
 	unsigned short mpc_length;	/* Size of table */
-	char  mpc_spec;			/* 0x01 */
-	char  mpc_checksum;
-	char  mpc_oem[8];
-	char  mpc_productid[12];
+	char mpc_spec;			/* 0x01 */
+	char mpc_checksum;
+	char mpc_oem[8];
+	char mpc_productid[12];
 	unsigned int mpc_oemptr;	/* 0 if not present */
 	unsigned short mpc_oemsize;	/* 0 if not present */
 	unsigned short mpc_oemcount;
@@ -71,8 +69,7 @@
 #define CPU_MODEL_MASK		0x00F0
 #define CPU_FAMILY_MASK		0x0F00
 
-struct mpc_config_processor
-{
+struct mpc_config_processor {
 	unsigned char mpc_type;
 	unsigned char mpc_apicid;	/* Local APIC number */
 	unsigned char mpc_apicver;	/* Its versions */
@@ -82,8 +79,7 @@
 	unsigned int mpc_reserved[2];
 };
 
-struct mpc_config_bus
-{
+struct mpc_config_bus {
 	unsigned char mpc_type;
 	unsigned char mpc_busid;
 	unsigned char mpc_bustype[6];
@@ -111,8 +107,7 @@
 
 #define MPC_APIC_USABLE		0x01
 
-struct mpc_config_ioapic
-{
+struct mpc_config_ioapic {
 	unsigned char mpc_type;
 	unsigned char mpc_apicid;
 	unsigned char mpc_apicver;
@@ -120,8 +115,7 @@
 	unsigned int mpc_apicaddr;
 };
 
-struct mpc_config_intsrc
-{
+struct mpc_config_intsrc {
 	unsigned char mpc_type;
 	unsigned char mpc_irqtype;
 	unsigned short mpc_irqflag;
@@ -144,8 +138,7 @@
 
 #define MP_APIC_ALL	0xFF
 
-struct mpc_config_lintsrc
-{
+struct mpc_config_lintsrc {
 	unsigned char mpc_type;
 	unsigned char mpc_irqtype;
 	unsigned short mpc_irqflag;
@@ -157,8 +150,7 @@
 
 #define MPC_OEM_SIGNATURE "_OEM"
 
-struct mp_config_oemtable
-{
+struct mp_config_oemtable {
 	char oem_signature[4];
 	unsigned short oem_length;	/* Size of table */
 	char  oem_rev;			/* 0x01 */
@@ -166,17 +158,6 @@
 	char  mpc_oem[8];
 };
 
-struct mpc_config_translation
-{
-	unsigned char mpc_type;
-	unsigned char trans_len;
-	unsigned char trans_type;
-	unsigned char trans_quad;
-	unsigned char trans_global;
-	unsigned char trans_local;
-	unsigned short trans_reserved;
-};
-
 /*
  *	Default configurations
  *
@@ -196,4 +177,3 @@
 	MP_BUS_MCA,
 };
 #endif
-
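
Note: SMP_MAGIC_IDENT is "_MP_" packed little-endian, which is why the MP-table scan can
compare one 32-bit word per 16-byte paragraph instead of calling strcmp. A hedged sketch of
that scan (the real smp_scan_config() also verifies the structure's checksum and length,
omitted here; the function name is hypothetical):

	/* Illustrative: find an intel_mp_floating structure in a mapped range. */
	static struct intel_mp_floating *mpf_scan_sketch(unsigned int *base,
							 unsigned long length)
	{
		unsigned int *p;

		for (p = base; length > 0; p += 4, length -= 16)
			if (*p == SMP_MAGIC_IDENT)	/* "_MP_" */
				return (struct intel_mp_floating *)p;
		return NULL;
	}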
diff --git a/include/asm-x86/msidef.h b/include/asm-x86/msidef.h
index 5b8acdd..296f29c 100644
--- a/include/asm-x86/msidef.h
+++ b/include/asm-x86/msidef.h
@@ -11,7 +11,8 @@
 
 #define MSI_DATA_VECTOR_SHIFT		0
 #define  MSI_DATA_VECTOR_MASK		0x000000ff
-#define	 MSI_DATA_VECTOR(v)		(((v) << MSI_DATA_VECTOR_SHIFT) & MSI_DATA_VECTOR_MASK)
+#define	 MSI_DATA_VECTOR(v)		(((v) << MSI_DATA_VECTOR_SHIFT) & \
+					 MSI_DATA_VECTOR_MASK)
 
 #define MSI_DATA_DELIVERY_MODE_SHIFT	8
 #define  MSI_DATA_DELIVERY_FIXED	(0 << MSI_DATA_DELIVERY_MODE_SHIFT)
@@ -37,11 +38,14 @@
 #define	 MSI_ADDR_DEST_MODE_LOGICAL	(1 << MSI_ADDR_DEST_MODE_SHIFT)
 
 #define MSI_ADDR_REDIRECTION_SHIFT	3
-#define  MSI_ADDR_REDIRECTION_CPU	(0 << MSI_ADDR_REDIRECTION_SHIFT) /* dedicated cpu */
-#define  MSI_ADDR_REDIRECTION_LOWPRI	(1 << MSI_ADDR_REDIRECTION_SHIFT) /* lowest priority */
+#define  MSI_ADDR_REDIRECTION_CPU	(0 << MSI_ADDR_REDIRECTION_SHIFT)
+					/* dedicated cpu */
+#define  MSI_ADDR_REDIRECTION_LOWPRI	(1 << MSI_ADDR_REDIRECTION_SHIFT)
+					/* lowest priority */
 
 #define MSI_ADDR_DEST_ID_SHIFT		12
 #define	 MSI_ADDR_DEST_ID_MASK		0x00ffff0
-#define  MSI_ADDR_DEST_ID(dest)		(((dest) << MSI_ADDR_DEST_ID_SHIFT) & MSI_ADDR_DEST_ID_MASK)
+#define  MSI_ADDR_DEST_ID(dest)		(((dest) << MSI_ADDR_DEST_ID_SHIFT) & \
+					 MSI_ADDR_DEST_ID_MASK)
 
 #endif /* ASM_MSIDEF_H */
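
Note: the rewrapped MSI_DATA_VECTOR()/MSI_ADDR_DEST_ID() helpers mask their argument into the
right field, so a message is composed by OR-ing fields together. A sketch using this header's
constants (MSI_ADDR_BASE_LO and the trigger/level/dest-mode fields are defined elsewhere in
this file, outside the hunk; the vector and APIC id are placeholders):

	/* Illustrative: fixed delivery, physical destination, edge triggered. */
	u32 data = MSI_DATA_TRIGGER_EDGE | MSI_DATA_LEVEL_ASSERT |
		   MSI_DATA_DELIVERY_FIXED | MSI_DATA_VECTOR(0x31);
	u32 addr = MSI_ADDR_BASE_LO | MSI_ADDR_DEST_MODE_PHYSICAL |
		   MSI_ADDR_REDIRECTION_CPU | MSI_ADDR_DEST_ID(0);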
diff --git a/include/asm-x86/msr-index.h b/include/asm-x86/msr-index.h
index fae118a..09413ad 100644
--- a/include/asm-x86/msr-index.h
+++ b/include/asm-x86/msr-index.h
@@ -57,6 +57,8 @@
 #define MSR_MTRRfix4K_F8000		0x0000026f
 #define MSR_MTRRdefType			0x000002ff
 
+#define MSR_IA32_CR_PAT			0x00000277
+
 #define MSR_IA32_DEBUGCTLMSR		0x000001d9
 #define MSR_IA32_LASTBRANCHFROMIP	0x000001db
 #define MSR_IA32_LASTBRANCHTOIP		0x000001dc
@@ -83,6 +85,7 @@
 /* AMD64 MSRs. Not complete. See the architecture manual for a more
    complete list. */
 
+#define MSR_AMD64_NB_CFG		0xc001001f
 #define MSR_AMD64_IBSFETCHCTL		0xc0011030
 #define MSR_AMD64_IBSFETCHLINAD		0xc0011031
 #define MSR_AMD64_IBSFETCHPHYSAD	0xc0011032
@@ -109,6 +112,7 @@
 #define MSR_K8_SYSCFG			0xc0010010
 #define MSR_K8_HWCR			0xc0010015
 #define MSR_K8_ENABLE_C1E		0xc0010055
+#define MSR_K8_TSEG_ADDR		0xc0010112
 #define K8_MTRRFIXRANGE_DRAM_ENABLE	0x00040000 /* MtrrFixDramEn bit    */
 #define K8_MTRRFIXRANGE_DRAM_MODIFY	0x00080000 /* MtrrFixDramModEn bit */
 #define K8_MTRR_RDMEM_WRMEM_MASK	0x18181818 /* Mask: RdMem|WrMem    */
diff --git a/include/asm-x86/msr.h b/include/asm-x86/msr.h
index 3ca29eb..3707650 100644
--- a/include/asm-x86/msr.h
+++ b/include/asm-x86/msr.h
@@ -16,8 +16,8 @@
 static inline unsigned long long native_read_tscp(unsigned int *aux)
 {
 	unsigned long low, high;
-	asm volatile (".byte 0x0f,0x01,0xf9"
-		      : "=a" (low), "=d" (high), "=c" (*aux));
+	asm volatile(".byte 0x0f,0x01,0xf9"
+		     : "=a" (low), "=d" (high), "=c" (*aux));
 	return low | ((u64)high << 32);
 }
 
@@ -29,7 +29,7 @@
  */
 #ifdef CONFIG_X86_64
 #define DECLARE_ARGS(val, low, high)	unsigned low, high
-#define EAX_EDX_VAL(val, low, high)	(low | ((u64)(high) << 32))
+#define EAX_EDX_VAL(val, low, high)	((low) | ((u64)(high) << 32))
 #define EAX_EDX_ARGS(val, low, high)	"a" (low), "d" (high)
 #define EAX_EDX_RET(val, low, high)	"=a" (low), "=d" (high)
 #else
@@ -57,7 +57,7 @@
 		     ".section .fixup,\"ax\"\n\t"
 		     "3:  mov %3,%0 ; jmp 1b\n\t"
 		     ".previous\n\t"
-		     _ASM_EXTABLE(2b,3b)
+		     _ASM_EXTABLE(2b, 3b)
 		     : "=r" (*err), EAX_EDX_RET(val, low, high)
 		     : "c" (msr), "i" (-EFAULT));
 	return EAX_EDX_VAL(val, low, high);
@@ -78,10 +78,10 @@
 		     ".section .fixup,\"ax\"\n\t"
 		     "3:  mov %4,%0 ; jmp 1b\n\t"
 		     ".previous\n\t"
-		     _ASM_EXTABLE(2b,3b)
+		     _ASM_EXTABLE(2b, 3b)
 		     : "=a" (err)
 		     : "c" (msr), "0" (low), "d" (high),
-		       "i" (-EFAULT));
+		     "i" (-EFAULT));
 	return err;
 }
 
@@ -116,23 +116,23 @@
  * pointer indirection), this allows gcc to optimize better
  */
 
-#define rdmsr(msr,val1,val2)						\
-	do {								\
-		u64 __val = native_read_msr(msr);			\
-		(val1) = (u32)__val;					\
-		(val2) = (u32)(__val >> 32);				\
-	} while(0)
+#define rdmsr(msr, val1, val2)					\
+do {								\
+	u64 __val = native_read_msr((msr));			\
+	(val1) = (u32)__val;					\
+	(val2) = (u32)(__val >> 32);				\
+} while (0)
 
 static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
 {
 	native_write_msr(msr, low, high);
 }
 
-#define rdmsrl(msr,val)							\
-	((val) = native_read_msr(msr))
+#define rdmsrl(msr, val)			\
+	((val) = native_read_msr((msr)))
 
 #define wrmsrl(msr, val)						\
-	native_write_msr(msr, (u32)((u64)(val)), (u32)((u64)(val) >> 32))
+	native_write_msr((msr), (u32)((u64)(val)), (u32)((u64)(val) >> 32))
 
 /* wrmsr with exception handling */
 static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
@@ -141,14 +141,22 @@
 }
 
 /* rdmsr with exception handling */
-#define rdmsr_safe(msr,p1,p2)						\
-	({								\
-		int __err;						\
-		u64 __val = native_read_msr_safe(msr, &__err);		\
-		(*p1) = (u32)__val;					\
-		(*p2) = (u32)(__val >> 32);				\
-		__err;							\
-	})
+#define rdmsr_safe(msr, p1, p2)					\
+({								\
+	int __err;						\
+	u64 __val = native_read_msr_safe((msr), &__err);	\
+	(*p1) = (u32)__val;					\
+	(*p2) = (u32)(__val >> 32);				\
+	__err;							\
+})
+
+static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
+{
+	int err;
+
+	*p = native_read_msr_safe(msr, &err);
+	return err;
+}
 
 #define rdtscl(low)						\
 	((low) = (u32)native_read_tsc())
@@ -156,35 +164,37 @@
 #define rdtscll(val)						\
 	((val) = native_read_tsc())
 
-#define rdpmc(counter,low,high)					\
-	do {							\
-		u64 _l = native_read_pmc(counter);		\
-		(low)  = (u32)_l;				\
-		(high) = (u32)(_l >> 32);			\
-	} while(0)
+#define rdpmc(counter, low, high)			\
+do {							\
+	u64 _l = native_read_pmc((counter));		\
+	(low)  = (u32)_l;				\
+	(high) = (u32)(_l >> 32);			\
+} while (0)
 
-#define rdtscp(low, high, aux)						\
-       do {                                                            \
-		unsigned long long _val = native_read_tscp(&(aux));     \
-		(low) = (u32)_val;                                      \
-		(high) = (u32)(_val >> 32);                             \
-       } while (0)
+#define rdtscp(low, high, aux)					\
+do {                                                            \
+	unsigned long long _val = native_read_tscp(&(aux));     \
+	(low) = (u32)_val;                                      \
+	(high) = (u32)(_val >> 32);                             \
+} while (0)
 
 #define rdtscpll(val, aux) (val) = native_read_tscp(&(aux))
 
 #endif	/* !CONFIG_PARAVIRT */
 
 
-#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))
+#define checking_wrmsrl(msr, val) wrmsr_safe((msr), (u32)(val),		\
+					     (u32)((val) >> 32))
 
-#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
+#define write_tsc(val1, val2) wrmsr(0x10, (val1), (val2))
 
-#define write_rdtscp_aux(val) wrmsr(0xc0000103, val, 0)
+#define write_rdtscp_aux(val) wrmsr(0xc0000103, (val), 0)
 
 #ifdef CONFIG_SMP
 void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
 void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
 int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
+
 int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
 #else  /*  CONFIG_SMP  */
 static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
@@ -195,7 +205,8 @@
 {
 	wrmsr(msr_no, l, h);
 }
-static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
+				    u32 *l, u32 *h)
 {
 	return rdmsr_safe(msr_no, l, h);
 }
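
Note: the _safe variants trade a fixup-table entry for an error return instead of an oops,
which is the right tool when probing an MSR the CPU may not implement. A short sketch using the
rdmsrl_safe() helper added above (the PAT-entry edit is purely illustrative):

	/* Illustrative: probe, tweak and write back MSR_IA32_CR_PAT. */
	u64 pat;

	if (rdmsrl_safe(MSR_IA32_CR_PAT, &pat) == 0) {	/* 0 => MSR exists */
		pat &= ~0x7ULL;				/* example edit only */
		wrmsrl(MSR_IA32_CR_PAT, pat);
	}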
diff --git a/include/asm-x86/mtrr.h b/include/asm-x86/mtrr.h
index 319d065..a69a01a 100644
--- a/include/asm-x86/mtrr.h
+++ b/include/asm-x86/mtrr.h
@@ -28,8 +28,7 @@
 
 #define	MTRR_IOCTL_BASE	'M'
 
-struct mtrr_sentry
-{
+struct mtrr_sentry {
     unsigned long base;    /*  Base address     */
     unsigned int size;    /*  Size of region   */
     unsigned int type;     /*  Type of region   */
@@ -41,8 +40,7 @@
    will break. */
 
 #ifdef __i386__
-struct mtrr_gentry
-{
+struct mtrr_gentry {
     unsigned int regnum;   /*  Register number  */
     unsigned long base;    /*  Base address     */
     unsigned int size;    /*  Size of region   */
@@ -51,8 +49,7 @@
 
 #else /* __i386__ */
 
-struct mtrr_gentry
-{
+struct mtrr_gentry {
     unsigned long base;    /*  Base address     */
     unsigned int size;    /*  Size of region   */
     unsigned int regnum;   /*  Register number  */
@@ -86,38 +83,45 @@
 
 /*  The following functions are for use by other drivers  */
 # ifdef CONFIG_MTRR
+extern u8 mtrr_type_lookup(u64 addr, u64 end);
 extern void mtrr_save_fixed_ranges(void *);
 extern void mtrr_save_state(void);
-extern int mtrr_add (unsigned long base, unsigned long size,
-		     unsigned int type, bool increment);
-extern int mtrr_add_page (unsigned long base, unsigned long size,
-		     unsigned int type, bool increment);
-extern int mtrr_del (int reg, unsigned long base, unsigned long size);
-extern int mtrr_del_page (int reg, unsigned long base, unsigned long size);
+extern int mtrr_add(unsigned long base, unsigned long size,
+		    unsigned int type, bool increment);
+extern int mtrr_add_page(unsigned long base, unsigned long size,
+			 unsigned int type, bool increment);
+extern int mtrr_del(int reg, unsigned long base, unsigned long size);
+extern int mtrr_del_page(int reg, unsigned long base, unsigned long size);
 extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi);
 extern void mtrr_ap_init(void);
 extern void mtrr_bp_init(void);
 extern int mtrr_trim_uncached_memory(unsigned long end_pfn);
+extern int amd_special_default_mtrr(void);
 #  else
+static inline u8 mtrr_type_lookup(u64 addr, u64 end)
+{
+	/*
+	 * Return no-MTRRs:
+	 */
+	return 0xff;
+}
 #define mtrr_save_fixed_ranges(arg) do {} while (0)
 #define mtrr_save_state() do {} while (0)
-static __inline__ int mtrr_add (unsigned long base, unsigned long size,
+static inline int mtrr_add(unsigned long base, unsigned long size,
+			   unsigned int type, bool increment)
+{
+    return -ENODEV;
+}
+static inline int mtrr_add_page(unsigned long base, unsigned long size,
 				unsigned int type, bool increment)
 {
     return -ENODEV;
 }
-static __inline__ int mtrr_add_page (unsigned long base, unsigned long size,
-				unsigned int type, bool increment)
+static inline int mtrr_del(int reg, unsigned long base, unsigned long size)
 {
     return -ENODEV;
 }
-static __inline__ int mtrr_del (int reg, unsigned long base,
-				unsigned long size)
-{
-    return -ENODEV;
-}
-static __inline__ int mtrr_del_page (int reg, unsigned long base,
-				unsigned long size)
+static inline int mtrr_del_page(int reg, unsigned long base, unsigned long size)
 {
     return -ENODEV;
 }
@@ -125,7 +129,9 @@
 {
 	return 0;
 }
-static __inline__ void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) {;}
+static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
+{
+}
 
 #define mtrr_ap_init() do {} while (0)
 #define mtrr_bp_init() do {} while (0)
@@ -134,15 +140,13 @@
 #ifdef CONFIG_COMPAT
 #include <linux/compat.h>
 
-struct mtrr_sentry32
-{
+struct mtrr_sentry32 {
     compat_ulong_t base;    /*  Base address     */
     compat_uint_t size;    /*  Size of region   */
     compat_uint_t type;     /*  Type of region   */
 };
 
-struct mtrr_gentry32
-{
+struct mtrr_gentry32 {
     compat_ulong_t regnum;   /*  Register number  */
     compat_uint_t base;    /*  Base address     */
     compat_uint_t size;    /*  Size of region   */
@@ -151,16 +155,17 @@
 
 #define MTRR_IOCTL_BASE 'M'
 
-#define MTRRIOC32_ADD_ENTRY        _IOW(MTRR_IOCTL_BASE,  0, struct mtrr_sentry32)
-#define MTRRIOC32_SET_ENTRY        _IOW(MTRR_IOCTL_BASE,  1, struct mtrr_sentry32)
-#define MTRRIOC32_DEL_ENTRY        _IOW(MTRR_IOCTL_BASE,  2, struct mtrr_sentry32)
-#define MTRRIOC32_GET_ENTRY        _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry32)
-#define MTRRIOC32_KILL_ENTRY       _IOW(MTRR_IOCTL_BASE,  4, struct mtrr_sentry32)
-#define MTRRIOC32_ADD_PAGE_ENTRY   _IOW(MTRR_IOCTL_BASE,  5, struct mtrr_sentry32)
-#define MTRRIOC32_SET_PAGE_ENTRY   _IOW(MTRR_IOCTL_BASE,  6, struct mtrr_sentry32)
-#define MTRRIOC32_DEL_PAGE_ENTRY   _IOW(MTRR_IOCTL_BASE,  7, struct mtrr_sentry32)
-#define MTRRIOC32_GET_PAGE_ENTRY   _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry32)
-#define MTRRIOC32_KILL_PAGE_ENTRY  _IOW(MTRR_IOCTL_BASE,  9, struct mtrr_sentry32)
+#define MTRRIOC32_ADD_ENTRY      _IOW(MTRR_IOCTL_BASE,  0, struct mtrr_sentry32)
+#define MTRRIOC32_SET_ENTRY      _IOW(MTRR_IOCTL_BASE,  1, struct mtrr_sentry32)
+#define MTRRIOC32_DEL_ENTRY      _IOW(MTRR_IOCTL_BASE,  2, struct mtrr_sentry32)
+#define MTRRIOC32_GET_ENTRY      _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry32)
+#define MTRRIOC32_KILL_ENTRY     _IOW(MTRR_IOCTL_BASE,  4, struct mtrr_sentry32)
+#define MTRRIOC32_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE,  5, struct mtrr_sentry32)
+#define MTRRIOC32_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE,  6, struct mtrr_sentry32)
+#define MTRRIOC32_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE,  7, struct mtrr_sentry32)
+#define MTRRIOC32_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry32)
+#define MTRRIOC32_KILL_PAGE_ENTRY		\
+				 _IOW(MTRR_IOCTL_BASE,  9, struct mtrr_sentry32)
 #endif /* CONFIG_COMPAT */
 
 #endif /* __KERNEL__ */
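
Note: mtrr_add() returns the register number on success (needed later by mtrr_del()) or a
negative errno, and the !CONFIG_MTRR stubs above fold everything to -ENODEV so callers need no
ifdefs. A hedged sketch (MTRR_TYPE_WRCOMB comes from the region-type list earlier in this
header; fb_base/fb_size are hypothetical):

	/* Illustrative: write-combine a framebuffer, undo it at teardown. */
	int reg = mtrr_add(fb_base, fb_size, MTRR_TYPE_WRCOMB, true);

	if (reg >= 0) {
		/* ... use the write-combined mapping ... */
		mtrr_del(reg, fb_base, fb_size);
	}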
diff --git a/include/asm-x86/mutex_32.h b/include/asm-x86/mutex_32.h
index bbeefb9..73e928e 100644
--- a/include/asm-x86/mutex_32.h
+++ b/include/asm-x86/mutex_32.h
@@ -9,7 +9,7 @@
 #ifndef _ASM_MUTEX_H
 #define _ASM_MUTEX_H
 
-#include "asm/alternative.h"
+#include <asm/alternative.h>
 
 /**
  *  __mutex_fastpath_lock - try to take the lock by moving the count
@@ -21,22 +21,20 @@
  * wasn't 1 originally. This function MUST leave the value lower than 1
  * even when the "1" assertion wasn't true.
  */
-#define __mutex_fastpath_lock(count, fail_fn)				\
-do {									\
-	unsigned int dummy;						\
-									\
-	typecheck(atomic_t *, count);					\
+#define __mutex_fastpath_lock(count, fail_fn)			\
+do {								\
+	unsigned int dummy;					\
+								\
+	typecheck(atomic_t *, count);				\
 	typecheck_fn(void (*)(atomic_t *), fail_fn);		\
-									\
-	__asm__ __volatile__(						\
-		LOCK_PREFIX "   decl (%%eax)	\n"			\
-			"   jns 1f		\n"			\
-			"   call "#fail_fn"	\n"			\
-			"1:			\n"			\
-									\
-		:"=a" (dummy)						\
-		: "a" (count)						\
-		: "memory", "ecx", "edx");				\
+								\
+	asm volatile(LOCK_PREFIX "   decl (%%eax)\n"		\
+		     "   jns 1f	\n"				\
+		     "   call " #fail_fn "\n"			\
+		     "1:\n"					\
+		     : "=a" (dummy)				\
+		     : "a" (count)				\
+		     : "memory", "ecx", "edx");			\
 } while (0)
 
 
@@ -50,8 +48,8 @@
  * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
  * or anything the slow path function returns
  */
-static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+static inline int __mutex_fastpath_lock_retval(atomic_t *count,
+					       int (*fail_fn)(atomic_t *))
 {
 	if (unlikely(atomic_dec_return(count) < 0))
 		return fail_fn(count);
@@ -72,22 +70,20 @@
  * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
  * to return 0 otherwise.
  */
-#define __mutex_fastpath_unlock(count, fail_fn)				\
-do {									\
-	unsigned int dummy;						\
-									\
-	typecheck(atomic_t *, count);					\
+#define __mutex_fastpath_unlock(count, fail_fn)			\
+do {								\
+	unsigned int dummy;					\
+								\
+	typecheck(atomic_t *, count);				\
 	typecheck_fn(void (*)(atomic_t *), fail_fn);		\
-									\
-	__asm__ __volatile__(						\
-		LOCK_PREFIX "   incl (%%eax)	\n"			\
-			"   jg	1f		\n"			\
-			"   call "#fail_fn"	\n"			\
-			"1:			\n"			\
-									\
-		:"=a" (dummy)						\
-		: "a" (count)						\
-		: "memory", "ecx", "edx");				\
+								\
+	asm volatile(LOCK_PREFIX "   incl (%%eax)\n"		\
+		     "   jg	1f\n"				\
+		     "   call " #fail_fn "\n"			\
+		     "1:\n"					\
+		     : "=a" (dummy)				\
+		     : "a" (count)				\
+		     : "memory", "ecx", "edx");			\
 } while (0)
 
 #define __mutex_slowpath_needs_to_unlock()	1
@@ -104,8 +100,8 @@
  * Additionally, if the value was < 0 originally, this function must not leave
  * it to 0 on failure.
  */
-static inline int
-__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+static inline int __mutex_fastpath_trylock(atomic_t *count,
+					   int (*fail_fn)(atomic_t *))
 {
 	/*
 	 * We have two variants here. The cmpxchg based one is the best one
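
Note: the asm fastpath above is an atomic decrement plus a conditional call: LOCK decl leaves
the sign flag saying whether the count went negative, and jns skips the slowpath call when it
did not. A C-level equivalent, illustrative only (it restates what
__mutex_fastpath_lock_retval() already spells out):

	/* Illustrative: semantics of the locking fastpath. */
	static inline void mutex_lock_sketch(atomic_t *count,
					     void (*fail_fn)(atomic_t *))
	{
		if (atomic_dec_return(count) < 0)	/* contended */
			fail_fn(count);			/* take the slowpath */
	}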
diff --git a/include/asm-x86/mutex_64.h b/include/asm-x86/mutex_64.h
index 6c2949a..f3fae9b 100644
--- a/include/asm-x86/mutex_64.h
+++ b/include/asm-x86/mutex_64.h
@@ -16,23 +16,21 @@
  *
  * Atomically decrements @v and calls <fail_fn> if the result is negative.
  */
-#define __mutex_fastpath_lock(v, fail_fn)				\
-do {									\
-	unsigned long dummy;						\
-									\
-	typecheck(atomic_t *, v);					\
-	typecheck_fn(void (*)(atomic_t *), fail_fn);			\
-									\
-	__asm__ __volatile__(						\
-		LOCK_PREFIX "   decl (%%rdi)	\n"			\
-			"   jns 1f		\n"			\
-			"   call "#fail_fn"	\n"			\
-			"1:"						\
-									\
-		:"=D" (dummy)						\
-		: "D" (v)						\
-		: "rax", "rsi", "rdx", "rcx",				\
-		  "r8", "r9", "r10", "r11", "memory");			\
+#define __mutex_fastpath_lock(v, fail_fn)			\
+do {								\
+	unsigned long dummy;					\
+								\
+	typecheck(atomic_t *, v);				\
+	typecheck_fn(void (*)(atomic_t *), fail_fn);		\
+								\
+	asm volatile(LOCK_PREFIX "   decl (%%rdi)\n"		\
+		     "   jns 1f		\n"			\
+		     "   call " #fail_fn "\n"			\
+		     "1:"					\
+		     : "=D" (dummy)				\
+		     : "D" (v)					\
+		     : "rax", "rsi", "rdx", "rcx",		\
+		       "r8", "r9", "r10", "r11", "memory");	\
 } while (0)
 
 /**
@@ -45,9 +43,8 @@
  * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
  * or anything the slow path function returns
  */
-static inline int
-__mutex_fastpath_lock_retval(atomic_t *count,
-			     int (*fail_fn)(atomic_t *))
+static inline int __mutex_fastpath_lock_retval(atomic_t *count,
+					       int (*fail_fn)(atomic_t *))
 {
 	if (unlikely(atomic_dec_return(count) < 0))
 		return fail_fn(count);
@@ -62,23 +59,21 @@
  *
  * Atomically increments @v and calls <fail_fn> if the result is nonpositive.
  */
-#define __mutex_fastpath_unlock(v, fail_fn)				\
-do {									\
-	unsigned long dummy;						\
-									\
-	typecheck(atomic_t *, v);					\
-	typecheck_fn(void (*)(atomic_t *), fail_fn);			\
-									\
-	__asm__ __volatile__(						\
-		LOCK_PREFIX "   incl (%%rdi)	\n"			\
-			"   jg 1f		\n"			\
-			"   call "#fail_fn"	\n"			\
-			"1:			  "			\
-									\
-		:"=D" (dummy)						\
-		: "D" (v)						\
-		: "rax", "rsi", "rdx", "rcx",				\
-		  "r8", "r9", "r10", "r11", "memory");			\
+#define __mutex_fastpath_unlock(v, fail_fn)			\
+do {								\
+	unsigned long dummy;					\
+								\
+	typecheck(atomic_t *, v);				\
+	typecheck_fn(void (*)(atomic_t *), fail_fn);		\
+								\
+	asm volatile(LOCK_PREFIX "   incl (%%rdi)\n"		\
+		     "   jg 1f\n"				\
+		     "   call " #fail_fn "\n"			\
+		     "1:"					\
+		     : "=D" (dummy)				\
+		     : "D" (v)					\
+		     : "rax", "rsi", "rdx", "rcx",		\
+		       "r8", "r9", "r10", "r11", "memory");	\
 } while (0)
 
 #define __mutex_slowpath_needs_to_unlock()	1
@@ -93,8 +88,8 @@
  * if it wasn't 1 originally. [the fallback function is never used on
  * x86_64, because all x86_64 CPUs have a CMPXCHG instruction.]
  */
-static inline int
-__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+static inline int __mutex_fastpath_trylock(atomic_t *count,
+					   int (*fail_fn)(atomic_t *))
 {
 	if (likely(atomic_cmpxchg(count, 1, 0) == 1))
 		return 1;
diff --git a/include/asm-x86/nmi.h b/include/asm-x86/nmi.h
index 53ccac1..1e36302 100644
--- a/include/asm-x86/nmi.h
+++ b/include/asm-x86/nmi.h
@@ -1,5 +1,93 @@
-#ifdef CONFIG_X86_32
-# include "nmi_32.h"
+#ifndef _ASM_X86_NMI_H_
+#define _ASM_X86_NMI_H_
+
+#include <linux/pm.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+
+#ifdef ARCH_HAS_NMI_WATCHDOG
+
+/**
+ * do_nmi_callback
+ *
+ * Check to see if a callback exists and execute it.  Return 1
+ * if the handler exists and was handled successfully.
+ */
+int do_nmi_callback(struct pt_regs *regs, int cpu);
+
+#ifdef CONFIG_PM
+
+/** Replace the PM callback routine for NMI. */
+struct pm_dev *set_nmi_pm_callback(pm_callback callback);
+
+/** Unset the PM callback routine back to the default. */
+void unset_nmi_pm_callback(struct pm_dev *dev);
+
 #else
-# include "nmi_64.h"
+
+static inline struct pm_dev *set_nmi_pm_callback(pm_callback callback)
+{
+	return 0;
+}
+
+static inline void unset_nmi_pm_callback(struct pm_dev *dev)
+{
+}
+
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_X86_64
+extern void default_do_nmi(struct pt_regs *);
+extern void die_nmi(char *str, struct pt_regs *regs, int do_panic);
+extern void nmi_watchdog_default(void);
+#else
+#define nmi_watchdog_default() do {} while (0)
+#endif
+
+extern int check_nmi_watchdog(void);
+extern int nmi_watchdog_enabled;
+extern int unknown_nmi_panic;
+extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
+extern int avail_to_resrv_perfctr_nmi(unsigned int);
+extern int reserve_perfctr_nmi(unsigned int);
+extern void release_perfctr_nmi(unsigned int);
+extern int reserve_evntsel_nmi(unsigned int);
+extern void release_evntsel_nmi(unsigned int);
+
+extern void setup_apic_nmi_watchdog(void *);
+extern void stop_apic_nmi_watchdog(void *);
+extern void disable_timer_nmi_watchdog(void);
+extern void enable_timer_nmi_watchdog(void);
+extern int nmi_watchdog_tick(struct pt_regs *regs, unsigned reason);
+
+extern atomic_t nmi_active;
+extern unsigned int nmi_watchdog;
+#define NMI_DISABLED    -1
+#define NMI_NONE	0
+#define NMI_IO_APIC	1
+#define NMI_LOCAL_APIC	2
+#define NMI_INVALID	3
+#define NMI_DEFAULT	NMI_DISABLED
+
+struct ctl_table;
+struct file;
+extern int proc_nmi_enabled(struct ctl_table *, int , struct file *,
+			void __user *, size_t *, loff_t *);
+extern int unknown_nmi_panic;
+
+void __trigger_all_cpu_backtrace(void);
+#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
+
+#endif
+
+void lapic_watchdog_stop(void);
+int lapic_watchdog_init(unsigned nmi_hz);
+int lapic_wd_event(unsigned nmi_hz);
+unsigned lapic_adjust_nmi_hz(unsigned hz);
+int lapic_watchdog_ok(void);
+void disable_lapic_nmi_watchdog(void);
+void enable_lapic_nmi_watchdog(void);
+void stop_nmi(void);
+void restart_nmi(void);
+
 #endif
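
Note: the reserve_*/release_* pairs above are how the NMI watchdog and profilers arbitrate
ownership of the performance-counter MSRs; reserve returns nonzero on success. A hedged sketch
(MSR_K7_PERFCTR0/MSR_K7_EVNTSEL0 come from msr-index.h, not this header):

	/* Illustrative: claim a counter/event-select pair before programming. */
	if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0))
		return -EBUSY;
	if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0)) {
		release_perfctr_nmi(MSR_K7_PERFCTR0);
		return -EBUSY;
	}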
diff --git a/include/asm-x86/nmi_32.h b/include/asm-x86/nmi_32.h
deleted file mode 100644
index 7206c7e..0000000
--- a/include/asm-x86/nmi_32.h
+++ /dev/null
@@ -1,61 +0,0 @@
-#ifndef ASM_NMI_H
-#define ASM_NMI_H
-
-#include <linux/pm.h>
-#include <asm/irq.h>
-
-#ifdef ARCH_HAS_NMI_WATCHDOG
-
-/**
- * do_nmi_callback
- *
- * Check to see if a callback exists and execute it.  Return 1
- * if the handler exists and was handled successfully.
- */
-int do_nmi_callback(struct pt_regs *regs, int cpu);
-
-extern int nmi_watchdog_enabled;
-extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
-extern int avail_to_resrv_perfctr_nmi(unsigned int);
-extern int reserve_perfctr_nmi(unsigned int);
-extern void release_perfctr_nmi(unsigned int);
-extern int reserve_evntsel_nmi(unsigned int);
-extern void release_evntsel_nmi(unsigned int);
-
-extern void setup_apic_nmi_watchdog (void *);
-extern void stop_apic_nmi_watchdog (void *);
-extern void disable_timer_nmi_watchdog(void);
-extern void enable_timer_nmi_watchdog(void);
-extern int nmi_watchdog_tick (struct pt_regs * regs, unsigned reason);
-
-extern atomic_t nmi_active;
-extern unsigned int nmi_watchdog;
-#define NMI_DISABLED    -1
-#define NMI_NONE	0
-#define NMI_IO_APIC	1
-#define NMI_LOCAL_APIC	2
-#define NMI_INVALID	3
-#define NMI_DEFAULT	NMI_DISABLED
-
-struct ctl_table;
-struct file;
-extern int proc_nmi_enabled(struct ctl_table *, int , struct file *,
-			void __user *, size_t *, loff_t *);
-extern int unknown_nmi_panic;
-
-void __trigger_all_cpu_backtrace(void);
-#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
-
-#endif
-
-void lapic_watchdog_stop(void);
-int lapic_watchdog_init(unsigned nmi_hz);
-int lapic_wd_event(unsigned nmi_hz);
-unsigned lapic_adjust_nmi_hz(unsigned hz);
-int lapic_watchdog_ok(void);
-void disable_lapic_nmi_watchdog(void);
-void enable_lapic_nmi_watchdog(void);
-void stop_nmi(void);
-void restart_nmi(void);
-
-#endif /* ASM_NMI_H */
diff --git a/include/asm-x86/nmi_64.h b/include/asm-x86/nmi_64.h
deleted file mode 100644
index 2eeb74e..0000000
--- a/include/asm-x86/nmi_64.h
+++ /dev/null
@@ -1,90 +0,0 @@
-#ifndef ASM_NMI_H
-#define ASM_NMI_H
-
-#include <linux/pm.h>
-#include <asm/io.h>
- 
-/**
- * do_nmi_callback
- *
- * Check to see if a callback exists and execute it.  Return 1
- * if the handler exists and was handled successfully.
- */
-int do_nmi_callback(struct pt_regs *regs, int cpu);
-
-#ifdef CONFIG_PM
- 
-/** Replace the PM callback routine for NMI. */
-struct pm_dev * set_nmi_pm_callback(pm_callback callback);
-
-/** Unset the PM callback routine back to the default. */
-void unset_nmi_pm_callback(struct pm_dev * dev);
-
-#else
-
-static inline struct pm_dev * set_nmi_pm_callback(pm_callback callback)
-{
-	return 0;
-} 
- 
-static inline void unset_nmi_pm_callback(struct pm_dev * dev)
-{
-}
-
-#endif /* CONFIG_PM */
- 
-extern void default_do_nmi(struct pt_regs *);
-extern void die_nmi(char *str, struct pt_regs *regs, int do_panic);
-
-#define get_nmi_reason() inb(0x61)
-
-extern int unknown_nmi_panic;
-extern int nmi_watchdog_enabled;
-
-extern int check_nmi_watchdog(void);
-extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
-extern int avail_to_resrv_perfctr_nmi(unsigned int);
-extern int reserve_perfctr_nmi(unsigned int);
-extern void release_perfctr_nmi(unsigned int);
-extern int reserve_evntsel_nmi(unsigned int);
-extern void release_evntsel_nmi(unsigned int);
-
-extern void setup_apic_nmi_watchdog (void *);
-extern void stop_apic_nmi_watchdog (void *);
-extern void disable_timer_nmi_watchdog(void);
-extern void enable_timer_nmi_watchdog(void);
-extern int nmi_watchdog_tick (struct pt_regs * regs, unsigned reason);
-
-extern void nmi_watchdog_default(void);
-
-extern atomic_t nmi_active;
-extern unsigned int nmi_watchdog;
-#define NMI_DISABLED    -1
-#define NMI_NONE	0
-#define NMI_IO_APIC	1
-#define NMI_LOCAL_APIC	2
-#define NMI_INVALID	3
-#define NMI_DEFAULT	NMI_DISABLED
-
-struct ctl_table;
-struct file;
-extern int proc_nmi_enabled(struct ctl_table *, int , struct file *,
-			void __user *, size_t *, loff_t *);
-
-extern int unknown_nmi_panic;
-
-void __trigger_all_cpu_backtrace(void);
-#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
-
-
-void lapic_watchdog_stop(void);
-int lapic_watchdog_init(unsigned nmi_hz);
-int lapic_wd_event(unsigned nmi_hz);
-unsigned lapic_adjust_nmi_hz(unsigned hz);
-int lapic_watchdog_ok(void);
-void disable_lapic_nmi_watchdog(void);
-void enable_lapic_nmi_watchdog(void);
-void stop_nmi(void);
-void restart_nmi(void);
-
-#endif /* ASM_NMI_H */
diff --git a/include/asm-x86/nops.h b/include/asm-x86/nops.h
index b3930ae5..ad0bedd 100644
--- a/include/asm-x86/nops.h
+++ b/include/asm-x86/nops.h
@@ -5,6 +5,8 @@
 
 /* generic versions from gas
    1: nop
+   the following instructions are NOT nops in 64-bit mode,
+   for 64-bit mode use K8 or P6 nops instead
    2: movl %esi,%esi
    3: leal 0x00(%esi),%esi
    4: leal 0x00(,%esi,1),%esi
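
Note: the warning added above exists because in 64-bit mode a 32-bit register write is
architecturally defined to zero-extend, so "movl %esi,%esi" clears bits 63:32 of %rsi and is
not state-preserving. A tiny illustration (inline asm, illustrative only; %k0 is gcc's 32-bit
operand modifier):

	/* Illustrative: this "nop" destroys the upper half of the register. */
	unsigned long v = 0xffffffff00000000UL;
	asm volatile("movl %k0,%k0" : "+r" (v));
	/* v == 0 now: the 32-bit write zero-extended into the full register */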
diff --git a/include/asm-x86/numa_64.h b/include/asm-x86/numa_64.h
index 15fe07c..32c22ae 100644
--- a/include/asm-x86/numa_64.h
+++ b/include/asm-x86/numa_64.h
@@ -1,11 +1,12 @@
-#ifndef _ASM_X8664_NUMA_H 
+#ifndef _ASM_X8664_NUMA_H
 #define _ASM_X8664_NUMA_H 1
 
 #include <linux/nodemask.h>
 #include <asm/apicdef.h>
 
 struct bootnode {
-	u64 start,end; 
+	u64 start;
+	u64 end;
 };
 
 extern int compute_hash_shift(struct bootnode *nodes, int numnodes);
diff --git a/include/asm-x86/numaq.h b/include/asm-x86/numaq.h
index 38f710d..94b86c3 100644
--- a/include/asm-x86/numaq.h
+++ b/include/asm-x86/numaq.h
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2002, IBM Corp.
  *
- * All rights reserved.          
+ * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -33,7 +33,8 @@
 /*
  * SYS_CFG_DATA_PRIV_ADDR, struct eachquadmem, and struct sys_cfg_data are the
  */
-#define SYS_CFG_DATA_PRIV_ADDR		0x0009d000 /* place for scd in private quad space */
+#define SYS_CFG_DATA_PRIV_ADDR		0x0009d000 /* place for scd in private
+						      quad space */
 
 /*
  * Communication area for each processor on lynxer-processor tests.
@@ -139,7 +140,7 @@
 	unsigned int	low_shrd_mem_base; /* 0 or 512MB or 1GB */
 	unsigned int	low_shrd_mem_quad_offset; /* 0,128M,256M,512M,1G */
 					/* may not be totally populated */
-	unsigned int	split_mem_enbl; /* 0 for no low shared memory */ 
+	unsigned int	split_mem_enbl; /* 0 for no low shared memory */
 	unsigned int	mmio_sz; /* Size of total system memory mapped I/O */
 				 /* (in MB). */
 	unsigned int	quad_spin_lock; /* Spare location used for quad */
@@ -152,7 +153,7 @@
 	/*
 	 *	memory configuration area for each quad
 	 */
-        struct	eachquadmem eq[MAX_NUMNODES];	/* indexed by quad id */
+	struct		eachquadmem eq[MAX_NUMNODES];	/* indexed by quad id */
 };
 
 static inline unsigned long *get_zholes_size(int nid)
diff --git a/include/asm-x86/page.h b/include/asm-x86/page.h
index a05b289..6724a4b 100644
--- a/include/asm-x86/page.h
+++ b/include/asm-x86/page.h
@@ -33,10 +33,8 @@
 
 #ifdef CONFIG_X86_64
 #include <asm/page_64.h>
-#define max_pfn_mapped		end_pfn_map
 #else
 #include <asm/page_32.h>
-#define max_pfn_mapped		max_low_pfn
 #endif	/* CONFIG_X86_64 */
 
 #define PAGE_OFFSET		((unsigned long)__PAGE_OFFSET)
@@ -50,6 +48,8 @@
 
 extern int page_is_ram(unsigned long pagenr);
 
+extern unsigned long max_pfn_mapped;
+
 struct page;
 
 static inline void clear_user_page(void *page, unsigned long vaddr,
diff --git a/include/asm-x86/page_32.h b/include/asm-x86/page_32.h
index 5f7257f..424e82f 100644
--- a/include/asm-x86/page_32.h
+++ b/include/asm-x86/page_32.h
@@ -47,7 +47,10 @@
 typedef unsigned long	pgprotval_t;
 typedef unsigned long	phys_addr_t;
 
-typedef union { pteval_t pte, pte_low; } pte_t;
+typedef union {
+	pteval_t pte;
+	pteval_t pte_low;
+} pte_t;
 
 #endif	/* __ASSEMBLY__ */
 #endif	/* CONFIG_X86_PAE */
@@ -61,7 +64,7 @@
 #endif
 
 #ifndef __ASSEMBLY__
-#define __phys_addr(x)		((x)-PAGE_OFFSET)
+#define __phys_addr(x)		((x) - PAGE_OFFSET)
 #define __phys_reloc_hide(x)	RELOC_HIDE((x), 0)
 
 #ifdef CONFIG_FLATMEM
@@ -78,7 +81,7 @@
 extern int sysctl_legacy_va_layout;
 
 #define VMALLOC_RESERVE		((unsigned long)__VMALLOC_RESERVE)
-#define MAXMEM			(-__PAGE_OFFSET-__VMALLOC_RESERVE)
+#define MAXMEM			(-__PAGE_OFFSET - __VMALLOC_RESERVE)
 
 #ifdef CONFIG_X86_USE_3DNOW
 #include <asm/mmx.h>
diff --git a/include/asm-x86/page_64.h b/include/asm-x86/page_64.h
index 1435460..6ea7285 100644
--- a/include/asm-x86/page_64.h
+++ b/include/asm-x86/page_64.h
@@ -5,7 +5,7 @@
 
 #define THREAD_ORDER	1
 #define THREAD_SIZE  (PAGE_SIZE << THREAD_ORDER)
-#define CURRENT_MASK (~(THREAD_SIZE-1))
+#define CURRENT_MASK (~(THREAD_SIZE - 1))
 
 #define EXCEPTION_STACK_ORDER 0
 #define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
@@ -48,10 +48,10 @@
 #define __VIRTUAL_MASK_SHIFT	48
 
 /*
- * Kernel image size is limited to 128 MB (see level2_kernel_pgt in
+ * Kernel image size is limited to 512 MB (see level2_kernel_pgt in
  * arch/x86/kernel/head_64.S), and it is mapped here:
  */
-#define KERNEL_IMAGE_SIZE	(128*1024*1024)
+#define KERNEL_IMAGE_SIZE	(512 * 1024 * 1024)
 #define KERNEL_IMAGE_START	_AC(0xffffffff80000000, UL)
 
 #ifndef __ASSEMBLY__
@@ -59,7 +59,6 @@
 void copy_page(void *to, void *from);
 
 extern unsigned long end_pfn;
-extern unsigned long end_pfn_map;
 extern unsigned long phys_base;
 
 extern unsigned long __phys_addr(unsigned long);
@@ -81,6 +80,9 @@
 
 #define vmemmap ((struct page *)VMEMMAP_START)
 
+extern unsigned long init_memory_mapping(unsigned long start,
+					 unsigned long end);
+
 #endif	/* !__ASSEMBLY__ */
 
 #ifdef CONFIG_FLATMEM
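
Note: the 512 MB figure above is consistent with MODULES_VADDR moving from 0xffffffff88000000 to 0xffffffffa0000000 in the pgtable_64.h hunk later in this series (KERNEL_IMAGE_START + 512 MB). A quick standalone arithmetic check, assuming the image is mapped with 2 MB pages out of the 512-entry level2_kernel_pgt:

#include <assert.h>

int main(void)
{
	unsigned long image_size = 512UL * 1024 * 1024;	/* new KERNEL_IMAGE_SIZE */
	unsigned long pmd_span = 2UL * 1024 * 1024;	/* one 2 MB kernel mapping */

	assert(image_size / pmd_span == 256);	/* half of a 512-entry PMD page */
	return 0;
}
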
diff --git a/include/asm-x86/param.h b/include/asm-x86/param.h
index c996ec4..6f0d042 100644
--- a/include/asm-x86/param.h
+++ b/include/asm-x86/param.h
@@ -3,8 +3,8 @@
 
 #ifdef __KERNEL__
 # define HZ		CONFIG_HZ	/* Internal kernel timer frequency */
-# define USER_HZ	100		/* .. some user interfaces are in "ticks" */
-# define CLOCKS_PER_SEC	(USER_HZ)       /* like times() */
+# define USER_HZ	100		/* some user interfaces are */
+# define CLOCKS_PER_SEC	(USER_HZ)       /* in "ticks" like times() */
 #endif
 
 #ifndef HZ
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h
index d6236eb..3d41939 100644
--- a/include/asm-x86/paravirt.h
+++ b/include/asm-x86/paravirt.h
@@ -231,7 +231,8 @@
 	void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
 			   pte_t *ptep, pte_t pteval);
 	void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
-	void (*pte_update)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+	void (*pte_update)(struct mm_struct *mm, unsigned long addr,
+			   pte_t *ptep);
 	void (*pte_update_defer)(struct mm_struct *mm,
 				 unsigned long addr, pte_t *ptep);
 
@@ -246,7 +247,8 @@
 	void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
 	void (*set_pte_present)(struct mm_struct *mm, unsigned long addr,
 				pte_t *ptep, pte_t pte);
-	void (*pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+	void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
+			  pte_t *ptep);
 	void (*pmd_clear)(pmd_t *pmdp);
 
 #endif	/* CONFIG_X86_PAE */
@@ -274,8 +276,7 @@
 /* This contains all the paravirt structures: we get a convenient
  * number for each function using the offset which we use to indicate
  * what to patch. */
-struct paravirt_patch_template
-{
+struct paravirt_patch_template {
 	struct pv_init_ops pv_init_ops;
 	struct pv_time_ops pv_time_ops;
 	struct pv_cpu_ops pv_cpu_ops;
@@ -660,43 +661,56 @@
 }
 
 /* These should all do BUG_ON(_err), but our headers are too tangled. */
-#define rdmsr(msr,val1,val2) do {		\
+#define rdmsr(msr, val1, val2)			\
+do {						\
 	int _err;				\
 	u64 _l = paravirt_read_msr(msr, &_err);	\
 	val1 = (u32)_l;				\
 	val2 = _l >> 32;			\
-} while(0)
+} while (0)
 
-#define wrmsr(msr,val1,val2) do {		\
+#define wrmsr(msr, val1, val2)			\
+do {						\
 	paravirt_write_msr(msr, val1, val2);	\
-} while(0)
+} while (0)
 
-#define rdmsrl(msr,val) do {			\
+#define rdmsrl(msr, val)			\
+do {						\
 	int _err;				\
 	val = paravirt_read_msr(msr, &_err);	\
-} while(0)
+} while (0)
 
-#define wrmsrl(msr,val)		wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
-#define wrmsr_safe(msr,a,b)	paravirt_write_msr(msr, a, b)
+#define wrmsrl(msr, val)	wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
+#define wrmsr_safe(msr, a, b)	paravirt_write_msr(msr, a, b)
 
 /* rdmsr with exception handling */
-#define rdmsr_safe(msr,a,b) ({			\
+#define rdmsr_safe(msr, a, b)			\
+({						\
 	int _err;				\
 	u64 _l = paravirt_read_msr(msr, &_err);	\
 	(*a) = (u32)_l;				\
 	(*b) = _l >> 32;			\
-	_err; })
+	_err;					\
+})
 
+static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
+{
+	int err;
+
+	*p = paravirt_read_msr(msr, &err);
+	return err;
+}
 
 static inline u64 paravirt_read_tsc(void)
 {
 	return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
 }
 
-#define rdtscl(low) do {			\
+#define rdtscl(low)				\
+do {						\
 	u64 _l = paravirt_read_tsc();		\
 	low = (int)_l;				\
-} while(0)
+} while (0)
 
 #define rdtscll(val) (val = paravirt_read_tsc())
 
@@ -711,11 +725,12 @@
 	return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
 }
 
-#define rdpmc(counter,low,high) do {		\
+#define rdpmc(counter, low, high)		\
+do {						\
 	u64 _l = paravirt_read_pmc(counter);	\
 	low = (u32)_l;				\
 	high = _l >> 32;			\
-} while(0)
+} while (0)
 
 static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
 {
@@ -794,7 +809,8 @@
 }
 
 /* The paravirtualized I/O functions */
-static inline void slow_down_io(void) {
+static inline void slow_down_io(void)
+{
 	pv_cpu_ops.io_delay();
 #ifdef REALLY_SLOW_IO
 	pv_cpu_ops.io_delay();
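
For reference, the reflowed MSR accessors keep their old calling shapes. A hypothetical kernel-context caller (the function name is made up; MSR_IA32_TSC is only a convenient real index) showing the variants side by side, including the new rdmsrl_safe():

static void msr_accessor_demo(void)
{
	unsigned long long val;
	u32 lo, hi;
	int err;

	rdmsr(MSR_IA32_TSC, lo, hi);		/* split 64-bit read */
	rdmsrl(MSR_IA32_TSC, val);		/* combined 64-bit read */
	err = rdmsr_safe(MSR_IA32_TSC, &lo, &hi);	/* 0 on success */
	if (!err)
		err = rdmsrl_safe(MSR_IA32_TSC, &val);	/* new helper above */
	if (!err)
		wrmsr(MSR_IA32_TSC, lo, hi);	/* split 64-bit write */
}
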
diff --git a/include/asm-x86/parport.h b/include/asm-x86/parport.h
index 019cbca..3c4ffeb 100644
--- a/include/asm-x86/parport.h
+++ b/include/asm-x86/parport.h
@@ -1,10 +1,10 @@
 #ifndef _ASM_X86_PARPORT_H
 #define _ASM_X86_PARPORT_H
 
-static int __devinit parport_pc_find_isa_ports (int autoirq, int autodma);
-static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma)
+static int __devinit parport_pc_find_isa_ports(int autoirq, int autodma);
+static int __devinit parport_pc_find_nonpci_ports(int autoirq, int autodma)
 {
-	return parport_pc_find_isa_ports (autoirq, autodma);
+	return parport_pc_find_isa_ports(autoirq, autodma);
 }
 
 #endif /* _ASM_X86_PARPORT_H */
diff --git a/include/asm-x86/pat.h b/include/asm-x86/pat.h
new file mode 100644
index 0000000..8b822b5
--- /dev/null
+++ b/include/asm-x86/pat.h
@@ -0,0 +1,16 @@
+
+#ifndef _ASM_PAT_H
+#define _ASM_PAT_H 1
+
+#include <linux/types.h>
+
+extern int pat_wc_enabled;
+
+extern void pat_init(void);
+
+extern int reserve_memtype(u64 start, u64 end,
+		unsigned long req_type, unsigned long *ret_type);
+extern int free_memtype(u64 start, u64 end);
+
+#endif
+
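
A minimal kernel-context sketch of how the new interface is meant to be called (the function is hypothetical; _PAGE_CACHE_WC comes from the pgtable.h hunk later in this series, and reserve_memtype() may hand back a weaker type than requested):

static int example_map_wc(u64 start, u64 end)
{
	unsigned long got;
	int ret;

	ret = reserve_memtype(start, end, _PAGE_CACHE_WC, &got);
	if (ret)
		return ret;		/* conflicts with an existing type */
	if (got != _PAGE_CACHE_WC)
		pr_debug("memtype downgraded to %lx\n", got);	/* e.g. UC- */
	/* ... map the range using 'got' as the cache attribute ... */
	return free_memtype(start, end);
}
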
diff --git a/include/asm-x86/pci-direct.h b/include/asm-x86/pci-direct.h
index 6823fa4..5b21485 100644
--- a/include/asm-x86/pci-direct.h
+++ b/include/asm-x86/pci-direct.h
@@ -4,7 +4,7 @@
 #include <linux/types.h>
 
 /* Direct PCI access. This is used for PCI accesses in early boot before
-   the PCI subsystem works. */ 
+   the PCI subsystem works. */
 
 extern u32 read_pci_config(u8 bus, u8 slot, u8 func, u8 offset);
 extern u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset);
diff --git a/include/asm-x86/pci.h b/include/asm-x86/pci.h
index c61190c..ddd8e24 100644
--- a/include/asm-x86/pci.h
+++ b/include/asm-x86/pci.h
@@ -8,14 +8,13 @@
 #include <asm/scatterlist.h>
 #include <asm/io.h>
 
-
 #ifdef __KERNEL__
 
 struct pci_sysdata {
 	int		domain;		/* PCI domain */
 	int		node;		/* NUMA node */
 #ifdef CONFIG_X86_64
-	void*		iommu;		/* IOMMU private data */
+	void		*iommu;		/* IOMMU private data */
 #endif
 };
 
@@ -52,7 +51,7 @@
 #define PCIBIOS_MIN_CARDBUS_IO	0x4000
 
 void pcibios_config_init(void);
-struct pci_bus * pcibios_scan_root(int bus);
+struct pci_bus *pcibios_scan_root(int bus);
 
 void pcibios_set_master(struct pci_dev *dev);
 void pcibios_penalize_isa_irq(int irq, int active);
@@ -62,7 +61,8 @@
 
 #define HAVE_PCI_MMAP
 extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-			       enum pci_mmap_state mmap_state, int write_combine);
+			       enum pci_mmap_state mmap_state,
+			       int write_combine);
 
 
 #ifdef CONFIG_PCI
diff --git a/include/asm-x86/pci_64.h b/include/asm-x86/pci_64.h
index 3746903..df867e5 100644
--- a/include/asm-x86/pci_64.h
+++ b/include/asm-x86/pci_64.h
@@ -1,12 +1,10 @@
 #ifndef __x8664_PCI_H
 #define __x8664_PCI_H
 
-
 #ifdef __KERNEL__
 
-
 #ifdef CONFIG_CALGARY_IOMMU
-static inline void* pci_iommu(struct pci_bus *bus)
+static inline void *pci_iommu(struct pci_bus *bus)
 {
 	struct pci_sysdata *sd = bus->sysdata;
 	return sd->iommu;
@@ -19,11 +17,10 @@
 }
 #endif /* CONFIG_CALGARY_IOMMU */
 
-
-extern int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value);
-extern int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int len, u32 value);
-
-
+extern int (*pci_config_read)(int seg, int bus, int dev, int fn,
+			      int reg, int len, u32 *value);
+extern int (*pci_config_write)(int seg, int bus, int dev, int fn,
+			       int reg, int len, u32 value);
 
 extern void pci_iommu_alloc(void);
 
@@ -65,5 +62,4 @@
 
 #endif /* __KERNEL__ */
 
-
 #endif /* __x8664_PCI_H */
diff --git a/include/asm-x86/pda.h b/include/asm-x86/pda.h
index c0305bf..101fb9e 100644
--- a/include/asm-x86/pda.h
+++ b/include/asm-x86/pda.h
@@ -22,7 +22,6 @@
 					   offset 40!!! */
 #endif
 	char *irqstackptr;
-	unsigned int nodenumber;	/* number of current node */
 	unsigned int __softirq_pending;
 	unsigned int __nmi_count;	/* number of NMI on this CPUs */
 	short mmu_state;
@@ -58,34 +57,36 @@
 
 #define pda_offset(field) offsetof(struct x8664_pda, field)
 
-#define pda_to_op(op, field, val) do {		\
-	typedef typeof(_proxy_pda.field) T__;	\
-	if (0) { T__ tmp__; tmp__ = (val); }	/* type checking */ \
-	switch (sizeof(_proxy_pda.field)) {	\
-	case 2:					\
-		asm(op "w %1,%%gs:%c2" :	\
-		    "+m" (_proxy_pda.field) :	\
-		    "ri" ((T__)val),		\
-		    "i"(pda_offset(field)));	\
-		break;				\
-	case 4:					\
-		asm(op "l %1,%%gs:%c2" :	\
-		    "+m" (_proxy_pda.field) :	\
-		    "ri" ((T__)val),		\
-		    "i" (pda_offset(field)));	\
-		break;				\
-	case 8:					\
-		asm(op "q %1,%%gs:%c2":		\
-		    "+m" (_proxy_pda.field) :	\
-		    "ri" ((T__)val),		\
-		    "i"(pda_offset(field)));	\
-		break;				\
-	default:				\
-		__bad_pda_field();		\
-	}					\
-	} while (0)
+#define pda_to_op(op, field, val)					\
+do {									\
+	typedef typeof(_proxy_pda.field) T__;				\
+	if (0) { T__ tmp__; tmp__ = (val); }	/* type checking */	\
+	switch (sizeof(_proxy_pda.field)) {				\
+	case 2:								\
+		asm(op "w %1,%%gs:%c2" :				\
+		    "+m" (_proxy_pda.field) :				\
+		    "ri" ((T__)val),					\
+		    "i"(pda_offset(field)));				\
+		break;							\
+	case 4:								\
+		asm(op "l %1,%%gs:%c2" :				\
+		    "+m" (_proxy_pda.field) :				\
+		    "ri" ((T__)val),					\
+		    "i" (pda_offset(field)));				\
+		break;							\
+	case 8:								\
+		asm(op "q %1,%%gs:%c2":					\
+		    "+m" (_proxy_pda.field) :				\
+		    "ri" ((T__)val),					\
+		    "i"(pda_offset(field)));				\
+		break;							\
+	default:							\
+		__bad_pda_field();					\
+	}								\
+} while (0)
 
-#define pda_from_op(op,field) ({		\
+#define pda_from_op(op, field)			\
+({						\
 	typeof(_proxy_pda.field) ret__;		\
 	switch (sizeof(_proxy_pda.field)) {	\
 	case 2:					\
@@ -93,23 +94,24 @@
 		    "=r" (ret__) :		\
 		    "i" (pda_offset(field)),	\
 		    "m" (_proxy_pda.field));	\
-		 break;				\
+		break;				\
 	case 4:					\
 		asm(op "l %%gs:%c1,%0":		\
 		    "=r" (ret__):		\
 		    "i" (pda_offset(field)),	\
 		    "m" (_proxy_pda.field));	\
-		 break;				\
+		break;				\
 	case 8:					\
 		asm(op "q %%gs:%c1,%0":		\
 		    "=r" (ret__) :		\
 		    "i" (pda_offset(field)),	\
 		    "m" (_proxy_pda.field));	\
-		 break;				\
+		break;				\
 	default:				\
 		__bad_pda_field();		\
-       }					\
-       ret__; })
+	}					\
+	ret__;					\
+})
 
 #define read_pda(field)		pda_from_op("mov", field)
 #define write_pda(field, val)	pda_to_op("mov", field, val)
@@ -118,12 +120,13 @@
 #define or_pda(field, val)	pda_to_op("or", field, val)
 
 /* This is not atomic against other CPUs -- CPU preemption needs to be off */
-#define test_and_clear_bit_pda(bit, field) ({		\
-	int old__;						\
-	asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0"		\
-	    : "=r" (old__), "+m" (_proxy_pda.field)		\
-	    : "dIr" (bit), "i" (pda_offset(field)) : "memory");	\
-	old__;							\
+#define test_and_clear_bit_pda(bit, field)				\
+({									\
+	int old__;							\
+	asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0"			\
+		     : "=r" (old__), "+m" (_proxy_pda.field)		\
+		     : "dIr" (bit), "i" (pda_offset(field)) : "memory");\
+	old__;								\
 })
 
 #endif
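
A short hypothetical caller of the accessors generated from the reflowed macros (__nmi_count is a real x8664_pda field; the function itself is illustrative):

static void pda_accessor_demo(void)
{
	unsigned int nmis;

	nmis = read_pda(__nmi_count);		/* one gs-relative mov */
	write_pda(__nmi_count, nmis + 1);	/* one gs-relative mov */
}
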
diff --git a/include/asm-x86/percpu.h b/include/asm-x86/percpu.h
index 0dec00f..736fc3b 100644
--- a/include/asm-x86/percpu.h
+++ b/include/asm-x86/percpu.h
@@ -85,58 +85,62 @@
  * don't give an lvalue though). */
 extern void __bad_percpu_size(void);
 
-#define percpu_to_op(op,var,val)				\
-	do {							\
-		typedef typeof(var) T__;			\
-		if (0) { T__ tmp__; tmp__ = (val); }		\
-		switch (sizeof(var)) {				\
-		case 1:						\
-			asm(op "b %1,"__percpu_seg"%0"		\
-			    : "+m" (var)			\
-			    :"ri" ((T__)val));			\
-			break;					\
-		case 2:						\
-			asm(op "w %1,"__percpu_seg"%0"		\
-			    : "+m" (var)			\
-			    :"ri" ((T__)val));			\
-			break;					\
-		case 4:						\
-			asm(op "l %1,"__percpu_seg"%0"		\
-			    : "+m" (var)			\
-			    :"ri" ((T__)val));			\
-			break;					\
-		default: __bad_percpu_size();			\
-		}						\
-	} while (0)
+#define percpu_to_op(op, var, val)			\
+do {							\
+	typedef typeof(var) T__;			\
+	if (0) {					\
+		T__ tmp__;				\
+		tmp__ = (val);				\
+	}						\
+	switch (sizeof(var)) {				\
+	case 1:						\
+		asm(op "b %1,"__percpu_seg"%0"		\
+		    : "+m" (var)			\
+		    : "ri" ((T__)val));			\
+		break;					\
+	case 2:						\
+		asm(op "w %1,"__percpu_seg"%0"		\
+		    : "+m" (var)			\
+		    : "ri" ((T__)val));			\
+		break;					\
+	case 4:						\
+		asm(op "l %1,"__percpu_seg"%0"		\
+		    : "+m" (var)			\
+		    : "ri" ((T__)val));			\
+		break;					\
+	default: __bad_percpu_size();			\
+	}						\
+} while (0)
 
-#define percpu_from_op(op,var)					\
-	({							\
-		typeof(var) ret__;				\
-		switch (sizeof(var)) {				\
-		case 1:						\
-			asm(op "b "__percpu_seg"%1,%0"		\
-			    : "=r" (ret__)			\
-			    : "m" (var));			\
-			break;					\
-		case 2:						\
-			asm(op "w "__percpu_seg"%1,%0"		\
-			    : "=r" (ret__)			\
-			    : "m" (var));			\
-			break;					\
-		case 4:						\
-			asm(op "l "__percpu_seg"%1,%0"		\
-			    : "=r" (ret__)			\
-			    : "m" (var));			\
-			break;					\
-		default: __bad_percpu_size();			\
-		}						\
-		ret__; })
+#define percpu_from_op(op, var)				\
+({							\
+	typeof(var) ret__;				\
+	switch (sizeof(var)) {				\
+	case 1:						\
+		asm(op "b "__percpu_seg"%1,%0"		\
+		    : "=r" (ret__)			\
+		    : "m" (var));			\
+		break;					\
+	case 2:						\
+		asm(op "w "__percpu_seg"%1,%0"		\
+		    : "=r" (ret__)			\
+		    : "m" (var));			\
+		break;					\
+	case 4:						\
+		asm(op "l "__percpu_seg"%1,%0"		\
+		    : "=r" (ret__)			\
+		    : "m" (var));			\
+		break;					\
+	default: __bad_percpu_size();			\
+	}						\
+	ret__;						\
+})
 
 #define x86_read_percpu(var) percpu_from_op("mov", per_cpu__##var)
-#define x86_write_percpu(var,val) percpu_to_op("mov", per_cpu__##var, val)
-#define x86_add_percpu(var,val) percpu_to_op("add", per_cpu__##var, val)
-#define x86_sub_percpu(var,val) percpu_to_op("sub", per_cpu__##var, val)
-#define x86_or_percpu(var,val) percpu_to_op("or", per_cpu__##var, val)
+#define x86_write_percpu(var, val) percpu_to_op("mov", per_cpu__##var, val)
+#define x86_add_percpu(var, val) percpu_to_op("add", per_cpu__##var, val)
+#define x86_sub_percpu(var, val) percpu_to_op("sub", per_cpu__##var, val)
+#define x86_or_percpu(var, val) percpu_to_op("or", per_cpu__##var, val)
 #endif /* !__ASSEMBLY__ */
 #endif /* !CONFIG_X86_64 */
 #endif /* _ASM_X86_PERCPU_H_ */
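
Hypothetical usage of the 32-bit accessors above (the per-cpu variable is made up; DEFINE_PER_CPU is the standard way to create something the per_cpu__##var paste can reach):

DEFINE_PER_CPU(int, demo_counter);	/* hypothetical variable */

static void bump_demo_counter(void)
{
	/* expands to one segment-relative addl, no preempt_disable() */
	x86_add_percpu(demo_counter, 1);
}
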
diff --git a/include/asm-x86/pgtable-2level.h b/include/asm-x86/pgtable-2level.h
index 701404f..46bc52c 100644
--- a/include/asm-x86/pgtable-2level.h
+++ b/include/asm-x86/pgtable-2level.h
@@ -26,7 +26,8 @@
 	native_set_pte(ptep, pte);
 }
 
-static inline void native_set_pte_present(struct mm_struct *mm, unsigned long addr,
+static inline void native_set_pte_present(struct mm_struct *mm,
+					  unsigned long addr,
 					  pte_t *ptep, pte_t pte)
 {
 	native_set_pte(ptep, pte);
@@ -37,7 +38,8 @@
 	native_set_pmd(pmdp, __pmd(0));
 }
 
-static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *xp)
+static inline void native_pte_clear(struct mm_struct *mm,
+				    unsigned long addr, pte_t *xp)
 {
 	*xp = native_make_pte(0);
 }
@@ -61,16 +63,18 @@
  */
 #define PTE_FILE_MAX_BITS	29
 
-#define pte_to_pgoff(pte) \
-	((((pte).pte_low >> 1) & 0x1f ) + (((pte).pte_low >> 8) << 5 ))
+#define pte_to_pgoff(pte)						\
+	((((pte).pte_low >> 1) & 0x1f) + (((pte).pte_low >> 8) << 5))
 
-#define pgoff_to_pte(off) \
-	((pte_t) { .pte_low = (((off) & 0x1f) << 1) + (((off) >> 5) << 8) + _PAGE_FILE })
+#define pgoff_to_pte(off)						\
+	((pte_t) { .pte_low = (((off) & 0x1f) << 1) +			\
+			(((off) >> 5) << 8) + _PAGE_FILE })
 
 /* Encode and de-code a swap entry */
 #define __swp_type(x)			(((x).val >> 1) & 0x1f)
 #define __swp_offset(x)			((x).val >> 8)
-#define __swp_entry(type, offset)	((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
+#define __swp_entry(type, offset)				\
+	((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
 #define __pte_to_swp_entry(pte)		((swp_entry_t) { (pte).pte_low })
 #define __swp_entry_to_pte(x)		((pte_t) { .pte = (x).val })
 
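
The nonlinear-file encoding above packs a 29-bit page offset around the reserved bits: bit 0 (present) stays clear, bit 6 carries _PAGE_FILE, bits 1-5 hold the low five offset bits and bits 8-31 the remaining 24, hence PTE_FILE_MAX_BITS = 29. A standalone round-trip check (illustrative only):

#include <assert.h>

#define EX_PAGE_FILE	0x40UL	/* bit 6, as in the header */

int main(void)
{
	unsigned long off = 0x1234, pte_low, back;

	pte_low = ((off & 0x1f) << 1) + ((off >> 5) << 8) + EX_PAGE_FILE;
	back = ((pte_low >> 1) & 0x1f) + ((pte_low >> 8) << 5);
	assert(pte_low == 0x9168 && back == off);
	return 0;
}
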
diff --git a/include/asm-x86/pgtable-3level.h b/include/asm-x86/pgtable-3level.h
index 1d763ee..8b4a9d4 100644
--- a/include/asm-x86/pgtable-3level.h
+++ b/include/asm-x86/pgtable-3level.h
@@ -8,22 +8,26 @@
  * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
  */
 
-#define pte_ERROR(e) \
-	printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
-#define pmd_ERROR(e) \
-	printk("%s:%d: bad pmd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
-#define pgd_ERROR(e) \
-	printk("%s:%d: bad pgd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))
-
+#define pte_ERROR(e)							\
+	printk("%s:%d: bad pte %p(%08lx%08lx).\n",			\
+	       __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
+#define pmd_ERROR(e)							\
+	printk("%s:%d: bad pmd %p(%016Lx).\n",				\
+	       __FILE__, __LINE__, &(e), pmd_val(e))
+#define pgd_ERROR(e)							\
+	printk("%s:%d: bad pgd %p(%016Lx).\n",				\
+	       __FILE__, __LINE__, &(e), pgd_val(e))
 
 static inline int pud_none(pud_t pud)
 {
 	return pud_val(pud) == 0;
 }
+
 static inline int pud_bad(pud_t pud)
 {
 	return (pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0;
 }
+
 static inline int pud_present(pud_t pud)
 {
 	return pud_val(pud) & _PAGE_PRESENT;
@@ -48,7 +52,8 @@
  * we are justified in merely clearing the PTE present bit, followed
  * by a set.  The ordering here is important.
  */
-static inline void native_set_pte_present(struct mm_struct *mm, unsigned long addr,
+static inline void native_set_pte_present(struct mm_struct *mm,
+					  unsigned long addr,
 					  pte_t *ptep, pte_t pte)
 {
 	ptep->pte_low = 0;
@@ -60,15 +65,17 @@
 
 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
 {
-	set_64bit((unsigned long long *)(ptep),native_pte_val(pte));
+	set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
 }
+
 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
-	set_64bit((unsigned long long *)(pmdp),native_pmd_val(pmd));
+	set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
 }
+
 static inline void native_set_pud(pud_t *pudp, pud_t pud)
 {
-	set_64bit((unsigned long long *)(pudp),native_pud_val(pud));
+	set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
 }
 
 /*
@@ -76,7 +83,8 @@
  * entry, so clear the bottom half first and enforce ordering with a compiler
  * barrier.
  */
-static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
+				    pte_t *ptep)
 {
 	ptep->pte_low = 0;
 	smp_wmb();
@@ -107,20 +115,19 @@
 	 * current pgd to avoid unnecessary TLB flushes.
 	 */
 	pgd = read_cr3();
-	if (__pa(pudp) >= pgd && __pa(pudp) < (pgd + sizeof(pgd_t)*PTRS_PER_PGD))
+	if (__pa(pudp) >= pgd && __pa(pudp) <
+	    (pgd + sizeof(pgd_t)*PTRS_PER_PGD))
 		write_cr3(pgd);
 }
 
-#define pud_page(pud) \
-((struct page *) __va(pud_val(pud) & PAGE_MASK))
+#define pud_page(pud) ((struct page *) __va(pud_val(pud) & PAGE_MASK))
 
-#define pud_page_vaddr(pud) \
-((unsigned long) __va(pud_val(pud) & PAGE_MASK))
+#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PAGE_MASK))
 
 
 /* Find an entry in the second-level page table.. */
-#define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
-			pmd_index(address))
+#define pmd_offset(pud, address) ((pmd_t *)pud_page(*(pud)) +	\
+				  pmd_index(address))
 
 #ifdef CONFIG_SMP
 static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
@@ -161,7 +168,8 @@
  * put the 32 bits of offset into the high part.
  */
 #define pte_to_pgoff(pte) ((pte).pte_high)
-#define pgoff_to_pte(off) ((pte_t) { { .pte_low = _PAGE_FILE, .pte_high = (off) } })
+#define pgoff_to_pte(off)						\
+	((pte_t) { { .pte_low = _PAGE_FILE, .pte_high = (off) } })
 #define PTE_FILE_MAX_BITS       32
 
 /* Encode and de-code a swap entry */
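
Not part of the patch: the ordering rule the comments above describe, restated as one kernel-context sketch. A PAE PTE is written as two 32-bit halves, so the low word (which holds _PAGE_PRESENT) is cleared first and rewritten last, keeping the hardware walker from ever seeing a present but half-updated entry:

static void pae_store_pattern(pte_t *ptep, pte_t pte)
{
	ptep->pte_low = 0;		/* present bit off: entry invalid */
	smp_wmb();			/* low half visibly cleared first */
	ptep->pte_high = pte.pte_high;	/* safe: entry is non-present */
	smp_wmb();
	ptep->pte_low = pte.pte_low;	/* last store re-arms present bit */
}
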
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index 9cf472a..f1d9f4a 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -4,13 +4,13 @@
 #define USER_PTRS_PER_PGD	((TASK_SIZE-1)/PGDIR_SIZE+1)
 #define FIRST_USER_ADDRESS	0
 
-#define _PAGE_BIT_PRESENT	0
-#define _PAGE_BIT_RW		1
-#define _PAGE_BIT_USER		2
-#define _PAGE_BIT_PWT		3
-#define _PAGE_BIT_PCD		4
-#define _PAGE_BIT_ACCESSED	5
-#define _PAGE_BIT_DIRTY		6
+#define _PAGE_BIT_PRESENT	0	/* is present */
+#define _PAGE_BIT_RW		1	/* writeable */
+#define _PAGE_BIT_USER		2	/* userspace addressable */
+#define _PAGE_BIT_PWT		3	/* page write through */
+#define _PAGE_BIT_PCD		4	/* page cache disabled */
+#define _PAGE_BIT_ACCESSED	5	/* was accessed (raised by CPU) */
+#define _PAGE_BIT_DIRTY		6	/* was written to (raised by CPU) */
 #define _PAGE_BIT_FILE		6
 #define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page */
 #define _PAGE_BIT_PAT		7	/* on 4KB pages */
@@ -48,24 +48,39 @@
 #endif
 
 /* If _PAGE_PRESENT is clear, we use these: */
-#define _PAGE_FILE	_PAGE_DIRTY	/* nonlinear file mapping, saved PTE; unset:swap */
+#define _PAGE_FILE	_PAGE_DIRTY	/* nonlinear file mapping,
+					 * saved PTE; unset:swap */
 #define _PAGE_PROTNONE	_PAGE_PSE	/* if the user mapped it with PROT_NONE;
 					   pte_present gives true */
 
-#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |	\
+			 _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |	\
+			 _PAGE_DIRTY)
 
 #define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
 
-#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
-#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
+#define _PAGE_CACHE_MASK	(_PAGE_PCD | _PAGE_PWT)
+#define _PAGE_CACHE_WB		(0)
+#define _PAGE_CACHE_WC		(_PAGE_PWT)
+#define _PAGE_CACHE_UC_MINUS	(_PAGE_PCD)
+#define _PAGE_CACHE_UC		(_PAGE_PCD | _PAGE_PWT)
 
-#define PAGE_SHARED_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_COPY_NOEXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
-#define PAGE_COPY_EXEC		__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
+#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
+				 _PAGE_ACCESSED | _PAGE_NX)
+
+#define PAGE_SHARED_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_RW |	\
+					 _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_COPY_NOEXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
+					 _PAGE_ACCESSED | _PAGE_NX)
+#define PAGE_COPY_EXEC		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
+					 _PAGE_ACCESSED)
 #define PAGE_COPY		PAGE_COPY_NOEXEC
-#define PAGE_READONLY		__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
-#define PAGE_READONLY_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_READONLY		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
+					 _PAGE_ACCESSED | _PAGE_NX)
+#define PAGE_READONLY_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
+					 _PAGE_ACCESSED)
 
 #ifdef CONFIG_X86_32
 #define _PAGE_KERNEL_EXEC \
@@ -84,6 +99,7 @@
 #define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
 #define __PAGE_KERNEL_RX		(__PAGE_KERNEL_EXEC & ~_PAGE_RW)
 #define __PAGE_KERNEL_EXEC_NOCACHE	(__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT)
+#define __PAGE_KERNEL_WC		(__PAGE_KERNEL | _PAGE_CACHE_WC)
 #define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
 #define __PAGE_KERNEL_UC_MINUS		(__PAGE_KERNEL | _PAGE_PCD)
 #define __PAGE_KERNEL_VSYSCALL		(__PAGE_KERNEL_RX | _PAGE_USER)
@@ -101,6 +117,7 @@
 #define PAGE_KERNEL_RO			MAKE_GLOBAL(__PAGE_KERNEL_RO)
 #define PAGE_KERNEL_EXEC		MAKE_GLOBAL(__PAGE_KERNEL_EXEC)
 #define PAGE_KERNEL_RX			MAKE_GLOBAL(__PAGE_KERNEL_RX)
+#define PAGE_KERNEL_WC			MAKE_GLOBAL(__PAGE_KERNEL_WC)
 #define PAGE_KERNEL_NOCACHE		MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
 #define PAGE_KERNEL_UC_MINUS		MAKE_GLOBAL(__PAGE_KERNEL_UC_MINUS)
 #define PAGE_KERNEL_EXEC_NOCACHE	MAKE_GLOBAL(__PAGE_KERNEL_EXEC_NOCACHE)
@@ -134,7 +151,7 @@
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
  */
-extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
 
 extern spinlock_t pgd_lock;
@@ -144,30 +161,101 @@
  * The following only work if pte_present() is true.
  * Undefined behaviour if not..
  */
-static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
-static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
-static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
-static inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }
-static inline int pte_huge(pte_t pte)		{ return pte_val(pte) & _PAGE_PSE; }
-static inline int pte_global(pte_t pte) 	{ return pte_val(pte) & _PAGE_GLOBAL; }
-static inline int pte_exec(pte_t pte)		{ return !(pte_val(pte) & _PAGE_NX); }
-
-static inline int pmd_large(pmd_t pte) {
-	return (pmd_val(pte) & (_PAGE_PSE|_PAGE_PRESENT)) ==
-		(_PAGE_PSE|_PAGE_PRESENT);
+static inline int pte_dirty(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_DIRTY;
 }
 
-static inline pte_t pte_mkclean(pte_t pte)	{ return __pte(pte_val(pte) & ~(pteval_t)_PAGE_DIRTY); }
-static inline pte_t pte_mkold(pte_t pte)	{ return __pte(pte_val(pte) & ~(pteval_t)_PAGE_ACCESSED); }
-static inline pte_t pte_wrprotect(pte_t pte)	{ return __pte(pte_val(pte) & ~(pteval_t)_PAGE_RW); }
-static inline pte_t pte_mkexec(pte_t pte)	{ return __pte(pte_val(pte) & ~(pteval_t)_PAGE_NX); }
-static inline pte_t pte_mkdirty(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_DIRTY); }
-static inline pte_t pte_mkyoung(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_ACCESSED); }
-static inline pte_t pte_mkwrite(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_RW); }
-static inline pte_t pte_mkhuge(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_PSE); }
-static inline pte_t pte_clrhuge(pte_t pte)	{ return __pte(pte_val(pte) & ~(pteval_t)_PAGE_PSE); }
-static inline pte_t pte_mkglobal(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_GLOBAL); }
-static inline pte_t pte_clrglobal(pte_t pte)	{ return __pte(pte_val(pte) & ~(pteval_t)_PAGE_GLOBAL); }
+static inline int pte_young(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_ACCESSED;
+}
+
+static inline int pte_write(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_RW;
+}
+
+static inline int pte_file(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_FILE;
+}
+
+static inline int pte_huge(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_PSE;
+}
+
+static inline int pte_global(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_GLOBAL;
+}
+
+static inline int pte_exec(pte_t pte)
+{
+	return !(pte_val(pte) & _PAGE_NX);
+}
+
+static inline int pmd_large(pmd_t pte)
+{
+	return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
+		(_PAGE_PSE | _PAGE_PRESENT);
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_DIRTY);
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_ACCESSED);
+}
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_RW);
+}
+
+static inline pte_t pte_mkexec(pte_t pte)
+{
+	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_NX);
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_DIRTY);
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_ACCESSED);
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_RW);
+}
+
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_PSE);
+}
+
+static inline pte_t pte_clrhuge(pte_t pte)
+{
+	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_PSE);
+}
+
+static inline pte_t pte_mkglobal(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_GLOBAL);
+}
+
+static inline pte_t pte_clrglobal(pte_t pte)
+{
+	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_GLOBAL);
+}
 
 extern pteval_t __supported_pte_mask;
 
@@ -334,7 +422,8 @@
 })
 
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+				       pte_t *ptep)
 {
 	pte_t pte = native_ptep_get_and_clear(ptep);
 	pte_update(mm, addr, ptep);
@@ -342,7 +431,9 @@
 }
 
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
-static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
+static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
+					    unsigned long addr, pte_t *ptep,
+					    int full)
 {
 	pte_t pte;
 	if (full) {
@@ -358,7 +449,8 @@
 }
 
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
-static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+static inline void ptep_set_wrprotect(struct mm_struct *mm,
+				      unsigned long addr, pte_t *ptep)
 {
 	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
 	pte_update(mm, addr, ptep);
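
The new _PAGE_CACHE_* values enumerate all four combinations of the PWT/PCD bits (with the PAT bit clear; WC relies on a remapped PAT entry). A hypothetical helper that decodes them, just to make the mapping explicit:

static const char *cache_mode_name(unsigned long prot)
{
	switch (prot & _PAGE_CACHE_MASK) {
	case _PAGE_CACHE_WB:		return "write-back";
	case _PAGE_CACHE_WC:		return "write-combining";
	case _PAGE_CACHE_UC_MINUS:	return "uncached minus";
	case _PAGE_CACHE_UC:		return "uncached";
	}
	return "unknown";	/* unreachable: the mask has four values */
}
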
diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h
index 4e6a0fc..c4a6436 100644
--- a/include/asm-x86/pgtable_32.h
+++ b/include/asm-x86/pgtable_32.h
@@ -40,13 +40,13 @@
 #ifdef CONFIG_X86_PAE
 # include <asm/pgtable-3level-defs.h>
 # define PMD_SIZE	(1UL << PMD_SHIFT)
-# define PMD_MASK	(~(PMD_SIZE-1))
+# define PMD_MASK	(~(PMD_SIZE - 1))
 #else
 # include <asm/pgtable-2level-defs.h>
 #endif
 
 #define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
-#define PGDIR_MASK	(~(PGDIR_SIZE-1))
+#define PGDIR_MASK	(~(PGDIR_SIZE - 1))
 
 #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
 #define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
@@ -58,21 +58,22 @@
  * The vmalloc() routines leaves a hole of 4kB between each vmalloced
  * area for the same reason. ;)
  */
-#define VMALLOC_OFFSET	(8*1024*1024)
-#define VMALLOC_START	(((unsigned long) high_memory + \
-			2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1))
+#define VMALLOC_OFFSET	(8 * 1024 * 1024)
+#define VMALLOC_START	(((unsigned long)high_memory + 2 * VMALLOC_OFFSET - 1) \
+			 & ~(VMALLOC_OFFSET - 1))
 #ifdef CONFIG_X86_PAE
 #define LAST_PKMAP 512
 #else
 #define LAST_PKMAP 1024
 #endif
 
-#define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK)
+#define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE * (LAST_PKMAP + 1))	\
+		    & PMD_MASK)
 
 #ifdef CONFIG_HIGHMEM
-# define VMALLOC_END	(PKMAP_BASE-2*PAGE_SIZE)
+# define VMALLOC_END	(PKMAP_BASE - 2 * PAGE_SIZE)
 #else
-# define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
+# define VMALLOC_END	(FIXADDR_START - 2 * PAGE_SIZE)
 #endif
 
 /*
@@ -88,10 +89,16 @@
 #define pte_present(x)	((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
 
 /* To avoid harmful races, pmd_none(x) should check only the lower when PAE */
-#define pmd_none(x)	(!(unsigned long)pmd_val(x))
-#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
-#define	pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
+#define pmd_none(x)	(!(unsigned long)pmd_val((x)))
+#define pmd_present(x)	(pmd_val((x)) & _PAGE_PRESENT)
 
+extern int pmd_bad(pmd_t pmd);
+
+#define pmd_bad_v1(x)							\
+	(_KERNPG_TABLE != (pmd_val((x)) & ~(PAGE_MASK | _PAGE_USER)))
+#define pmd_bad_v2							\
+	(_KERNPG_TABLE != (pmd_val((x)) & ~(PAGE_MASK | _PAGE_USER |	\
+					    _PAGE_PSE | _PAGE_NX)))
 
 #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
 
@@ -117,17 +124,18 @@
 }
 
 /*
- * Macro to mark a page protection value as "uncacheable".  On processors which do not support
- * it, this is a no-op.
+ * Macro to mark a page protection value as "uncacheable".
+ * On processors which do not support it, this is a no-op.
  */
-#define pgprot_noncached(prot)	((boot_cpu_data.x86 > 3)					  \
-				 ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot))
+#define pgprot_noncached(prot)					\
+	((boot_cpu_data.x86 > 3)				\
+	 ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT))	\
+	 : (prot))
 
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
  */
-
 #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
 
 /*
@@ -136,20 +144,20 @@
  * this macro returns the index of the entry in the pgd page which would
  * control the given virtual address
  */
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-#define pgd_index_k(addr) pgd_index(addr)
+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
+#define pgd_index_k(addr) pgd_index((addr))
 
 /*
  * pgd_offset() returns a (pgd_t *)
  * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
  */
-#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
 
 /*
  * a shortcut which implies the use of the kernel's pgd, instead
  * of a process's
  */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
 
 static inline int pud_large(pud_t pud) { return 0; }
 
@@ -159,8 +167,8 @@
  * this macro returns the index of the entry in the pmd page which would
  * control the given virtual address
  */
-#define pmd_index(address) \
-		(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+#define pmd_index(address)				\
+	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
 
 /*
  * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
@@ -168,43 +176,45 @@
  * this macro returns the index of the entry in the pte page which would
  * control the given virtual address
  */
-#define pte_index(address) \
-		(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, address) \
-	((pte_t *) pmd_page_vaddr(*(dir)) +  pte_index(address))
+#define pte_index(address)					\
+	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir, address)				\
+	((pte_t *)pmd_page_vaddr(*(dir)) +  pte_index((address)))
 
-#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
+#define pmd_page(pmd) (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT))
 
-#define pmd_page_vaddr(pmd) \
-		((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+#define pmd_page_vaddr(pmd)					\
+	((unsigned long)__va(pmd_val((pmd)) & PAGE_MASK))
 
 #if defined(CONFIG_HIGHPTE)
-#define pte_offset_map(dir, address) \
-	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
-#define pte_offset_map_nested(dir, address) \
-	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
-#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
-#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
+#define pte_offset_map(dir, address)					\
+	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE0) +		\
+	 pte_index((address)))
+#define pte_offset_map_nested(dir, address)				\
+	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE1) +		\
+	 pte_index((address)))
+#define pte_unmap(pte) kunmap_atomic((pte), KM_PTE0)
+#define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1)
 #else
-#define pte_offset_map(dir, address) \
-	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
+#define pte_offset_map(dir, address)					\
+	((pte_t *)page_address(pmd_page(*(dir))) + pte_index((address)))
+#define pte_offset_map_nested(dir, address) pte_offset_map((dir), (address))
 #define pte_unmap(pte) do { } while (0)
 #define pte_unmap_nested(pte) do { } while (0)
 #endif
 
 /* Clear a kernel PTE and flush it from the TLB */
-#define kpte_clear_flush(ptep, vaddr)					\
-do {									\
-	pte_clear(&init_mm, vaddr, ptep);				\
-	__flush_tlb_one(vaddr);						\
+#define kpte_clear_flush(ptep, vaddr)		\
+do {						\
+	pte_clear(&init_mm, (vaddr), (ptep));	\
+	__flush_tlb_one((vaddr));		\
 } while (0)
 
 /*
  * The i386 doesn't have any external MMU info: the kernel page
  * tables contain all the necessary information.
  */
-#define update_mmu_cache(vma,address,pte) do { } while (0)
+#define update_mmu_cache(vma, address, pte) do { } while (0)
 
 void native_pagetable_setup_start(pgd_t *base);
 void native_pagetable_setup_done(pgd_t *base);
@@ -233,7 +243,7 @@
 #define kern_addr_valid(kaddr)	(0)
 #endif
 
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
-		remap_pfn_range(vma, vaddr, pfn, size, prot)
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
+	remap_pfn_range(vma, vaddr, pfn, size, prot)
 
 #endif /* _I386_PGTABLE_H */
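
A standalone worked example (not part of the patch) of the pgd_index()/pte_index() decomposition above, assuming non-PAE i386 constants: PGDIR_SHIFT = 22, PAGE_SHIFT = 12, 1024 entries per level:

#include <assert.h>

int main(void)
{
	unsigned long addr = 0xc0101234UL;	/* a kernel virtual address */
	unsigned long pgd_i = (addr >> 22) & 1023;
	unsigned long pte_i = (addr >> 12) & 1023;

	assert(pgd_i == 0x300);	/* slot 768, first above PAGE_OFFSET 0xc0000000 */
	assert(pte_i == 0x101);
	return 0;
}
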
diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h
index 0a0b77b..9fd87d0 100644
--- a/include/asm-x86/pgtable_64.h
+++ b/include/asm-x86/pgtable_64.h
@@ -52,14 +52,18 @@
 
 #ifndef __ASSEMBLY__
 
-#define pte_ERROR(e) \
-	printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), pte_val(e))
-#define pmd_ERROR(e) \
-	printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
-#define pud_ERROR(e) \
-	printk("%s:%d: bad pud %p(%016lx).\n", __FILE__, __LINE__, &(e), pud_val(e))
-#define pgd_ERROR(e) \
-	printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))
+#define pte_ERROR(e)					\
+	printk("%s:%d: bad pte %p(%016lx).\n",		\
+	       __FILE__, __LINE__, &(e), pte_val(e))
+#define pmd_ERROR(e)					\
+	printk("%s:%d: bad pmd %p(%016lx).\n",		\
+	       __FILE__, __LINE__, &(e), pmd_val(e))
+#define pud_ERROR(e)					\
+	printk("%s:%d: bad pud %p(%016lx).\n",		\
+	       __FILE__, __LINE__, &(e), pud_val(e))
+#define pgd_ERROR(e)					\
+	printk("%s:%d: bad pgd %p(%016lx).\n",		\
+	       __FILE__, __LINE__, &(e), pgd_val(e))
 
 #define pgd_none(x)	(!pgd_val(x))
 #define pud_none(x)	(!pud_val(x))
@@ -87,7 +91,8 @@
 #ifdef CONFIG_SMP
 	return native_make_pte(xchg(&xp->pte, 0));
 #else
-	/* native_local_ptep_get_and_clear, but duplicated because of cyclic dependency */
+	/* native_local_ptep_get_and_clear,
+	   but duplicated because of cyclic dependency */
 	pte_t ret = *xp;
 	native_pte_clear(NULL, 0, xp);
 	return ret;
@@ -119,7 +124,7 @@
 	*pgdp = pgd;
 }
 
-static inline void native_pgd_clear(pgd_t * pgd)
+static inline void native_pgd_clear(pgd_t *pgd)
 {
 	native_set_pgd(pgd, native_make_pgd(0));
 }
@@ -128,19 +133,19 @@
 
 #endif /* !__ASSEMBLY__ */
 
-#define PMD_SIZE	(_AC(1,UL) << PMD_SHIFT)
-#define PMD_MASK	(~(PMD_SIZE-1))
-#define PUD_SIZE	(_AC(1,UL) << PUD_SHIFT)
-#define PUD_MASK	(~(PUD_SIZE-1))
-#define PGDIR_SIZE	(_AC(1,UL) << PGDIR_SHIFT)
-#define PGDIR_MASK	(~(PGDIR_SIZE-1))
+#define PMD_SIZE	(_AC(1, UL) << PMD_SHIFT)
+#define PMD_MASK	(~(PMD_SIZE - 1))
+#define PUD_SIZE	(_AC(1, UL) << PUD_SHIFT)
+#define PUD_MASK	(~(PUD_SIZE - 1))
+#define PGDIR_SIZE	(_AC(1, UL) << PGDIR_SHIFT)
+#define PGDIR_MASK	(~(PGDIR_SIZE - 1))
 
 
-#define MAXMEM		 _AC(0x3fffffffffff, UL)
+#define MAXMEM		 _AC(0x00003fffffffffff, UL)
 #define VMALLOC_START    _AC(0xffffc20000000000, UL)
 #define VMALLOC_END      _AC(0xffffe1ffffffffff, UL)
 #define VMEMMAP_START	 _AC(0xffffe20000000000, UL)
-#define MODULES_VADDR    _AC(0xffffffff88000000, UL)
+#define MODULES_VADDR    _AC(0xffffffffa0000000, UL)
 #define MODULES_END      _AC(0xfffffffffff00000, UL)
 #define MODULES_LEN   (MODULES_END - MODULES_VADDR)
 
@@ -153,26 +158,28 @@
 
 static inline unsigned long pud_bad(pud_t pud)
 {
-	return pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
+	return pud_val(pud) &
+		~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER | _PAGE_PSE | _PAGE_NX);
 }
 
 static inline unsigned long pmd_bad(pmd_t pmd)
 {
-	return pmd_val(pmd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
+	return pmd_val(pmd) &
+		~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER | _PAGE_PSE | _PAGE_NX);
 }
 
-#define pte_none(x)	(!pte_val(x))
-#define pte_present(x)	(pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
+#define pte_none(x)	(!pte_val((x)))
+#define pte_present(x)	(pte_val((x)) & (_PAGE_PRESENT | _PAGE_PROTNONE))
 
-#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))	/* FIXME: is this right? */
-#define pte_page(x)	pfn_to_page(pte_pfn(x))
-#define pte_pfn(x)  ((pte_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
+#define pages_to_mb(x)	((x) >> (20 - PAGE_SHIFT))   /* FIXME: is this right? */
+#define pte_page(x)	pfn_to_page(pte_pfn((x)))
+#define pte_pfn(x)	((pte_val((x)) & __PHYSICAL_MASK) >> PAGE_SHIFT)
 
 /*
  * Macro to mark a page protection value as "uncacheable".
  */
-#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT))
-
+#define pgprot_noncached(prot)					\
+	(__pgprot(pgprot_val((prot)) | _PAGE_PCD | _PAGE_PWT))
 
 /*
  * Conversion functions: convert a page and protection to a page entry,
@@ -182,75 +189,81 @@
 /*
  * Level 4 access.
  */
-#define pgd_page_vaddr(pgd) ((unsigned long) __va((unsigned long)pgd_val(pgd) & PTE_MASK))
-#define pgd_page(pgd)		(pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT))
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
-#define pgd_offset_k(address) (init_level4_pgt + pgd_index(address))
+#define pgd_page_vaddr(pgd)						\
+	((unsigned long)__va((unsigned long)pgd_val((pgd)) & PTE_MASK))
+#define pgd_page(pgd)		(pfn_to_page(pgd_val((pgd)) >> PAGE_SHIFT))
+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
+#define pgd_offset(mm, address)	((mm)->pgd + pgd_index((address)))
+#define pgd_offset_k(address) (init_level4_pgt + pgd_index((address)))
 #define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT)
 static inline int pgd_large(pgd_t pgd) { return 0; }
 #define mk_kernel_pgd(address) ((pgd_t){ (address) | _KERNPG_TABLE })
 
 /* PUD - Level3 access */
 /* to find an entry in a page-table-directory. */
-#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PHYSICAL_PAGE_MASK))
-#define pud_page(pud)		(pfn_to_page(pud_val(pud) >> PAGE_SHIFT))
-#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
-#define pud_offset(pgd, address) ((pud_t *) pgd_page_vaddr(*(pgd)) + pud_index(address))
-#define pud_present(pud) (pud_val(pud) & _PAGE_PRESENT)
+#define pud_page_vaddr(pud)						\
+	((unsigned long)__va(pud_val((pud)) & PHYSICAL_PAGE_MASK))
+#define pud_page(pud)	(pfn_to_page(pud_val((pud)) >> PAGE_SHIFT))
+#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
+#define pud_offset(pgd, address)					\
+	((pud_t *)pgd_page_vaddr(*(pgd)) + pud_index((address)))
+#define pud_present(pud) (pud_val((pud)) & _PAGE_PRESENT)
 
 static inline int pud_large(pud_t pte)
 {
-	return (pud_val(pte) & (_PAGE_PSE|_PAGE_PRESENT)) ==
-		(_PAGE_PSE|_PAGE_PRESENT);
+	return (pud_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
+		(_PAGE_PSE | _PAGE_PRESENT);
 }
 
 /* PMD  - Level 2 access */
-#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK))
-#define pmd_page(pmd)		(pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
+#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val((pmd)) & PTE_MASK))
+#define pmd_page(pmd)		(pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT))
 
-#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-#define pmd_offset(dir, address) ((pmd_t *) pud_page_vaddr(*(dir)) + \
-			pmd_index(address))
-#define pmd_none(x)	(!pmd_val(x))
-#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
-#define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
-#define pmd_pfn(x)  ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
+#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
+#define pmd_offset(dir, address) ((pmd_t *)pud_page_vaddr(*(dir)) + \
+				  pmd_index(address))
+#define pmd_none(x)	(!pmd_val((x)))
+#define pmd_present(x)	(pmd_val((x)) & _PAGE_PRESENT)
+#define pfn_pmd(nr, prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val((prot))))
+#define pmd_pfn(x)  ((pmd_val((x)) & __PHYSICAL_MASK) >> PAGE_SHIFT)
 
-#define pte_to_pgoff(pte) ((pte_val(pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
-#define pgoff_to_pte(off) ((pte_t) { .pte = ((off) << PAGE_SHIFT) | _PAGE_FILE })
+#define pte_to_pgoff(pte) ((pte_val((pte)) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
+#define pgoff_to_pte(off) ((pte_t) { .pte = ((off) << PAGE_SHIFT) |	\
+					    _PAGE_FILE })
 #define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT
 
 /* PTE - Level 1 access. */
 
 /* page, protection -> pte */
-#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
- 
-#define pte_index(address) \
-		(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn((page)), (pgprot))
+
+#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 #define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_vaddr(*(dir)) + \
-			pte_index(address))
+					 pte_index((address)))
 
 /* x86-64 always has all page tables mapped. */
-#define pte_offset_map(dir,address) pte_offset_kernel(dir,address)
-#define pte_offset_map_nested(dir,address) pte_offset_kernel(dir,address)
+#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
+#define pte_offset_map_nested(dir, address) pte_offset_kernel((dir), (address))
 #define pte_unmap(pte) /* NOP */
-#define pte_unmap_nested(pte) /* NOP */ 
+#define pte_unmap_nested(pte) /* NOP */
 
-#define update_mmu_cache(vma,address,pte) do { } while (0)
+#define update_mmu_cache(vma, address, pte) do { } while (0)
+
+extern int direct_gbpages;
 
 /* Encode and de-code a swap entry */
 #define __swp_type(x)			(((x).val >> 1) & 0x3f)
 #define __swp_offset(x)			((x).val >> 8)
-#define __swp_entry(type, offset)	((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
-#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
+#define __swp_entry(type, offset)	((swp_entry_t) { ((type) << 1) | \
+							 ((offset) << 8) })
+#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val((pte)) })
 #define __swp_entry_to_pte(x)		((pte_t) { .pte = (x).val })
 
-extern int kern_addr_valid(unsigned long addr); 
+extern int kern_addr_valid(unsigned long addr);
 extern void cleanup_highmap(void);
 
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
-		remap_pfn_range(vma, vaddr, pfn, size, prot)
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
+	remap_pfn_range(vma, vaddr, pfn, size, prot)
 
 #define HAVE_ARCH_UNMAPPED_AREA
 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
@@ -263,8 +276,10 @@
 
 /* fs/proc/kcore.c */
 #define	kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
-#define	kc_offset_to_vaddr(o) \
-   (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o))
+#define	kc_offset_to_vaddr(o)				\
+	(((o) & (1UL << (__VIRTUAL_MASK_SHIFT - 1)))	\
+	 ? ((o) | ~__VIRTUAL_MASK)			\
+	 : (o))
 
 #define __HAVE_ARCH_PTE_SAME
 #endif /* !__ASSEMBLY__ */
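
A standalone round-trip check (illustrative only) of the swap-entry packing above: the type sits in bits 1-6, the offset from bit 8 up, and bit 0 stays clear so the entry can never look present:

#include <assert.h>

int main(void)
{
	unsigned long type = 5, off = 0x1000, val;

	val = (type << 1) | (off << 8);		/* __swp_entry */
	assert(((val >> 1) & 0x3f) == type);	/* __swp_type */
	assert((val >> 8) == off);		/* __swp_offset */
	return 0;
}
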
diff --git a/include/asm-x86/posix_types.h b/include/asm-x86/posix_types.h
index bb7133d..fe312a5 100644
--- a/include/asm-x86/posix_types.h
+++ b/include/asm-x86/posix_types.h
@@ -1,11 +1,5 @@
 #ifdef __KERNEL__
-# ifdef CONFIG_X86_32
-#  include "posix_types_32.h"
-# else
-#  include "posix_types_64.h"
-# endif
-#else
-# ifdef __i386__
+# if defined(CONFIG_X86_32) || defined(__i386__)
 #  include "posix_types_32.h"
 # else
 #  include "posix_types_64.h"
diff --git a/include/asm-x86/posix_types_32.h b/include/asm-x86/posix_types_32.h
index 015e539..b031efd 100644
--- a/include/asm-x86/posix_types_32.h
+++ b/include/asm-x86/posix_types_32.h
@@ -45,32 +45,39 @@
 #if defined(__KERNEL__)
 
 #undef	__FD_SET
-#define __FD_SET(fd,fdsetp) \
-		__asm__ __volatile__("btsl %1,%0": \
-			"+m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))
+#define __FD_SET(fd,fdsetp)					\
+	asm volatile("btsl %1,%0":				\
+		     "+m" (*(__kernel_fd_set *)(fdsetp))	\
+		     : "r" ((int)(fd)))
 
 #undef	__FD_CLR
-#define __FD_CLR(fd,fdsetp) \
-		__asm__ __volatile__("btrl %1,%0": \
-			"+m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))
+#define __FD_CLR(fd,fdsetp)					\
+	asm volatile("btrl %1,%0":				\
+		     "+m" (*(__kernel_fd_set *)(fdsetp))	\
+		     : "r" ((int) (fd)))
 
 #undef	__FD_ISSET
-#define __FD_ISSET(fd,fdsetp) (__extension__ ({ \
-		unsigned char __result; \
-		__asm__ __volatile__("btl %1,%2 ; setb %0" \
-			:"=q" (__result) :"r" ((int) (fd)), \
-			"m" (*(__kernel_fd_set *) (fdsetp))); \
-		__result; }))
+#define __FD_ISSET(fd,fdsetp)					\
+	(__extension__						\
+	 ({							\
+	 unsigned char __result;				\
+	 asm volatile("btl %1,%2 ; setb %0"			\
+		      : "=q" (__result)				\
+		      : "r" ((int)(fd)),			\
+			"m" (*(__kernel_fd_set *)(fdsetp)));	\
+	 __result;						\
+}))
 
 #undef	__FD_ZERO
-#define __FD_ZERO(fdsetp) \
-do { \
-	int __d0, __d1; \
-	__asm__ __volatile__("cld ; rep ; stosl" \
-			:"=m" (*(__kernel_fd_set *) (fdsetp)), \
-			  "=&c" (__d0), "=&D" (__d1) \
-			:"a" (0), "1" (__FDSET_LONGS), \
-			"2" ((__kernel_fd_set *) (fdsetp)) : "memory"); \
+#define __FD_ZERO(fdsetp)					\
+do {								\
+	int __d0, __d1;						\
+	asm volatile("cld ; rep ; stosl"			\
+		     : "=m" (*(__kernel_fd_set *)(fdsetp)),	\
+		       "=&c" (__d0), "=&D" (__d1)		\
+		     : "a" (0), "1" (__FDSET_LONGS),		\
+		       "2" ((__kernel_fd_set *)(fdsetp))	\
+		     : "memory");				\
 } while (0)
 
 #endif /* defined(__KERNEL__) */
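
For comparison, a portable-C restatement (illustrative only; the ex_ names are made up) of what the btsl/btrl/btl instructions in the macros above compute:

struct ex_fd_set { unsigned long fds_bits[32]; };
#define EX_NFDBITS	(8 * sizeof(unsigned long))

static inline void ex_fd_set(int fd, struct ex_fd_set *p)
{
	p->fds_bits[fd / EX_NFDBITS] |= 1UL << (fd % EX_NFDBITS);
}

static inline void ex_fd_clr(int fd, struct ex_fd_set *p)
{
	p->fds_bits[fd / EX_NFDBITS] &= ~(1UL << (fd % EX_NFDBITS));
}

static inline int ex_fd_isset(int fd, const struct ex_fd_set *p)
{
	return (p->fds_bits[fd / EX_NFDBITS] >> (fd % EX_NFDBITS)) & 1;
}
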
diff --git a/include/asm-x86/posix_types_64.h b/include/asm-x86/posix_types_64.h
index 9926aa4..d6624c9 100644
--- a/include/asm-x86/posix_types_64.h
+++ b/include/asm-x86/posix_types_64.h
@@ -46,7 +46,7 @@
 #ifdef __KERNEL__
 
 #undef __FD_SET
-static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
+static inline void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
 {
 	unsigned long _tmp = fd / __NFDBITS;
 	unsigned long _rem = fd % __NFDBITS;
@@ -54,7 +54,7 @@
 }
 
 #undef __FD_CLR
-static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
+static inline void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
 {
 	unsigned long _tmp = fd / __NFDBITS;
 	unsigned long _rem = fd % __NFDBITS;
@@ -62,7 +62,7 @@
 }
 
 #undef __FD_ISSET
-static __inline__ int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p)
+static inline int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p)
 {
 	unsigned long _tmp = fd / __NFDBITS;
 	unsigned long _rem = fd % __NFDBITS;
@@ -74,36 +74,36 @@
  * for 256 and 1024-bit fd_sets respectively)
  */
 #undef __FD_ZERO
-static __inline__ void __FD_ZERO(__kernel_fd_set *p)
+static inline void __FD_ZERO(__kernel_fd_set *p)
 {
 	unsigned long *tmp = p->fds_bits;
 	int i;
 
 	if (__builtin_constant_p(__FDSET_LONGS)) {
 		switch (__FDSET_LONGS) {
-			case 32:
-			  tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
-			  tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
-			  tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
-			  tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
-			  tmp[16] = 0; tmp[17] = 0; tmp[18] = 0; tmp[19] = 0;
-			  tmp[20] = 0; tmp[21] = 0; tmp[22] = 0; tmp[23] = 0;
-			  tmp[24] = 0; tmp[25] = 0; tmp[26] = 0; tmp[27] = 0;
-			  tmp[28] = 0; tmp[29] = 0; tmp[30] = 0; tmp[31] = 0;
-			  return;
-			case 16:
-			  tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
-			  tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
-			  tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
-			  tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
-			  return;
-			case 8:
-			  tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
-			  tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
-			  return;
-			case 4:
-			  tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
-			  return;
+		case 32:
+			tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
+			tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
+			tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
+			tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
+			tmp[16] = 0; tmp[17] = 0; tmp[18] = 0; tmp[19] = 0;
+			tmp[20] = 0; tmp[21] = 0; tmp[22] = 0; tmp[23] = 0;
+			tmp[24] = 0; tmp[25] = 0; tmp[26] = 0; tmp[27] = 0;
+			tmp[28] = 0; tmp[29] = 0; tmp[30] = 0; tmp[31] = 0;
+			return;
+		case 16:
+			tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
+			tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
+			tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
+			tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
+			return;
+		case 8:
+			tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
+			tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
+			return;
+		case 4:
+			tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
+			return;
 		}
 	}
 	i = __FDSET_LONGS;
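The hunks above elide the helper bodies, but they all share the same word/bit split. A hedged reconstruction of __FD_SET, assuming the usual fds_bits layout (the name is illustrative):

static inline void fd_set_sketch(unsigned long fd, __kernel_fd_set *fdsetp)
{
	unsigned long _tmp = fd / __NFDBITS;	/* which word	    */
	unsigned long _rem = fd % __NFDBITS;	/* which bit in it  */

	fdsetp->fds_bits[_tmp] |= (1UL << _rem);
}

__FD_CLR clears the same bit with &= ~(1UL << _rem), and __FD_ISSET tests it.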
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
index 45a2f0a..6e26c7c 100644
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -3,8 +3,7 @@
 
 #include <asm/processor-flags.h>
 
-/* migration helpers, for KVM - will be removed in 2.6.25: */
-#include <asm/vm86.h>
+/* migration helper, for KVM - will be removed in 2.6.25: */
 #define Xgt_desc_struct	desc_ptr
 
 /* Forward declaration, a strange C thing */
@@ -24,6 +23,7 @@
 #include <asm/msr.h>
 #include <asm/desc_defs.h>
 #include <asm/nops.h>
+
 #include <linux/personality.h>
 #include <linux/cpumask.h>
 #include <linux/cache.h>
@@ -37,16 +37,18 @@
 static inline void *current_text_addr(void)
 {
 	void *pc;
-	asm volatile("mov $1f,%0\n1:":"=r" (pc));
+
+	asm volatile("mov $1f, %0; 1:":"=r" (pc));
+
 	return pc;
 }
 
 #ifdef CONFIG_X86_VSMP
-#define ARCH_MIN_TASKALIGN	(1 << INTERNODE_CACHE_SHIFT)
-#define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
+# define ARCH_MIN_TASKALIGN		(1 << INTERNODE_CACHE_SHIFT)
+# define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
 #else
-#define ARCH_MIN_TASKALIGN	16
-#define ARCH_MIN_MMSTRUCT_ALIGN	0
+# define ARCH_MIN_TASKALIGN		16
+# define ARCH_MIN_MMSTRUCT_ALIGN	0
 #endif
 
 /*
@@ -56,69 +58,82 @@
  */
 
 struct cpuinfo_x86 {
-	__u8	x86;		/* CPU family */
-	__u8	x86_vendor;	/* CPU vendor */
-	__u8	x86_model;
-	__u8	x86_mask;
+	__u8			x86;		/* CPU family */
+	__u8			x86_vendor;	/* CPU vendor */
+	__u8			x86_model;
+	__u8			x86_mask;
 #ifdef CONFIG_X86_32
-	char	wp_works_ok;	/* It doesn't on 386's */
-	char	hlt_works_ok;	/* Problems on some 486Dx4's and old 386's */
-	char	hard_math;
-	char	rfu;
-	char	fdiv_bug;
-	char	f00f_bug;
-	char	coma_bug;
-	char	pad0;
+	char			wp_works_ok;	/* It doesn't on 386's */
+
+	/* Problems on some 486Dx4's and old 386's: */
+	char			hlt_works_ok;
+	char			hard_math;
+	char			rfu;
+	char			fdiv_bug;
+	char			f00f_bug;
+	char			coma_bug;
+	char			pad0;
 #else
-	/* number of 4K pages in DTLB/ITLB combined(in pages)*/
-	int     x86_tlbsize;
-	__u8    x86_virt_bits, x86_phys_bits;
-	/* cpuid returned core id bits */
-	__u8    x86_coreid_bits;
-	/* Max extended CPUID function supported */
-	__u32   extended_cpuid_level;
+	/* Number of 4K pages in DTLB/ITLB combined (in pages): */
+	int			x86_tlbsize;
+	__u8			x86_virt_bits;
+	__u8			x86_phys_bits;
+	/* CPUID returned core id bits: */
+	__u8			x86_coreid_bits;
+	/* Max extended CPUID function supported: */
+	__u32			extended_cpuid_level;
 #endif
-	int	cpuid_level;	/* Maximum supported CPUID level, -1=no CPUID */
-	__u32	x86_capability[NCAPINTS];
-	char	x86_vendor_id[16];
-	char	x86_model_id[64];
-	int 	x86_cache_size;  /* in KB - valid for CPUS which support this
-				    call  */
-	int 	x86_cache_alignment;	/* In bytes */
-	int	x86_power;
-	unsigned long loops_per_jiffy;
+	/* Maximum supported CPUID level, -1=no CPUID: */
+	int			cpuid_level;
+	__u32			x86_capability[NCAPINTS];
+	char			x86_vendor_id[16];
+	char			x86_model_id[64];
+	/* in KB - valid for CPUs which support this call: */
+	int			x86_cache_size;
+	int			x86_cache_alignment;	/* In bytes */
+	int			x86_power;
+	unsigned long		loops_per_jiffy;
 #ifdef CONFIG_SMP
-	cpumask_t llc_shared_map;	/* cpus sharing the last level cache */
+	/* cpus sharing the last level cache: */
+	cpumask_t		llc_shared_map;
 #endif
-	u16 x86_max_cores;		/* cpuid returned max cores value */
-	u16 apicid;
-	u16 x86_clflush_size;
+	/* cpuid returned max cores value: */
+	u16			x86_max_cores;
+	u16			apicid;
+	u16			initial_apicid;
+	u16			x86_clflush_size;
 #ifdef CONFIG_SMP
-	u16 booted_cores;		/* number of cores as seen by OS */
-	u16 phys_proc_id; 		/* Physical processor id. */
-	u16 cpu_core_id;  		/* Core id */
-	u16 cpu_index;			/* index into per_cpu list */
+	/* number of cores as seen by the OS: */
+	u16			booted_cores;
+	/* Physical processor id: */
+	u16			phys_proc_id;
+	/* Core id: */
+	u16			cpu_core_id;
+	/* Index into per_cpu list: */
+	u16			cpu_index;
 #endif
 } __attribute__((__aligned__(SMP_CACHE_BYTES)));
 
-#define X86_VENDOR_INTEL 0
-#define X86_VENDOR_CYRIX 1
-#define X86_VENDOR_AMD 2
-#define X86_VENDOR_UMC 3
-#define X86_VENDOR_NEXGEN 4
-#define X86_VENDOR_CENTAUR 5
-#define X86_VENDOR_TRANSMETA 7
-#define X86_VENDOR_NSC 8
-#define X86_VENDOR_NUM 9
-#define X86_VENDOR_UNKNOWN 0xff
+#define X86_VENDOR_INTEL	0
+#define X86_VENDOR_CYRIX	1
+#define X86_VENDOR_AMD		2
+#define X86_VENDOR_UMC		3
+#define X86_VENDOR_NEXGEN	4
+#define X86_VENDOR_CENTAUR	5
+#define X86_VENDOR_TRANSMETA	7
+#define X86_VENDOR_NSC		8
+#define X86_VENDOR_NUM		9
+
+#define X86_VENDOR_UNKNOWN	0xff
 
 /*
  * capabilities of CPUs
  */
-extern struct cpuinfo_x86 boot_cpu_data;
-extern struct cpuinfo_x86 new_cpu_data;
-extern struct tss_struct doublefault_tss;
-extern __u32 cleared_cpu_caps[NCAPINTS];
+extern struct cpuinfo_x86	boot_cpu_data;
+extern struct cpuinfo_x86	new_cpu_data;
+
+extern struct tss_struct	doublefault_tss;
+extern __u32			cleared_cpu_caps[NCAPINTS];
 
 #ifdef CONFIG_SMP
 DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
@@ -129,7 +144,18 @@
 #define current_cpu_data	boot_cpu_data
 #endif
 
-void cpu_detect(struct cpuinfo_x86 *c);
+static inline int hlt_works(int cpu)
+{
+#ifdef CONFIG_X86_32
+	return cpu_data(cpu).hlt_works_ok;
+#else
+	return 1;
+#endif
+}
+
+#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)
+
+extern void cpu_detect(struct cpuinfo_x86 *c);
 
 extern void identify_cpu(struct cpuinfo_x86 *);
 extern void identify_boot_cpu(void);
@@ -146,15 +172,15 @@
 #endif
 
 static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
-					 unsigned int *ecx, unsigned int *edx)
+				unsigned int *ecx, unsigned int *edx)
 {
 	/* ecx is often an input as well as an output. */
-	__asm__("cpuid"
-		: "=a" (*eax),
-		  "=b" (*ebx),
-		  "=c" (*ecx),
-		  "=d" (*edx)
-		: "0" (*eax), "2" (*ecx));
+	asm("cpuid"
+	    : "=a" (*eax),
+	      "=b" (*ebx),
+	      "=c" (*ecx),
+	      "=d" (*edx)
+	    : "0" (*eax), "2" (*ecx));
 }
 
 static inline void load_cr3(pgd_t *pgdir)
@@ -165,54 +191,67 @@
 #ifdef CONFIG_X86_32
 /* This is the TSS defined by the hardware. */
 struct x86_hw_tss {
-	unsigned short	back_link, __blh;
-	unsigned long	sp0;
-	unsigned short	ss0, __ss0h;
-	unsigned long	sp1;
-	unsigned short	ss1, __ss1h;	/* ss1 caches MSR_IA32_SYSENTER_CS */
-	unsigned long	sp2;
-	unsigned short	ss2, __ss2h;
-	unsigned long	__cr3;
-	unsigned long	ip;
-	unsigned long	flags;
-	unsigned long	ax, cx, dx, bx;
-	unsigned long	sp, bp, si, di;
-	unsigned short	es, __esh;
-	unsigned short	cs, __csh;
-	unsigned short	ss, __ssh;
-	unsigned short	ds, __dsh;
-	unsigned short	fs, __fsh;
-	unsigned short	gs, __gsh;
-	unsigned short	ldt, __ldth;
-	unsigned short	trace, io_bitmap_base;
+	unsigned short		back_link, __blh;
+	unsigned long		sp0;
+	unsigned short		ss0, __ss0h;
+	unsigned long		sp1;
+	/* ss1 caches MSR_IA32_SYSENTER_CS: */
+	unsigned short		ss1, __ss1h;
+	unsigned long		sp2;
+	unsigned short		ss2, __ss2h;
+	unsigned long		__cr3;
+	unsigned long		ip;
+	unsigned long		flags;
+	unsigned long		ax;
+	unsigned long		cx;
+	unsigned long		dx;
+	unsigned long		bx;
+	unsigned long		sp;
+	unsigned long		bp;
+	unsigned long		si;
+	unsigned long		di;
+	unsigned short		es, __esh;
+	unsigned short		cs, __csh;
+	unsigned short		ss, __ssh;
+	unsigned short		ds, __dsh;
+	unsigned short		fs, __fsh;
+	unsigned short		gs, __gsh;
+	unsigned short		ldt, __ldth;
+	unsigned short		trace;
+	unsigned short		io_bitmap_base;
+
 } __attribute__((packed));
 #else
 struct x86_hw_tss {
-	u32 reserved1;
-	u64 sp0;
-	u64 sp1;
-	u64 sp2;
-	u64 reserved2;
-	u64 ist[7];
-	u32 reserved3;
-	u32 reserved4;
-	u16 reserved5;
-	u16 io_bitmap_base;
+	u32			reserved1;
+	u64			sp0;
+	u64			sp1;
+	u64			sp2;
+	u64			reserved2;
+	u64			ist[7];
+	u32			reserved3;
+	u32			reserved4;
+	u16			reserved5;
+	u16			io_bitmap_base;
+
 } __attribute__((packed)) ____cacheline_aligned;
 #endif
 
 /*
- * Size of io_bitmap.
+ * IO-bitmap sizes:
  */
-#define IO_BITMAP_BITS  65536
-#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
-#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
-#define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap)
-#define INVALID_IO_BITMAP_OFFSET 0x8000
-#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000
+#define IO_BITMAP_BITS			65536
+#define IO_BITMAP_BYTES			(IO_BITMAP_BITS/8)
+#define IO_BITMAP_LONGS			(IO_BITMAP_BYTES/sizeof(long))
+#define IO_BITMAP_OFFSET		offsetof(struct tss_struct, io_bitmap)
+#define INVALID_IO_BITMAP_OFFSET	0x8000
+#define INVALID_IO_BITMAP_OFFSET_LAZY	0x9000
 
 struct tss_struct {
-	struct x86_hw_tss x86_tss;
+	/*
+	 * The hardware state:
+	 */
+	struct x86_hw_tss	x86_tss;
 
 	/*
 	 * The extra 1 is there because the CPU will access an
@@ -220,90 +259,108 @@
 	 * bitmap. The extra byte must be all 1 bits, and must
 	 * be within the limit.
 	 */
-	unsigned long	io_bitmap[IO_BITMAP_LONGS + 1];
+	unsigned long		io_bitmap[IO_BITMAP_LONGS + 1];
 	/*
 	 * Cache the current maximum and the last task that used the bitmap:
 	 */
-	unsigned long io_bitmap_max;
-	struct thread_struct *io_bitmap_owner;
+	unsigned long		io_bitmap_max;
+	struct thread_struct	*io_bitmap_owner;
+
 	/*
-	 * pads the TSS to be cacheline-aligned (size is 0x100)
+	 * Pad the TSS to be cacheline-aligned (size is 0x100):
 	 */
-	unsigned long __cacheline_filler[35];
+	unsigned long		__cacheline_filler[35];
 	/*
-	 * .. and then another 0x100 bytes for emergency kernel stack
+	 * .. and then another 0x100 bytes for the emergency kernel stack:
 	 */
-	unsigned long stack[64];
+	unsigned long		stack[64];
+
 } __attribute__((packed));
 
 DECLARE_PER_CPU(struct tss_struct, init_tss);
 
-/* Save the original ist values for checking stack pointers during debugging */
+/*
+ * Save the original ist values for checking stack pointers during debugging
+ */
 struct orig_ist {
-	unsigned long ist[7];
+	unsigned long		ist[7];
 };
 
 #define	MXCSR_DEFAULT		0x1f80
 
 struct i387_fsave_struct {
-	u32	cwd;
-	u32	swd;
-	u32	twd;
-	u32	fip;
-	u32	fcs;
-	u32	foo;
-	u32	fos;
-	u32	st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
-	u32	status;		/* software status information */
+	u32			cwd;	/* FPU Control Word		*/
+	u32			swd;	/* FPU Status Word		*/
+	u32			twd;	/* FPU Tag Word			*/
+	u32			fip;	/* FPU IP Offset		*/
+	u32			fcs;	/* FPU IP Selector		*/
+	u32			foo;	/* FPU Operand Pointer Offset	*/
+	u32			fos;	/* FPU Operand Pointer Selector	*/
+
+	/* 8*10 bytes for each FP-reg = 80 bytes:			*/
+	u32			st_space[20];
+
+	/* Software status information [not touched by FSAVE]:		*/
+	u32			status;
 };
 
 struct i387_fxsave_struct {
-	u16	cwd;
-	u16	swd;
-	u16	twd;
-	u16	fop;
+	u16			cwd; /* Control Word			*/
+	u16			swd; /* Status Word			*/
+	u16			twd; /* Tag Word			*/
+	u16			fop; /* Last Instruction Opcode		*/
 	union {
 		struct {
-			u64	rip;
-			u64	rdp;
+			u64	rip; /* Instruction Pointer		*/
+			u64	rdp; /* Data Pointer			*/
 		};
 		struct {
-			u32	fip;
-			u32	fcs;
-			u32	foo;
-			u32	fos;
+			u32	fip; /* FPU IP Offset			*/
+			u32	fcs; /* FPU IP Selector			*/
+			u32	foo; /* FPU Operand Offset		*/
+			u32	fos; /* FPU Operand Selector		*/
 		};
 	};
-	u32	mxcsr;
-	u32	mxcsr_mask;
-	u32	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
-	u32	xmm_space[64];	/* 16*16 bytes for each XMM-reg = 256 bytes */
-	u32	padding[24];
+	u32			mxcsr;		/* MXCSR Register State */
+	u32			mxcsr_mask;	/* MXCSR Mask		*/
+
+	/* 8*16 bytes for each FP-reg = 128 bytes:			*/
+	u32			st_space[32];
+
+	/* 16*16 bytes for each XMM-reg = 256 bytes:			*/
+	u32			xmm_space[64];
+
+	u32			padding[24];
+
 } __attribute__((aligned(16)));
 
 struct i387_soft_struct {
-	u32	cwd;
-	u32	swd;
-	u32	twd;
-	u32	fip;
-	u32	fcs;
-	u32	foo;
-	u32	fos;
-	u32	st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
-	u8	ftop, changed, lookahead, no_update, rm, alimit;
-	struct info	*info;
-	u32	entry_eip;
+	u32			cwd;
+	u32			swd;
+	u32			twd;
+	u32			fip;
+	u32			fcs;
+	u32			foo;
+	u32			fos;
+	/* 8*10 bytes for each FP-reg = 80 bytes: */
+	u32			st_space[20];
+	u8			ftop;
+	u8			changed;
+	u8			lookahead;
+	u8			no_update;
+	u8			rm;
+	u8			alimit;
+	struct info		*info;
+	u32			entry_eip;
 };
 
 union i387_union {
 	struct i387_fsave_struct	fsave;
 	struct i387_fxsave_struct	fxsave;
-	struct i387_soft_struct 	soft;
+	struct i387_soft_struct		soft;
 };
 
-#ifdef CONFIG_X86_32
-DECLARE_PER_CPU(u8, cpu_llc_id);
-#else
+#ifdef CONFIG_X86_64
 DECLARE_PER_CPU(struct orig_ist, orig_ist);
 #endif
 
@@ -313,42 +370,50 @@
 extern unsigned short num_cache_leaves;
 
 struct thread_struct {
-/* cached TLS descriptors. */
-	struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
-	unsigned long	sp0;
-	unsigned long	sp;
+	/* Cached TLS descriptors: */
+	struct desc_struct	tls_array[GDT_ENTRY_TLS_ENTRIES];
+	unsigned long		sp0;
+	unsigned long		sp;
 #ifdef CONFIG_X86_32
-	unsigned long	sysenter_cs;
+	unsigned long		sysenter_cs;
 #else
-	unsigned long 	usersp;	/* Copy from PDA */
-	unsigned short	es, ds, fsindex, gsindex;
+	unsigned long		usersp;	/* Copy from PDA */
+	unsigned short		es;
+	unsigned short		ds;
+	unsigned short		fsindex;
+	unsigned short		gsindex;
 #endif
-	unsigned long	ip;
-	unsigned long	fs;
-	unsigned long	gs;
-/* Hardware debugging registers */
-	unsigned long	debugreg0;
-	unsigned long	debugreg1;
-	unsigned long	debugreg2;
-	unsigned long	debugreg3;
-	unsigned long	debugreg6;
-	unsigned long	debugreg7;
-/* fault info */
-	unsigned long	cr2, trap_no, error_code;
-/* floating point info */
+	unsigned long		ip;
+	unsigned long		fs;
+	unsigned long		gs;
+	/* Hardware debugging registers: */
+	unsigned long		debugreg0;
+	unsigned long		debugreg1;
+	unsigned long		debugreg2;
+	unsigned long		debugreg3;
+	unsigned long		debugreg6;
+	unsigned long		debugreg7;
+	/* Fault info: */
+	unsigned long		cr2;
+	unsigned long		trap_no;
+	unsigned long		error_code;
+	/* Floating point info: */
 	union i387_union	i387 __attribute__((aligned(16)));
 #ifdef CONFIG_X86_32
-/* virtual 86 mode info */
+	/* Virtual 86 mode info */
 	struct vm86_struct __user *vm86_info;
 	unsigned long		screen_bitmap;
-	unsigned long		v86flags, v86mask, saved_sp0;
-	unsigned int		saved_fs, saved_gs;
+	unsigned long		v86flags;
+	unsigned long		v86mask;
+	unsigned long		saved_sp0;
+	unsigned int		saved_fs;
+	unsigned int		saved_gs;
 #endif
-/* IO permissions */
-	unsigned long	*io_bitmap_ptr;
-	unsigned long	iopl;
-/* max allowed port in the bitmap, in bytes: */
-	unsigned io_bitmap_max;
+	/* IO permissions: */
+	unsigned long		*io_bitmap_ptr;
+	unsigned long		iopl;
+	/* Max allowed port in the bitmap, in bytes: */
+	unsigned		io_bitmap_max;
 /* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set.  */
 	unsigned long	debugctlmsr;
 /* Debug Store - if not 0 points to a DS Save Area configuration;
@@ -358,21 +423,27 @@
 
 static inline unsigned long native_get_debugreg(int regno)
 {
-	unsigned long val = 0; 	/* Damn you, gcc! */
+	unsigned long val = 0;	/* Damn you, gcc! */
 
 	switch (regno) {
 	case 0:
-		asm("mov %%db0, %0" :"=r" (val)); break;
+		asm("mov %%db0, %0" :"=r" (val));
+		break;
 	case 1:
-		asm("mov %%db1, %0" :"=r" (val)); break;
+		asm("mov %%db1, %0" :"=r" (val));
+		break;
 	case 2:
-		asm("mov %%db2, %0" :"=r" (val)); break;
+		asm("mov %%db2, %0" :"=r" (val));
+		break;
 	case 3:
-		asm("mov %%db3, %0" :"=r" (val)); break;
+		asm("mov %%db3, %0" :"=r" (val));
+		break;
 	case 6:
-		asm("mov %%db6, %0" :"=r" (val)); break;
+		asm("mov %%db6, %0" :"=r" (val));
+		break;
 	case 7:
-		asm("mov %%db7, %0" :"=r" (val)); break;
+		asm("mov %%db7, %0" :"=r" (val));
+		break;
 	default:
 		BUG();
 	}
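Each case above is the same one-instruction pattern; a stand-alone sketch for DR6 (illustrative name — debug registers can only be moved through a general-purpose register):

static inline unsigned long read_dr6_sketch(void)
{
	unsigned long val;

	asm("mov %%db6, %0" : "=r" (val));
	return val;
}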
@@ -383,22 +454,22 @@
 {
 	switch (regno) {
 	case 0:
-		asm("mov %0,%%db0"	: /* no output */ :"r" (value));
+		asm("mov %0, %%db0"	::"r" (value));
 		break;
 	case 1:
-		asm("mov %0,%%db1"	: /* no output */ :"r" (value));
+		asm("mov %0, %%db1"	::"r" (value));
 		break;
 	case 2:
-		asm("mov %0,%%db2"	: /* no output */ :"r" (value));
+		asm("mov %0, %%db2"	::"r" (value));
 		break;
 	case 3:
-		asm("mov %0,%%db3"	: /* no output */ :"r" (value));
+		asm("mov %0, %%db3"	::"r" (value));
 		break;
 	case 6:
-		asm("mov %0,%%db6"	: /* no output */ :"r" (value));
+		asm("mov %0, %%db6"	::"r" (value));
 		break;
 	case 7:
-		asm("mov %0,%%db7"	: /* no output */ :"r" (value));
+		asm("mov %0, %%db7"	::"r" (value));
 		break;
 	default:
 		BUG();
@@ -412,23 +483,24 @@
 {
 #ifdef CONFIG_X86_32
 	unsigned int reg;
-	__asm__ __volatile__ ("pushfl;"
-			      "popl %0;"
-			      "andl %1, %0;"
-			      "orl %2, %0;"
-			      "pushl %0;"
-			      "popfl"
-				: "=&r" (reg)
-				: "i" (~X86_EFLAGS_IOPL), "r" (mask));
+
+	asm volatile ("pushfl;"
+		      "popl %0;"
+		      "andl %1, %0;"
+		      "orl %2, %0;"
+		      "pushl %0;"
+		      "popfl"
+		      : "=&r" (reg)
+		      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
 #endif
 }
 
-static inline void native_load_sp0(struct tss_struct *tss,
-				   struct thread_struct *thread)
+static inline void
+native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
 {
 	tss->x86_tss.sp0 = thread->sp0;
 #ifdef CONFIG_X86_32
-	/* Only happens when SEP is enabled, no need to test "SEP"arately */
+	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
 	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
 		tss->x86_tss.ss1 = thread->sysenter_cs;
 		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
@@ -446,8 +518,8 @@
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
-#define __cpuid native_cpuid
-#define paravirt_enabled() 0
+#define __cpuid			native_cpuid
+#define paravirt_enabled()	0
 
 /*
  * These special macros can be used to get or set a debugging register
@@ -473,11 +545,12 @@
  * enable), so that any CPU's that boot up
  * after us can get the correct flags.
  */
-extern unsigned long mmu_cr4_features;
+extern unsigned long		mmu_cr4_features;
 
 static inline void set_in_cr4(unsigned long mask)
 {
 	unsigned cr4;
+
 	mmu_cr4_features |= mask;
 	cr4 = read_cr4();
 	cr4 |= mask;
@@ -487,6 +560,7 @@
 static inline void clear_in_cr4(unsigned long mask)
 {
 	unsigned cr4;
+
 	mmu_cr4_features &= ~mask;
 	cr4 = read_cr4();
 	cr4 &= ~mask;
@@ -494,42 +568,42 @@
 }
 
 struct microcode_header {
-	unsigned int hdrver;
-	unsigned int rev;
-	unsigned int date;
-	unsigned int sig;
-	unsigned int cksum;
-	unsigned int ldrver;
-	unsigned int pf;
-	unsigned int datasize;
-	unsigned int totalsize;
-	unsigned int reserved[3];
+	unsigned int		hdrver;
+	unsigned int		rev;
+	unsigned int		date;
+	unsigned int		sig;
+	unsigned int		cksum;
+	unsigned int		ldrver;
+	unsigned int		pf;
+	unsigned int		datasize;
+	unsigned int		totalsize;
+	unsigned int		reserved[3];
 };
 
 struct microcode {
-	struct microcode_header hdr;
-	unsigned int bits[0];
+	struct microcode_header	hdr;
+	unsigned int		bits[0];
 };
 
-typedef struct microcode microcode_t;
-typedef struct microcode_header microcode_header_t;
+typedef struct microcode	microcode_t;
+typedef struct microcode_header	microcode_header_t;
 
 /* microcode format is extended from prescott processors */
 struct extended_signature {
-	unsigned int sig;
-	unsigned int pf;
-	unsigned int cksum;
+	unsigned int		sig;
+	unsigned int		pf;
+	unsigned int		cksum;
 };
 
 struct extended_sigtable {
-	unsigned int count;
-	unsigned int cksum;
-	unsigned int reserved[3];
+	unsigned int		count;
+	unsigned int		cksum;
+	unsigned int		reserved[3];
 	struct extended_signature sigs[0];
 };
 
 typedef struct {
-	unsigned long seg;
+	unsigned long		seg;
 } mm_segment_t;
 
 
@@ -541,7 +615,7 @@
 /* Free all resources held by a thread. */
 extern void release_thread(struct task_struct *);
 
-/* Prepare to copy thread state - unlazy all lazy status */
+/* Prepare to copy thread state - unlazy all lazy state */
 extern void prepare_to_copy(struct task_struct *tsk);
 
 unsigned long get_wchan(struct task_struct *p);
@@ -578,118 +652,137 @@
 	unsigned int eax, ebx, ecx, edx;
 
 	cpuid(op, &eax, &ebx, &ecx, &edx);
+
 	return eax;
 }
+
 static inline unsigned int cpuid_ebx(unsigned int op)
 {
 	unsigned int eax, ebx, ecx, edx;
 
 	cpuid(op, &eax, &ebx, &ecx, &edx);
+
 	return ebx;
 }
+
 static inline unsigned int cpuid_ecx(unsigned int op)
 {
 	unsigned int eax, ebx, ecx, edx;
 
 	cpuid(op, &eax, &ebx, &ecx, &edx);
+
 	return ecx;
 }
+
 static inline unsigned int cpuid_edx(unsigned int op)
 {
 	unsigned int eax, ebx, ecx, edx;
 
 	cpuid(op, &eax, &ebx, &ecx, &edx);
+
 	return edx;
 }
 
 /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
 static inline void rep_nop(void)
 {
-	__asm__ __volatile__("rep;nop": : :"memory");
+	asm volatile("rep; nop" ::: "memory");
 }
 
-/* Stop speculative execution */
+static inline void cpu_relax(void)
+{
+	rep_nop();
+}
+
+/* Stop speculative execution: */
 static inline void sync_core(void)
 {
 	int tmp;
+
 	asm volatile("cpuid" : "=a" (tmp) : "0" (1)
-					  : "ebx", "ecx", "edx", "memory");
+		     : "ebx", "ecx", "edx", "memory");
 }
 
-#define cpu_relax()   rep_nop()
-
 static inline void __monitor(const void *eax, unsigned long ecx,
-		unsigned long edx)
+			     unsigned long edx)
 {
-	/* "monitor %eax,%ecx,%edx;" */
-	asm volatile(
-		".byte 0x0f,0x01,0xc8;"
-		: :"a" (eax), "c" (ecx), "d"(edx));
+	/* "monitor %eax, %ecx, %edx;" */
+	asm volatile(".byte 0x0f, 0x01, 0xc8;"
+		     :: "a" (eax), "c" (ecx), "d"(edx));
 }
 
 static inline void __mwait(unsigned long eax, unsigned long ecx)
 {
-	/* "mwait %eax,%ecx;" */
-	asm volatile(
-		".byte 0x0f,0x01,0xc9;"
-		: :"a" (eax), "c" (ecx));
+	/* "mwait %eax, %ecx;" */
+	asm volatile(".byte 0x0f, 0x01, 0xc9;"
+		     :: "a" (eax), "c" (ecx));
 }
 
 static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
 {
-	/* "mwait %eax,%ecx;" */
-	asm volatile(
-		"sti; .byte 0x0f,0x01,0xc9;"
-		: :"a" (eax), "c" (ecx));
+	/* "mwait %eax, %ecx;" */
+	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
+		     :: "a" (eax), "c" (ecx));
 }
 
 extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
 
-extern int force_mwait;
+extern int			force_mwait;
 
 extern void select_idle_routine(const struct cpuinfo_x86 *c);
 
-extern unsigned long boot_option_idle_override;
+extern unsigned long		boot_option_idle_override;
 
 extern void enable_sep_cpu(void);
 extern int sysenter_setup(void);
 
 /* Defined in head.S */
-extern struct desc_ptr early_gdt_descr;
+extern struct desc_ptr		early_gdt_descr;
 
 extern void cpu_set_gdt(int);
 extern void switch_to_new_gdt(void);
 extern void cpu_init(void);
 extern void init_gdt(int cpu);
 
-/* from system description table in BIOS.  Mostly for MCA use, but
- * others may find it useful. */
-extern unsigned int machine_id;
-extern unsigned int machine_submodel_id;
-extern unsigned int BIOS_revision;
+static inline void update_debugctlmsr(unsigned long debugctlmsr)
+{
+#ifndef CONFIG_X86_DEBUGCTLMSR
+	if (boot_cpu_data.x86 < 6)
+		return;
+#endif
+	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
+}
 
-/* Boot loader type from the setup header */
-extern int bootloader_type;
+/*
+ * From the system description table in the BIOS. Mostly for MCA
+ * use, but others may find it useful:
+ */
+extern unsigned int		machine_id;
+extern unsigned int		machine_submodel_id;
+extern unsigned int		BIOS_revision;
 
-extern char ignore_fpu_irq;
-#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
+/* Boot loader type from the setup header: */
+extern int			bootloader_type;
+
+extern char			ignore_fpu_irq;
 
 #define HAVE_ARCH_PICK_MMAP_LAYOUT 1
 #define ARCH_HAS_PREFETCHW
 #define ARCH_HAS_SPINLOCK_PREFETCH
 
 #ifdef CONFIG_X86_32
-#define BASE_PREFETCH	ASM_NOP4
-#define ARCH_HAS_PREFETCH
+# define BASE_PREFETCH		ASM_NOP4
+# define ARCH_HAS_PREFETCH
 #else
-#define BASE_PREFETCH	"prefetcht0 (%1)"
+# define BASE_PREFETCH		"prefetcht0 (%1)"
 #endif
 
-/* Prefetch instructions for Pentium III and AMD Athlon */
-/* It's not worth to care about 3dnow! prefetches for the K6
-   because they are microcoded there and very slow.
-   However we don't do prefetches for pre XP Athlons currently
-   That should be fixed. */
+/*
+ * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
+ *
+ * It's not worth caring about 3dnow prefetches for the K6
+ * because they are microcoded there and very slow.
+ */
 static inline void prefetch(const void *x)
 {
 	alternative_input(BASE_PREFETCH,
@@ -698,8 +791,11 @@
 			  "r" (x));
 }
 
-/* 3dnow! prefetch to get an exclusive cache line. Useful for
-   spinlocks to avoid one state transition in the cache coherency protocol. */
+/*
+ * 3dnow prefetch to get an exclusive cache line.
+ * Useful for spinlocks to avoid one state transition in the
+ * cache coherency protocol:
+ */
 static inline void prefetchw(const void *x)
 {
 	alternative_input(BASE_PREFETCH,
@@ -708,21 +804,25 @@
 			  "r" (x));
 }
 
-#define spin_lock_prefetch(x)	prefetchw(x)
+static inline void spin_lock_prefetch(const void *x)
+{
+	prefetchw(x);
+}
+
 #ifdef CONFIG_X86_32
 /*
  * User space process size: 3GB (default).
  */
-#define TASK_SIZE	(PAGE_OFFSET)
-#define STACK_TOP	TASK_SIZE
-#define STACK_TOP_MAX	STACK_TOP
+#define TASK_SIZE		PAGE_OFFSET
+#define STACK_TOP		TASK_SIZE
+#define STACK_TOP_MAX		STACK_TOP
 
-#define INIT_THREAD  {							\
-	.sp0 = sizeof(init_stack) + (long)&init_stack,			\
-	.vm86_info = NULL,						\
-	.sysenter_cs = __KERNEL_CS,					\
-	.io_bitmap_ptr = NULL,						\
-	.fs = __KERNEL_PERCPU,						\
+#define INIT_THREAD  {							  \
+	.sp0			= sizeof(init_stack) + (long)&init_stack, \
+	.vm86_info		= NULL,					  \
+	.sysenter_cs		= __KERNEL_CS,				  \
+	.io_bitmap_ptr		= NULL,					  \
+	.fs			= __KERNEL_PERCPU,			  \
 }
 
 /*
@@ -731,29 +831,16 @@
  * permission bitmap. The extra byte must be all 1 bits, and must
  * be within the limit.
  */
-#define INIT_TSS  {							\
-	.x86_tss = {							\
+#define INIT_TSS  {							  \
+	.x86_tss = {							  \
 		.sp0		= sizeof(init_stack) + (long)&init_stack, \
-		.ss0		= __KERNEL_DS,				\
-		.ss1		= __KERNEL_CS,				\
-		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,		\
-	 },								\
-	.io_bitmap	= { [0 ... IO_BITMAP_LONGS] = ~0 },		\
+		.ss0		= __KERNEL_DS,				  \
+		.ss1		= __KERNEL_CS,				  \
+		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,		  \
+	 },								  \
+	.io_bitmap		= { [0 ... IO_BITMAP_LONGS] = ~0 },	  \
 }
 
-#define start_thread(regs, new_eip, new_esp) do {		\
-	__asm__("movl %0,%%gs": :"r" (0));			\
-	regs->fs = 0;						\
-	set_fs(USER_DS);					\
-	regs->ds = __USER_DS;					\
-	regs->es = __USER_DS;					\
-	regs->ss = __USER_DS;					\
-	regs->cs = __USER_CS;					\
-	regs->ip = new_eip;					\
-	regs->sp = new_esp;					\
-} while (0)
-
-
 extern unsigned long thread_saved_pc(struct task_struct *tsk);
 
 #define THREAD_SIZE_LONGS      (THREAD_SIZE/sizeof(unsigned long))
@@ -780,24 +867,24 @@
        __regs__ - 1;                                                   \
 })
 
-#define KSTK_ESP(task) (task_pt_regs(task)->sp)
+#define KSTK_ESP(task)		(task_pt_regs(task)->sp)
 
 #else
 /*
  * User space process size. 47bits minus one guard page.
  */
-#define TASK_SIZE64	(0x800000000000UL - 4096)
+#define TASK_SIZE64	((1UL << 47) - PAGE_SIZE)
 
 /* This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
  */
-#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
-			   0xc0000000 : 0xFFFFe000)
+#define IA32_PAGE_OFFSET	((current->personality & ADDR_LIMIT_3GB) ? \
+					0xc0000000 : 0xFFFFe000)
 
-#define TASK_SIZE 		(test_thread_flag(TIF_IA32) ? \
-				 IA32_PAGE_OFFSET : TASK_SIZE64)
-#define TASK_SIZE_OF(child) 	((test_tsk_thread_flag(child, TIF_IA32)) ? \
-				  IA32_PAGE_OFFSET : TASK_SIZE64)
+#define TASK_SIZE		(test_thread_flag(TIF_IA32) ? \
+					IA32_PAGE_OFFSET : TASK_SIZE64)
+#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_IA32)) ? \
+					IA32_PAGE_OFFSET : TASK_SIZE64)
 
 #define STACK_TOP		TASK_SIZE
 #define STACK_TOP_MAX		TASK_SIZE64
@@ -810,33 +897,25 @@
 	.x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
 }
 
-#define start_thread(regs, new_rip, new_rsp) do { 			     \
-	asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0));  \
-	load_gs_index(0);						     \
-	(regs)->ip = (new_rip);						     \
-	(regs)->sp = (new_rsp);						     \
-	write_pda(oldrsp, (new_rsp));					     \
-	(regs)->cs = __USER_CS;						     \
-	(regs)->ss = __USER_DS;						     \
-	(regs)->flags = 0x200;						     \
-	set_fs(USER_DS);						     \
-} while (0)
-
 /*
  * Return saved PC of a blocked thread.
 * What is this good for? It will always be the scheduler or ret_from_fork.
  */
-#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.sp - 8))
+#define thread_saved_pc(t)	(*(unsigned long *)((t)->thread.sp - 8))
 
-#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1)
-#define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */
+#define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
+#define KSTK_ESP(tsk)		-1 /* sorry. doesn't work for syscall. */
 #endif /* CONFIG_X86_64 */
 
-/* This decides where the kernel will search for a free chunk of vm
+extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
+					       unsigned long new_sp);
+
+/*
+ * This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
  */
 #define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))
 
-#define KSTK_EIP(task) (task_pt_regs(task)->ip)
+#define KSTK_EIP(task)		(task_pt_regs(task)->ip)
 
 #endif
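Two notes on the hunks above. TASK_SIZE64 is only re-expressed: with 4 KiB pages, (1UL << 47) - PAGE_SIZE evaluates to exactly the old literal 0x800000000000UL - 4096, so behavior is unchanged. And a hedged usage sketch for the cpuid_*() helpers (leaf 0 is the standard vendor/level query; the function name is illustrative):

static void cpuid_vendor_sketch(void)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(0, &eax, &ebx, &ecx, &edx);
	/* eax: highest standard leaf; ebx, edx, ecx hold the
	   12-byte vendor string, e.g. "GenuineIntel" */
}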
diff --git a/include/asm-x86/proto.h b/include/asm-x86/proto.h
index 68563c0..1e17bcc 100644
--- a/include/asm-x86/proto.h
+++ b/include/asm-x86/proto.h
@@ -7,8 +7,6 @@
 
 extern void early_idt_handler(void);
 
-extern void init_memory_mapping(unsigned long start, unsigned long end);
-
 extern void system_call(void);
 extern void syscall_init(void);
 
@@ -26,7 +24,7 @@
 
 long do_arch_prctl(struct task_struct *task, int code, unsigned long addr);
 
-#define round_up(x,y) (((x) + (y) - 1) & ~((y)-1))
-#define round_down(x,y) ((x) & ~((y)-1))
+#define round_up(x, y) (((x) + (y) - 1) & ~((y) - 1))
+#define round_down(x, y) ((x) & ~((y) - 1))
 
 #endif
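A worked example of the rounding macros above; the mask trick requires y to be a power of two:

/* round_up(13, 8):   (13 + 8 - 1) & ~(8 - 1)  =  20 & ~7  =  16 */
/* round_down(13, 8):  13 & ~(8 - 1)           =  13 & ~7  =   8 */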
diff --git a/include/asm-x86/ptrace.h b/include/asm-x86/ptrace.h
index d9e04b4..24ec061 100644
--- a/include/asm-x86/ptrace.h
+++ b/include/asm-x86/ptrace.h
@@ -36,23 +36,23 @@
 #else /* __KERNEL__ */
 
 struct pt_regs {
-	long bx;
-	long cx;
-	long dx;
-	long si;
-	long di;
-	long bp;
-	long ax;
-	int  ds;
-	int  es;
-	int  fs;
+	unsigned long bx;
+	unsigned long cx;
+	unsigned long dx;
+	unsigned long si;
+	unsigned long di;
+	unsigned long bp;
+	unsigned long ax;
+	unsigned long ds;
+	unsigned long es;
+	unsigned long fs;
 	/* int  gs; */
-	long orig_ax;
-	long ip;
-	int  cs;
-	long flags;
-	long sp;
-	int  ss;
+	unsigned long orig_ax;
+	unsigned long ip;
+	unsigned long cs;
+	unsigned long flags;
+	unsigned long sp;
+	unsigned long ss;
 };
 
 #include <asm/vm86.h>
@@ -140,12 +140,16 @@
 convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);
 
 #ifdef CONFIG_X86_32
-extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code);
+extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
+			 int error_code);
 #else
 void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
 #endif
 
-#define regs_return_value(regs) ((regs)->ax)
+static inline unsigned long regs_return_value(struct pt_regs *regs)
+{
+	return regs->ax;
+}
 
 /*
  * user_mode_vm(regs) determines whether a register set came from user mode.
@@ -166,8 +170,8 @@
 static inline int user_mode_vm(struct pt_regs *regs)
 {
 #ifdef CONFIG_X86_32
-	return ((regs->cs & SEGMENT_RPL_MASK) |
-		(regs->flags & VM_MASK)) >= USER_RPL;
+	return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
+		USER_RPL;
 #else
 	return user_mode(regs);
 #endif
@@ -176,7 +180,7 @@
 static inline int v8086_mode(struct pt_regs *regs)
 {
 #ifdef CONFIG_X86_32
-	return (regs->flags & VM_MASK);
+	return (regs->flags & X86_VM_MASK);
 #else
 	return 0;	/* No V86 mode support in long mode */
 #endif
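Turning regs_return_value() into an inline function lets the compiler type-check its argument; usage is unchanged. A hedged sketch (illustrative name):

static inline long syscall_return_sketch(struct pt_regs *regs)
{
	return (long)regs_return_value(regs);	/* ax carries the value */
}

In user_mode_vm(), the CS RPL test alone is not enough on 32-bit because vm86 tasks can run with any RPL, so the VM flag is OR-ed in before comparing against USER_RPL.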
diff --git a/include/asm-x86/reboot.h b/include/asm-x86/reboot.h
index e9e3ffc..6b5233b 100644
--- a/include/asm-x86/reboot.h
+++ b/include/asm-x86/reboot.h
@@ -3,8 +3,7 @@
 
 struct pt_regs;
 
-struct machine_ops
-{
+struct machine_ops {
 	void (*restart)(char *cmd);
 	void (*halt)(void);
 	void (*power_off)(void);
diff --git a/include/asm-x86/resume-trace.h b/include/asm-x86/resume-trace.h
index 46f725b..2557514 100644
--- a/include/asm-x86/resume-trace.h
+++ b/include/asm-x86/resume-trace.h
@@ -3,16 +3,17 @@
 
 #include <asm/asm.h>
 
-#define TRACE_RESUME(user) do {					\
+#define TRACE_RESUME(user)					\
+do {								\
 	if (pm_trace_enabled) {					\
 		void *tracedata;				\
 		asm volatile(_ASM_MOV_UL " $1f,%0\n"		\
-			".section .tracedata,\"a\"\n"		\
-			"1:\t.word %c1\n\t"			\
-			_ASM_PTR " %c2\n"			\
-			".previous"				\
-			:"=r" (tracedata)			\
-			: "i" (__LINE__), "i" (__FILE__));	\
+			     ".section .tracedata,\"a\"\n"	\
+			     "1:\t.word %c1\n\t"		\
+			     _ASM_PTR " %c2\n"			\
+			     ".previous"			\
+			     :"=r" (tracedata)			\
+			     : "i" (__LINE__), "i" (__FILE__));	\
 		generate_resume_trace(tracedata, user);		\
 	}							\
 } while (0)
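TRACE_RESUME plants a (line, file) record in the .tracedata section and hands its address to generate_resume_trace(). A hedged C analogue of that trick, with all names illustrative:

struct trace_rec {
	unsigned short	line;
	const char	*file;
} __attribute__((packed));

#define TRACE_POINT_SKETCH(consumer)				\
do {								\
	static const struct trace_rec __rec			\
		__attribute__((section(".tracedata"), used)) =	\
		{ __LINE__, __FILE__ };				\
	consumer(&__rec);					\
} while (0)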
diff --git a/include/asm-x86/rio.h b/include/asm-x86/rio.h
index 97cdcc9..3451c57 100644
--- a/include/asm-x86/rio.h
+++ b/include/asm-x86/rio.h
@@ -11,53 +11,53 @@
 #define RIO_TABLE_VERSION	3
 
 struct rio_table_hdr {
-	u8 version;      /* Version number of this data structure  */
-	u8 num_scal_dev; /* # of Scalability devices               */
-	u8 num_rio_dev;  /* # of RIO I/O devices                   */
+	u8 version;		/* Version number of this data structure  */
+	u8 num_scal_dev;	/* # of Scalability devices               */
+	u8 num_rio_dev;		/* # of RIO I/O devices                   */
 } __attribute__((packed));
 
 struct scal_detail {
-	u8 node_id;      /* Scalability Node ID                    */
-	u32 CBAR;        /* Address of 1MB register space          */
-	u8 port0node;    /* Node ID port connected to: 0xFF=None   */
-	u8 port0port;    /* Port num port connected to: 0,1,2, or  */
-	                 /* 0xFF=None                              */
-	u8 port1node;    /* Node ID port connected to: 0xFF = None */
-	u8 port1port;    /* Port num port connected to: 0,1,2, or  */
-	                 /* 0xFF=None                              */
-	u8 port2node;    /* Node ID port connected to: 0xFF = None */
-	u8 port2port;    /* Port num port connected to: 0,1,2, or  */
-	                 /* 0xFF=None                              */
-	u8 chassis_num;  /* 1 based Chassis number (1 = boot node) */
+	u8 node_id;		/* Scalability Node ID                    */
+	u32 CBAR;		/* Address of 1MB register space          */
+	u8 port0node;		/* Node ID port connected to: 0xFF=None   */
+	u8 port0port;		/* Port num port connected to: 0,1,2, or  */
+				/* 0xFF=None                              */
+	u8 port1node;		/* Node ID port connected to: 0xFF = None */
+	u8 port1port;		/* Port num port connected to: 0,1,2, or  */
+				/* 0xFF=None                              */
+	u8 port2node;		/* Node ID port connected to: 0xFF = None */
+	u8 port2port;		/* Port num port connected to: 0,1,2, or  */
+				/* 0xFF=None                              */
+	u8 chassis_num;		/* 1 based Chassis number (1 = boot node) */
 } __attribute__((packed));
 
 struct rio_detail {
-	u8 node_id;      /* RIO Node ID                            */
-	u32 BBAR;        /* Address of 1MB register space          */
-	u8 type;         /* Type of device                         */
-	u8 owner_id;     /* Node ID of Hurricane that owns this    */
-	                 /* node                                   */
-	u8 port0node;    /* Node ID port connected to: 0xFF=None   */
-	u8 port0port;    /* Port num port connected to: 0,1,2, or  */
-	                 /* 0xFF=None                              */
-	u8 port1node;    /* Node ID port connected to: 0xFF=None   */
-	u8 port1port;    /* Port num port connected to: 0,1,2, or  */
-	                 /* 0xFF=None                              */
-	u8 first_slot;   /* Lowest slot number below this Calgary  */
-	u8 status;       /* Bit 0 = 1 : the XAPIC is used          */
-	                 /*       = 0 : the XAPIC is not used, ie: */
-	                 /*            ints fwded to another XAPIC */
-	                 /*           Bits1:7 Reserved             */
-	u8 WP_index;     /* instance index - lower ones have       */
-	                 /*     lower slot numbers/PCI bus numbers */
-	u8 chassis_num;  /* 1 based Chassis number                 */
+	u8 node_id;		/* RIO Node ID                            */
+	u32 BBAR;		/* Address of 1MB register space          */
+	u8 type;		/* Type of device                         */
+	u8 owner_id;		/* Node ID of Hurricane that owns this    */
+				/* node                                   */
+	u8 port0node;		/* Node ID port connected to: 0xFF=None   */
+	u8 port0port;		/* Port num port connected to: 0,1,2, or  */
+				/* 0xFF=None                              */
+	u8 port1node;		/* Node ID port connected to: 0xFF=None   */
+	u8 port1port;		/* Port num port connected to: 0,1,2, or  */
+				/* 0xFF=None                              */
+	u8 first_slot;		/* Lowest slot number below this Calgary  */
+	u8 status;		/* Bit 0 = 1 : the XAPIC is used          */
+				/*       = 0 : the XAPIC is not used, ie: */
+				/*            ints fwded to another XAPIC */
+				/*           Bits1:7 Reserved             */
+	u8 WP_index;		/* instance index - lower ones have       */
+				/*     lower slot numbers/PCI bus numbers */
+	u8 chassis_num;		/* 1 based Chassis number                 */
 } __attribute__((packed));
 
 enum {
-	HURR_SCALABILTY	= 0,  /* Hurricane Scalability info */
-	HURR_RIOIB	= 2,  /* Hurricane RIOIB info       */
-	COMPAT_CALGARY	= 4,  /* Compatibility Calgary      */
-	ALT_CALGARY	= 5,  /* Second Planar Calgary      */
+	HURR_SCALABILTY	= 0,	/* Hurricane Scalability info */
+	HURR_RIOIB	= 2,	/* Hurricane RIOIB info       */
+	COMPAT_CALGARY	= 4,	/* Compatibility Calgary      */
+	ALT_CALGARY	= 5,	/* Second Planar Calgary      */
 };
 
 /*
diff --git a/include/asm-x86/rwsem.h b/include/asm-x86/rwsem.h
index 520a379..750f2a3 100644
--- a/include/asm-x86/rwsem.h
+++ b/include/asm-x86/rwsem.h
@@ -56,14 +56,16 @@
 /*
  * the semaphore definition
  */
-struct rw_semaphore {
-	signed long		count;
+
 #define RWSEM_UNLOCKED_VALUE		0x00000000
 #define RWSEM_ACTIVE_BIAS		0x00000001
 #define RWSEM_ACTIVE_MASK		0x0000ffff
 #define RWSEM_WAITING_BIAS		(-0x00010000)
 #define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+
+struct rw_semaphore {
+	signed long		count;
 	spinlock_t		wait_lock;
 	struct list_head	wait_list;
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -78,11 +80,13 @@
 #endif
 
 
-#define __RWSEM_INITIALIZER(name) \
-{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
-  LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
+#define __RWSEM_INITIALIZER(name)				\
+{								\
+	RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
+	LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) \
+}
 
-#define DECLARE_RWSEM(name) \
+#define DECLARE_RWSEM(name)					\
 	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
 
 extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
@@ -100,16 +104,16 @@
  */
 static inline void __down_read(struct rw_semaphore *sem)
 {
-	__asm__ __volatile__(
-		"# beginning down_read\n\t"
-LOCK_PREFIX	"  incl      (%%eax)\n\t" /* adds 0x00000001, returns the old value */
-		"  jns        1f\n"
-		"  call call_rwsem_down_read_failed\n"
-		"1:\n\t"
-		"# ending down_read\n\t"
-		: "+m" (sem->count)
-		: "a" (sem)
-		: "memory", "cc");
+	asm volatile("# beginning down_read\n\t"
+		     LOCK_PREFIX "  incl      (%%eax)\n\t"
+		     /* adds 0x00000001, returns the old value */
+		     "  jns        1f\n"
+		     "  call call_rwsem_down_read_failed\n"
+		     "1:\n\t"
+		     "# ending down_read\n\t"
+		     : "+m" (sem->count)
+		     : "a" (sem)
+		     : "memory", "cc");
 }
 
 /*
@@ -118,21 +122,20 @@
 static inline int __down_read_trylock(struct rw_semaphore *sem)
 {
 	__s32 result, tmp;
-	__asm__ __volatile__(
-		"# beginning __down_read_trylock\n\t"
-		"  movl      %0,%1\n\t"
-		"1:\n\t"
-		"  movl	     %1,%2\n\t"
-		"  addl      %3,%2\n\t"
-		"  jle	     2f\n\t"
-LOCK_PREFIX	"  cmpxchgl  %2,%0\n\t"
-		"  jnz	     1b\n\t"
-		"2:\n\t"
-		"# ending __down_read_trylock\n\t"
-		: "+m" (sem->count), "=&a" (result), "=&r" (tmp)
-		: "i" (RWSEM_ACTIVE_READ_BIAS)
-		: "memory", "cc");
-	return result>=0 ? 1 : 0;
+	asm volatile("# beginning __down_read_trylock\n\t"
+		     "  movl      %0,%1\n\t"
+		     "1:\n\t"
+		     "  movl	     %1,%2\n\t"
+		     "  addl      %3,%2\n\t"
+		     "  jle	     2f\n\t"
+		     LOCK_PREFIX "  cmpxchgl  %2,%0\n\t"
+		     "  jnz	     1b\n\t"
+		     "2:\n\t"
+		     "# ending __down_read_trylock\n\t"
+		     : "+m" (sem->count), "=&a" (result), "=&r" (tmp)
+		     : "i" (RWSEM_ACTIVE_READ_BIAS)
+		     : "memory", "cc");
+	return result >= 0 ? 1 : 0;
 }
 
 /*
@@ -143,17 +146,18 @@
 	int tmp;
 
 	tmp = RWSEM_ACTIVE_WRITE_BIAS;
-	__asm__ __volatile__(
-		"# beginning down_write\n\t"
-LOCK_PREFIX	"  xadd      %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */
-		"  testl     %%edx,%%edx\n\t" /* was the count 0 before? */
-		"  jz        1f\n"
-		"  call call_rwsem_down_write_failed\n"
-		"1:\n"
-		"# ending down_write"
-		: "+m" (sem->count), "=d" (tmp)
-		: "a" (sem), "1" (tmp)
-		: "memory", "cc");
+	asm volatile("# beginning down_write\n\t"
+		     LOCK_PREFIX "  xadd      %%edx,(%%eax)\n\t"
+		     /* subtract 0x0000ffff, returns the old value */
+		     "  testl     %%edx,%%edx\n\t"
+		     /* was the count 0 before? */
+		     "  jz        1f\n"
+		     "  call call_rwsem_down_write_failed\n"
+		     "1:\n"
+		     "# ending down_write"
+		     : "+m" (sem->count), "=d" (tmp)
+		     : "a" (sem), "1" (tmp)
+		     : "memory", "cc");
 }
 
 static inline void __down_write(struct rw_semaphore *sem)
@@ -167,7 +171,7 @@
 static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
 	signed long ret = cmpxchg(&sem->count,
-				  RWSEM_UNLOCKED_VALUE, 
+				  RWSEM_UNLOCKED_VALUE,
 				  RWSEM_ACTIVE_WRITE_BIAS);
 	if (ret == RWSEM_UNLOCKED_VALUE)
 		return 1;
@@ -180,16 +184,16 @@
 static inline void __up_read(struct rw_semaphore *sem)
 {
 	__s32 tmp = -RWSEM_ACTIVE_READ_BIAS;
-	__asm__ __volatile__(
-		"# beginning __up_read\n\t"
-LOCK_PREFIX	"  xadd      %%edx,(%%eax)\n\t" /* subtracts 1, returns the old value */
-		"  jns        1f\n\t"
-		"  call call_rwsem_wake\n"
-		"1:\n"
-		"# ending __up_read\n"
-		: "+m" (sem->count), "=d" (tmp)
-		: "a" (sem), "1" (tmp)
-		: "memory", "cc");
+	asm volatile("# beginning __up_read\n\t"
+		     LOCK_PREFIX "  xadd      %%edx,(%%eax)\n\t"
+		     /* subtracts 1, returns the old value */
+		     "  jns        1f\n\t"
+		     "  call call_rwsem_wake\n"
+		     "1:\n"
+		     "# ending __up_read\n"
+		     : "+m" (sem->count), "=d" (tmp)
+		     : "a" (sem), "1" (tmp)
+		     : "memory", "cc");
 }
 
 /*
@@ -197,17 +201,18 @@
  */
 static inline void __up_write(struct rw_semaphore *sem)
 {
-	__asm__ __volatile__(
-		"# beginning __up_write\n\t"
-		"  movl      %2,%%edx\n\t"
-LOCK_PREFIX	"  xaddl     %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */
-		"  jz       1f\n"
-		"  call call_rwsem_wake\n"
-		"1:\n\t"
-		"# ending __up_write\n"
-		: "+m" (sem->count)
-		: "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS)
-		: "memory", "cc", "edx");
+	asm volatile("# beginning __up_write\n\t"
+		     "  movl      %2,%%edx\n\t"
+		     LOCK_PREFIX "  xaddl     %%edx,(%%eax)\n\t"
+		     /* tries to transition 0xffff0001 -> 0x00000000 */
+		     "  jz       1f\n"
+		     "  call call_rwsem_wake\n"
+		     "1:\n\t"
+		     "# ending __up_write\n"
+		     : "+m" (sem->count)
+		     : "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS)
+		     : "memory", "cc", "edx");
 }
 
 /*
@@ -215,16 +220,16 @@
  */
 static inline void __downgrade_write(struct rw_semaphore *sem)
 {
-	__asm__ __volatile__(
-		"# beginning __downgrade_write\n\t"
-LOCK_PREFIX	"  addl      %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
-		"  jns       1f\n\t"
-		"  call call_rwsem_downgrade_wake\n"
-		"1:\n\t"
-		"# ending __downgrade_write\n"
-		: "+m" (sem->count)
-		: "a" (sem), "i" (-RWSEM_WAITING_BIAS)
-		: "memory", "cc");
+	asm volatile("# beginning __downgrade_write\n\t"
+		     LOCK_PREFIX "  addl      %2,(%%eax)\n\t"
+		     /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
+		     "  jns       1f\n\t"
+		     "  call call_rwsem_downgrade_wake\n"
+		     "1:\n\t"
+		     "# ending __downgrade_write\n"
+		     : "+m" (sem->count)
+		     : "a" (sem), "i" (-RWSEM_WAITING_BIAS)
+		     : "memory", "cc");
 }
 
 /*
@@ -232,10 +237,9 @@
  */
 static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
 {
-	__asm__ __volatile__(
-LOCK_PREFIX	"addl %1,%0"
-		: "+m" (sem->count)
-		: "ir" (delta));
+	asm volatile(LOCK_PREFIX "addl %1,%0"
+		     : "+m" (sem->count)
+		     : "ir" (delta));
 }
 
 /*
@@ -245,12 +249,11 @@
 {
 	int tmp = delta;
 
-	__asm__ __volatile__(
-LOCK_PREFIX	"xadd %0,%1"
-		: "+r" (tmp), "+m" (sem->count)
-		: : "memory");
+	asm volatile(LOCK_PREFIX "xadd %0,%1"
+		     : "+r" (tmp), "+m" (sem->count)
+		     : : "memory");
 
-	return tmp+delta;
+	return tmp + delta;
 }
 
 static inline int rwsem_is_locked(struct rw_semaphore *sem)
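The rwsem fastpaths above only leave the inline asm when the bias arithmetic crosses zero. A hedged plain-C rendering of __down_read, with a hypothetical slow-path name:

extern void rwsem_down_read_failed_sketch(struct rw_semaphore *sem);

static inline void down_read_sketch(struct rw_semaphore *sem)
{
	/* "incl ; jns": call out of line only if the count went negative */
	if (__sync_add_and_fetch(&sem->count, RWSEM_ACTIVE_READ_BIAS) < 0)
		rwsem_down_read_failed_sketch(sem);
}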
diff --git a/include/asm-x86/segment.h b/include/asm-x86/segment.h
index 23f0535..ed5131d 100644
--- a/include/asm-x86/segment.h
+++ b/include/asm-x86/segment.h
@@ -191,13 +191,14 @@
 #define SEGMENT_TI_MASK		0x4
 
 #define IDT_ENTRIES 256
+#define NUM_EXCEPTION_VECTORS 32
 #define GDT_SIZE (GDT_ENTRIES * 8)
 #define GDT_ENTRY_TLS_ENTRIES 3
 #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
 
 #ifdef __KERNEL__
 #ifndef __ASSEMBLY__
-extern const char early_idt_handlers[IDT_ENTRIES][10];
+extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][10];
 #endif
 #endif
 
diff --git a/include/asm-x86/semaphore.h b/include/asm-x86/semaphore.h
index 572c0b6..d9b2034 100644
--- a/include/asm-x86/semaphore.h
+++ b/include/asm-x86/semaphore.h
@@ -1,5 +1 @@
-#ifdef CONFIG_X86_32
-# include "semaphore_32.h"
-#else
-# include "semaphore_64.h"
-#endif
+#include <linux/semaphore.h>
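The per-arch implementations deleted below are replaced by the generic counting semaphore; a minimal usage sketch against the generic API (demo names are illustrative):

#include <linux/semaphore.h>

static struct semaphore demo_sem;

static void sem_usage_sketch(void)
{
	sema_init(&demo_sem, 1);	/* count of 1: mutex-style */
	down(&demo_sem);		/* may sleep */
	/* ... critical section ... */
	up(&demo_sem);
}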
diff --git a/include/asm-x86/semaphore_32.h b/include/asm-x86/semaphore_32.h
deleted file mode 100644
index ac96d38..0000000
--- a/include/asm-x86/semaphore_32.h
+++ /dev/null
@@ -1,175 +0,0 @@
-#ifndef _I386_SEMAPHORE_H
-#define _I386_SEMAPHORE_H
-
-#include <linux/linkage.h>
-
-#ifdef __KERNEL__
-
-/*
- * SMP- and interrupt-safe semaphores..
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- * Modified 1996-12-23 by Dave Grothe <dave@gcom.com> to fix bugs in
- *                     the original code and to make semaphore waits
- *                     interruptible so that processes waiting on
- *                     semaphores can be killed.
- * Modified 1999-02-14 by Andrea Arcangeli, split the sched.c helper
- *		       functions in asm/sempahore-helper.h while fixing a
- *		       potential and subtle race discovered by Ulrich Schmid
- *		       in down_interruptible(). Since I started to play here I
- *		       also implemented the `trylock' semaphore operation.
- *          1999-07-02 Artur Skawina <skawina@geocities.com>
- *                     Optimized "0(ecx)" -> "(ecx)" (the assembler does not
- *                     do this). Changed calling sequences from push/jmp to
- *                     traditional call/ret.
- * Modified 2001-01-01 Andreas Franck <afranck@gmx.de>
- *		       Some hacks to ensure compatibility with recent
- *		       GCC snapshots, to avoid stack corruption when compiling
- *		       with -fomit-frame-pointer. It's not sure if this will
- *		       be fixed in GCC, as our previous implementation was a
- *		       bit dubious.
- *
- * If you would like to see an analysis of this implementation, please
- * ftp to gcom.com and download the file
- * /pub/linux/src/semaphore/semaphore-2.0.24.tar.gz.
- *
- */
-
-#include <asm/system.h>
-#include <asm/atomic.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-struct semaphore {
-	atomic_t count;
-	int sleepers;
-	wait_queue_head_t wait;
-};
-
-
-#define __SEMAPHORE_INITIALIZER(name, n)				\
-{									\
-	.count		= ATOMIC_INIT(n),				\
-	.sleepers	= 0,						\
-	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)	\
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
-	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
-/*
- *	*sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
- *
- * i'd rather use the more flexible initialization above, but sadly
- * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well.
- */
-	atomic_set(&sem->count, val);
-	sem->sleepers = 0;
-	init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
-	sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
-	sema_init(sem, 0);
-}
-
-extern asmregparm void __down_failed(atomic_t *count_ptr);
-extern asmregparm int  __down_failed_interruptible(atomic_t *count_ptr);
-extern asmregparm int  __down_failed_trylock(atomic_t *count_ptr);
-extern asmregparm void __up_wakeup(atomic_t *count_ptr);
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "__down_failed" is a special asm handler that calls the C
- * routine that actually waits. See arch/i386/kernel/semaphore.c
- */
-static inline void down(struct semaphore * sem)
-{
-	might_sleep();
-	__asm__ __volatile__(
-		"# atomic down operation\n\t"
-		LOCK_PREFIX "decl %0\n\t"     /* --sem->count */
-		"jns 2f\n"
-		"\tlea %0,%%eax\n\t"
-		"call __down_failed\n"
-		"2:"
-		:"+m" (sem->count)
-		:
-		:"memory","ax");
-}
-
-/*
- * Interruptible try to acquire a semaphore.  If we obtained
- * it, return zero.  If we were interrupted, returns -EINTR
- */
-static inline int down_interruptible(struct semaphore * sem)
-{
-	int result;
-
-	might_sleep();
-	__asm__ __volatile__(
-		"# atomic interruptible down operation\n\t"
-		"xorl %0,%0\n\t"
-		LOCK_PREFIX "decl %1\n\t"     /* --sem->count */
-		"jns 2f\n\t"
-		"lea %1,%%eax\n\t"
-		"call __down_failed_interruptible\n"
-		"2:"
-		:"=&a" (result), "+m" (sem->count)
-		:
-		:"memory");
-	return result;
-}
-
-/*
- * Non-blockingly attempt to down() a semaphore.
- * Returns zero if we acquired it
- */
-static inline int down_trylock(struct semaphore * sem)
-{
-	int result;
-
-	__asm__ __volatile__(
-		"# atomic interruptible down operation\n\t"
-		"xorl %0,%0\n\t"
-		LOCK_PREFIX "decl %1\n\t"     /* --sem->count */
-		"jns 2f\n\t"
-		"lea %1,%%eax\n\t"
-		"call __down_failed_trylock\n\t"
-		"2:\n"
-		:"=&a" (result), "+m" (sem->count)
-		:
-		:"memory");
-	return result;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- */
-static inline void up(struct semaphore * sem)
-{
-	__asm__ __volatile__(
-		"# atomic up operation\n\t"
-		LOCK_PREFIX "incl %0\n\t"     /* ++sem->count */
-		"jg 1f\n\t"
-		"lea %0,%%eax\n\t"
-		"call __up_wakeup\n"
-		"1:"
-		:"+m" (sem->count)
-		:
-		:"memory","ax");
-}
-
-#endif
-#endif
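What the deleted asm fastpaths encode, restated as a hedged C sketch (the slow-path names are hypothetical): the out-of-line helpers run only when the count crosses zero.

extern void down_slowpath_sketch(struct semaphore *sem);
extern void up_wakeup_sketch(struct semaphore *sem);

static inline void down_sketch(struct semaphore *sem)
{
	if (atomic_dec_return(&sem->count) < 0)		/* "decl ; jns" */
		down_slowpath_sketch(sem);
}

static inline void up_sketch(struct semaphore *sem)
{
	if (atomic_inc_return(&sem->count) <= 0)	/* "incl ; jg" */
		up_wakeup_sketch(sem);
}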
diff --git a/include/asm-x86/semaphore_64.h b/include/asm-x86/semaphore_64.h
deleted file mode 100644
index 7969430..0000000
--- a/include/asm-x86/semaphore_64.h
+++ /dev/null
@@ -1,180 +0,0 @@
-#ifndef _X86_64_SEMAPHORE_H
-#define _X86_64_SEMAPHORE_H
-
-#include <linux/linkage.h>
-
-#ifdef __KERNEL__
-
-/*
- * SMP- and interrupt-safe semaphores..
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- * Modified 1996-12-23 by Dave Grothe <dave@gcom.com> to fix bugs in
- *                     the original code and to make semaphore waits
- *                     interruptible so that processes waiting on
- *                     semaphores can be killed.
- * Modified 1999-02-14 by Andrea Arcangeli, split the sched.c helper
- *		       functions in asm/sempahore-helper.h while fixing a
- *		       potential and subtle race discovered by Ulrich Schmid
- *		       in down_interruptible(). Since I started to play here I
- *		       also implemented the `trylock' semaphore operation.
- *          1999-07-02 Artur Skawina <skawina@geocities.com>
- *                     Optimized "0(ecx)" -> "(ecx)" (the assembler does not
- *                     do this). Changed calling sequences from push/jmp to
- *                     traditional call/ret.
- * Modified 2001-01-01 Andreas Franck <afranck@gmx.de>
- *		       Some hacks to ensure compatibility with recent
- *		       GCC snapshots, to avoid stack corruption when compiling
- *		       with -fomit-frame-pointer. It's not sure if this will
- *		       be fixed in GCC, as our previous implementation was a
- *		       bit dubious.
- *
- * If you would like to see an analysis of this implementation, please
- * ftp to gcom.com and download the file
- * /pub/linux/src/semaphore/semaphore-2.0.24.tar.gz.
- *
- */
-
-#include <asm/system.h>
-#include <asm/atomic.h>
-#include <asm/rwlock.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-#include <linux/stringify.h>
-
-struct semaphore {
-	atomic_t count;
-	int sleepers;
-	wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n)				\
-{									\
-	.count		= ATOMIC_INIT(n),				\
-	.sleepers	= 0,						\
-	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)	\
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
-	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
-/*
- *	*sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
- *
- * i'd rather use the more flexible initialization above, but sadly
- * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well.
- */
-	atomic_set(&sem->count, val);
-	sem->sleepers = 0;
-	init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
-	sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
-	sema_init(sem, 0);
-}
-
-asmlinkage void __down_failed(void /* special register calling convention */);
-asmlinkage int  __down_failed_interruptible(void  /* params in registers */);
-asmlinkage int  __down_failed_trylock(void  /* params in registers */);
-asmlinkage void __up_wakeup(void /* special register calling convention */);
-
-asmlinkage void __down(struct semaphore * sem);
-asmlinkage int  __down_interruptible(struct semaphore * sem);
-asmlinkage int  __down_trylock(struct semaphore * sem);
-asmlinkage void __up(struct semaphore * sem);
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "__down_failed" is a special asm handler that calls the C
- * routine that actually waits. See arch/x86_64/kernel/semaphore.c
- */
-static inline void down(struct semaphore * sem)
-{
-	might_sleep();
-
-	__asm__ __volatile__(
-		"# atomic down operation\n\t"
-		LOCK_PREFIX "decl %0\n\t"     /* --sem->count */
-		"jns 1f\n\t"
-		"call __down_failed\n"
-		"1:"
-		:"=m" (sem->count)
-		:"D" (sem)
-		:"memory");
-}
-
-/*
- * Interruptible try to acquire a semaphore.  If we obtained
- * it, return zero.  If we were interrupted, returns -EINTR
- */
-static inline int down_interruptible(struct semaphore * sem)
-{
-	int result;
-
-	might_sleep();
-
-	__asm__ __volatile__(
-		"# atomic interruptible down operation\n\t"
-		"xorl %0,%0\n\t"
-		LOCK_PREFIX "decl %1\n\t"     /* --sem->count */
-		"jns 2f\n\t"
-		"call __down_failed_interruptible\n"
-		"2:\n"
-		:"=&a" (result), "=m" (sem->count)
-		:"D" (sem)
-		:"memory");
-	return result;
-}
-
-/*
- * Non-blockingly attempt to down() a semaphore.
- * Returns zero if we acquired it
- */
-static inline int down_trylock(struct semaphore * sem)
-{
-	int result;
-
-	__asm__ __volatile__(
-		"# atomic interruptible down operation\n\t"
-		"xorl %0,%0\n\t"
-		LOCK_PREFIX "decl %1\n\t"     /* --sem->count */
-		"jns 2f\n\t"
-		"call __down_failed_trylock\n\t"
-		"2:\n"
-		:"=&a" (result), "=m" (sem->count)
-		:"D" (sem)
-		:"memory","cc");
-	return result;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-static inline void up(struct semaphore * sem)
-{
-	__asm__ __volatile__(
-		"# atomic up operation\n\t"
-		LOCK_PREFIX "incl %0\n\t"     /* ++sem->count */
-		"jg 1f\n\t"
-		"call __up_wakeup\n"
-		"1:"
-		:"=m" (sem->count)
-		:"D" (sem)
-		:"memory");
-}
-#endif /* __KERNEL__ */
-#endif
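
The asm fast paths in the header removed above follow the classic counting-semaphore
protocol: an atomic decrement that branches to the out-of-line slow path only when
the count goes negative. A minimal user-space sketch of the same logic, assuming C11
atomics in place of the LOCK-prefixed instructions; sem_down_slow()/sem_up_slow()
are hypothetical stand-ins for __down_failed()/__up_wakeup():

#include <stdatomic.h>

struct sketch_sem {
	atomic_int count;	/* > 0: free slots, < 0: sleepers waiting */
};

static void sem_down_slow(struct sketch_sem *s) { /* sleep until woken */ }
static void sem_up_slow(struct sketch_sem *s)   { /* wake one sleeper */ }

static void sketch_down(struct sketch_sem *s)
{
	/* "LOCK decl; jns 1f": take the slow path only when the
	 * decrement drove the count negative, i.e. under contention. */
	if (atomic_fetch_sub(&s->count, 1) <= 0)
		sem_down_slow(s);
}

static void sketch_up(struct sketch_sem *s)
{
	/* "LOCK incl; jg 1f": wake a sleeper only when the count was
	 * negative before the increment (somebody was waiting). */
	if (atomic_fetch_add(&s->count, 1) < 0)
		sem_up_slow(s);
}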
diff --git a/include/asm-x86/setup.h b/include/asm-x86/setup.h
index 071e054..fa6763a 100644
--- a/include/asm-x86/setup.h
+++ b/include/asm-x86/setup.h
@@ -4,6 +4,10 @@
 #define COMMAND_LINE_SIZE 2048
 
 #ifndef __ASSEMBLY__
+
+/* Interrupt control for vSMPowered x86_64 systems */
+void vsmp_init(void);
+
 char *machine_specific_memory_setup(void);
 #ifndef CONFIG_PARAVIRT
 #define paravirt_post_allocator_init()	do {} while (0)
@@ -51,8 +55,8 @@
 char * __init machine_specific_memory_setup(void);
 char *memory_setup(void);
 
-int __init copy_e820_map(struct e820entry * biosmap, int nr_map);
-int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map);
+int __init copy_e820_map(struct e820entry *biosmap, int nr_map);
+int __init sanitize_e820_map(struct e820entry *biosmap, char *pnr_map);
 void __init add_memory_region(unsigned long long start,
 			      unsigned long long size, int type);
 
diff --git a/include/asm-x86/sigcontext.h b/include/asm-x86/sigcontext.h
index d743947..2f9c884 100644
--- a/include/asm-x86/sigcontext.h
+++ b/include/asm-x86/sigcontext.h
@@ -79,7 +79,7 @@
 	unsigned long flags;
 	unsigned long sp_at_signal;
 	unsigned short ss, __ssh;
-	struct _fpstate __user * fpstate;
+	struct _fpstate __user *fpstate;
 	unsigned long oldmask;
 	unsigned long cr2;
 };
@@ -107,7 +107,7 @@
 	unsigned long eflags;
 	unsigned long esp_at_signal;
 	unsigned short ss, __ssh;
-	struct _fpstate __user * fpstate;
+	struct _fpstate __user *fpstate;
 	unsigned long oldmask;
 	unsigned long cr2;
 };
@@ -121,7 +121,8 @@
 struct _fpstate {
 	__u16	cwd;
 	__u16	swd;
-	__u16	twd;	/* Note this is not the same as the 32bit/x87/FSAVE twd */
+	__u16	twd;		/* Note this is not the same as the
+				   32bit/x87/FSAVE twd */
 	__u16	fop;
 	__u64	rip;
 	__u64	rdp;
diff --git a/include/asm-x86/sigcontext32.h b/include/asm-x86/sigcontext32.h
index 6ffab4f..57a9686 100644
--- a/include/asm-x86/sigcontext32.h
+++ b/include/asm-x86/sigcontext32.h
@@ -26,7 +26,7 @@
 	__u32 	cw;
 	__u32	sw;
 	__u32	tag;	/* not compatible to 64bit twd */
-	__u32	ipoff;			
+	__u32	ipoff;
 	__u32	cssel;
 	__u32	dataoff;
 	__u32	datasel;
@@ -39,7 +39,7 @@
 	__u32	mxcsr;
 	__u32	reserved;
 	struct _fpxreg	_fxsr_st[8];
-	struct _xmmreg	_xmm[8];	/* It's actually 16 */ 
+	struct _xmmreg	_xmm[8];	/* It's actually 16 */
 	__u32	padding[56];
 };
 
diff --git a/include/asm-x86/signal.h b/include/asm-x86/signal.h
index aee7eca..f15186d 100644
--- a/include/asm-x86/signal.h
+++ b/include/asm-x86/signal.h
@@ -185,61 +185,61 @@
 
 #define __HAVE_ARCH_SIG_BITOPS
 
-#define sigaddset(set,sig)		   \
-	(__builtin_constantp(sig) ?	   \
-	 __const_sigaddset((set),(sig)) :  \
-	 __gen_sigaddset((set),(sig)))
+#define sigaddset(set,sig)		    \
+	(__builtin_constant_p(sig)	    \
+	 ? __const_sigaddset((set), (sig))  \
+	 : __gen_sigaddset((set), (sig)))
 
-static __inline__ void __gen_sigaddset(sigset_t *set, int _sig)
+static inline void __gen_sigaddset(sigset_t *set, int _sig)
 {
-	__asm__("btsl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc");
+	asm("btsl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc");
 }
 
-static __inline__ void __const_sigaddset(sigset_t *set, int _sig)
+static inline void __const_sigaddset(sigset_t *set, int _sig)
 {
 	unsigned long sig = _sig - 1;
 	set->sig[sig / _NSIG_BPW] |= 1 << (sig % _NSIG_BPW);
 }
 
-#define sigdelset(set,sig)		   \
-	(__builtin_constant_p(sig) ?       \
-	 __const_sigdelset((set),(sig)) :  \
-	 __gen_sigdelset((set),(sig)))
+#define sigdelset(set, sig)		    \
+	(__builtin_constant_p(sig)	    \
+	 ? __const_sigdelset((set), (sig))  \
+	 : __gen_sigdelset((set), (sig)))
 
 
-static __inline__ void __gen_sigdelset(sigset_t *set, int _sig)
+static inline void __gen_sigdelset(sigset_t *set, int _sig)
 {
-	__asm__("btrl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc");
+	asm("btrl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc");
 }
 
-static __inline__ void __const_sigdelset(sigset_t *set, int _sig)
+static inline void __const_sigdelset(sigset_t *set, int _sig)
 {
 	unsigned long sig = _sig - 1;
 	set->sig[sig / _NSIG_BPW] &= ~(1 << (sig % _NSIG_BPW));
 }
 
-static __inline__ int __const_sigismember(sigset_t *set, int _sig)
+static inline int __const_sigismember(sigset_t *set, int _sig)
 {
 	unsigned long sig = _sig - 1;
 	return 1 & (set->sig[sig / _NSIG_BPW] >> (sig % _NSIG_BPW));
 }
 
-static __inline__ int __gen_sigismember(sigset_t *set, int _sig)
+static inline int __gen_sigismember(sigset_t *set, int _sig)
 {
 	int ret;
-	__asm__("btl %2,%1\n\tsbbl %0,%0"
-		: "=r"(ret) : "m"(*set), "Ir"(_sig-1) : "cc");
+	asm("btl %2,%1\n\tsbbl %0,%0"
+	    : "=r"(ret) : "m"(*set), "Ir"(_sig-1) : "cc");
 	return ret;
 }
 
-#define sigismember(set,sig)			\
-	(__builtin_constant_p(sig) ?		\
-	 __const_sigismember((set),(sig)) :	\
-	 __gen_sigismember((set),(sig)))
+#define sigismember(set, sig)			\
+	(__builtin_constant_p(sig)		\
+	 ? __const_sigismember((set), (sig))	\
+	 : __gen_sigismember((set), (sig)))
 
-static __inline__ int sigfindinword(unsigned long word)
+static inline int sigfindinword(unsigned long word)
 {
-	__asm__("bsfl %1,%0" : "=r"(word) : "rm"(word) : "cc");
+	asm("bsfl %1,%0" : "=r"(word) : "rm"(word) : "cc");
 	return word;
 }
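
The three dispatch macros above share one pattern: __builtin_constant_p() selects a
constant-foldable C body when the signal number is known at compile time, and a
single bt-family instruction otherwise. A condensed sketch of that pattern, with
illustrative names that are not kernel APIs:

/* word_set_bit() folds to a plain OR for constant bit numbers and
 * drops to one btsl for runtime values. */
static inline void word_set_bit_asm(unsigned long *word, int bit)
{
	asm("btsl %1,%0" : "+m" (*word) : "Ir" (bit) : "cc");
}

#define word_set_bit(word, bit)				\
	(__builtin_constant_p(bit)			\
	 ? (void)(*(word) |= 1UL << (bit))		\
	 : word_set_bit_asm((word), (bit)))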
 
diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h
index f2e8319..62ebdec3 100644
--- a/include/asm-x86/smp.h
+++ b/include/asm-x86/smp.h
@@ -1,5 +1,209 @@
-#ifdef CONFIG_X86_32
-# include "smp_32.h"
+#ifndef _ASM_X86_SMP_H_
+#define _ASM_X86_SMP_H_
+#ifndef __ASSEMBLY__
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <asm/percpu.h>
+
+/*
+ * We need the APIC definitions automatically as part of 'smp.h'
+ */
+#ifdef CONFIG_X86_LOCAL_APIC
+# include <asm/mpspec.h>
+# include <asm/apic.h>
+# ifdef CONFIG_X86_IO_APIC
+#  include <asm/io_apic.h>
+# endif
+#endif
+#include <asm/pda.h>
+#include <asm/thread_info.h>
+
+extern cpumask_t cpu_callout_map;
+extern cpumask_t cpu_initialized;
+extern cpumask_t cpu_callin_map;
+
+extern void (*mtrr_hook)(void);
+extern void zap_low_mappings(void);
+
+extern int smp_num_siblings;
+extern unsigned int num_processors;
+extern cpumask_t cpu_initialized;
+
+#ifdef CONFIG_SMP
+extern u16 x86_cpu_to_apicid_init[];
+extern u16 x86_bios_cpu_apicid_init[];
+extern void *x86_cpu_to_apicid_early_ptr;
+extern void *x86_bios_cpu_apicid_early_ptr;
 #else
-# include "smp_64.h"
+#define x86_cpu_to_apicid_early_ptr NULL
+#define x86_bios_cpu_apicid_early_ptr NULL
+#endif
+
+DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
+DECLARE_PER_CPU(cpumask_t, cpu_core_map);
+DECLARE_PER_CPU(u16, cpu_llc_id);
+DECLARE_PER_CPU(u16, x86_cpu_to_apicid);
+DECLARE_PER_CPU(u16, x86_bios_cpu_apicid);
+
+/* Static state in head.S used to set up a CPU */
+extern struct {
+	void *sp;
+	unsigned short ss;
+} stack_start;
+
+struct smp_ops {
+	void (*smp_prepare_boot_cpu)(void);
+	void (*smp_prepare_cpus)(unsigned max_cpus);
+	int (*cpu_up)(unsigned cpu);
+	void (*smp_cpus_done)(unsigned max_cpus);
+
+	void (*smp_send_stop)(void);
+	void (*smp_send_reschedule)(int cpu);
+	int (*smp_call_function_mask)(cpumask_t mask,
+				      void (*func)(void *info), void *info,
+				      int wait);
+};
+
+/* Globals due to paravirt */
+extern void set_cpu_sibling_map(int cpu);
+
+#ifdef CONFIG_SMP
+#ifndef CONFIG_PARAVIRT
+#define startup_ipi_hook(phys_apicid, start_eip, start_esp) do { } while (0)
+#endif
+extern struct smp_ops smp_ops;
+
+static inline void smp_send_stop(void)
+{
+	smp_ops.smp_send_stop();
+}
+
+static inline void smp_prepare_boot_cpu(void)
+{
+	smp_ops.smp_prepare_boot_cpu();
+}
+
+static inline void smp_prepare_cpus(unsigned int max_cpus)
+{
+	smp_ops.smp_prepare_cpus(max_cpus);
+}
+
+static inline void smp_cpus_done(unsigned int max_cpus)
+{
+	smp_ops.smp_cpus_done(max_cpus);
+}
+
+static inline int __cpu_up(unsigned int cpu)
+{
+	return smp_ops.cpu_up(cpu);
+}
+
+static inline void smp_send_reschedule(int cpu)
+{
+	smp_ops.smp_send_reschedule(cpu);
+}
+
+static inline int smp_call_function_mask(cpumask_t mask,
+					 void (*func) (void *info), void *info,
+					 int wait)
+{
+	return smp_ops.smp_call_function_mask(mask, func, info, wait);
+}
+
+void native_smp_prepare_boot_cpu(void);
+void native_smp_prepare_cpus(unsigned int max_cpus);
+void native_smp_cpus_done(unsigned int max_cpus);
+int native_cpu_up(unsigned int cpunum);
+
+extern int __cpu_disable(void);
+extern void __cpu_die(unsigned int cpu);
+
+extern void prefill_possible_map(void);
+
+void smp_store_cpu_info(int id);
+#define cpu_physical_id(cpu)	per_cpu(x86_cpu_to_apicid, cpu)
+
+/* We don't mark CPUs online until __cpu_up(), so we need another measure */
+static inline int num_booting_cpus(void)
+{
+	return cpus_weight(cpu_callout_map);
+}
+#endif /* CONFIG_SMP */
+
+extern unsigned disabled_cpus __cpuinitdata;
+
+#ifdef CONFIG_X86_32_SMP
+/*
+ * This function is needed by all SMP systems. It must _always_ be valid
+ * from the initial startup. We map APIC_BASE very early in page_setup(),
+ * so this is correct in the x86 case.
+ */
+DECLARE_PER_CPU(int, cpu_number);
+#define raw_smp_processor_id() (x86_read_percpu(cpu_number))
+extern int safe_smp_processor_id(void);
+
+#elif defined(CONFIG_X86_64_SMP)
+#define raw_smp_processor_id()	read_pda(cpunumber)
+
+#define stack_smp_processor_id()					\
+({								\
+	struct thread_info *ti;						\
+	__asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK));	\
+	ti->cpu;							\
+})
+#define safe_smp_processor_id()		smp_processor_id()
+
+#else /* !CONFIG_X86_32_SMP && !CONFIG_X86_64_SMP */
+#define cpu_physical_id(cpu)		boot_cpu_physical_apicid
+#define safe_smp_processor_id()		0
+#define stack_smp_processor_id() 	0
+#endif
+
+#ifdef CONFIG_X86_LOCAL_APIC
+
+static inline int logical_smp_processor_id(void)
+{
+	/* we don't want to mark this access volatile - bad code generation */
+	return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR));
+}
+
+#ifndef CONFIG_X86_64
+static inline unsigned int read_apic_id(void)
+{
+	return *(u32 *)(APIC_BASE + APIC_ID);
+}
+#else
+extern unsigned int read_apic_id(void);
+#endif
+
+
+# ifdef APIC_DEFINITION
+extern int hard_smp_processor_id(void);
+# else
+#  include <mach_apicdef.h>
+static inline int hard_smp_processor_id(void)
+{
+	/* we don't want to mark this access volatile - bad code generation */
+	return GET_APIC_ID(read_apic_id());
+}
+# endif /* APIC_DEFINITION */
+
+#else /* CONFIG_X86_LOCAL_APIC */
+
+# ifndef CONFIG_SMP
+#  define hard_smp_processor_id()	0
+# endif
+
+#endif /* CONFIG_X86_LOCAL_APIC */
+
+#ifdef CONFIG_HOTPLUG_CPU
+extern void cpu_exit_clear(void);
+extern void cpu_uninit(void);
+extern void remove_siblinginfo(int cpu);
+#endif
+
+extern void smp_alloc_memory(void);
+extern void lock_ipi_call_lock(void);
+extern void unlock_ipi_call_lock(void);
+#endif /* __ASSEMBLY__ */
 #endif
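
The point of routing everything through the smp_ops table above is that a paravirt
backend can substitute its own entry points at boot while the inline wrappers stay
unchanged. A hedged sketch of such an override; pv_send_reschedule() and
pv_smp_setup() are assumptions for illustration, and the remaining hooks are elided:

static void pv_send_reschedule(int cpu)
{
	/* e.g. issue a hypercall instead of a local APIC IPI */
}

static void __init pv_smp_setup(void)
{
	smp_ops = (struct smp_ops) {
		.smp_prepare_boot_cpu	= native_smp_prepare_boot_cpu,
		.smp_prepare_cpus	= native_smp_prepare_cpus,
		.smp_cpus_done		= native_smp_cpus_done,
		.cpu_up			= native_cpu_up,
		.smp_send_reschedule	= pv_send_reschedule,
		/* .smp_send_stop, .smp_call_function_mask elided */
	};
}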
diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h
deleted file mode 100644
index 56152e3..0000000
--- a/include/asm-x86/smp_32.h
+++ /dev/null
@@ -1,165 +0,0 @@
-#ifndef __ASM_SMP_H
-#define __ASM_SMP_H
-
-#ifndef __ASSEMBLY__
-#include <linux/cpumask.h>
-#include <linux/init.h>
-
-/*
- * We need the APIC definitions automatically as part of 'smp.h'
- */
-#ifdef CONFIG_X86_LOCAL_APIC
-# include <asm/mpspec.h>
-# include <asm/apic.h>
-# ifdef CONFIG_X86_IO_APIC
-#  include <asm/io_apic.h>
-# endif
-#endif
-
-extern cpumask_t cpu_callout_map;
-extern cpumask_t cpu_callin_map;
-
-extern int smp_num_siblings;
-extern unsigned int num_processors;
-
-extern void smp_alloc_memory(void);
-extern void lock_ipi_call_lock(void);
-extern void unlock_ipi_call_lock(void);
-
-extern void (*mtrr_hook) (void);
-extern void zap_low_mappings (void);
-
-extern u8 __initdata x86_cpu_to_apicid_init[];
-extern void *x86_cpu_to_apicid_early_ptr;
-
-DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
-DECLARE_PER_CPU(cpumask_t, cpu_core_map);
-DECLARE_PER_CPU(u8, cpu_llc_id);
-DECLARE_PER_CPU(u8, x86_cpu_to_apicid);
-
-#ifdef CONFIG_HOTPLUG_CPU
-extern void cpu_exit_clear(void);
-extern void cpu_uninit(void);
-extern void remove_siblinginfo(int cpu);
-#endif
-
-/* Globals due to paravirt */
-extern void set_cpu_sibling_map(int cpu);
-
-struct smp_ops
-{
-	void (*smp_prepare_boot_cpu)(void);
-	void (*smp_prepare_cpus)(unsigned max_cpus);
-	int (*cpu_up)(unsigned cpu);
-	void (*smp_cpus_done)(unsigned max_cpus);
-
-	void (*smp_send_stop)(void);
-	void (*smp_send_reschedule)(int cpu);
-	int (*smp_call_function_mask)(cpumask_t mask,
-				      void (*func)(void *info), void *info,
-				      int wait);
-};
-
-#ifdef CONFIG_SMP
-extern struct smp_ops smp_ops;
-
-static inline void smp_prepare_boot_cpu(void)
-{
-	smp_ops.smp_prepare_boot_cpu();
-}
-static inline void smp_prepare_cpus(unsigned int max_cpus)
-{
-	smp_ops.smp_prepare_cpus(max_cpus);
-}
-static inline int __cpu_up(unsigned int cpu)
-{
-	return smp_ops.cpu_up(cpu);
-}
-static inline void smp_cpus_done(unsigned int max_cpus)
-{
-	smp_ops.smp_cpus_done(max_cpus);
-}
-
-static inline void smp_send_stop(void)
-{
-	smp_ops.smp_send_stop();
-}
-static inline void smp_send_reschedule(int cpu)
-{
-	smp_ops.smp_send_reschedule(cpu);
-}
-static inline int smp_call_function_mask(cpumask_t mask,
-					 void (*func) (void *info), void *info,
-					 int wait)
-{
-	return smp_ops.smp_call_function_mask(mask, func, info, wait);
-}
-
-void native_smp_prepare_boot_cpu(void);
-void native_smp_prepare_cpus(unsigned int max_cpus);
-int native_cpu_up(unsigned int cpunum);
-void native_smp_cpus_done(unsigned int max_cpus);
-
-#ifndef CONFIG_PARAVIRT
-#define startup_ipi_hook(phys_apicid, start_eip, start_esp) do { } while (0)
-#endif
-
-extern int __cpu_disable(void);
-extern void __cpu_die(unsigned int cpu);
-
-/*
- * This function is needed by all SMP systems. It must _always_ be valid
- * from the initial startup. We map APIC_BASE very early in page_setup(),
- * so this is correct in the x86 case.
- */
-DECLARE_PER_CPU(int, cpu_number);
-#define raw_smp_processor_id() (x86_read_percpu(cpu_number))
-
-#define cpu_physical_id(cpu)	per_cpu(x86_cpu_to_apicid, cpu)
-
-extern int safe_smp_processor_id(void);
-
-void __cpuinit smp_store_cpu_info(int id);
-
-/* We don't mark CPUs online until __cpu_up(), so we need another measure */
-static inline int num_booting_cpus(void)
-{
-	return cpus_weight(cpu_callout_map);
-}
-
-#else /* CONFIG_SMP */
-
-#define safe_smp_processor_id()		0
-#define cpu_physical_id(cpu)		boot_cpu_physical_apicid
-
-#endif /* !CONFIG_SMP */
-
-#ifdef CONFIG_X86_LOCAL_APIC
-
-static __inline int logical_smp_processor_id(void)
-{
-	/* we don't want to mark this access volatile - bad code generation */
-	return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR));
-}
-
-# ifdef APIC_DEFINITION
-extern int hard_smp_processor_id(void);
-# else
-#  include <mach_apicdef.h>
-static inline int hard_smp_processor_id(void)
-{
-	/* we don't want to mark this access volatile - bad code generation */
-	return GET_APIC_ID(*(u32 *)(APIC_BASE + APIC_ID));
-}
-# endif /* APIC_DEFINITION */
-
-#else /* CONFIG_X86_LOCAL_APIC */
-
-# ifndef CONFIG_SMP
-#  define hard_smp_processor_id()	0
-# endif
-
-#endif /* CONFIG_X86_LOCAL_APIC */
-
-#endif /* !ASSEMBLY */
-#endif
diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h
deleted file mode 100644
index e0a7551..0000000
--- a/include/asm-x86/smp_64.h
+++ /dev/null
@@ -1,101 +0,0 @@
-#ifndef __ASM_SMP_H
-#define __ASM_SMP_H
-
-#include <linux/cpumask.h>
-#include <linux/init.h>
-
-/*
- * We need the APIC definitions automatically as part of 'smp.h'
- */
-#include <asm/apic.h>
-#include <asm/io_apic.h>
-#include <asm/mpspec.h>
-#include <asm/pda.h>
-#include <asm/thread_info.h>
-
-extern cpumask_t cpu_callout_map;
-extern cpumask_t cpu_initialized;
-
-extern int smp_num_siblings;
-extern unsigned int num_processors;
-
-extern void smp_alloc_memory(void);
-extern void lock_ipi_call_lock(void);
-extern void unlock_ipi_call_lock(void);
-
-extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
-				  void *info, int wait);
-
-extern u16 __initdata x86_cpu_to_apicid_init[];
-extern u16 __initdata x86_bios_cpu_apicid_init[];
-extern void *x86_cpu_to_apicid_early_ptr;
-extern void *x86_bios_cpu_apicid_early_ptr;
-
-DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
-DECLARE_PER_CPU(cpumask_t, cpu_core_map);
-DECLARE_PER_CPU(u16, cpu_llc_id);
-DECLARE_PER_CPU(u16, x86_cpu_to_apicid);
-DECLARE_PER_CPU(u16, x86_bios_cpu_apicid);
-
-static inline int cpu_present_to_apicid(int mps_cpu)
-{
-	if (cpu_present(mps_cpu))
-		return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
-	else
-		return BAD_APICID;
-}
-
-#ifdef CONFIG_SMP
-
-#define SMP_TRAMPOLINE_BASE 0x6000
-
-extern int __cpu_disable(void);
-extern void __cpu_die(unsigned int cpu);
-extern void prefill_possible_map(void);
-extern unsigned __cpuinitdata disabled_cpus;
-
-#define raw_smp_processor_id()	read_pda(cpunumber)
-#define cpu_physical_id(cpu)	per_cpu(x86_cpu_to_apicid, cpu)
-
-#define stack_smp_processor_id()					\
-	({								\
-	struct thread_info *ti;						\
-	__asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK));	\
-	ti->cpu;							\
-})
-
-/*
- * On x86 all CPUs are mapped 1:1 to the APIC space. This simplifies
- * scheduling and IPI sending and compresses data structures.
- */
-static inline int num_booting_cpus(void)
-{
-	return cpus_weight(cpu_callout_map);
-}
-
-extern void smp_send_reschedule(int cpu);
-
-#else /* CONFIG_SMP */
-
-extern unsigned int boot_cpu_id;
-#define cpu_physical_id(cpu)	boot_cpu_id
-#define stack_smp_processor_id() 0
-
-#endif /* !CONFIG_SMP */
-
-#define safe_smp_processor_id()		smp_processor_id()
-
-static __inline int logical_smp_processor_id(void)
-{
-	/* we don't want to mark this access volatile - bad code generation */
-	return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR));
-}
-
-static inline int hard_smp_processor_id(void)
-{
-	/* we don't want to mark this access volatile - bad code generation */
-	return GET_APIC_ID(*(u32 *)(APIC_BASE + APIC_ID));
-}
-
-#endif
-
diff --git a/include/asm-x86/sparsemem.h b/include/asm-x86/sparsemem.h
index fa58cd5..9bd48b0 100644
--- a/include/asm-x86/sparsemem.h
+++ b/include/asm-x86/sparsemem.h
@@ -16,7 +16,7 @@
 
 #ifdef CONFIG_X86_32
 # ifdef CONFIG_X86_PAE
-#  define SECTION_SIZE_BITS	30
+#  define SECTION_SIZE_BITS	29
 #  define MAX_PHYSADDR_BITS	36
 #  define MAX_PHYSMEM_BITS	36
 # else
@@ -26,8 +26,8 @@
 # endif
 #else /* CONFIG_X86_32 */
 # define SECTION_SIZE_BITS	27 /* matt - 128 is convenient right now */
-# define MAX_PHYSADDR_BITS	40
-# define MAX_PHYSMEM_BITS	40
+# define MAX_PHYSADDR_BITS	44
+# define MAX_PHYSMEM_BITS	44
 #endif
 
 #endif /* CONFIG_SPARSEMEM */
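
These constants are coupled: sparsemem needs one mem_section entry per
2^SECTION_SIZE_BITS bytes, i.e. 2^(MAX_PHYSMEM_BITS - SECTION_SIZE_BITS) entries.
The PAE change (30 -> 29) shrinks a section from 1 GiB to 512 MiB, so the 36-bit
physical space splits into 2^(36-29) = 128 sections; the 64-bit change (40 -> 44)
extends coverage from 1 TiB to 16 TiB of physical address space, growing the table
from 2^(40-27) = 8192 to 2^(44-27) = 131072 of the 128 MiB sections.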
diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h
index 23804c1..bc6376f 100644
--- a/include/asm-x86/spinlock.h
+++ b/include/asm-x86/spinlock.h
@@ -78,11 +78,11 @@
 	return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1;
 }
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
 	short inc = 0x0100;
 
-	__asm__ __volatile__ (
+	asm volatile (
 		LOCK_PREFIX "xaddw %w0, %1\n"
 		"1:\t"
 		"cmpb %h0, %b0\n\t"
@@ -92,42 +92,40 @@
 		/* don't need lfence here, because loads are in-order */
 		"jmp 1b\n"
 		"2:"
-		:"+Q" (inc), "+m" (lock->slock)
+		: "+Q" (inc), "+m" (lock->slock)
 		:
-		:"memory", "cc");
+		: "memory", "cc");
 }
 
 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
 	int tmp;
 	short new;
 
-	asm volatile(
-		"movw %2,%w0\n\t"
-		"cmpb %h0,%b0\n\t"
-		"jne 1f\n\t"
-		"movw %w0,%w1\n\t"
-		"incb %h1\n\t"
-		"lock ; cmpxchgw %w1,%2\n\t"
-		"1:"
-		"sete %b1\n\t"
-		"movzbl %b1,%0\n\t"
-		:"=&a" (tmp), "=Q" (new), "+m" (lock->slock)
-		:
-		: "memory", "cc");
+	asm volatile("movw %2,%w0\n\t"
+		     "cmpb %h0,%b0\n\t"
+		     "jne 1f\n\t"
+		     "movw %w0,%w1\n\t"
+		     "incb %h1\n\t"
+		     "lock ; cmpxchgw %w1,%2\n\t"
+		     "1:"
+		     "sete %b1\n\t"
+		     "movzbl %b1,%0\n\t"
+		     : "=&a" (tmp), "=Q" (new), "+m" (lock->slock)
+		     :
+		     : "memory", "cc");
 
 	return tmp;
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-	__asm__ __volatile__(
-		UNLOCK_LOCK_PREFIX "incb %0"
-		:"+m" (lock->slock)
-		:
-		:"memory", "cc");
+	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
+		     : "+m" (lock->slock)
+		     :
+		     : "memory", "cc");
 }
 #else
 static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
@@ -144,60 +142,57 @@
 	return (((tmp >> 16) & 0xffff) - (tmp & 0xffff)) > 1;
 }
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
 	int inc = 0x00010000;
 	int tmp;
 
-	__asm__ __volatile__ (
-		"lock ; xaddl %0, %1\n"
-		"movzwl %w0, %2\n\t"
-		"shrl $16, %0\n\t"
-		"1:\t"
-		"cmpl %0, %2\n\t"
-		"je 2f\n\t"
-		"rep ; nop\n\t"
-		"movzwl %1, %2\n\t"
-		/* don't need lfence here, because loads are in-order */
-		"jmp 1b\n"
-		"2:"
-		:"+Q" (inc), "+m" (lock->slock), "=r" (tmp)
-		:
-		:"memory", "cc");
+	asm volatile("lock ; xaddl %0, %1\n"
+		     "movzwl %w0, %2\n\t"
+		     "shrl $16, %0\n\t"
+		     "1:\t"
+		     "cmpl %0, %2\n\t"
+		     "je 2f\n\t"
+		     "rep ; nop\n\t"
+		     "movzwl %1, %2\n\t"
+		     /* don't need lfence here, because loads are in-order */
+		     "jmp 1b\n"
+		     "2:"
+		     : "+Q" (inc), "+m" (lock->slock), "=r" (tmp)
+		     :
+		     : "memory", "cc");
 }
 
 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
 	int tmp;
 	int new;
 
-	asm volatile(
-		"movl %2,%0\n\t"
-		"movl %0,%1\n\t"
-		"roll $16, %0\n\t"
-		"cmpl %0,%1\n\t"
-		"jne 1f\n\t"
-		"addl $0x00010000, %1\n\t"
-		"lock ; cmpxchgl %1,%2\n\t"
-		"1:"
-		"sete %b1\n\t"
-		"movzbl %b1,%0\n\t"
-		:"=&a" (tmp), "=r" (new), "+m" (lock->slock)
-		:
-		: "memory", "cc");
+	asm volatile("movl %2,%0\n\t"
+		     "movl %0,%1\n\t"
+		     "roll $16, %0\n\t"
+		     "cmpl %0,%1\n\t"
+		     "jne 1f\n\t"
+		     "addl $0x00010000, %1\n\t"
+		     "lock ; cmpxchgl %1,%2\n\t"
+		     "1:"
+		     "sete %b1\n\t"
+		     "movzbl %b1,%0\n\t"
+		     : "=&a" (tmp), "=r" (new), "+m" (lock->slock)
+		     :
+		     : "memory", "cc");
 
 	return tmp;
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-	__asm__ __volatile__(
-		UNLOCK_LOCK_PREFIX "incw %0"
-		:"+m" (lock->slock)
-		:
-		:"memory", "cc");
+	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
+		     : "+m" (lock->slock)
+		     :
+		     : "memory", "cc");
 }
 #endif
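
The xadd-based sequences above implement a ticket lock: one half of the word is the
"next ticket" counter, the other the "now serving" owner. A portable sketch of the
protocol, assuming C11 atomics and mirroring the byte layout of the 16-bit variant:

#include <stdatomic.h>

struct sketch_ticket_lock {
	atomic_ushort slock;	/* bits 0-7: now-serving, bits 8-15: next */
};

static void sketch_lock(struct sketch_ticket_lock *l)
{
	/* "xaddw %w0, %1": grab a ticket and bump the next-ticket byte */
	unsigned char ticket = atomic_fetch_add(&l->slock, 0x0100) >> 8;

	/* the "cmpb %h0, %b0" loop: spin until our number comes up */
	while ((unsigned char)atomic_load(&l->slock) != ticket)
		;	/* the kernel inserts "rep ; nop" (pause) here */
}

static void sketch_unlock(struct sketch_ticket_lock *l)
{
	/* "incb %0" advances now-serving; unlike incb, a plain add
	 * would carry into the ticket byte at 0xff, so this sketch
	 * assumes the owner byte never wraps. */
	atomic_fetch_add(&l->slock, 1);
}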
 
diff --git a/include/asm-x86/srat.h b/include/asm-x86/srat.h
index 165ab4b..f4bba13 100644
--- a/include/asm-x86/srat.h
+++ b/include/asm-x86/srat.h
@@ -1,5 +1,5 @@
 /*
- * Some of the code in this file has been gleaned from the 64 bit 
+ * Some of the code in this file has been gleaned from the 64 bit
  * discontigmem support code base.
  *
  * Copyright (C) 2002, IBM Corp.
diff --git a/include/asm-x86/string_32.h b/include/asm-x86/string_32.h
index c5d13a8..b49369a 100644
--- a/include/asm-x86/string_32.h
+++ b/include/asm-x86/string_32.h
@@ -3,7 +3,7 @@
 
 #ifdef __KERNEL__
 
-/* Let gcc decide wether to inline or use the out of line functions */
+/* Let gcc decide whether to inline or use the out of line functions */
 
 #define __HAVE_ARCH_STRCPY
 extern char *strcpy(char *dest, const char *src);
diff --git a/include/asm-x86/string_64.h b/include/asm-x86/string_64.h
index e583da7..52b5ab3 100644
--- a/include/asm-x86/string_64.h
+++ b/include/asm-x86/string_64.h
@@ -3,26 +3,24 @@
 
 #ifdef __KERNEL__
 
-/* Written 2002 by Andi Kleen */ 
+/* Written 2002 by Andi Kleen */
 
-/* Only used for special circumstances. Stolen from i386/string.h */ 
-static __always_inline void *
-__inline_memcpy(void * to, const void * from, size_t n)
+/* Only used for special circumstances. Stolen from i386/string.h */
+static __always_inline void *__inline_memcpy(void *to, const void *from, size_t n)
 {
-unsigned long d0, d1, d2;
-__asm__ __volatile__(
-	"rep ; movsl\n\t"
-	"testb $2,%b4\n\t"
-	"je 1f\n\t"
-	"movsw\n"
-	"1:\ttestb $1,%b4\n\t"
-	"je 2f\n\t"
-	"movsb\n"
-	"2:"
-	: "=&c" (d0), "=&D" (d1), "=&S" (d2)
-	:"0" (n/4), "q" (n),"1" ((long) to),"2" ((long) from)
-	: "memory");
-return (to);
+	unsigned long d0, d1, d2;
+	asm volatile("rep ; movsl\n\t"
+		     "testb $2,%b4\n\t"
+		     "je 1f\n\t"
+		     "movsw\n"
+		     "1:\ttestb $1,%b4\n\t"
+		     "je 2f\n\t"
+		     "movsb\n"
+		     "2:"
+		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
+		     : "0" (n / 4), "q" (n), "1" ((long)to), "2" ((long)from)
+		     : "memory");
+	return to;
 }
 
 /* Even with __builtin_ the compiler may decide to use the out of line
@@ -32,28 +30,30 @@
 #if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4
 extern void *memcpy(void *to, const void *from, size_t len);
 #else
-extern void *__memcpy(void *to, const void *from, size_t len); 
-#define memcpy(dst,src,len) \
-	({ size_t __len = (len);				\
-	   void *__ret;						\
-	   if (__builtin_constant_p(len) && __len >= 64)	\
-		 __ret = __memcpy((dst),(src),__len);		\
-	   else							\
-		 __ret = __builtin_memcpy((dst),(src),__len);	\
-	   __ret; }) 
+extern void *__memcpy(void *to, const void *from, size_t len);
+#define memcpy(dst, src, len)					\
+({								\
+	size_t __len = (len);					\
+	void *__ret;						\
+	if (__builtin_constant_p(len) && __len >= 64)		\
+		__ret = __memcpy((dst), (src), __len);		\
+	else							\
+		__ret = __builtin_memcpy((dst), (src), __len);	\
+	__ret;							\
+})
 #endif
 
 #define __HAVE_ARCH_MEMSET
 void *memset(void *s, int c, size_t n);
 
 #define __HAVE_ARCH_MEMMOVE
-void * memmove(void * dest,const void *src,size_t count);
+void *memmove(void *dest, const void *src, size_t count);
 
-int memcmp(const void * cs,const void * ct,size_t count);
-size_t strlen(const char * s);
-char *strcpy(char * dest,const char *src);
-char *strcat(char * dest, const char * src);
-int strcmp(const char * cs,const char * ct);
+int memcmp(const void *cs, const void *ct, size_t count);
+size_t strlen(const char *s);
+char *strcpy(char *dest, const char *src);
+char *strcat(char *dest, const char *src);
+int strcmp(const char *cs, const char *ct);
 
 #endif /* __KERNEL__ */
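
For pre-4.3 gcc, the memcpy() macro above splits copies three ways; how concrete
calls resolve (illustrative only):

/*
 *   memcpy(dst, src, 16);   -> __builtin_memcpy(): small constant size,
 *                              expanded inline by the compiler
 *   memcpy(dst, src, 4096); -> __memcpy(): large constant size, out of line
 *   memcpy(dst, src, n);    -> __builtin_memcpy(): len is not a
 *                              compile-time constant, so the >= 64 test
 *                              never selects the out-of-line copy
 */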
 
diff --git a/include/asm-x86/suspend_32.h b/include/asm-x86/suspend_32.h
index 1bbda3a..24e1c08 100644
--- a/include/asm-x86/suspend_32.h
+++ b/include/asm-x86/suspend_32.h
@@ -10,7 +10,7 @@
 
 /* image of the saved processor state */
 struct saved_context {
-  	u16 es, fs, gs, ss;
+	u16 es, fs, gs, ss;
 	unsigned long cr0, cr2, cr3, cr4;
 	struct desc_ptr gdt;
 	struct desc_ptr idt;
@@ -32,11 +32,11 @@
 static inline void acpi_save_register_state(unsigned long return_point)
 {
 	saved_eip = return_point;
-	asm volatile ("movl %%esp,%0" : "=m" (saved_esp));
-	asm volatile ("movl %%ebp,%0" : "=m" (saved_ebp));
-	asm volatile ("movl %%ebx,%0" : "=m" (saved_ebx));
-	asm volatile ("movl %%edi,%0" : "=m" (saved_edi));
-	asm volatile ("movl %%esi,%0" : "=m" (saved_esi));
+	asm volatile("movl %%esp,%0" : "=m" (saved_esp));
+	asm volatile("movl %%ebp,%0" : "=m" (saved_ebp));
+	asm volatile("movl %%ebx,%0" : "=m" (saved_ebx));
+	asm volatile("movl %%edi,%0" : "=m" (saved_edi));
+	asm volatile("movl %%esi,%0" : "=m" (saved_esi));
 }
 
 #define acpi_restore_register_state()  do {} while (0)
diff --git a/include/asm-x86/suspend_64.h b/include/asm-x86/suspend_64.h
index 2eb92cb..dc3262b 100644
--- a/include/asm-x86/suspend_64.h
+++ b/include/asm-x86/suspend_64.h
@@ -9,8 +9,7 @@
 #include <asm/desc.h>
 #include <asm/i387.h>
 
-static inline int
-arch_prepare_suspend(void)
+static inline int arch_prepare_suspend(void)
 {
 	return 0;
 }
@@ -25,7 +24,7 @@
  */
 struct saved_context {
 	struct pt_regs regs;
-  	u16 ds, es, fs, gs, ss;
+	u16 ds, es, fs, gs, ss;
 	unsigned long gs_base, gs_kernel_base, fs_base;
 	unsigned long cr0, cr2, cr3, cr4, cr8;
 	unsigned long efer;
diff --git a/include/asm-x86/swiotlb.h b/include/asm-x86/swiotlb.h
index f9c5895..f5d9e74 100644
--- a/include/asm-x86/swiotlb.h
+++ b/include/asm-x86/swiotlb.h
@@ -8,15 +8,15 @@
 extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr,
 				     size_t size, int dir);
 extern void *swiotlb_alloc_coherent(struct device *hwdev, size_t size,
-                       dma_addr_t *dma_handle, gfp_t flags);
+				    dma_addr_t *dma_handle, gfp_t flags);
 extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
-				  size_t size, int dir);
+				 size_t size, int dir);
 extern void swiotlb_sync_single_for_cpu(struct device *hwdev,
-					 dma_addr_t dev_addr,
-					 size_t size, int dir);
+					dma_addr_t dev_addr,
+					size_t size, int dir);
 extern void swiotlb_sync_single_for_device(struct device *hwdev,
-					    dma_addr_t dev_addr,
-					    size_t size, int dir);
+					   dma_addr_t dev_addr,
+					   size_t size, int dir);
 extern void swiotlb_sync_single_range_for_cpu(struct device *hwdev,
 					      dma_addr_t dev_addr,
 					      unsigned long offset,
@@ -26,18 +26,18 @@
 						 unsigned long offset,
 						 size_t size, int dir);
 extern void swiotlb_sync_sg_for_cpu(struct device *hwdev,
-				     struct scatterlist *sg, int nelems,
-				     int dir);
+				    struct scatterlist *sg, int nelems,
+				    int dir);
 extern void swiotlb_sync_sg_for_device(struct device *hwdev,
-					struct scatterlist *sg, int nelems,
-					int dir);
+				       struct scatterlist *sg, int nelems,
+				       int dir);
 extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg,
-		      int nents, int direction);
+			  int nents, int direction);
 extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
-			 int nents, int direction);
+			     int nents, int direction);
 extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr);
-extern void swiotlb_free_coherent (struct device *hwdev, size_t size,
-				   void *vaddr, dma_addr_t dma_handle);
+extern void swiotlb_free_coherent(struct device *hwdev, size_t size,
+				  void *vaddr, dma_addr_t dma_handle);
 extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
 extern void swiotlb_init(void);
 
diff --git a/include/asm-x86/sync_bitops.h b/include/asm-x86/sync_bitops.h
index 6b775c9..b47a1d0 100644
--- a/include/asm-x86/sync_bitops.h
+++ b/include/asm-x86/sync_bitops.h
@@ -13,7 +13,7 @@
  * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
  */
 
-#define ADDR (*(volatile long *) addr)
+#define ADDR (*(volatile long *)addr)
 
 /**
  * sync_set_bit - Atomically set a bit in memory
@@ -26,12 +26,12 @@
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void sync_set_bit(int nr, volatile unsigned long * addr)
+static inline void sync_set_bit(int nr, volatile unsigned long *addr)
 {
-	__asm__ __volatile__("lock; btsl %1,%0"
-			     :"+m" (ADDR)
-			     :"Ir" (nr)
-			     : "memory");
+	asm volatile("lock; btsl %1,%0"
+		     : "+m" (ADDR)
+		     : "Ir" (nr)
+		     : "memory");
 }
 
 /**
@@ -44,12 +44,12 @@
  * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
  * in order to ensure changes are visible on other processors.
  */
-static inline void sync_clear_bit(int nr, volatile unsigned long * addr)
+static inline void sync_clear_bit(int nr, volatile unsigned long *addr)
 {
-	__asm__ __volatile__("lock; btrl %1,%0"
-			     :"+m" (ADDR)
-			     :"Ir" (nr)
-			     : "memory");
+	asm volatile("lock; btrl %1,%0"
+		     : "+m" (ADDR)
+		     : "Ir" (nr)
+		     : "memory");
 }
 
 /**
@@ -61,12 +61,12 @@
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void sync_change_bit(int nr, volatile unsigned long * addr)
+static inline void sync_change_bit(int nr, volatile unsigned long *addr)
 {
-	__asm__ __volatile__("lock; btcl %1,%0"
-			     :"+m" (ADDR)
-			     :"Ir" (nr)
-			     : "memory");
+	asm volatile("lock; btcl %1,%0"
+		     : "+m" (ADDR)
+		     : "Ir" (nr)
+		     : "memory");
 }
 
 /**
@@ -77,13 +77,13 @@
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int sync_test_and_set_bit(int nr, volatile unsigned long * addr)
+static inline int sync_test_and_set_bit(int nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
-	__asm__ __volatile__("lock; btsl %2,%1\n\tsbbl %0,%0"
-			     :"=r" (oldbit),"+m" (ADDR)
-			     :"Ir" (nr) : "memory");
+	asm volatile("lock; btsl %2,%1\n\tsbbl %0,%0"
+		     : "=r" (oldbit), "+m" (ADDR)
+		     : "Ir" (nr) : "memory");
 	return oldbit;
 }
 
@@ -95,13 +95,13 @@
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int sync_test_and_clear_bit(int nr, volatile unsigned long * addr)
+static inline int sync_test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
-	__asm__ __volatile__("lock; btrl %2,%1\n\tsbbl %0,%0"
-			     :"=r" (oldbit),"+m" (ADDR)
-			     :"Ir" (nr) : "memory");
+	asm volatile("lock; btrl %2,%1\n\tsbbl %0,%0"
+		     : "=r" (oldbit), "+m" (ADDR)
+		     : "Ir" (nr) : "memory");
 	return oldbit;
 }
 
@@ -113,36 +113,17 @@
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int sync_test_and_change_bit(int nr, volatile unsigned long* addr)
+static inline int sync_test_and_change_bit(int nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
-	__asm__ __volatile__("lock; btcl %2,%1\n\tsbbl %0,%0"
-			     :"=r" (oldbit),"+m" (ADDR)
-			     :"Ir" (nr) : "memory");
+	asm volatile("lock; btcl %2,%1\n\tsbbl %0,%0"
+		     : "=r" (oldbit), "+m" (ADDR)
+		     : "Ir" (nr) : "memory");
 	return oldbit;
 }
 
-static __always_inline int sync_constant_test_bit(int nr, const volatile unsigned long *addr)
-{
-	return ((1UL << (nr & 31)) &
-		(((const volatile unsigned int *)addr)[nr >> 5])) != 0;
-}
-
-static inline int sync_var_test_bit(int nr, const volatile unsigned long * addr)
-{
-	int oldbit;
-
-	__asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0"
-			     :"=r" (oldbit)
-			     :"m" (ADDR),"Ir" (nr));
-	return oldbit;
-}
-
-#define sync_test_bit(nr,addr)			\
-	(__builtin_constant_p(nr) ?		\
-	 sync_constant_test_bit((nr),(addr)) :	\
-	 sync_var_test_bit((nr),(addr)))
+#define sync_test_bit(nr, addr) test_bit(nr, addr)
 
 #undef ADDR
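
The sync_ variants keep their lock prefix even on uniprocessor builds, where the
ordinary bitops may drop it; that matters for memory shared with another bus master
or a hypervisor rather than with other CPUs. A hedged usage sketch, where
shared_pending stands in for a hypothetical page shared with such an agent:

static unsigned long shared_pending[4];

static void post_event(int evt)
{
	sync_set_bit(evt, shared_pending);	/* always LOCK-prefixed */
}

static int consume_event(int evt)
{
	return sync_test_and_clear_bit(evt, shared_pending);
}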
 
diff --git a/include/asm-x86/system.h b/include/asm-x86/system.h
index 9cff02f..a2f04cd 100644
--- a/include/asm-x86/system.h
+++ b/include/asm-x86/system.h
@@ -27,22 +27,44 @@
  * Saving eflags is important. It switches not only IOPL between tasks,
  * it also protects other tasks from NT leaking through sysenter etc.
  */
-#define switch_to(prev, next, last) do {				\
-	unsigned long esi, edi;						\
-	asm volatile("pushfl\n\t"		/* Save flags */	\
-		     "pushl %%ebp\n\t"					\
-		     "movl %%esp,%0\n\t"	/* save ESP */		\
-		     "movl %5,%%esp\n\t"	/* restore ESP */	\
-		     "movl $1f,%1\n\t"		/* save EIP */		\
-		     "pushl %6\n\t"		/* restore EIP */	\
-		     "jmp __switch_to\n"				\
+#define switch_to(prev, next, last)					\
+do {									\
+	/*								\
+	 * Context-switching clobbers all registers, so we clobber	\
+	 * them explicitly, via unused output variables.		\
+	 * (EAX and EBP is not listed because EBP is saved/restored	\
+	 * explicitly for wchan access and EAX is the return value of	\
+	 * __switch_to())						\
+	 */								\
+	unsigned long ebx, ecx, edx, esi, edi;				\
+									\
+	asm volatile("pushfl\n\t"		/* save    flags */	\
+		     "pushl %%ebp\n\t"		/* save    EBP   */	\
+		     "movl %%esp,%[prev_sp]\n\t"	/* save    ESP   */ \
+		     "movl %[next_sp],%%esp\n\t"	/* restore ESP   */ \
+		     "movl $1f,%[prev_ip]\n\t"	/* save    EIP   */	\
+		     "pushl %[next_ip]\n\t"	/* restore EIP   */	\
+		     "jmp __switch_to\n"	/* regparm call  */	\
 		     "1:\t"						\
-		     "popl %%ebp\n\t"					\
-		     "popfl"						\
-		     :"=m" (prev->thread.sp), "=m" (prev->thread.ip),	\
-		      "=a" (last), "=S" (esi), "=D" (edi)		\
-		     :"m" (next->thread.sp), "m" (next->thread.ip),	\
-		      "2" (prev), "d" (next));				\
+		     "popl %%ebp\n\t"		/* restore EBP   */	\
+		     "popfl\n"			/* restore flags */	\
+									\
+		     /* output parameters */				\
+		     : [prev_sp] "=m" (prev->thread.sp),		\
+		       [prev_ip] "=m" (prev->thread.ip),		\
+		       "=a" (last),					\
+									\
+		       /* clobbered output registers: */		\
+		       "=b" (ebx), "=c" (ecx), "=d" (edx),		\
+		       "=S" (esi), "=D" (edi)				\
+		       							\
+		       /* input parameters: */				\
+		     : [next_sp]  "m" (next->thread.sp),		\
+		       [next_ip]  "m" (next->thread.ip),		\
+		       							\
+		       /* regparm parameters for __switch_to(): */	\
+		       [prev]     "a" (prev),				\
+		       [next]     "d" (next));				\
 } while (0)
 
 /*
@@ -122,35 +144,34 @@
  */
 #define loadsegment(seg, value)			\
 	asm volatile("\n"			\
-		"1:\t"				\
-		"movl %k0,%%" #seg "\n"		\
-		"2:\n"				\
-		".section .fixup,\"ax\"\n"	\
-		"3:\t"				\
-		"movl %k1, %%" #seg "\n\t"	\
-		"jmp 2b\n"			\
-		".previous\n"			\
-		_ASM_EXTABLE(1b,3b)		\
-		: :"r" (value), "r" (0))
+		     "1:\t"			\
+		     "movl %k0,%%" #seg "\n"	\
+		     "2:\n"			\
+		     ".section .fixup,\"ax\"\n"	\
+		     "3:\t"			\
+		     "movl %k1, %%" #seg "\n\t"	\
+		     "jmp 2b\n"			\
+		     ".previous\n"		\
+		     _ASM_EXTABLE(1b,3b)	\
+		     : :"r" (value), "r" (0))
 
 
 /*
  * Save a segment register away
  */
-#define savesegment(seg, value) \
+#define savesegment(seg, value)				\
 	asm volatile("mov %%" #seg ",%0":"=rm" (value))
 
 static inline unsigned long get_limit(unsigned long segment)
 {
 	unsigned long __limit;
-	__asm__("lsll %1,%0"
-		:"=r" (__limit):"r" (segment));
-	return __limit+1;
+	asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
+	return __limit + 1;
 }
 
 static inline void native_clts(void)
 {
-	asm volatile ("clts");
+	asm volatile("clts");
 }
 
 /*
@@ -165,43 +186,43 @@
 static inline unsigned long native_read_cr0(void)
 {
 	unsigned long val;
-	asm volatile("mov %%cr0,%0\n\t" :"=r" (val), "=m" (__force_order));
+	asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
 	return val;
 }
 
 static inline void native_write_cr0(unsigned long val)
 {
-	asm volatile("mov %0,%%cr0": :"r" (val), "m" (__force_order));
+	asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order));
 }
 
 static inline unsigned long native_read_cr2(void)
 {
 	unsigned long val;
-	asm volatile("mov %%cr2,%0\n\t" :"=r" (val), "=m" (__force_order));
+	asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
 	return val;
 }
 
 static inline void native_write_cr2(unsigned long val)
 {
-	asm volatile("mov %0,%%cr2": :"r" (val), "m" (__force_order));
+	asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
 }
 
 static inline unsigned long native_read_cr3(void)
 {
 	unsigned long val;
-	asm volatile("mov %%cr3,%0\n\t" :"=r" (val), "=m" (__force_order));
+	asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
 	return val;
 }
 
 static inline void native_write_cr3(unsigned long val)
 {
-	asm volatile("mov %0,%%cr3": :"r" (val), "m" (__force_order));
+	asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
 }
 
 static inline unsigned long native_read_cr4(void)
 {
 	unsigned long val;
-	asm volatile("mov %%cr4,%0\n\t" :"=r" (val), "=m" (__force_order));
+	asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
 	return val;
 }
 
@@ -213,7 +234,7 @@
 #ifdef CONFIG_X86_32
 	asm volatile("1: mov %%cr4, %0\n"
 		     "2:\n"
-		     _ASM_EXTABLE(1b,2b)
+		     _ASM_EXTABLE(1b, 2b)
 		     : "=r" (val), "=m" (__force_order) : "0" (0));
 #else
 	val = native_read_cr4();
@@ -223,7 +244,7 @@
 
 static inline void native_write_cr4(unsigned long val)
 {
-	asm volatile("mov %0,%%cr4": :"r" (val), "m" (__force_order));
+	asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order));
 }
 
 #ifdef CONFIG_X86_64
@@ -244,6 +265,7 @@
 {
 	asm volatile("wbinvd": : :"memory");
 }
+
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
@@ -276,7 +298,7 @@
 	asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
 }
 
-#define nop() __asm__ __volatile__ ("nop")
+#define nop() asm volatile ("nop")
 
 void disable_hlt(void);
 void enable_hlt(void);
@@ -296,16 +318,7 @@
  */
 #ifdef CONFIG_X86_32
 /*
- * For now, "wmb()" doesn't actually do anything, as all
- * Intel CPU's follow what Intel calls a *Processor Order*,
- * in which all writes are seen in the program order even
- * outside the CPU.
- *
- * I expect future Intel CPU's to have a weaker ordering,
- * but I'd also expect them to finally get their act together
- * and add some real memory barriers if so.
- *
- * Some non intel clones support out of order store. wmb() ceases to be a
+ * Some non-Intel clones support out of order store. wmb() ceases to be a
  * nop for these.
  */
 #define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
@@ -384,7 +397,7 @@
 # define smp_wmb()	barrier()
 #endif
 #define smp_read_barrier_depends()	read_barrier_depends()
-#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
+#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
 #else
 #define smp_mb()	barrier()
 #define smp_rmb()	barrier()
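
Functionally the switch_to() rewrite above is the same instruction sequence; the
readability win comes from named asm operands, which replace fragile positional %N
references. A minimal sketch of the syntax, independent of switch_to() itself:

/* %[val] and %[one] refer to operands by name, so adding or
 * reordering operands cannot silently renumber the template. */
static inline unsigned long add_one(unsigned long v)
{
	asm("add %[one],%[val]"
	    : [val] "+r" (v)
	    : [one] "ri" (1UL)
	    : "cc");
	return v;
}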
diff --git a/include/asm-x86/tce.h b/include/asm-x86/tce.h
index cd955d3..b1a4ea0 100644
--- a/include/asm-x86/tce.h
+++ b/include/asm-x86/tce.h
@@ -39,7 +39,7 @@
 #define TCE_RPN_MASK     0x0000fffffffff000ULL
 
 extern void tce_build(struct iommu_table *tbl, unsigned long index,
-        unsigned int npages, unsigned long uaddr, int direction);
+		      unsigned int npages, unsigned long uaddr, int direction);
 extern void tce_free(struct iommu_table *tbl, long index, unsigned int npages);
 extern void * __init alloc_tce_table(void);
 extern void __init free_tce_table(void *tbl);
diff --git a/include/asm-x86/thread_info_32.h b/include/asm-x86/thread_info_32.h
index 5bd5082..4e053fa 100644
--- a/include/asm-x86/thread_info_32.h
+++ b/include/asm-x86/thread_info_32.h
@@ -20,7 +20,8 @@
  * low level task data that entry.S needs immediate access to
  * - this struct should fit entirely inside of one cache line
  * - this struct shares the supervisor stack pages
- * - if the contents of this structure are changed, the assembly constants must also be changed
+ * - if the contents of this structure are changed,
+ *   the assembly constants must also be changed
  */
 #ifndef __ASSEMBLY__
 
@@ -30,18 +31,16 @@
 	unsigned long		flags;		/* low level flags */
 	unsigned long		status;		/* thread-synchronous flags */
 	__u32			cpu;		/* current CPU */
-	int			preempt_count;	/* 0 => preemptable, <0 => BUG */
-
-
+	int			preempt_count;	/* 0 => preemptable,
+						   <0 => BUG */
 	mm_segment_t		addr_limit;	/* thread address space:
-					 	   0-0xBFFFFFFF for user-thead
-						   0-0xFFFFFFFF for kernel-thread
+						   0-0xBFFFFFFF user-thread
+						   0-0xFFFFFFFF kernel-thread
 						*/
 	void			*sysenter_return;
 	struct restart_block    restart_block;
-
-	unsigned long           previous_esp;   /* ESP of the previous stack in case
-						   of nested (IRQ) stacks
+	unsigned long           previous_esp;   /* ESP of the previous stack in
+						   case of nested (IRQ) stacks
 						*/
 	__u8			supervisor_stack[0];
 };
@@ -90,15 +89,16 @@
 /* how to get the thread information struct from C */
 static inline struct thread_info *current_thread_info(void)
 {
-	return (struct thread_info *)(current_stack_pointer & ~(THREAD_SIZE - 1));
+	return (struct thread_info *)
+		(current_stack_pointer & ~(THREAD_SIZE - 1));
 }
 
 /* thread information allocation */
 #ifdef CONFIG_DEBUG_STACK_USAGE
-#define alloc_thread_info(tsk) ((struct thread_info *) \
-	__get_free_pages(GFP_KERNEL| __GFP_ZERO, get_order(THREAD_SIZE)))
+#define alloc_thread_info(tsk) ((struct thread_info *)			\
+	__get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(THREAD_SIZE)))
 #else
-#define alloc_thread_info(tsk) ((struct thread_info *) \
+#define alloc_thread_info(tsk) ((struct thread_info *)			\
 	__get_free_pages(GFP_KERNEL, get_order(THREAD_SIZE)))
 #endif
 
@@ -107,7 +107,7 @@
 #else /* !__ASSEMBLY__ */
 
 /* how to get the thread information struct from ASM */
-#define GET_THREAD_INFO(reg) \
+#define GET_THREAD_INFO(reg)	 \
 	movl $-THREAD_SIZE, reg; \
 	andl %esp, reg
 
@@ -119,14 +119,16 @@
 
 /*
  * thread information flags
- * - these are process state flags that various assembly files may need to access
+ * - these are process state flags that various
+ *   assembly files may need to access
  * - pending work-to-be-done flags are in LSW
  * - other flags in MSW
  */
 #define TIF_SYSCALL_TRACE	0	/* syscall trace active */
 #define TIF_SIGPENDING		1	/* signal pending */
 #define TIF_NEED_RESCHED	2	/* rescheduling necessary */
-#define TIF_SINGLESTEP		3	/* restore singlestep on return to user mode */
+#define TIF_SINGLESTEP		3	/* restore singlestep on return to
+					   user mode */
 #define TIF_IRET		4	/* return with iret */
 #define TIF_SYSCALL_EMU		5	/* syscall emulation active */
 #define TIF_SYSCALL_AUDIT	6	/* syscall auditing active */
@@ -143,36 +145,36 @@
 #define TIF_DS_AREA_MSR 	23      /* uses thread_struct.ds_area_msr */
 #define TIF_BTS_TRACE_TS        24      /* record scheduling event timestamps */
 
-#define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
-#define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
-#define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
-#define _TIF_SINGLESTEP		(1<<TIF_SINGLESTEP)
-#define _TIF_IRET		(1<<TIF_IRET)
-#define _TIF_SYSCALL_EMU	(1<<TIF_SYSCALL_EMU)
-#define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
-#define _TIF_SECCOMP		(1<<TIF_SECCOMP)
-#define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
-#define _TIF_HRTICK_RESCHED	(1<<TIF_HRTICK_RESCHED)
-#define _TIF_DEBUG		(1<<TIF_DEBUG)
-#define _TIF_IO_BITMAP		(1<<TIF_IO_BITMAP)
-#define _TIF_FREEZE		(1<<TIF_FREEZE)
-#define _TIF_NOTSC		(1<<TIF_NOTSC)
-#define _TIF_FORCED_TF		(1<<TIF_FORCED_TF)
-#define _TIF_DEBUGCTLMSR	(1<<TIF_DEBUGCTLMSR)
-#define _TIF_DS_AREA_MSR	(1<<TIF_DS_AREA_MSR)
-#define _TIF_BTS_TRACE_TS	(1<<TIF_BTS_TRACE_TS)
+#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
+#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
+#define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
+#define _TIF_IRET		(1 << TIF_IRET)
+#define _TIF_SYSCALL_EMU	(1 << TIF_SYSCALL_EMU)
+#define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
+#define _TIF_SECCOMP		(1 << TIF_SECCOMP)
+#define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
+#define _TIF_HRTICK_RESCHED	(1 << TIF_HRTICK_RESCHED)
+#define _TIF_DEBUG		(1 << TIF_DEBUG)
+#define _TIF_IO_BITMAP		(1 << TIF_IO_BITMAP)
+#define _TIF_FREEZE		(1 << TIF_FREEZE)
+#define _TIF_NOTSC		(1 << TIF_NOTSC)
+#define _TIF_FORCED_TF		(1 << TIF_FORCED_TF)
+#define _TIF_DEBUGCTLMSR	(1 << TIF_DEBUGCTLMSR)
+#define _TIF_DS_AREA_MSR	(1 << TIF_DS_AREA_MSR)
+#define _TIF_BTS_TRACE_TS	(1 << TIF_BTS_TRACE_TS)
 
 /* work to do on interrupt/exception return */
-#define _TIF_WORK_MASK \
-  (0x0000FFFF & ~(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
-		  _TIF_SECCOMP | _TIF_SYSCALL_EMU))
+#define _TIF_WORK_MASK							\
+	(0x0000FFFF & ~(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT |	\
+			_TIF_SECCOMP | _TIF_SYSCALL_EMU))
 /* work to do on any return to u-space */
 #define _TIF_ALLWORK_MASK	(0x0000FFFF & ~_TIF_SECCOMP)
 
 /* flags to check in __switch_to() */
-#define _TIF_WORK_CTXSW \
-    (_TIF_IO_BITMAP | _TIF_NOTSC | _TIF_DEBUGCTLMSR | \
-     _TIF_DS_AREA_MSR | _TIF_BTS_TRACE_TS)
+#define _TIF_WORK_CTXSW						\
+	(_TIF_IO_BITMAP | _TIF_NOTSC | _TIF_DEBUGCTLMSR |	\
+	 _TIF_DS_AREA_MSR | _TIF_BTS_TRACE_TS)
 #define _TIF_WORK_CTXSW_PREV _TIF_WORK_CTXSW
 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW | _TIF_DEBUG)
 
@@ -184,8 +186,10 @@
  * ever touches our thread-synchronous status, so we don't
  * have to worry about atomic accesses.
  */
-#define TS_USEDFPU		0x0001	/* FPU was used by this task this quantum (SMP) */
-#define TS_POLLING		0x0002	/* True if in idle loop and not sleeping */
+#define TS_USEDFPU		0x0001	/* FPU was used by this task
+					   this quantum (SMP) */
+#define TS_POLLING		0x0002	/* True if in idle loop
+					   and not sleeping */
 
 #define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
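
current_thread_info() and GET_THREAD_INFO above rely on the same trick: thread_info
sits at the bottom of the task's stack, and the stack is THREAD_SIZE-aligned, so
rounding any stack address down to a THREAD_SIZE boundary finds it. A 32-bit sketch
with illustrative names:

#define SKETCH_THREAD_SIZE 8192UL	/* two pages, as on i386 */

struct sketch_thread_info { int cpu; /* ... */ };

static struct sketch_thread_info *sketch_current(void)
{
	unsigned long sp;
	asm("movl %%esp,%0" : "=r" (sp));
	return (struct sketch_thread_info *)(sp & ~(SKETCH_THREAD_SIZE - 1));
}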
 
diff --git a/include/asm-x86/thread_info_64.h b/include/asm-x86/thread_info_64.h
index 6c9b214..1e5c6f6 100644
--- a/include/asm-x86/thread_info_64.h
+++ b/include/asm-x86/thread_info_64.h
@@ -29,9 +29,9 @@
 	__u32			flags;		/* low level flags */
 	__u32			status;		/* thread synchronous flags */
 	__u32			cpu;		/* current CPU */
-	int 			preempt_count;	/* 0 => preemptable, <0 => BUG */
-
-	mm_segment_t		addr_limit;	
+	int 			preempt_count;	/* 0 => preemptable,
+						   <0 => BUG */
+	mm_segment_t		addr_limit;
 	struct restart_block    restart_block;
 #ifdef CONFIG_IA32_EMULATION
 	void __user		*sysenter_return;
@@ -61,17 +61,17 @@
 #define init_stack		(init_thread_union.stack)
 
 static inline struct thread_info *current_thread_info(void)
-{ 
+{
 	struct thread_info *ti;
 	ti = (void *)(read_pda(kernelstack) + PDA_STACKOFFSET - THREAD_SIZE);
-	return ti; 
+	return ti;
 }
 
 /* do not use in interrupt context */
 static inline struct thread_info *stack_thread_info(void)
 {
 	struct thread_info *ti;
-	__asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (~(THREAD_SIZE - 1)));
+	asm("andq %%rsp,%0; " : "=r" (ti) : "0" (~(THREAD_SIZE - 1)));
 	return ti;
 }
 
@@ -82,8 +82,8 @@
 #define THREAD_FLAGS GFP_KERNEL
 #endif
 
-#define alloc_thread_info(tsk) \
-	((struct thread_info *) __get_free_pages(THREAD_FLAGS, THREAD_ORDER))
+#define alloc_thread_info(tsk)						\
+	((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
 
 #define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER)
 
@@ -98,7 +98,8 @@
 
 /*
  * thread information flags
- * - these are process state flags that various assembly files may need to access
+ * - these are process state flags that various assembly files
+ *   may need to access
  * - pending work-to-be-done flags are in LSW
  * - other flags in MSW
  * Warning: layout of LSW is hardcoded in entry.S
@@ -114,7 +115,7 @@
 #define TIF_MCE_NOTIFY		10	/* notify userspace of an MCE */
 #define TIF_HRTICK_RESCHED	11	/* reprogram hrtick timer */
 /* 16 free */
-#define TIF_IA32		17	/* 32bit process */ 
+#define TIF_IA32		17	/* 32bit process */
 #define TIF_FORK		18	/* ret_from_fork */
 #define TIF_ABI_PENDING		19
 #define TIF_MEMDIE		20
@@ -126,39 +127,40 @@
 #define TIF_DS_AREA_MSR		26      /* uses thread_struct.ds_area_msr */
 #define TIF_BTS_TRACE_TS	27      /* record scheduling event timestamps */
 
-#define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
-#define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
-#define _TIF_SINGLESTEP		(1<<TIF_SINGLESTEP)
-#define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
-#define _TIF_IRET		(1<<TIF_IRET)
-#define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
-#define _TIF_SECCOMP		(1<<TIF_SECCOMP)
-#define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
-#define _TIF_MCE_NOTIFY		(1<<TIF_MCE_NOTIFY)
-#define _TIF_HRTICK_RESCHED	(1<<TIF_HRTICK_RESCHED)
-#define _TIF_IA32		(1<<TIF_IA32)
-#define _TIF_FORK		(1<<TIF_FORK)
-#define _TIF_ABI_PENDING	(1<<TIF_ABI_PENDING)
-#define _TIF_DEBUG		(1<<TIF_DEBUG)
-#define _TIF_IO_BITMAP		(1<<TIF_IO_BITMAP)
-#define _TIF_FREEZE		(1<<TIF_FREEZE)
-#define _TIF_FORCED_TF		(1<<TIF_FORCED_TF)
-#define _TIF_DEBUGCTLMSR	(1<<TIF_DEBUGCTLMSR)
-#define _TIF_DS_AREA_MSR	(1<<TIF_DS_AREA_MSR)
-#define _TIF_BTS_TRACE_TS	(1<<TIF_BTS_TRACE_TS)
+#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
+#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
+#define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
+#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
+#define _TIF_IRET		(1 << TIF_IRET)
+#define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
+#define _TIF_SECCOMP		(1 << TIF_SECCOMP)
+#define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
+#define _TIF_MCE_NOTIFY		(1 << TIF_MCE_NOTIFY)
+#define _TIF_HRTICK_RESCHED	(1 << TIF_HRTICK_RESCHED)
+#define _TIF_IA32		(1 << TIF_IA32)
+#define _TIF_FORK		(1 << TIF_FORK)
+#define _TIF_ABI_PENDING	(1 << TIF_ABI_PENDING)
+#define _TIF_DEBUG		(1 << TIF_DEBUG)
+#define _TIF_IO_BITMAP		(1 << TIF_IO_BITMAP)
+#define _TIF_FREEZE		(1 << TIF_FREEZE)
+#define _TIF_FORCED_TF		(1 << TIF_FORCED_TF)
+#define _TIF_DEBUGCTLMSR	(1 << TIF_DEBUGCTLMSR)
+#define _TIF_DS_AREA_MSR	(1 << TIF_DS_AREA_MSR)
+#define _TIF_BTS_TRACE_TS	(1 << TIF_BTS_TRACE_TS)
 
 /* work to do on interrupt/exception return */
-#define _TIF_WORK_MASK \
-  (0x0000FFFF & ~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP|_TIF_SECCOMP))
+#define _TIF_WORK_MASK							\
+	(0x0000FFFF &							\
+	 ~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP|_TIF_SECCOMP))
 /* work to do on any return to user space */
 #define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP)
 
-#define _TIF_DO_NOTIFY_MASK \
+#define _TIF_DO_NOTIFY_MASK						\
 	(_TIF_SIGPENDING|_TIF_SINGLESTEP|_TIF_MCE_NOTIFY|_TIF_HRTICK_RESCHED)
 
 /* flags to check in __switch_to() */
-#define _TIF_WORK_CTXSW \
-    (_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_BTS_TRACE_TS)
+#define _TIF_WORK_CTXSW							\
+	(_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_BTS_TRACE_TS)
 #define _TIF_WORK_CTXSW_PREV _TIF_WORK_CTXSW
 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG)
 
@@ -171,9 +173,11 @@
  * ever touches our thread-synchronous status, so we don't
  * have to worry about atomic accesses.
  */
-#define TS_USEDFPU		0x0001	/* FPU was used by this task this quantum (SMP) */
+#define TS_USEDFPU		0x0001	/* FPU was used by this task
+					   this quantum (SMP) */
 #define TS_COMPAT		0x0002	/* 32bit syscall active */
-#define TS_POLLING		0x0004	/* true if in idle loop and not sleeping */
+#define TS_POLLING		0x0004	/* true if in idle loop
+					   and not sleeping */
 
 #define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
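
A sketch of how the masks above are consumed on the return-to-user path; the real
loop lives in entry.S and the signal code, and handle_work() here is a hypothetical
stand-in:

static void sketch_exit_to_user(struct thread_info *ti)
{
	while (ti->flags & _TIF_ALLWORK_MASK) {
		if (ti->flags & _TIF_NEED_RESCHED)
			schedule();
		if (ti->flags & _TIF_DO_NOTIFY_MASK)
			handle_work(ti);	/* signals, MCE, hrtick */
	}
}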
 
diff --git a/include/asm-x86/tlbflush.h b/include/asm-x86/tlbflush.h
index 3998709..0c0674d 100644
--- a/include/asm-x86/tlbflush.h
+++ b/include/asm-x86/tlbflush.h
@@ -32,7 +32,7 @@
 
 static inline void __native_flush_tlb_single(unsigned long addr)
 {
-	__asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory");
+	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
 }
 
 static inline void __flush_tlb_all(void)
@@ -134,8 +134,7 @@
 #define TLBSTATE_LAZY	2
 
 #ifdef CONFIG_X86_32
-struct tlb_state
-{
+struct tlb_state {
 	struct mm_struct *active_mm;
 	int state;
 	char __cacheline_padding[L1_CACHE_BYTES-8];
diff --git a/include/asm-x86/topology.h b/include/asm-x86/topology.h
index 8af05a9..81a29eb 100644
--- a/include/asm-x86/topology.h
+++ b/include/asm-x86/topology.h
@@ -32,15 +32,20 @@
 /* Mappings between logical cpu number and node number */
 #ifdef CONFIG_X86_32
 extern int cpu_to_node_map[];
-
 #else
-DECLARE_PER_CPU(int, x86_cpu_to_node_map);
-extern int x86_cpu_to_node_map_init[];
-extern void *x86_cpu_to_node_map_early_ptr;
 /* Returns the number of the current Node. */
 #define numa_node_id()		(early_cpu_to_node(raw_smp_processor_id()))
 #endif
 
+DECLARE_PER_CPU(int, x86_cpu_to_node_map);
+
+#ifdef CONFIG_SMP
+extern int x86_cpu_to_node_map_init[];
+extern void *x86_cpu_to_node_map_early_ptr;
+#else
+#define x86_cpu_to_node_map_early_ptr NULL
+#endif
+
 extern cpumask_t node_to_cpumask_map[];
 
 #define NUMA_NO_NODE	(-1)
@@ -54,6 +59,8 @@
 }
 
 #else /* CONFIG_X86_64 */
+
+#ifdef CONFIG_SMP
 static inline int early_cpu_to_node(int cpu)
 {
 	int *cpu_to_node_map = x86_cpu_to_node_map_early_ptr;
@@ -65,21 +72,21 @@
 	else
 		return NUMA_NO_NODE;
 }
+#else
+#define	early_cpu_to_node(cpu)	cpu_to_node(cpu)
+#endif
 
 static inline int cpu_to_node(int cpu)
 {
 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
 	if (x86_cpu_to_node_map_early_ptr) {
 		printk("KERN_NOTICE cpu_to_node(%d): usage too early!\n",
-			(int)cpu);
+		       (int)cpu);
 		dump_stack();
 		return ((int *)x86_cpu_to_node_map_early_ptr)[cpu];
 	}
 #endif
-	if (per_cpu_offset(cpu))
-		return per_cpu(x86_cpu_to_node_map, cpu);
-	else
-		return NUMA_NO_NODE;
+	return per_cpu(x86_cpu_to_node_map, cpu);
 }
 #endif /* CONFIG_X86_64 */
 
diff --git a/include/asm-x86/trampoline.h b/include/asm-x86/trampoline.h
new file mode 100644
index 0000000..b156b08
--- /dev/null
+++ b/include/asm-x86/trampoline.h
@@ -0,0 +1,21 @@
+#ifndef __TRAMPOLINE_HEADER
+#define __TRAMPOLINE_HEADER
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Trampoline 80x86 program as an array.
+ */
+extern const unsigned char trampoline_data[];
+extern const unsigned char trampoline_end[];
+extern unsigned char *trampoline_base;
+
+extern unsigned long init_rsp;
+extern unsigned long initial_code;
+
+#define TRAMPOLINE_BASE 0x6000
+extern unsigned long setup_trampoline(void);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __TRAMPOLINE_HEADER */
diff --git a/include/asm-x86/tsc.h b/include/asm-x86/tsc.h
index 7d3e27f..d2d8eb5 100644
--- a/include/asm-x86/tsc.h
+++ b/include/asm-x86/tsc.h
@@ -42,7 +42,7 @@
 	if (!cpu_has_tsc)
 		return 0;
 #endif
-	return (cycles_t) __native_read_tsc();
+	return (cycles_t)__native_read_tsc();
 }
 
 extern void tsc_init(void);
diff --git a/include/asm-x86/uaccess_32.h b/include/asm-x86/uaccess_32.h
index fcc570e..8e7595c 100644
--- a/include/asm-x86/uaccess_32.h
+++ b/include/asm-x86/uaccess_32.h
@@ -32,7 +32,7 @@
 #define get_fs()	(current_thread_info()->addr_limit)
 #define set_fs(x)	(current_thread_info()->addr_limit = (x))
 
-#define segment_eq(a,b)	((a).seg == (b).seg)
+#define segment_eq(a, b)	((a).seg == (b).seg)
 
 /*
  * movsl can be slow when source and dest are not both 8-byte aligned
@@ -43,7 +43,9 @@
 } ____cacheline_aligned_in_smp movsl_mask;
 #endif
 
-#define __addr_ok(addr) ((unsigned long __force)(addr) < (current_thread_info()->addr_limit.seg))
+#define __addr_ok(addr)					\
+	((unsigned long __force)(addr) <		\
+	 (current_thread_info()->addr_limit.seg))
 
 /*
  * Test whether a block of memory is a valid user space address.
@@ -54,13 +56,16 @@
  *
  * This needs 33-bit arithmetic. We have a carry...
  */
-#define __range_ok(addr,size) ({ \
-	unsigned long flag,roksum; \
-	__chk_user_ptr(addr); \
-	asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \
-		:"=&r" (flag), "=r" (roksum) \
-		:"1" (addr),"g" ((int)(size)),"rm" (current_thread_info()->addr_limit.seg)); \
-	flag; })
+#define __range_ok(addr, size)						\
+({									\
+	unsigned long flag, roksum;					\
+	__chk_user_ptr(addr);						\
+	asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0"		\
+	    :"=&r" (flag), "=r" (roksum)				\
+	    :"1" (addr), "g" ((int)(size)),				\
+	    "rm" (current_thread_info()->addr_limit.seg));		\
+	flag;								\
+})
 
 /**
  * access_ok: - Checks if a user space pointer is valid
@@ -81,7 +86,7 @@
  * checks that the pointer is in the user space range - after calling
  * this function, memory access functions may still return -EFAULT.
  */
-#define access_ok(type,addr,size) (likely(__range_ok(addr,size) == 0))
+#define access_ok(type, addr, size) (likely(__range_ok(addr, size) == 0))
 
 /*
  * The exception table consists of pairs of addresses: the first is the
@@ -96,8 +101,7 @@
  * on our cache or tlb entries.
  */
 
-struct exception_table_entry
-{
+struct exception_table_entry {
 	unsigned long insn, fixup;
 };
 
@@ -122,13 +126,15 @@
 extern void __get_user_2(void);
 extern void __get_user_4(void);
 
-#define __get_user_x(size,ret,x,ptr) \
-	__asm__ __volatile__("call __get_user_" #size \
-		:"=a" (ret),"=d" (x) \
-		:"0" (ptr))
+#define __get_user_x(size, ret, x, ptr)	      \
+	asm volatile("call __get_user_" #size \
+		     :"=a" (ret),"=d" (x)     \
+		     :"0" (ptr))
 
 
-/* Careful: we have to cast the result to the type of the pointer for sign reasons */
+/* Careful: we have to cast the result to the type of the pointer
+ * for sign reasons */
+
 /**
  * get_user: - Get a simple variable from user space.
  * @x:   Variable to store result.
@@ -146,15 +152,24 @@
  * Returns zero on success, or -EFAULT on error.
  * On error, the variable @x is set to zero.
  */
-#define get_user(x,ptr)							\
-({	int __ret_gu;							\
+#define get_user(x, ptr)						\
+({									\
+	int __ret_gu;							\
 	unsigned long __val_gu;						\
 	__chk_user_ptr(ptr);						\
-	switch(sizeof (*(ptr))) {					\
-	case 1:  __get_user_x(1,__ret_gu,__val_gu,ptr); break;		\
-	case 2:  __get_user_x(2,__ret_gu,__val_gu,ptr); break;		\
-	case 4:  __get_user_x(4,__ret_gu,__val_gu,ptr); break;		\
-	default: __get_user_x(X,__ret_gu,__val_gu,ptr); break;		\
+	switch (sizeof(*(ptr))) {					\
+	case 1:								\
+		__get_user_x(1, __ret_gu, __val_gu, ptr);		\
+		break;							\
+	case 2:								\
+		__get_user_x(2, __ret_gu, __val_gu, ptr);		\
+		break;							\
+	case 4:								\
+		__get_user_x(4, __ret_gu, __val_gu, ptr);		\
+		break;							\
+	default:							\
+		__get_user_x(X, __ret_gu, __val_gu, ptr);		\
+		break;							\
 	}								\
 	(x) = (__typeof__(*(ptr)))__val_gu;				\
 	__ret_gu;							\
@@ -171,11 +186,25 @@
 extern void __put_user_4(void);
 extern void __put_user_8(void);
 
-#define __put_user_1(x, ptr) __asm__ __volatile__("call __put_user_1":"=a" (__ret_pu):"0" ((typeof(*(ptr)))(x)), "c" (ptr))
-#define __put_user_2(x, ptr) __asm__ __volatile__("call __put_user_2":"=a" (__ret_pu):"0" ((typeof(*(ptr)))(x)), "c" (ptr))
-#define __put_user_4(x, ptr) __asm__ __volatile__("call __put_user_4":"=a" (__ret_pu):"0" ((typeof(*(ptr)))(x)), "c" (ptr))
-#define __put_user_8(x, ptr) __asm__ __volatile__("call __put_user_8":"=a" (__ret_pu):"A" ((typeof(*(ptr)))(x)), "c" (ptr))
-#define __put_user_X(x, ptr) __asm__ __volatile__("call __put_user_X":"=a" (__ret_pu):"c" (ptr))
+#define __put_user_1(x, ptr)					\
+	asm volatile("call __put_user_1" : "=a" (__ret_pu)	\
+		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr))
+
+#define __put_user_2(x, ptr)					\
+	asm volatile("call __put_user_2" : "=a" (__ret_pu)	\
+		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr))
+
+#define __put_user_4(x, ptr)					\
+	asm volatile("call __put_user_4" : "=a" (__ret_pu)	\
+		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr))
+
+#define __put_user_8(x, ptr)					\
+	asm volatile("call __put_user_8" : "=a" (__ret_pu)	\
+		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr))
+
+#define __put_user_X(x, ptr)					\
+	asm volatile("call __put_user_X" : "=a" (__ret_pu)	\
+		     : "c" (ptr))
 
 /**
  * put_user: - Write a simple value into user space.
@@ -195,32 +224,43 @@
  */
 #ifdef CONFIG_X86_WP_WORKS_OK
 
-#define put_user(x,ptr)						\
-({	int __ret_pu;						\
+#define put_user(x, ptr)					\
+({								\
+	int __ret_pu;						\
 	__typeof__(*(ptr)) __pu_val;				\
 	__chk_user_ptr(ptr);					\
 	__pu_val = x;						\
-	switch(sizeof(*(ptr))) {				\
-	case 1: __put_user_1(__pu_val, ptr); break;		\
-	case 2: __put_user_2(__pu_val, ptr); break;		\
-	case 4: __put_user_4(__pu_val, ptr); break;		\
-	case 8: __put_user_8(__pu_val, ptr); break;		\
-	default:__put_user_X(__pu_val, ptr); break;		\
+	switch (sizeof(*(ptr))) {				\
+	case 1:							\
+		__put_user_1(__pu_val, ptr);			\
+		break;						\
+	case 2:							\
+		__put_user_2(__pu_val, ptr);			\
+		break;						\
+	case 4:							\
+		__put_user_4(__pu_val, ptr);			\
+		break;						\
+	case 8:							\
+		__put_user_8(__pu_val, ptr);			\
+		break;						\
+	default:						\
+		__put_user_X(__pu_val, ptr);			\
+		break;						\
 	}							\
 	__ret_pu;						\
 })
 
 #else
-#define put_user(x,ptr)						\
+#define put_user(x, ptr)					\
 ({								\
- 	int __ret_pu;						\
-	__typeof__(*(ptr)) __pus_tmp = x;			\
-	__ret_pu=0;						\
-	if(unlikely(__copy_to_user_ll(ptr, &__pus_tmp,		\
-				sizeof(*(ptr))) != 0))		\
- 		__ret_pu=-EFAULT;				\
- 	__ret_pu;						\
- })
+	int __ret_pu;						\
+	__typeof__(*(ptr)) __pus_tmp = x;			\
+	__ret_pu = 0;						\
+	if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp,		\
+				       sizeof(*(ptr))) != 0))	\
+		__ret_pu = -EFAULT;				\
+	__ret_pu;						\
+})
 
 
 #endif
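+
+/*
+ * Usage sketch for either definition above (uptr is a hypothetical
+ * int __user pointer):
+ *
+ *	if (put_user(42, uptr))
+ *		return -EFAULT;
+ */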
@@ -245,8 +285,8 @@
  * Returns zero on success, or -EFAULT on error.
  * On error, the variable @x is set to zero.
  */
-#define __get_user(x,ptr) \
-  __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
+#define __get_user(x, ptr)				\
+	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
 
 
 /**
@@ -268,54 +308,62 @@
  *
  * Returns zero on success, or -EFAULT on error.
  */
-#define __put_user(x,ptr) \
-  __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
+#define __put_user(x, ptr)						\
+	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
 
-#define __put_user_nocheck(x,ptr,size)				\
+#define __put_user_nocheck(x, ptr, size)			\
 ({								\
 	long __pu_err;						\
-	__put_user_size((x),(ptr),(size),__pu_err,-EFAULT);	\
+	__put_user_size((x), (ptr), (size), __pu_err, -EFAULT);	\
 	__pu_err;						\
 })
 
 
-#define __put_user_u64(x, addr, err)				\
-	__asm__ __volatile__(					\
-		"1:	movl %%eax,0(%2)\n"			\
-		"2:	movl %%edx,4(%2)\n"			\
-		"3:\n"						\
-		".section .fixup,\"ax\"\n"			\
-		"4:	movl %3,%0\n"				\
-		"	jmp 3b\n"				\
-		".previous\n"					\
-		_ASM_EXTABLE(1b,4b)				\
-		_ASM_EXTABLE(2b,4b)				\
-		: "=r"(err)					\
-		: "A" (x), "r" (addr), "i"(-EFAULT), "0"(err))
+#define __put_user_u64(x, addr, err)					\
+	asm volatile("1:	movl %%eax,0(%2)\n"			\
+		     "2:	movl %%edx,4(%2)\n"			\
+		     "3:\n"						\
+		     ".section .fixup,\"ax\"\n"				\
+		     "4:	movl %3,%0\n"				\
+		     "	jmp 3b\n"					\
+		     ".previous\n"					\
+		     _ASM_EXTABLE(1b, 4b)				\
+		     _ASM_EXTABLE(2b, 4b)				\
+		     : "=r" (err)					\
+		     : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err))
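+
+/*
+ * Note on the fixup above (descriptive, not new behavior): a fault on
+ * either 32-bit store (label 1 or 2) is redirected via _ASM_EXTABLE to
+ * label 4, which loads -EFAULT into the "err" output and jumps to
+ * label 3, past both stores, so each half of the 64-bit write is
+ * covered.
+ */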
 
 #ifdef CONFIG_X86_WP_WORKS_OK
 
-#define __put_user_size(x,ptr,size,retval,errret)			\
+#define __put_user_size(x, ptr, size, retval, errret)			\
 do {									\
 	retval = 0;							\
 	__chk_user_ptr(ptr);						\
 	switch (size) {							\
-	case 1: __put_user_asm(x,ptr,retval,"b","b","iq",errret);break;	\
-	case 2: __put_user_asm(x,ptr,retval,"w","w","ir",errret);break; \
-	case 4: __put_user_asm(x,ptr,retval,"l","","ir",errret); break;	\
-	case 8: __put_user_u64((__typeof__(*ptr))(x),ptr,retval); break;\
-	  default: __put_user_bad();					\
+	case 1:								\
+		__put_user_asm(x, ptr, retval, "b", "b", "iq", errret);	\
+		break;							\
+	case 2:								\
+		__put_user_asm(x, ptr, retval, "w", "w", "ir", errret);	\
+		break;							\
+	case 4:								\
+		__put_user_asm(x, ptr, retval, "l", "",  "ir", errret);	\
+		break;							\
+	case 8:								\
+		__put_user_u64((__typeof__(*ptr))(x), ptr, retval);	\
+		break;							\
+	default:							\
+		__put_user_bad();					\
 	}								\
 } while (0)
 
 #else
 
-#define __put_user_size(x,ptr,size,retval,errret)			\
+#define __put_user_size(x, ptr, size, retval, errret)			\
 do {									\
-	__typeof__(*(ptr)) __pus_tmp = x;				\
+	__typeof__(*(ptr)) __pus_tmp = x;				\
 	retval = 0;							\
 									\
-	if(unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0))	\
+	if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0))	\
 		retval = errret;					\
 } while (0)
 
@@ -329,65 +377,70 @@
  * aliasing issues.
  */
 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
-	__asm__ __volatile__(						\
-		"1:	mov"itype" %"rtype"1,%2\n"			\
-		"2:\n"							\
-		".section .fixup,\"ax\"\n"				\
-		"3:	movl %3,%0\n"					\
-		"	jmp 2b\n"					\
-		".previous\n"						\
-		_ASM_EXTABLE(1b,3b)					\
-		: "=r"(err)						\
-		: ltype (x), "m"(__m(addr)), "i"(errret), "0"(err))
+	asm volatile("1:	mov"itype" %"rtype"1,%2\n"		\
+		     "2:\n"						\
+		     ".section .fixup,\"ax\"\n"				\
+		     "3:	movl %3,%0\n"				\
+		     "	jmp 2b\n"					\
+		     ".previous\n"					\
+		     _ASM_EXTABLE(1b, 3b)				\
+		     : "=r" (err)					\
+		     : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
 
 
-#define __get_user_nocheck(x,ptr,size)				\
-({								\
-	long __gu_err;						\
-	unsigned long __gu_val;					\
-	__get_user_size(__gu_val,(ptr),(size),__gu_err,-EFAULT);\
-	(x) = (__typeof__(*(ptr)))__gu_val;			\
-	__gu_err;						\
+#define __get_user_nocheck(x, ptr, size)				\
+({									\
+	long __gu_err;							\
+	unsigned long __gu_val;						\
+	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);	\
+	(x) = (__typeof__(*(ptr)))__gu_val;				\
+	__gu_err;							\
 })
 
 extern long __get_user_bad(void);
 
-#define __get_user_size(x,ptr,size,retval,errret)			\
+#define __get_user_size(x, ptr, size, retval, errret)			\
 do {									\
 	retval = 0;							\
 	__chk_user_ptr(ptr);						\
 	switch (size) {							\
-	case 1: __get_user_asm(x,ptr,retval,"b","b","=q",errret);break;	\
-	case 2: __get_user_asm(x,ptr,retval,"w","w","=r",errret);break;	\
-	case 4: __get_user_asm(x,ptr,retval,"l","","=r",errret);break;	\
-	default: (x) = __get_user_bad();				\
+	case 1:								\
+		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret);	\
+		break;							\
+	case 2:								\
+		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret);	\
+		break;							\
+	case 4:								\
+		__get_user_asm(x, ptr, retval, "l", "", "=r", errret);	\
+		break;							\
+	default:							\
+		(x) = __get_user_bad();					\
 	}								\
 } while (0)
 
 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
-	__asm__ __volatile__(						\
-		"1:	mov"itype" %2,%"rtype"1\n"			\
-		"2:\n"							\
-		".section .fixup,\"ax\"\n"				\
-		"3:	movl %3,%0\n"					\
-		"	xor"itype" %"rtype"1,%"rtype"1\n"		\
-		"	jmp 2b\n"					\
-		".previous\n"						\
-		_ASM_EXTABLE(1b,3b)					\
-		: "=r"(err), ltype (x)					\
-		: "m"(__m(addr)), "i"(errret), "0"(err))
+	asm volatile("1:	mov"itype" %2,%"rtype"1\n"		\
+		     "2:\n"						\
+		     ".section .fixup,\"ax\"\n"				\
+		     "3:	movl %3,%0\n"				\
+		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
+		     "	jmp 2b\n"					\
+		     ".previous\n"					\
+		     _ASM_EXTABLE(1b, 3b)				\
+		     : "=r" (err), ltype (x)				\
+		     : "m" (__m(addr)), "i" (errret), "0" (err))
 
 
-unsigned long __must_check __copy_to_user_ll(void __user *to,
-				const void *from, unsigned long n);
-unsigned long __must_check __copy_from_user_ll(void *to,
-				const void __user *from, unsigned long n);
-unsigned long __must_check __copy_from_user_ll_nozero(void *to,
-				const void __user *from, unsigned long n);
-unsigned long __must_check __copy_from_user_ll_nocache(void *to,
-				const void __user *from, unsigned long n);
-unsigned long __must_check __copy_from_user_ll_nocache_nozero(void *to,
-				const void __user *from, unsigned long n);
+unsigned long __must_check __copy_to_user_ll
+		(void __user *to, const void *from, unsigned long n);
+unsigned long __must_check __copy_from_user_ll
+		(void *to, const void __user *from, unsigned long n);
+unsigned long __must_check __copy_from_user_ll_nozero
+		(void *to, const void __user *from, unsigned long n);
+unsigned long __must_check __copy_from_user_ll_nocache
+		(void *to, const void __user *from, unsigned long n);
+unsigned long __must_check __copy_from_user_ll_nocache_nozero
+		(void *to, const void __user *from, unsigned long n);
 
 /**
  * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
@@ -416,13 +469,16 @@
 
 		switch (n) {
 		case 1:
-			__put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret, 1);
+			__put_user_size(*(u8 *)from, (u8 __user *)to,
+					1, ret, 1);
 			return ret;
 		case 2:
-			__put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret, 2);
+			__put_user_size(*(u16 *)from, (u16 __user *)to,
+					2, ret, 2);
 			return ret;
 		case 4:
-			__put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret, 4);
+			__put_user_size(*(u32 *)from, (u32 __user *)to,
+					4, ret, 4);
 			return ret;
 		}
 	}
@@ -545,19 +601,21 @@
 }
 
 static __always_inline unsigned long
-__copy_from_user_inatomic_nocache(void *to, const void __user *from, unsigned long n)
+__copy_from_user_inatomic_nocache(void *to, const void __user *from,
+				  unsigned long n)
 {
        return __copy_from_user_ll_nocache_nozero(to, from, n);
 }
 
 unsigned long __must_check copy_to_user(void __user *to,
-				const void *from, unsigned long n);
+					const void *from, unsigned long n);
 unsigned long __must_check copy_from_user(void *to,
-				const void __user *from, unsigned long n);
+					  const void __user *from,
+					  unsigned long n);
 long __must_check strncpy_from_user(char *dst, const char __user *src,
-				long count);
+				    long count);
 long __must_check __strncpy_from_user(char *dst,
-				const char __user *src, long count);
+				      const char __user *src, long count);
 
 /**
  * strlen_user: - Get the size of a string in user space.
diff --git a/include/asm-x86/uaccess_64.h b/include/asm-x86/uaccess_64.h
index b87eb4b..b8a2f43 100644
--- a/include/asm-x86/uaccess_64.h
+++ b/include/asm-x86/uaccess_64.h
@@ -29,23 +29,27 @@
 #define get_fs()	(current_thread_info()->addr_limit)
 #define set_fs(x)	(current_thread_info()->addr_limit = (x))
 
-#define segment_eq(a,b)	((a).seg == (b).seg)
+#define segment_eq(a, b)	((a).seg == (b).seg)
 
-#define __addr_ok(addr) (!((unsigned long)(addr) & (current_thread_info()->addr_limit.seg)))
+#define __addr_ok(addr) (!((unsigned long)(addr) &			\
+			   (current_thread_info()->addr_limit.seg)))
 
 /*
  * Uhhuh, this needs 65-bit arithmetic. We have a carry..
  */
-#define __range_not_ok(addr,size) ({ \
-	unsigned long flag,roksum; \
-	__chk_user_ptr(addr); \
-	asm("# range_ok\n\r" \
-		"addq %3,%1 ; sbbq %0,%0 ; cmpq %1,%4 ; sbbq $0,%0"  \
-		:"=&r" (flag), "=r" (roksum) \
-		:"1" (addr),"g" ((long)(size)),"g" (current_thread_info()->addr_limit.seg)); \
-	flag; })
+#define __range_not_ok(addr, size)					\
+({									\
+	unsigned long flag, roksum;					\
+	__chk_user_ptr(addr);						\
+	asm("# range_ok\n\r"						\
+	    "addq %3,%1 ; sbbq %0,%0 ; cmpq %1,%4 ; sbbq $0,%0"		\
+	    : "=&r" (flag), "=r" (roksum)				\
+	    : "1" (addr), "g" ((long)(size)),				\
+	      "g" (current_thread_info()->addr_limit.seg));		\
+	flag;								\
+})
 
-#define access_ok(type, addr, size) (__range_not_ok(addr,size) == 0)
+#define access_ok(type, addr, size) (__range_not_ok(addr, size) == 0)
 
 /*
  * The exception table consists of pairs of addresses: the first is the
@@ -60,8 +64,7 @@
  * on our cache or tlb entries.
  */
 
-struct exception_table_entry
-{
+struct exception_table_entry {
 	unsigned long insn, fixup;
 };
 
@@ -84,23 +87,36 @@
  * accesses to the same area of user memory).
  */
 
-#define __get_user_x(size,ret,x,ptr) \
-	asm volatile("call __get_user_" #size \
-		:"=a" (ret),"=d" (x) \
-		:"c" (ptr) \
-		:"r8")
+#define __get_user_x(size, ret, x, ptr)		      \
+	asm volatile("call __get_user_" #size	      \
+		     : "=a" (ret),"=d" (x)	      \
+		     : "c" (ptr)		      \
+		     : "r8")
 
-/* Careful: we have to cast the result to the type of the pointer for sign reasons */
-#define get_user(x,ptr)							\
-({	unsigned long __val_gu;						\
-	int __ret_gu; 							\
+/* Careful: we have to cast the result to the type of the pointer
+ * for sign reasons */
+
+#define get_user(x, ptr)						\
+({									\
+	unsigned long __val_gu;						\
+	int __ret_gu;							\
 	__chk_user_ptr(ptr);						\
-	switch(sizeof (*(ptr))) {					\
-	case 1:  __get_user_x(1,__ret_gu,__val_gu,ptr); break;		\
-	case 2:  __get_user_x(2,__ret_gu,__val_gu,ptr); break;		\
-	case 4:  __get_user_x(4,__ret_gu,__val_gu,ptr); break;		\
-	case 8:  __get_user_x(8,__ret_gu,__val_gu,ptr); break;		\
-	default: __get_user_bad(); break;				\
+	switch (sizeof(*(ptr))) {					\
+	case 1:								\
+		__get_user_x(1, __ret_gu, __val_gu, ptr);		\
+		break;							\
+	case 2:								\
+		__get_user_x(2, __ret_gu, __val_gu, ptr);		\
+		break;							\
+	case 4:								\
+		__get_user_x(4, __ret_gu, __val_gu, ptr);		\
+		break;							\
+	case 8:								\
+		__get_user_x(8, __ret_gu, __val_gu, ptr);		\
+		break;							\
+	default:							\
+		__get_user_bad();					\
+		break;							\
 	}								\
 	(x) = (__force typeof(*(ptr)))__val_gu;				\
 	__ret_gu;							\
@@ -112,55 +128,73 @@
 extern void __put_user_8(void);
 extern void __put_user_bad(void);
 
-#define __put_user_x(size,ret,x,ptr)					\
-	asm volatile("call __put_user_" #size			\
-		:"=a" (ret)						\
-		:"c" (ptr),"d" (x)					\
-		:"r8")
+#define __put_user_x(size, ret, x, ptr)					\
+	asm volatile("call __put_user_" #size				\
+		     :"=a" (ret)					\
+		     :"c" (ptr),"d" (x)					\
+		     :"r8")
 
-#define put_user(x,ptr)							\
-  __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
+#define put_user(x, ptr)						\
+	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
 
-#define __get_user(x,ptr) \
-  __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
-#define __put_user(x,ptr) \
-  __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
+#define __get_user(x, ptr)						\
+	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+#define __put_user(x, ptr)						\
+	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
 
 #define __get_user_unaligned __get_user
 #define __put_user_unaligned __put_user
 
-#define __put_user_nocheck(x,ptr,size)			\
+#define __put_user_nocheck(x, ptr, size)		\
 ({							\
 	int __pu_err;					\
-	__put_user_size((x),(ptr),(size),__pu_err);	\
+	__put_user_size((x), (ptr), (size), __pu_err);	\
 	__pu_err;					\
 })
 
 
-#define __put_user_check(x,ptr,size)			\
-({							\
-	int __pu_err;					\
-	typeof(*(ptr)) __user *__pu_addr = (ptr);	\
-	switch (size) { 				\
-	case 1: __put_user_x(1,__pu_err,x,__pu_addr); break;	\
-	case 2: __put_user_x(2,__pu_err,x,__pu_addr); break;	\
-	case 4: __put_user_x(4,__pu_err,x,__pu_addr); break;	\
-	case 8: __put_user_x(8,__pu_err,x,__pu_addr); break;	\
-	default: __put_user_bad();			\
-	}						\
-	__pu_err;					\
+#define __put_user_check(x, ptr, size)				\
+({								\
+	int __pu_err;						\
+	typeof(*(ptr)) __user *__pu_addr = (ptr);		\
+	switch (size) {						\
+	case 1:							\
+		__put_user_x(1, __pu_err, x, __pu_addr);	\
+		break;						\
+	case 2:							\
+		__put_user_x(2, __pu_err, x, __pu_addr);	\
+		break;						\
+	case 4:							\
+		__put_user_x(4, __pu_err, x, __pu_addr);	\
+		break;						\
+	case 8:							\
+		__put_user_x(8, __pu_err, x, __pu_addr);	\
+		break;						\
+	default:						\
+		__put_user_bad();				\
+	}							\
+	__pu_err;						\
 })
 
-#define __put_user_size(x,ptr,size,retval)				\
+#define __put_user_size(x, ptr, size, retval)				\
 do {									\
 	retval = 0;							\
 	__chk_user_ptr(ptr);						\
 	switch (size) {							\
-	  case 1: __put_user_asm(x,ptr,retval,"b","b","iq",-EFAULT); break;\
-	  case 2: __put_user_asm(x,ptr,retval,"w","w","ir",-EFAULT); break;\
-	  case 4: __put_user_asm(x,ptr,retval,"l","k","ir",-EFAULT); break;\
-	  case 8: __put_user_asm(x,ptr,retval,"q","","Zr",-EFAULT); break;\
-	  default: __put_user_bad();					\
+	case 1:								\
+		__put_user_asm(x, ptr, retval, "b", "b", "iq", -EFAULT);\
+		break;							\
+	case 2:								\
+		__put_user_asm(x, ptr, retval, "w", "w", "ir", -EFAULT);\
+		break;							\
+	case 4:								\
+		__put_user_asm(x, ptr, retval, "l", "k", "ir", -EFAULT);\
+		break;							\
+	case 8:								\
+		__put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT);	\
+		break;							\
+	default:							\
+		__put_user_bad();					\
 	}								\
 } while (0)
 
@@ -174,23 +208,22 @@
  * aliasing issues.
  */
 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errno)	\
-	asm volatile(					\
-		"1:	mov"itype" %"rtype"1,%2\n"		\
-		"2:\n"						\
-		".section .fixup,\"ax\"\n"			\
-		"3:	mov %3,%0\n"				\
-		"	jmp 2b\n"				\
-		".previous\n"					\
-		_ASM_EXTABLE(1b,3b)				\
-		: "=r"(err)					\
-		: ltype (x), "m"(__m(addr)), "i"(errno), "0"(err))
+	asm volatile("1:	mov"itype" %"rtype"1,%2\n"		\
+		     "2:\n"						\
+		     ".section .fixup, \"ax\"\n"			\
+		     "3:	mov %3,%0\n"				\
+		     "	jmp 2b\n"					\
+		     ".previous\n"					\
+		     _ASM_EXTABLE(1b, 3b)				\
+		     : "=r" (err)					\
+		     : ltype (x), "m" (__m(addr)), "i" (errno), "0" (err))
 
 
-#define __get_user_nocheck(x,ptr,size)				\
+#define __get_user_nocheck(x, ptr, size)			\
 ({								\
 	int __gu_err;						\
 	unsigned long __gu_val;					\
-	__get_user_size(__gu_val,(ptr),(size),__gu_err);	\
+	__get_user_size(__gu_val, (ptr), (size), __gu_err);	\
 	(x) = (__force typeof(*(ptr)))__gu_val;			\
 	__gu_err;						\
 })
@@ -201,31 +234,39 @@
 extern int __get_user_8(void);
 extern int __get_user_bad(void);
 
-#define __get_user_size(x,ptr,size,retval)				\
+#define __get_user_size(x, ptr, size, retval)				\
 do {									\
 	retval = 0;							\
 	__chk_user_ptr(ptr);						\
 	switch (size) {							\
-	  case 1: __get_user_asm(x,ptr,retval,"b","b","=q",-EFAULT); break;\
-	  case 2: __get_user_asm(x,ptr,retval,"w","w","=r",-EFAULT); break;\
-	  case 4: __get_user_asm(x,ptr,retval,"l","k","=r",-EFAULT); break;\
-	  case 8: __get_user_asm(x,ptr,retval,"q","","=r",-EFAULT); break;\
-	  default: (x) = __get_user_bad();				\
+	case 1:								\
+		__get_user_asm(x, ptr, retval, "b", "b", "=q", -EFAULT);\
+		break;							\
+	case 2:								\
+		__get_user_asm(x, ptr, retval, "w", "w", "=r", -EFAULT);\
+		break;							\
+	case 4:								\
+		__get_user_asm(x, ptr, retval, "l", "k", "=r", -EFAULT);\
+		break;							\
+	case 8:								\
+		__get_user_asm(x, ptr, retval, "q", "", "=r", -EFAULT);	\
+		break;							\
+	default:							\
+		(x) = __get_user_bad();					\
 	}								\
 } while (0)
 
 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errno)	\
-	asm volatile(					\
-		"1:	mov"itype" %2,%"rtype"1\n"		\
-		"2:\n"						\
-		".section .fixup,\"ax\"\n"			\
-		"3:	mov %3,%0\n"				\
-		"	xor"itype" %"rtype"1,%"rtype"1\n"	\
-		"	jmp 2b\n"				\
-		".previous\n"					\
-		_ASM_EXTABLE(1b,3b)				\
-		: "=r"(err), ltype (x)				\
-		: "m"(__m(addr)), "i"(errno), "0"(err))
+	asm volatile("1:	mov"itype" %2,%"rtype"1\n"		\
+		     "2:\n"						\
+		     ".section .fixup, \"ax\"\n"			\
+		     "3:	mov %3,%0\n"				\
+		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
+		     "	jmp 2b\n"					\
+		     ".previous\n"					\
+		     _ASM_EXTABLE(1b, 3b)				\
+		     : "=r" (err), ltype (x)				\
+		     : "m" (__m(addr)), "i" (errno), "0" (err))
 
 /*
  * Copy To/From Userspace
@@ -244,110 +285,142 @@
 
 static __always_inline __must_check
 int __copy_from_user(void *dst, const void __user *src, unsigned size)
-{ 
+{
 	int ret = 0;
 	if (!__builtin_constant_p(size))
-		return copy_user_generic(dst,(__force void *)src,size);
-	switch (size) { 
-	case 1:__get_user_asm(*(u8*)dst,(u8 __user *)src,ret,"b","b","=q",1); 
+		return copy_user_generic(dst, (__force void *)src, size);
+	switch (size) {
+	case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
+			      ret, "b", "b", "=q", 1);
 		return ret;
-	case 2:__get_user_asm(*(u16*)dst,(u16 __user *)src,ret,"w","w","=r",2);
+	case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
+			      ret, "w", "w", "=r", 2);
 		return ret;
-	case 4:__get_user_asm(*(u32*)dst,(u32 __user *)src,ret,"l","k","=r",4);
+	case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
+			      ret, "l", "k", "=r", 4);
 		return ret;
-	case 8:__get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",8);
-		return ret; 
+	case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
+			      ret, "q", "", "=r", 8);
+		return ret;
 	case 10:
-	       	__get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",16);
-		if (unlikely(ret)) return ret;
-		__get_user_asm(*(u16*)(8+(char*)dst),(u16 __user *)(8+(char __user *)src),ret,"w","w","=r",2);
-		return ret; 
+		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
+			       ret, "q", "", "=r", 16);
+		if (unlikely(ret))
+			return ret;
+		__get_user_asm(*(u16 *)(8 + (char *)dst),
+			       (u16 __user *)(8 + (char __user *)src),
+			       ret, "w", "w", "=r", 2);
+		return ret;
 	case 16:
-		__get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",16);
-		if (unlikely(ret)) return ret;
-		__get_user_asm(*(u64*)(8+(char*)dst),(u64 __user *)(8+(char __user *)src),ret,"q","","=r",8);
-		return ret; 
+		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
+			       ret, "q", "", "=r", 16);
+		if (unlikely(ret))
+			return ret;
+		__get_user_asm(*(u64 *)(8 + (char *)dst),
+			       (u64 __user *)(8 + (char __user *)src),
+			       ret, "q", "", "=r", 8);
+		return ret;
 	default:
-		return copy_user_generic(dst,(__force void *)src,size); 
+		return copy_user_generic(dst, (__force void *)src, size);
 	}
-}	
+}
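+
+/*
+ * Note on the 10- and 16-byte cases above: the copy is split into an
+ * 8-byte move plus a 2- or 8-byte tail, and the first __get_user_asm
+ * passes 16 as its final argument so that a fault in the first half
+ * reports the larger remaining size (these helpers return the number
+ * of bytes not copied, not -EFAULT).
+ */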
 
 static __always_inline __must_check
 int __copy_to_user(void __user *dst, const void *src, unsigned size)
-{ 
+{
 	int ret = 0;
 	if (!__builtin_constant_p(size))
-		return copy_user_generic((__force void *)dst,src,size);
-	switch (size) { 
-	case 1:__put_user_asm(*(u8*)src,(u8 __user *)dst,ret,"b","b","iq",1); 
+		return copy_user_generic((__force void *)dst, src, size);
+	switch (size) {
+	case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
+			      ret, "b", "b", "iq", 1);
 		return ret;
-	case 2:__put_user_asm(*(u16*)src,(u16 __user *)dst,ret,"w","w","ir",2);
+	case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
+			      ret, "w", "w", "ir", 2);
 		return ret;
-	case 4:__put_user_asm(*(u32*)src,(u32 __user *)dst,ret,"l","k","ir",4);
+	case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
+			      ret, "l", "k", "ir", 4);
 		return ret;
-	case 8:__put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",8);
-		return ret; 
+	case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
+			      ret, "q", "", "ir", 8);
+		return ret;
 	case 10:
-		__put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",10);
-		if (unlikely(ret)) return ret;
+		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
+			       ret, "q", "", "ir", 10);
+		if (unlikely(ret))
+			return ret;
 		asm("":::"memory");
-		__put_user_asm(4[(u16*)src],4+(u16 __user *)dst,ret,"w","w","ir",2);
-		return ret; 
+		__put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
+			       ret, "w", "w", "ir", 2);
+		return ret;
 	case 16:
-		__put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",16);
-		if (unlikely(ret)) return ret;
+		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
+			       ret, "q", "", "ir", 16);
+		if (unlikely(ret))
+			return ret;
 		asm("":::"memory");
-		__put_user_asm(1[(u64*)src],1+(u64 __user *)dst,ret,"q","","ir",8);
-		return ret; 
+		__put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
+			       ret, "q", "", "ir", 8);
+		return ret;
 	default:
-		return copy_user_generic((__force void *)dst,src,size); 
+		return copy_user_generic((__force void *)dst, src, size);
 	}
-}	
+}
 
 static __always_inline __must_check
 int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
-{ 
+{
 	int ret = 0;
 	if (!__builtin_constant_p(size))
-		return copy_user_generic((__force void *)dst,(__force void *)src,size);
-	switch (size) { 
-	case 1: { 
+		return copy_user_generic((__force void *)dst,
+					 (__force void *)src, size);
+	switch (size) {
+	case 1: {
 		u8 tmp;
-		__get_user_asm(tmp,(u8 __user *)src,ret,"b","b","=q",1); 
+		__get_user_asm(tmp, (u8 __user *)src,
+			       ret, "b", "b", "=q", 1);
 		if (likely(!ret))
-			__put_user_asm(tmp,(u8 __user *)dst,ret,"b","b","iq",1); 
+			__put_user_asm(tmp, (u8 __user *)dst,
+				       ret, "b", "b", "iq", 1);
 		return ret;
 	}
-	case 2: { 
+	case 2: {
 		u16 tmp;
-		__get_user_asm(tmp,(u16 __user *)src,ret,"w","w","=r",2); 
+		__get_user_asm(tmp, (u16 __user *)src,
+			       ret, "w", "w", "=r", 2);
 		if (likely(!ret))
-			__put_user_asm(tmp,(u16 __user *)dst,ret,"w","w","ir",2); 
+			__put_user_asm(tmp, (u16 __user *)dst,
+				       ret, "w", "w", "ir", 2);
 		return ret;
 	}
 
-	case 4: { 
+	case 4: {
 		u32 tmp;
-		__get_user_asm(tmp,(u32 __user *)src,ret,"l","k","=r",4); 
+		__get_user_asm(tmp, (u32 __user *)src,
+			       ret, "l", "k", "=r", 4);
 		if (likely(!ret))
-			__put_user_asm(tmp,(u32 __user *)dst,ret,"l","k","ir",4); 
+			__put_user_asm(tmp, (u32 __user *)dst,
+				       ret, "l", "k", "ir", 4);
 		return ret;
 	}
-	case 8: { 
+	case 8: {
 		u64 tmp;
-		__get_user_asm(tmp,(u64 __user *)src,ret,"q","","=r",8); 
+		__get_user_asm(tmp, (u64 __user *)src,
+			       ret, "q", "", "=r", 8);
 		if (likely(!ret))
-			__put_user_asm(tmp,(u64 __user *)dst,ret,"q","","ir",8); 
+			__put_user_asm(tmp, (u64 __user *)dst,
+				       ret, "q", "", "ir", 8);
 		return ret;
 	}
 	default:
-		return copy_user_generic((__force void *)dst,(__force void *)src,size); 
+		return copy_user_generic((__force void *)dst,
+					 (__force void *)src, size);
 	}
-}	
+}
 
-__must_check long 
+__must_check long
 strncpy_from_user(char *dst, const char __user *src, long count);
-__must_check long 
+__must_check long
 __strncpy_from_user(char *dst, const char __user *src, long count);
 __must_check long strnlen_user(const char __user *str, long n);
 __must_check long __strnlen_user(const char __user *str, long n);
@@ -355,7 +428,8 @@
 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
 
-__must_check long __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size);
+__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
+					    unsigned size);
 
 static __must_check __always_inline int
 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
@@ -364,15 +438,19 @@
 }
 
 #define ARCH_HAS_NOCACHE_UACCESS 1
-extern long __copy_user_nocache(void *dst, const void __user *src, unsigned size, int zerorest);
+extern long __copy_user_nocache(void *dst, const void __user *src,
+				unsigned size, int zerorest);
 
-static inline int __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
+static inline int __copy_from_user_nocache(void *dst, const void __user *src,
+					   unsigned size)
 {
 	might_sleep();
 	return __copy_user_nocache(dst, src, size, 1);
 }
 
-static inline int __copy_from_user_inatomic_nocache(void *dst, const void __user *src, unsigned size)
+static inline int __copy_from_user_inatomic_nocache(void *dst,
+						    const void __user *src,
+						    unsigned size)
 {
 	return __copy_user_nocache(dst, src, size, 0);
 }
diff --git a/include/asm-x86/unaligned.h b/include/asm-x86/unaligned.h
index 913598d..d270ffe 100644
--- a/include/asm-x86/unaligned.h
+++ b/include/asm-x86/unaligned.h
@@ -32,6 +32,6 @@
  *
  * Note that unaligned accesses can be very expensive on some architectures.
  */
-#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) ))
+#define put_unaligned(val, ptr) ((void)(*(ptr) = (val)))
 
 #endif /* _ASM_X86_UNALIGNED_H */
diff --git a/include/asm-x86/unistd.h b/include/asm-x86/unistd.h
index 2a58ed3..effc7ad 100644
--- a/include/asm-x86/unistd.h
+++ b/include/asm-x86/unistd.h
@@ -1,11 +1,5 @@
 #ifdef __KERNEL__
-# ifdef CONFIG_X86_32
-#  include "unistd_32.h"
-# else
-#  include "unistd_64.h"
-# endif
-#else
-# ifdef __i386__
+# if defined(CONFIG_X86_32) || defined(__i386__)
 #  include "unistd_32.h"
 # else
 #  include "unistd_64.h"
diff --git a/include/asm-x86/unistd_32.h b/include/asm-x86/unistd_32.h
index 984123a..8317d94 100644
--- a/include/asm-x86/unistd_32.h
+++ b/include/asm-x86/unistd_32.h
@@ -81,7 +81,7 @@
 #define __NR_sigpending		 73
 #define __NR_sethostname	 74
 #define __NR_setrlimit		 75
-#define __NR_getrlimit		 76	/* Back compatible 2Gig limited rlimit */
+#define __NR_getrlimit		 76   /* Back compatible 2Gig limited rlimit */
 #define __NR_getrusage		 77
 #define __NR_gettimeofday	 78
 #define __NR_settimeofday	 79
diff --git a/include/asm-x86/unistd_64.h b/include/asm-x86/unistd_64.h
index 3883ceb..fe26e36 100644
--- a/include/asm-x86/unistd_64.h
+++ b/include/asm-x86/unistd_64.h
@@ -2,7 +2,7 @@
 #define _ASM_X86_64_UNISTD_H_
 
 #ifndef __SYSCALL
-#define __SYSCALL(a,b)
+#define __SYSCALL(a, b)
 #endif
 
 /*
diff --git a/include/asm-x86/user32.h b/include/asm-x86/user32.h
index f769872..a3d9100 100644
--- a/include/asm-x86/user32.h
+++ b/include/asm-x86/user32.h
@@ -1,7 +1,8 @@
 #ifndef USER32_H
 #define USER32_H 1
 
-/* IA32 compatible user structures for ptrace. These should be used for 32bit coredumps too. */
+/* IA32 compatible user structures for ptrace.
+ * These should be used for 32bit coredumps too. */
 
 struct user_i387_ia32_struct {
 	u32	cwd;
@@ -42,9 +43,9 @@
 };
 
 struct user32 {
-  struct user_regs_struct32 regs;		/* Where the registers are actually stored */
+  struct user_regs_struct32 regs; /* Where the registers are actually stored */
   int u_fpvalid;		/* True if math co-processor being used. */
-                                /* for this mess. Not yet used. */
+				/* for this mess. Not yet used. */
   struct user_i387_ia32_struct i387;	/* Math Co-processor registers. */
 /* The rest of this junk is to help gdb figure out what goes where */
   __u32 u_tsize;	/* Text segment size (pages). */
diff --git a/include/asm-x86/user_32.h b/include/asm-x86/user_32.h
index 6157da6..d6e51ed 100644
--- a/include/asm-x86/user_32.h
+++ b/include/asm-x86/user_32.h
@@ -100,10 +100,10 @@
 struct user{
 /* We start with the registers, to mimic the way that "memory" is returned
    from the ptrace(3,...) function.  */
-  struct user_regs_struct regs;		/* Where the registers are actually stored */
+  struct user_regs_struct regs;	/* Where the registers are actually stored */
 /* ptrace does not yet supply these.  Someday.... */
   int u_fpvalid;		/* True if math co-processor being used. */
-                                /* for this mess. Not yet used. */
+				/* for this mess. Not yet used. */
   struct user_i387_struct i387;	/* Math Co-processor registers. */
 /* The rest of this junk is to help gdb figure out what goes where */
   unsigned long int u_tsize;	/* Text segment size (pages). */
@@ -118,7 +118,7 @@
   int reserved;			/* No longer used */
   unsigned long u_ar0;		/* Used by gdb to help find the values for */
 				/* the registers. */
-  struct user_i387_struct* u_fpstate;	/* Math Co-processor pointer. */
+  struct user_i387_struct *u_fpstate;	/* Math Co-processor pointer. */
   unsigned long magic;		/* To uniquely identify a core file */
   char u_comm[32];		/* User command that was responsible */
   int u_debugreg[8];
diff --git a/include/asm-x86/user_64.h b/include/asm-x86/user_64.h
index 9636164..6037b63 100644
--- a/include/asm-x86/user_64.h
+++ b/include/asm-x86/user_64.h
@@ -45,12 +45,13 @@
  */
 
 /* This matches the 64bit FXSAVE format as defined by AMD. It is the same
-   as the 32bit format defined by Intel, except that the selector:offset pairs for
-   data and eip are replaced with flat 64bit pointers. */
+   as the 32bit format defined by Intel, except that the selector:offset pairs
+   for data and eip are replaced with flat 64bit pointers. */
 struct user_i387_struct {
 	unsigned short	cwd;
 	unsigned short	swd;
-	unsigned short	twd; /* Note this is not the same as the 32bit/x87/FSAVE twd */
+	unsigned short	twd;	/* Note this is not the same as
+				   the 32bit/x87/FSAVE twd */
 	unsigned short	fop;
 	__u64	rip;
 	__u64	rdp;
@@ -97,13 +98,14 @@
 /* When the kernel dumps core, it starts by dumping the user struct -
    this will be used by gdb to figure out where the data and stack segments
    are within the file, and what virtual addresses to use. */
-struct user{
+
+struct user {
 /* We start with the registers, to mimic the way that "memory" is returned
    from the ptrace(3,...) function.  */
-  struct user_regs_struct regs;		/* Where the registers are actually stored */
+  struct user_regs_struct regs;	/* Where the registers are actually stored */
 /* ptrace does not yet supply these.  Someday.... */
   int u_fpvalid;		/* True if math co-processor being used. */
-                                /* for this mess. Not yet used. */
+				/* for this mess. Not yet used. */
   int pad0;
   struct user_i387_struct i387;	/* Math Co-processor registers. */
 /* The rest of this junk is to help gdb figure out what goes where */
@@ -120,7 +122,7 @@
   int pad1;
   unsigned long u_ar0;		/* Used by gdb to help find the values for */
 				/* the registers. */
-  struct user_i387_struct* u_fpstate;	/* Math Co-processor pointer. */
+  struct user_i387_struct *u_fpstate;	/* Math Co-processor pointer. */
   unsigned long magic;		/* To uniquely identify a core file */
   char u_comm[32];		/* User command that was responsible */
   unsigned long u_debugreg[8];
diff --git a/include/asm-x86/uv/uv_hub.h b/include/asm-x86/uv/uv_hub.h
new file mode 100644
index 0000000..26b9240
--- /dev/null
+++ b/include/asm-x86/uv/uv_hub.h
@@ -0,0 +1,284 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * SGI UV architectural definitions
+ *
+ * Copyright (C) 2007 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef __ASM_X86_UV_HUB_H__
+#define __ASM_X86_UV_HUB_H__
+
+#include <linux/numa.h>
+#include <linux/percpu.h>
+#include <asm/types.h>
+#include <asm/percpu.h>
+
+
+/*
+ * Addressing Terminology
+ *
+ * 	NASID - network ID of a router, Mbrick or Cbrick. Nasid values of
+ * 		routers always have low bit of 1, C/MBricks have low bit
+ * 		equal to 0. Most addressing macros that target UV hub chips
+ * 		right shift the NASID by 1 to exclude the always-zero bit.
+ *
+ *	SNASID - NASID right shifted by 1 bit.
+ *
+ *
+ *  Memory/UV-HUB Processor Socket Address Format:
+ *  +--------+---------------+---------------------+
+ *  |00..0000|    SNASID     |      NodeOffset     |
+ *  +--------+---------------+---------------------+
+ *           <--- N bits --->|<--------M bits ----->
+ *
+ *	M number of node offset bits (35 .. 40)
+ *	N number of SNASID bits (0 .. 10)
+ *
+ *		Note: M + N cannot currently exceed 44 (x86_64) or 46 (IA64).
+ *		The actual values are configuration dependent and are set at
+ *		boot time
+ *
+ * APICID format
+ * 	NOTE!!!!!! This is the current format of the APICID. However, code
+ * 	should assume that this will change in the future. Use functions
+ * 	in this file for all APICID bit manipulations and conversion.
+ *
+ * 		1111110000000000
+ * 		5432109876543210
+ *		nnnnnnnnnnlc0cch
+ *		sssssssssss
+ *
+ *			n  = snasid bits
+ *			l  = socket number on board
+ *			c  = core
+ *			h  = hyperthread
+ *			s  = bits that are in the socket CSR
+ *
+ *	Note: Processor only supports 12 bits in the APICID register. The ACPI
+ *	      tables hold all 16 bits. Software needs to be aware of this.
+ *
+ * 	      Unless otherwise specified, all references to APICID refer to
+ * 	      the FULL value contained in ACPI tables, not the subset in the
+ * 	      processor APICID register.
+ */
+
+/*
+ * Maximum number of bricks in all partitions and in all coherency domains.
+ * This is the total number of bricks accessible in the numalink fabric. It
+ * includes all C & M bricks. Routers are NOT included.
+ *
+ * This value is also the value of the maximum number of non-router NASIDs
+ * in the numalink fabric.
+ *
+ * NOTE: a brick may be 1 or 2 OS nodes. Don't get these confused.
+ */
+#define UV_MAX_NUMALINK_BLADES	16384
+
+/*
+ * Maximum number of C/Mbricks within a software SSI (hardware may support
+ * more).
+ */
+#define UV_MAX_SSI_BLADES	256
+
+/*
+ * The largest possible NASID of a C or M brick (+ 2)
+ */
+#define UV_MAX_NASID_VALUE	(UV_MAX_NUMALINK_BLADES * 2)
+
+/*
+ * The following defines attributes of the HUB chip. These attributes are
+ * frequently referenced and are kept in the per-cpu data areas of each cpu.
+ * They are kept together in a struct to minimize cache misses.
+ */
+struct uv_hub_info_s {
+	unsigned long	global_mmr_base;
+	unsigned short	local_nasid;
+	unsigned short	gnode_upper;
+	unsigned short	coherency_domain_number;
+	unsigned short	numa_blade_id;
+	unsigned char	blade_processor_id;
+	unsigned char	m_val;
+	unsigned char	n_val;
+};
+DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
+#define uv_hub_info 		(&__get_cpu_var(__uv_hub_info))
+#define uv_cpu_hub_info(cpu)	(&per_cpu(__uv_hub_info, cpu))
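+
+/*
+ * Illustrative use (not part of this patch): uv_hub_info->numa_blade_id
+ * or uv_cpu_hub_info(cpu)->m_val read the cached hub attributes without
+ * touching hardware registers.
+ */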
+
+/*
+ * Local & Global MMR space macros.
+ * 	Note: macros are intended to be used ONLY by inline functions
+ * 	in this file - not by other kernel code.
+ */
+#define UV_SNASID(n)			((n) >> 1)
+#define UV_NASID(n)			((n) << 1)
+
+#define UV_LOCAL_MMR_BASE		0xf4000000UL
+#define UV_GLOBAL_MMR32_BASE		0xf8000000UL
+#define UV_GLOBAL_MMR64_BASE		(uv_hub_info->global_mmr_base)
+
+#define UV_GLOBAL_MMR32_SNASID_MASK	0x3ff
+#define UV_GLOBAL_MMR32_SNASID_SHIFT	15
+#define UV_GLOBAL_MMR64_SNASID_SHIFT	26
+
+#define UV_GLOBAL_MMR32_NASID_BITS(n)					\
+		(((UV_SNASID(n) & UV_GLOBAL_MMR32_SNASID_MASK)) <<	\
+		(UV_GLOBAL_MMR32_SNASID_SHIFT))
+
+#define UV_GLOBAL_MMR64_NASID_BITS(n)					\
+	((unsigned long)UV_SNASID(n) << UV_GLOBAL_MMR64_SNASID_SHIFT)
+
+#define UV_APIC_NASID_SHIFT	6
+
+/*
+ * Extract a NASID from an APICID (full apicid, not processor subset)
+ */
+static inline int uv_apicid_to_nasid(int apicid)
+{
+	return UV_NASID(apicid >> UV_APIC_NASID_SHIFT);
+}
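+
+/*
+ * Worked example (illustrative values): for apicid 0x100, the shift
+ * gives snasid 0x100 >> 6 = 0x4, and UV_NASID(0x4) restores the
+ * always-zero low bit, so uv_apicid_to_nasid(0x100) == 0x8.
+ */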
+
+/*
+ * Access global MMRs using the low memory MMR32 space. This region supports
+ * faster MMR access but not all MMRs are accessible in this space.
+ */
+static inline unsigned long *uv_global_mmr32_address(int nasid,
+				unsigned long offset)
+{
+	return __va(UV_GLOBAL_MMR32_BASE |
+		       UV_GLOBAL_MMR32_NASID_BITS(nasid) | offset);
+}
+
+static inline void uv_write_global_mmr32(int nasid, unsigned long offset,
+				 unsigned long val)
+{
+	*uv_global_mmr32_address(nasid, offset) = val;
+}
+
+static inline unsigned long uv_read_global_mmr32(int nasid,
+						 unsigned long offset)
+{
+	return *uv_global_mmr32_address(nasid, offset);
+}
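+
+/*
+ * Usage sketch (the offset is hypothetical): read an MMR through the
+ * fast 32-bit window and write it back with the enable bit set:
+ *
+ *	unsigned long v = uv_read_global_mmr32(nasid, 0x1000UL);
+ *	uv_write_global_mmr32(nasid, 0x1000UL, v | UV_MMR_ENABLE);
+ */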
+
+/*
+ * Access Global MMR space using the MMR space located at the top of physical
+ * memory.
+ */
+static inline unsigned long *uv_global_mmr64_address(int nasid,
+				unsigned long offset)
+{
+	return __va(UV_GLOBAL_MMR64_BASE |
+		       UV_GLOBAL_MMR64_NASID_BITS(nasid) | offset);
+}
+
+static inline void uv_write_global_mmr64(int nasid, unsigned long offset,
+				unsigned long val)
+{
+	*uv_global_mmr64_address(nasid, offset) = val;
+}
+
+static inline unsigned long uv_read_global_mmr64(int nasid,
+						 unsigned long offset)
+{
+	return *uv_global_mmr64_address(nasid, offset);
+}
+
+/*
+ * Access node local MMRs. Faster than using global space but only local MMRs
+ * are accessible.
+ */
+static inline unsigned long *uv_local_mmr_address(unsigned long offset)
+{
+	return __va(UV_LOCAL_MMR_BASE | offset);
+}
+
+static inline unsigned long uv_read_local_mmr(unsigned long offset)
+{
+	return *uv_local_mmr_address(offset);
+}
+
+static inline void uv_write_local_mmr(unsigned long offset, unsigned long val)
+{
+	*uv_local_mmr_address(offset) = val;
+}
+
+/*
+ * Structures and definitions for converting between cpu, node, and blade
+ * numbers.
+ */
+struct uv_blade_info {
+	unsigned short	nr_possible_cpus;
+	unsigned short	nr_online_cpus;
+	unsigned short	nasid;
+};
+extern struct uv_blade_info *uv_blade_info;
+extern short *uv_node_to_blade;
+extern short *uv_cpu_to_blade;
+extern short uv_possible_blades;
+
+/* Blade-local cpu number of current cpu. Numbered 0 .. <# cpus on the blade> */
+static inline int uv_blade_processor_id(void)
+{
+	return uv_hub_info->blade_processor_id;
+}
+
+/* Blade number of current cpu. Numbered 0 .. <#blades - 1> */
+static inline int uv_numa_blade_id(void)
+{
+	return uv_hub_info->numa_blade_id;
+}
+
+/* Convert a cpu number to the UV blade number */
+static inline int uv_cpu_to_blade_id(int cpu)
+{
+	return uv_cpu_to_blade[cpu];
+}
+
+/* Convert linux node number to the UV blade number */
+static inline int uv_node_to_blade_id(int nid)
+{
+	return uv_node_to_blade[nid];
+}
+
+/* Convert a blade id to the NASID of the blade */
+static inline int uv_blade_to_nasid(int bid)
+{
+	return uv_blade_info[bid].nasid;
+}
+
+/* Determine the number of possible cpus on a blade */
+static inline int uv_blade_nr_possible_cpus(int bid)
+{
+	return uv_blade_info[bid].nr_possible_cpus;
+}
+
+/* Determine the number of online cpus on a blade */
+static inline int uv_blade_nr_online_cpus(int bid)
+{
+	return uv_blade_info[bid].nr_online_cpus;
+}
+
+/* Convert a cpu id to the NASID of the blade containing the cpu */
+static inline int uv_cpu_to_nasid(int cpu)
+{
+	return uv_blade_info[uv_cpu_to_blade_id(cpu)].nasid;
+}
+
+/* Convert a node number to the NASID of the blade */
+static inline int uv_node_to_nasid(int nid)
+{
+	return uv_blade_info[uv_node_to_blade_id(nid)].nasid;
+}
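+
+/*
+ * Descriptive note: uv_cpu_to_nasid(cpu) is simply the composition
+ * uv_blade_to_nasid(uv_cpu_to_blade_id(cpu)), and uv_node_to_nasid()
+ * composes uv_blade_to_nasid() with uv_node_to_blade_id() the same way.
+ */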
+
+/* Maximum possible number of blades */
+static inline int uv_num_possible_blades(void)
+{
+	return uv_possible_blades;
+}
+
+#endif /* __ASM_X86_UV_HUB_H__ */
diff --git a/include/asm-x86/uv/uv_mmrs.h b/include/asm-x86/uv/uv_mmrs.h
new file mode 100644
index 0000000..3b69fe6
--- /dev/null
+++ b/include/asm-x86/uv/uv_mmrs.h
@@ -0,0 +1,373 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * SGI UV MMR definitions
+ *
+ * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef __ASM_X86_UV_MMRS__
+#define __ASM_X86_UV_MMRS__
+
+/*
+ *       AUTO GENERATED - Do not edit
+ */
+
+#define UV_MMR_ENABLE		(1UL << 63)
+
+/* ========================================================================= */
+/*                               UVH_IPI_INT                                 */
+/* ========================================================================= */
+#define UVH_IPI_INT 0x60500UL
+#define UVH_IPI_INT_32 0x0360
+
+#define UVH_IPI_INT_VECTOR_SHFT 0
+#define UVH_IPI_INT_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_IPI_INT_DELIVERY_MODE_SHFT 8
+#define UVH_IPI_INT_DELIVERY_MODE_MASK 0x0000000000000700UL
+#define UVH_IPI_INT_DESTMODE_SHFT 11
+#define UVH_IPI_INT_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_IPI_INT_APIC_ID_SHFT 16
+#define UVH_IPI_INT_APIC_ID_MASK 0x0000ffffffff0000UL
+#define UVH_IPI_INT_SEND_SHFT 63
+#define UVH_IPI_INT_SEND_MASK 0x8000000000000000UL
+
+union uvh_ipi_int_u {
+    unsigned long	v;
+    struct uvh_ipi_int_s {
+	unsigned long	vector_       :  8;  /* RW */
+	unsigned long	delivery_mode :  3;  /* RW */
+	unsigned long	destmode      :  1;  /* RW */
+	unsigned long	rsvd_12_15    :  4;  /*    */
+	unsigned long	apic_id       : 32;  /* RW */
+	unsigned long	rsvd_48_62    : 15;  /*    */
+	unsigned long	send          :  1;  /* WP */
+    } s;
+};
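+
+/*
+ * Illustrative use of the union above (field values are made up):
+ *
+ *	union uvh_ipi_int_u ipi = { .v = 0 };
+ *	ipi.s.vector_ = 0xf0;
+ *	ipi.s.apic_id = 5;
+ *	ipi.s.send    = 1;
+ *	uv_write_global_mmr64(nasid, UVH_IPI_INT, ipi.v);
+ */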
+
+/* ========================================================================= */
+/*                   UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST                     */
+/* ========================================================================= */
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST 0x320050UL
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_32 0x009f0
+
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_SHFT 4
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_MASK 0x000007fffffffff0UL
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_SHFT 49
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_MASK 0x7ffe000000000000UL
+
+union uvh_lb_bau_intd_payload_queue_first_u {
+    unsigned long	v;
+    struct uvh_lb_bau_intd_payload_queue_first_s {
+	unsigned long	rsvd_0_3:  4;  /*    */
+	unsigned long	address : 39;  /* RW */
+	unsigned long	rsvd_43_48:  6;  /*    */
+	unsigned long	node_id : 14;  /* RW */
+	unsigned long	rsvd_63 :  1;  /*    */
+    } s;
+};
+
+/* ========================================================================= */
+/*                    UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST                     */
+/* ========================================================================= */
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST 0x320060UL
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_32 0x009f8
+
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_SHFT 4
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_MASK 0x000007fffffffff0UL
+
+union uvh_lb_bau_intd_payload_queue_last_u {
+    unsigned long	v;
+    struct uvh_lb_bau_intd_payload_queue_last_s {
+	unsigned long	rsvd_0_3:  4;  /*    */
+	unsigned long	address : 39;  /* RW */
+	unsigned long	rsvd_43_63: 21;  /*    */
+    } s;
+};
+
+/* ========================================================================= */
+/*                    UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL                     */
+/* ========================================================================= */
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL 0x320070UL
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_32 0x00a00
+
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_SHFT 4
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_MASK 0x000007fffffffff0UL
+
+union uvh_lb_bau_intd_payload_queue_tail_u {
+    unsigned long	v;
+    struct uvh_lb_bau_intd_payload_queue_tail_s {
+	unsigned long	rsvd_0_3:  4;  /*    */
+	unsigned long	address : 39;  /* RW */
+	unsigned long	rsvd_43_63: 21;  /*    */
+    } s;
+};
+
+/* ========================================================================= */
+/*                   UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE                    */
+/* ========================================================================= */
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL
+
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_SHFT 0
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_MASK 0x0000000000000001UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_SHFT 1
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_MASK 0x0000000000000002UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_2_SHFT 2
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_2_MASK 0x0000000000000004UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_3_SHFT 3
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_3_MASK 0x0000000000000008UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_4_SHFT 4
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_4_MASK 0x0000000000000010UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_5_SHFT 5
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_5_MASK 0x0000000000000020UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_6_SHFT 6
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_6_MASK 0x0000000000000040UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_7_SHFT 7
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_7_MASK 0x0000000000000080UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_SHFT 8
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_MASK 0x0000000000000100UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_1_SHFT 9
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_1_MASK 0x0000000000000200UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_2_SHFT 10
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_2_MASK 0x0000000000000400UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_3_SHFT 11
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_3_MASK 0x0000000000000800UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_4_SHFT 12
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_4_MASK 0x0000000000001000UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_5_SHFT 13
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_5_MASK 0x0000000000002000UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_SHFT 14
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_MASK 0x0000000000004000UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_SHFT 15
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_MASK 0x0000000000008000UL
+union uvh_lb_bau_intd_software_acknowledge_u {
+    unsigned long	v;
+    struct uvh_lb_bau_intd_software_acknowledge_s {
+	unsigned long	pending_0 :  1;  /* RW, W1C */
+	unsigned long	pending_1 :  1;  /* RW, W1C */
+	unsigned long	pending_2 :  1;  /* RW, W1C */
+	unsigned long	pending_3 :  1;  /* RW, W1C */
+	unsigned long	pending_4 :  1;  /* RW, W1C */
+	unsigned long	pending_5 :  1;  /* RW, W1C */
+	unsigned long	pending_6 :  1;  /* RW, W1C */
+	unsigned long	pending_7 :  1;  /* RW, W1C */
+	unsigned long	timeout_0 :  1;  /* RW, W1C */
+	unsigned long	timeout_1 :  1;  /* RW, W1C */
+	unsigned long	timeout_2 :  1;  /* RW, W1C */
+	unsigned long	timeout_3 :  1;  /* RW, W1C */
+	unsigned long	timeout_4 :  1;  /* RW, W1C */
+	unsigned long	timeout_5 :  1;  /* RW, W1C */
+	unsigned long	timeout_6 :  1;  /* RW, W1C */
+	unsigned long	timeout_7 :  1;  /* RW, W1C */
+	unsigned long	rsvd_16_63: 48;  /*    */
+    } s;
+};
+
+/* ========================================================================= */
+/*                UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS                 */
+/* ========================================================================= */
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x0000000000320088UL
+
+/* ========================================================================= */
+/*                     UVH_LB_BAU_SB_ACTIVATION_CONTROL                      */
+/* ========================================================================= */
+#define UVH_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL
+#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_32 0x009d8
+
+#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_SHFT 0
+#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_MASK 0x000000000000003fUL
+#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT 62
+#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_MASK 0x4000000000000000UL
+#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INIT_SHFT 63
+#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INIT_MASK 0x8000000000000000UL
+
+union uvh_lb_bau_sb_activation_control_u {
+    unsigned long	v;
+    struct uvh_lb_bau_sb_activation_control_s {
+	unsigned long	index :  6;  /* RW */
+	unsigned long	rsvd_6_61: 56;  /*    */
+	unsigned long	push  :  1;  /* WP */
+	unsigned long	init  :  1;  /* WP */
+    } s;
+};
+
+/* ========================================================================= */
+/*                    UVH_LB_BAU_SB_ACTIVATION_STATUS_0                      */
+/* ========================================================================= */
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0 0x320030UL
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x009e0
+
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_SHFT 0
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_MASK 0xffffffffffffffffUL
+
+union uvh_lb_bau_sb_activation_status_0_u {
+    unsigned long	v;
+    struct uvh_lb_bau_sb_activation_status_0_s {
+	unsigned long	status : 64;  /* RW */
+    } s;
+};
+
+/* ========================================================================= */
+/*                    UVH_LB_BAU_SB_ACTIVATION_STATUS_1                      */
+/* ========================================================================= */
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1 0x320040UL
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x009e8
+
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_SHFT 0
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_MASK 0xffffffffffffffffUL
+
+union uvh_lb_bau_sb_activation_status_1_u {
+    unsigned long	v;
+    struct uvh_lb_bau_sb_activation_status_1_s {
+	unsigned long	status : 64;  /* RW */
+    } s;
+};
+
+/* ========================================================================= */
+/*                      UVH_LB_BAU_SB_DESCRIPTOR_BASE                        */
+/* ========================================================================= */
+#define UVH_LB_BAU_SB_DESCRIPTOR_BASE 0x320010UL
+#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_32 0x009d0
+
+#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_SHFT 12
+#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x000007fffffff000UL
+#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 49
+#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK 0x7ffe000000000000UL
+
+union uvh_lb_bau_sb_descriptor_base_u {
+    unsigned long	v;
+    struct uvh_lb_bau_sb_descriptor_base_s {
+	unsigned long	rsvd_0_11    : 12;  /*    */
+	unsigned long	page_address : 31;  /* RW */
+	unsigned long	rsvd_43_48   :  6;  /*    */
+	unsigned long	node_id      : 14;  /* RW */
+	unsigned long	rsvd_63      :  1;  /*    */
+    } s;
+};
+
+/* ========================================================================= */
+/*                               UVH_NODE_ID                                 */
+/* ========================================================================= */
+#define UVH_NODE_ID 0x0UL
+
+#define UVH_NODE_ID_FORCE1_SHFT 0
+#define UVH_NODE_ID_FORCE1_MASK 0x0000000000000001UL
+#define UVH_NODE_ID_MANUFACTURER_SHFT 1
+#define UVH_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
+#define UVH_NODE_ID_PART_NUMBER_SHFT 12
+#define UVH_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
+#define UVH_NODE_ID_REVISION_SHFT 28
+#define UVH_NODE_ID_REVISION_MASK 0x00000000f0000000UL
+#define UVH_NODE_ID_NODE_ID_SHFT 32
+#define UVH_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
+#define UVH_NODE_ID_NODES_PER_BIT_SHFT 48
+#define UVH_NODE_ID_NODES_PER_BIT_MASK 0x007f000000000000UL
+#define UVH_NODE_ID_NI_PORT_SHFT 56
+#define UVH_NODE_ID_NI_PORT_MASK 0x0f00000000000000UL
+
+union uvh_node_id_u {
+    unsigned long	v;
+    struct uvh_node_id_s {
+	unsigned long	force1        :  1;  /* RO */
+	unsigned long	manufacturer  : 11;  /* RO */
+	unsigned long	part_number   : 16;  /* RO */
+	unsigned long	revision      :  4;  /* RO */
+	unsigned long	node_id       : 15;  /* RW */
+	unsigned long	rsvd_47       :  1;  /*    */
+	unsigned long	nodes_per_bit :  7;  /* RW */
+	unsigned long	rsvd_55       :  1;  /*    */
+	unsigned long	ni_port       :  4;  /* RO */
+	unsigned long	rsvd_60_63    :  4;  /*    */
+    } s;
+};
+
+/* ========================================================================= */
+/*                    UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR                      */
+/* ========================================================================= */
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
+
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_SHFT 46
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_MASK 0x0000400000000000UL
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_rh_gam_gru_overlay_config_mmr_u {
+    unsigned long	v;
+    struct uvh_rh_gam_gru_overlay_config_mmr_s {
+	unsigned long	rsvd_0_27: 28;  /*    */
+	unsigned long	base   : 18;  /* RW */
+	unsigned long	gr4    :  1;  /* RW */
+	unsigned long	rsvd_47_51:  5;  /*    */
+	unsigned long	n_gru  :  4;  /* RW */
+	unsigned long	rsvd_56_62:  7;  /*    */
+	unsigned long	enable :  1;  /* RW */
+    } s;
+};
+
+/* ========================================================================= */
+/*                    UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR                      */
+/* ========================================================================= */
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
+
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_SHFT 46
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_MASK 0x0000400000000000UL
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_rh_gam_mmr_overlay_config_mmr_u {
+    unsigned long	v;
+    struct uvh_rh_gam_mmr_overlay_config_mmr_s {
+	unsigned long	rsvd_0_25: 26;  /*    */
+	unsigned long	base     : 20;  /* RW */
+	unsigned long	dual_hub :  1;  /* RW */
+	unsigned long	rsvd_47_62: 16;  /*    */
+	unsigned long	enable   :  1;  /* RW */
+    } s;
+};
+
+/* ========================================================================= */
+/*                                 UVH_RTC                                   */
+/* ========================================================================= */
+#define UVH_RTC 0x28000UL
+
+#define UVH_RTC_REAL_TIME_CLOCK_SHFT 0
+#define UVH_RTC_REAL_TIME_CLOCK_MASK 0x00ffffffffffffffUL
+
+union uvh_rtc_u {
+    unsigned long	v;
+    struct uvh_rtc_s {
+	unsigned long	real_time_clock : 56;  /* RW */
+	unsigned long	rsvd_56_63      :  8;  /*    */
+    } s;
+};
+
+/* ========================================================================= */
+/*                          UVH_SI_ADDR_MAP_CONFIG                           */
+/* ========================================================================= */
+#define UVH_SI_ADDR_MAP_CONFIG 0xc80000UL
+
+#define UVH_SI_ADDR_MAP_CONFIG_M_SKT_SHFT 0
+#define UVH_SI_ADDR_MAP_CONFIG_M_SKT_MASK 0x000000000000003fUL
+#define UVH_SI_ADDR_MAP_CONFIG_N_SKT_SHFT 8
+#define UVH_SI_ADDR_MAP_CONFIG_N_SKT_MASK 0x0000000000000f00UL
+
+union uvh_si_addr_map_config_u {
+    unsigned long	v;
+    struct uvh_si_addr_map_config_s {
+	unsigned long	m_skt :  6;  /* RW */
+	unsigned long	rsvd_6_7:  2;  /*    */
+	unsigned long	n_skt :  4;  /* RW */
+	unsigned long	rsvd_12_63: 52;  /*    */
+    } s;
+};
+
+
+#endif /* __ASM_X86_UV_MMRS__ */
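
A minimal usage sketch for the definitions above (editorial, not part of the patch): the SHFT/MASK macros and the bitfield unions are two views of the same 64-bit MMR, and the PENDING_*/TIMEOUT_* bits are marked W1C, so a bit is cleared by writing its mask back. The uv_read_local_mmr()/uv_write_local_mmr() accessor names and the base register define are assumptions here, taken from the same header family.

	/*
	 * Editorial sketch, not part of the patch.  uv_read_local_mmr() and
	 * uv_write_local_mmr() are assumed accessor names; substitute the
	 * platform's real MMR primitives.
	 */
	static void clear_stale_sw_ack(void)
	{
		union uvh_lb_bau_intd_software_acknowledge_u ack;

		ack.v = uv_read_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
		if (ack.s.timeout_0) {
			/*
			 * TIMEOUT_0 is marked W1C (write 1 to clear).  The
			 * ALIAS address is assumed to be the write-to-clear
			 * target for this register.
			 */
			uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
				UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_MASK);
		}
	}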
diff --git a/include/asm-x86/vdso.h b/include/asm-x86/vdso.h
index 629bcb6..86e085e 100644
--- a/include/asm-x86/vdso.h
+++ b/include/asm-x86/vdso.h
@@ -8,9 +8,11 @@
  * Given a pointer to the vDSO image, find the pointer to VDSO64_name
  * as that symbol is defined in the vDSO sources or linker script.
  */
-#define VDSO64_SYMBOL(base, name) ({		\
-	extern const char VDSO64_##name[];	\
-	(void *) (VDSO64_##name - VDSO64_PRELINK + (unsigned long) (base)); })
+#define VDSO64_SYMBOL(base, name)					\
+({									\
+	extern const char VDSO64_##name[];				\
+	(void *)(VDSO64_##name - VDSO64_PRELINK + (unsigned long)(base)); \
+})
 #endif
 
 #if defined CONFIG_X86_32 || defined CONFIG_COMPAT
@@ -20,9 +22,18 @@
  * Given a pointer to the vDSO image, find the pointer to VDSO32_name
  * as that symbol is defined in the vDSO sources or linker script.
  */
-#define VDSO32_SYMBOL(base, name) ({		\
-	extern const char VDSO32_##name[];	\
-	(void *) (VDSO32_##name - VDSO32_PRELINK + (unsigned long) (base)); })
+#define VDSO32_SYMBOL(base, name)					\
+({									\
+	extern const char VDSO32_##name[];				\
+	(void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
+})
 #endif
 
+/*
+ * These symbols are defined with the addresses in the vsyscall page.
+ * See vsyscall-sigreturn.S.
+ */
+extern void __user __kernel_sigreturn;
+extern void __user __kernel_rt_sigreturn;
+
 #endif	/* asm-x86/vdso.h */
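
An editorial sketch of how the reshaped macro is used: given the base address the vDSO image was mapped at, VDSO32_SYMBOL() yields the runtime address of a symbol the vDSO linker script exports. The "sigreturn" name below is illustrative; it assumes a VDSO32_sigreturn linker symbol.

	/* Editorial sketch; assumes the vDSO exports VDSO32_sigreturn. */
	static void *vdso32_sigreturn_addr(void *vdso_base)
	{
		return VDSO32_SYMBOL(vdso_base, sigreturn);
	}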
diff --git a/include/asm-x86/vga.h b/include/asm-x86/vga.h
index 0ecf68a..0ccf804 100644
--- a/include/asm-x86/vga.h
+++ b/include/asm-x86/vga.h
@@ -12,9 +12,9 @@
  *	access the videoram directly without any black magic.
  */
 
-#define VGA_MAP_MEM(x,s) (unsigned long)phys_to_virt(x)
+#define VGA_MAP_MEM(x, s) (unsigned long)phys_to_virt(x)
 
 #define vga_readb(x) (*(x))
-#define vga_writeb(x,y) (*(y) = (x))
+#define vga_writeb(x, y) (*(y) = (x))
 
 #endif
diff --git a/include/asm-x86/vm86.h b/include/asm-x86/vm86.h
index c92fe4a..074b357 100644
--- a/include/asm-x86/vm86.h
+++ b/include/asm-x86/vm86.h
@@ -12,19 +12,13 @@
  * Linus
  */
 
-#define TF_MASK		0x00000100
-#define IF_MASK		0x00000200
-#define IOPL_MASK	0x00003000
-#define NT_MASK		0x00004000
+#include <asm/processor-flags.h>
+
 #ifdef CONFIG_VM86
-#define VM_MASK		0x00020000
+#define X86_VM_MASK	X86_EFLAGS_VM
 #else
-#define VM_MASK		0 /* ignored */
+#define X86_VM_MASK	0 /* No VM86 support */
 #endif
-#define AC_MASK		0x00040000
-#define VIF_MASK	0x00080000	/* virtual interrupt flag */
-#define VIP_MASK	0x00100000	/* virtual interrupt pending */
-#define ID_MASK		0x00200000
 
 #define BIOSSEG		0x0f000
 
@@ -42,9 +36,11 @@
 #define VM86_ARG(retval)	((retval) >> 8)
 
 #define VM86_SIGNAL	0	/* return due to signal */
-#define VM86_UNKNOWN	1	/* unhandled GP fault - IO-instruction or similar */
+#define VM86_UNKNOWN	1	/* unhandled GP fault
+				   - IO-instruction or similar */
 #define VM86_INTx	2	/* int3/int x instruction (ARG = x) */
-#define VM86_STI	3	/* sti/popf/iret instruction enabled virtual interrupts */
+#define VM86_STI	3	/* sti/popf/iret instruction enabled
+				   virtual interrupts */
 
 /*
  * Additional return values when invoking new vm86()
@@ -205,7 +201,8 @@
 #define handle_vm86_fault(a, b)
 #define release_vm86_irqs(a)
 
-static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c) {
+static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c)
+{
 	return 0;
 }
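
With the private VM_MASK gone, v8086-mode checks are spelled with the shared processor-flags names; an editorial sketch:

	/* Editorial sketch; regs->flags was named eflags in older 32-bit trees. */
	static inline int in_v8086_mode(struct pt_regs *regs)
	{
		return (regs->flags & X86_VM_MASK) != 0;
	}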
 
diff --git a/include/asm-x86/vmi.h b/include/asm-x86/vmi.h
index eb8bd89..b7c0dea 100644
--- a/include/asm-x86/vmi.h
+++ b/include/asm-x86/vmi.h
@@ -155,9 +155,9 @@
 
 #ifndef __ASSEMBLY__
 struct vmi_relocation_info {
-        unsigned char           *eip;
-        unsigned char           type;
-        unsigned char           reserved[3];
+	unsigned char           *eip;
+	unsigned char           type;
+	unsigned char           reserved[3];
 };
 #endif
 
@@ -173,53 +173,53 @@
 #ifndef __ASSEMBLY__
 
 struct vrom_header {
-	u16     rom_signature;  // option ROM signature
-	u8      rom_length;     // ROM length in 512 byte chunks
-	u8      rom_entry[4];   // 16-bit code entry point
-	u8      rom_pad0;       // 4-byte align pad
-	u32     vrom_signature; // VROM identification signature
-	u8      api_version_min;// Minor version of API
-	u8      api_version_maj;// Major version of API
-	u8      jump_slots;     // Number of jump slots
-	u8      reserved1;      // Reserved for expansion
-	u32     virtual_top;    // Hypervisor virtual address start
-	u16     reserved2;      // Reserved for expansion
-	u16	license_offs;	// Offset to License string
-	u16     pci_header_offs;// Offset to PCI OPROM header
-	u16     pnp_header_offs;// Offset to PnP OPROM header
-	u32     rom_pad3;       // PnP reserverd / VMI reserved
-	u8      reserved[96];   // Reserved for headers
-	char    vmi_init[8];    // VMI_Init jump point
-	char    get_reloc[8];   // VMI_GetRelocationInfo jump point
+	u16     rom_signature;  /* option ROM signature */
+	u8      rom_length;     /* ROM length in 512 byte chunks */
+	u8      rom_entry[4];   /* 16-bit code entry point */
+	u8      rom_pad0;       /* 4-byte align pad */
+	u32     vrom_signature; /* VROM identification signature */
+	u8      api_version_min;/* Minor version of API */
+	u8      api_version_maj;/* Major version of API */
+	u8      jump_slots;     /* Number of jump slots */
+	u8      reserved1;      /* Reserved for expansion */
+	u32     virtual_top;    /* Hypervisor virtual address start */
+	u16     reserved2;      /* Reserved for expansion */
+	u16	license_offs;	/* Offset to License string */
+	u16     pci_header_offs;/* Offset to PCI OPROM header */
+	u16     pnp_header_offs;/* Offset to PnP OPROM header */
+	u32     rom_pad3;       /* PnP reserved / VMI reserved */
+	u8      reserved[96];   /* Reserved for headers */
+	char    vmi_init[8];    /* VMI_Init jump point */
+	char    get_reloc[8];   /* VMI_GetRelocationInfo jump point */
 } __attribute__((packed));
 
 struct pnp_header {
-        char sig[4];
-        char rev;
-        char size;
-        short next;
-        short res;
-        long devID;
-        unsigned short manufacturer_offset;
-        unsigned short product_offset;
+	char sig[4];
+	char rev;
+	char size;
+	short next;
+	short res;
+	long devID;
+	unsigned short manufacturer_offset;
+	unsigned short product_offset;
 } __attribute__((packed));
 
 struct pci_header {
-        char sig[4];
-        short vendorID;
-        short deviceID;
-        short vpdData;
-        short size;
-        char rev;
-        char class;
-        char subclass;
-        char interface;
-        short chunks;
-        char rom_version_min;
-        char rom_version_maj;
-        char codetype;
-        char lastRom;
-        short reserved;
+	char sig[4];
+	short vendorID;
+	short deviceID;
+	short vpdData;
+	short size;
+	char rev;
+	char class;
+	char subclass;
+	char interface;
+	short chunks;
+	char rom_version_min;
+	char rom_version_maj;
+	char codetype;
+	char lastRom;
+	short reserved;
 } __attribute__((packed));
 
 /* Function prototypes for bootstrapping */
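
An editorial sketch of a probe check built on the header layout above: 0xaa55 is the little-endian u16 read of the standard 55 AA option-ROM signature, and VMI_SIGNATURE is assumed to be the VROM magic defined elsewhere in this header.

	static int looks_like_vmi_rom(const struct vrom_header *rom)
	{
		/* rom_signature: generic option-ROM magic; vrom_signature: VMI magic */
		return rom->rom_signature == 0xaa55 &&
		       rom->vrom_signature == VMI_SIGNATURE;
	}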
diff --git a/include/asm-x86/voyager.h b/include/asm-x86/voyager.h
index 91a9932..9c811d2 100644
--- a/include/asm-x86/voyager.h
+++ b/include/asm-x86/voyager.h
@@ -91,8 +91,7 @@
 #define VOYAGER_WRITE_CONFIG			0x2
 #define VOYAGER_BYPASS				0xff
 
-typedef struct voyager_asic 
-{
+typedef struct voyager_asic {
 	__u8	asic_addr;	/* ASIC address; Level 4 */
 	__u8	asic_type;      /* ASIC type */
 	__u8	asic_id;	/* ASIC id */
@@ -113,7 +112,7 @@
 	__u16   largest_reg;		/* Largest register in the scan path */
 	__u16   smallest_reg;		/* Smallest register in the scan path */
 	voyager_asic_t   *asic;		/* First ASIC in scan path (CAT_I) */
-	struct   voyager_module *submodule;	/* Submodule pointer */ 
+	struct   voyager_module *submodule;	/* Submodule pointer */
 	struct   voyager_module *next;		/* Next module in linked list */
 } voyager_module_t;
 
@@ -135,7 +134,7 @@
 	 __u16 cct_offset;
 	 __u16 log_length;	/* length of err log */
 	 __u16 xsum_end;	/* offset to end of
-							   checksum */
+				   checksum */
 	 __u8  reserved[4];
 	 __u8  sflag;		/* starting sentinel */
 	 __u8  part_number[13];	/* prom part number */
@@ -148,7 +147,8 @@
 
 
 
-#define VOYAGER_EPROM_SIZE_OFFSET   ((__u16)(&(((voyager_eprom_hdr_t *)0)->ee_size)))
+#define VOYAGER_EPROM_SIZE_OFFSET				\
+	((__u16)(&(((voyager_eprom_hdr_t *)0)->ee_size)))
 #define VOYAGER_XSUM_END_OFFSET		0x2a
 
 /* the following three definitions are for internal table layouts
@@ -199,7 +199,7 @@
 #define VOYAGER_WCBIC_TOM_L	0x4
 #define VOYAGER_WCBIC_TOM_H	0x5
 
-/* register defines for Voyager Memory Contol (VMC) 
+/* register defines for Voyager Memory Control (VMC)
  * these are present on L4 machines only */
 #define	VOYAGER_VMC1		0x81
 #define VOYAGER_VMC2		0x91
@@ -334,7 +334,7 @@
 
 struct QuadDescription {
 	__u8  Type;	/* for type 0 (DYADIC or MONADIC) all fields
-                         * will be zero except for slot */
+			 * will be zero except for slot */
 	__u8 StructureVersion;
 	__u32 CPI_BaseAddress;
 	__u32  LARC_BankSize;
@@ -342,7 +342,7 @@
 	__u8  Slot; /* Processor slots 1 - 4 */
 } __attribute__((packed));
 
-struct ProcBoardInfo { 
+struct ProcBoardInfo {
 	__u8 Type;
 	__u8 StructureVersion;
 	__u8 NumberOfBoards;
@@ -382,19 +382,30 @@
  * packed in it by our friend the compiler.
  */
 typedef struct {
-	__u8	Mailbox_SUS;		/* Written to by SUS to give commands/response to the OS */
-	__u8	Mailbox_OS;		/* Written to by the OS to give commands/response to SUS */
-	__u8	SUS_MailboxVersion;	/* Tells the OS which iteration of the interface SUS supports */
-	__u8	OS_MailboxVersion;	/* Tells SUS which iteration of the interface the OS supports */
-	__u32	OS_Flags;		/* Flags set by the OS as info for SUS */
-	__u32	SUS_Flags;		/* Flags set by SUS as info for the OS */
-	__u32	WatchDogPeriod;		/* Watchdog period (in seconds) which the DP uses to see if the OS is dead */
+	__u8	Mailbox_SUS;		/* Written to by SUS to give
+					   commands/response to the OS */
+	__u8	Mailbox_OS;		/* Written to by the OS to give
+					   commands/response to SUS */
+	__u8	SUS_MailboxVersion;	/* Tells the OS which iteration of the
+					   interface SUS supports */
+	__u8	OS_MailboxVersion;	/* Tells SUS which iteration of the
+					   interface the OS supports */
+	__u32	OS_Flags;		/* Flags set by the OS as info for
+					   SUS */
+	__u32	SUS_Flags;		/* Flags set by SUS as info
+					   for the OS */
+	__u32	WatchDogPeriod;		/* Watchdog period (in seconds) which
+					   the DP uses to see if the OS
+					   is dead */
 	__u32	WatchDogCount;		/* Updated by the OS on every tic. */
-	__u32	MemoryFor_SUS_ErrorLog;	/* Flat 32 bit address which tells SUS where to stuff the SUS error log on a dump */
-	MC_SlotInformation_t  MC_SlotInfo[NUMBER_OF_MC_BUSSES*SLOTS_PER_MC_BUS];	/* Storage for MCA POS data */
+	__u32	MemoryFor_SUS_ErrorLog;	/* Flat 32 bit address which tells SUS
+					   where to stuff the SUS error log
+					   on a dump */
+	MC_SlotInformation_t MC_SlotInfo[NUMBER_OF_MC_BUSSES*SLOTS_PER_MC_BUS];
+					/* Storage for MCA POS data */
 	/* All new SECOND_PASS_INTERFACE fields added from this point */
-        struct ProcBoardInfo    *BoardData;
-        struct CPU_Info         *CPU_Data;
+	struct ProcBoardInfo    *BoardData;
+	struct CPU_Info         *CPU_Data;
 	/* All new fields must be added from this point */
 } Voyager_KernelSUS_Mbox_t;
 
@@ -478,7 +489,7 @@
 	__u32	SUS_errorlog;
 	/* lots of system configuration stuff under here */
 };
-	
+
 /* Variables exported by voyager_smp */
 extern __u32 voyager_extended_vic_processors;
 extern __u32 voyager_allowed_boot_processors;
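
Side note on VOYAGER_EPROM_SIZE_OFFSET above: the cast-null-pointer expression is the hand-rolled form of offsetof(); an equivalent editorial spelling would be:

	#include <linux/stddef.h>

	#define VOYAGER_EPROM_SIZE_OFFSET_ALT \
		((__u16)offsetof(voyager_eprom_hdr_t, ee_size))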
diff --git a/include/asm-x86/xor_32.h b/include/asm-x86/xor_32.h
index a41ef1b..067b5c1 100644
--- a/include/asm-x86/xor_32.h
+++ b/include/asm-x86/xor_32.h
@@ -16,12 +16,12 @@
  * Copyright (C) 1998 Ingo Molnar.
  */
 
-#define LD(x,y)		"       movq   8*("#x")(%1), %%mm"#y"   ;\n"
-#define ST(x,y)		"       movq %%mm"#y",   8*("#x")(%1)   ;\n"
-#define XO1(x,y)	"       pxor   8*("#x")(%2), %%mm"#y"   ;\n"
-#define XO2(x,y)	"       pxor   8*("#x")(%3), %%mm"#y"   ;\n"
-#define XO3(x,y)	"       pxor   8*("#x")(%4), %%mm"#y"   ;\n"
-#define XO4(x,y)	"       pxor   8*("#x")(%5), %%mm"#y"   ;\n"
+#define LD(x, y)	"       movq   8*("#x")(%1), %%mm"#y"   ;\n"
+#define ST(x, y)	"       movq %%mm"#y",   8*("#x")(%1)   ;\n"
+#define XO1(x, y)	"       pxor   8*("#x")(%2), %%mm"#y"   ;\n"
+#define XO2(x, y)	"       pxor   8*("#x")(%3), %%mm"#y"   ;\n"
+#define XO3(x, y)	"       pxor   8*("#x")(%4), %%mm"#y"   ;\n"
+#define XO4(x, y)	"       pxor   8*("#x")(%5), %%mm"#y"   ;\n"
 
 #include <asm/i387.h>
 
@@ -32,24 +32,24 @@
 
 	kernel_fpu_begin();
 
-	__asm__ __volatile__ (
+	asm volatile(
 #undef BLOCK
-#define BLOCK(i) \
-	LD(i,0)					\
-		LD(i+1,1)			\
-			LD(i+2,2)		\
-				LD(i+3,3)	\
-	XO1(i,0)				\
-	ST(i,0)					\
-		XO1(i+1,1)			\
-		ST(i+1,1)			\
-			XO1(i+2,2)		\
-			ST(i+2,2)		\
-				XO1(i+3,3)	\
-				ST(i+3,3)
+#define BLOCK(i)				\
+	LD(i, 0)				\
+		LD(i + 1, 1)			\
+			LD(i + 2, 2)		\
+				LD(i + 3, 3)	\
+	XO1(i, 0)				\
+	ST(i, 0)				\
+		XO1(i+1, 1)			\
+		ST(i+1, 1)			\
+			XO1(i + 2, 2)		\
+			ST(i + 2, 2)		\
+				XO1(i + 3, 3)	\
+				ST(i + 3, 3)
 
 	" .align 32			;\n"
-  	" 1:                            ;\n"
+	" 1:                            ;\n"
 
 	BLOCK(0)
 	BLOCK(4)
@@ -76,25 +76,25 @@
 
 	kernel_fpu_begin();
 
-	__asm__ __volatile__ (
+	asm volatile(
 #undef BLOCK
-#define BLOCK(i) \
-	LD(i,0)					\
-		LD(i+1,1)			\
-			LD(i+2,2)		\
-				LD(i+3,3)	\
-	XO1(i,0)				\
-		XO1(i+1,1)			\
-			XO1(i+2,2)		\
-				XO1(i+3,3)	\
-	XO2(i,0)				\
-	ST(i,0)					\
-		XO2(i+1,1)			\
-		ST(i+1,1)			\
-			XO2(i+2,2)		\
-			ST(i+2,2)		\
-				XO2(i+3,3)	\
-				ST(i+3,3)
+#define BLOCK(i)				\
+	LD(i, 0)				\
+		LD(i + 1, 1)			\
+			LD(i + 2, 2)		\
+				LD(i + 3, 3)	\
+	XO1(i, 0)				\
+		XO1(i + 1, 1)			\
+			XO1(i + 2, 2)		\
+				XO1(i + 3, 3)	\
+	XO2(i, 0)				\
+	ST(i, 0)				\
+		XO2(i + 1, 1)			\
+		ST(i + 1, 1)			\
+			XO2(i + 2, 2)		\
+			ST(i + 2, 2)		\
+				XO2(i + 3, 3)	\
+				ST(i + 3, 3)
 
 	" .align 32			;\n"
 	" 1:                            ;\n"
@@ -125,29 +125,29 @@
 
 	kernel_fpu_begin();
 
-	__asm__ __volatile__ (
+	asm volatile(
 #undef BLOCK
-#define BLOCK(i) \
-	LD(i,0)					\
-		LD(i+1,1)			\
-			LD(i+2,2)		\
-				LD(i+3,3)	\
-	XO1(i,0)				\
-		XO1(i+1,1)			\
-			XO1(i+2,2)		\
-				XO1(i+3,3)	\
-	XO2(i,0)				\
-		XO2(i+1,1)			\
-			XO2(i+2,2)		\
-				XO2(i+3,3)	\
-	XO3(i,0)				\
-	ST(i,0)					\
-		XO3(i+1,1)			\
-		ST(i+1,1)			\
-			XO3(i+2,2)		\
-			ST(i+2,2)		\
-				XO3(i+3,3)	\
-				ST(i+3,3)
+#define BLOCK(i)				\
+	LD(i, 0)				\
+		LD(i + 1, 1)			\
+			LD(i + 2, 2)		\
+				LD(i + 3, 3)	\
+	XO1(i, 0)				\
+		XO1(i + 1, 1)			\
+			XO1(i + 2, 2)		\
+				XO1(i + 3, 3)	\
+	XO2(i, 0)				\
+		XO2(i + 1, 1)			\
+			XO2(i + 2, 2)		\
+				XO2(i + 3, 3)	\
+	XO3(i, 0)				\
+	ST(i, 0)				\
+		XO3(i + 1, 1)			\
+		ST(i + 1, 1)			\
+			XO3(i + 2, 2)		\
+			ST(i + 2, 2)		\
+				XO3(i + 3, 3)	\
+				ST(i + 3, 3)
 
 	" .align 32			;\n"
 	" 1:                            ;\n"
@@ -186,35 +186,35 @@
 	   because we modify p4 and p5 there, but we can't mark them
 	   as read/write, otherwise we'd overflow the 10-asm-operands
 	   limit of GCC < 3.1.  */
-	__asm__ ("" : "+r" (p4), "+r" (p5));
+	asm("" : "+r" (p4), "+r" (p5));
 
-	__asm__ __volatile__ (
+	asm volatile(
 #undef BLOCK
-#define BLOCK(i) \
-	LD(i,0)					\
-		LD(i+1,1)			\
-			LD(i+2,2)		\
-				LD(i+3,3)	\
-	XO1(i,0)				\
-		XO1(i+1,1)			\
-			XO1(i+2,2)		\
-				XO1(i+3,3)	\
-	XO2(i,0)				\
-		XO2(i+1,1)			\
-			XO2(i+2,2)		\
-				XO2(i+3,3)	\
-	XO3(i,0)				\
-		XO3(i+1,1)			\
-			XO3(i+2,2)		\
-				XO3(i+3,3)	\
-	XO4(i,0)				\
-	ST(i,0)					\
-		XO4(i+1,1)			\
-		ST(i+1,1)			\
-			XO4(i+2,2)		\
-			ST(i+2,2)		\
-				XO4(i+3,3)	\
-				ST(i+3,3)
+#define BLOCK(i)				\
+	LD(i, 0)				\
+		LD(i + 1, 1)			\
+			LD(i + 2, 2)		\
+				LD(i + 3, 3)	\
+	XO1(i, 0)				\
+		XO1(i + 1, 1)			\
+			XO1(i + 2, 2)		\
+				XO1(i + 3, 3)	\
+	XO2(i, 0)				\
+		XO2(i + 1, 1)			\
+			XO2(i + 2, 2)		\
+				XO2(i + 3, 3)	\
+	XO3(i, 0)				\
+		XO3(i + 1, 1)			\
+			XO3(i + 2, 2)		\
+				XO3(i + 3, 3)	\
+	XO4(i, 0)				\
+	ST(i, 0)				\
+		XO4(i + 1, 1)			\
+		ST(i + 1, 1)			\
+			XO4(i + 2, 2)		\
+			ST(i + 2, 2)		\
+				XO4(i + 3, 3)	\
+				ST(i + 3, 3)
 
 	" .align 32			;\n"
 	" 1:                            ;\n"
@@ -233,13 +233,13 @@
 	"       jnz 1b                ;\n"
 	: "+r" (lines),
 	  "+r" (p1), "+r" (p2), "+r" (p3)
-	: "r" (p4), "r" (p5) 
+	: "r" (p4), "r" (p5)
 	: "memory");
 
 	/* p4 and p5 were modified, and now the variables are dead.
 	   Clobber them just to be sure nobody does something stupid
 	   like assuming they have some legal value.  */
-	__asm__ ("" : "=r" (p4), "=r" (p5));
+	asm("" : "=r" (p4), "=r" (p5));
 
 	kernel_fpu_end();
 }
@@ -259,7 +259,7 @@
 
 	kernel_fpu_begin();
 
-	__asm__ __volatile__ (
+	asm volatile(
 	" .align 32	             ;\n"
 	" 1:                         ;\n"
 	"       movq   (%1), %%mm0   ;\n"
@@ -286,7 +286,7 @@
 	"       pxor 56(%2), %%mm7   ;\n"
 	"       movq %%mm6, 48(%1)   ;\n"
 	"       movq %%mm7, 56(%1)   ;\n"
-	
+
 	"       addl $64, %1         ;\n"
 	"       addl $64, %2         ;\n"
 	"       decl %0              ;\n"
@@ -307,7 +307,7 @@
 
 	kernel_fpu_begin();
 
-	__asm__ __volatile__ (
+	asm volatile(
 	" .align 32,0x90             ;\n"
 	" 1:                         ;\n"
 	"       movq   (%1), %%mm0   ;\n"
@@ -342,7 +342,7 @@
 	"       pxor 56(%3), %%mm7   ;\n"
 	"       movq %%mm6, 48(%1)   ;\n"
 	"       movq %%mm7, 56(%1)   ;\n"
-      
+
 	"       addl $64, %1         ;\n"
 	"       addl $64, %2         ;\n"
 	"       addl $64, %3         ;\n"
@@ -364,7 +364,7 @@
 
 	kernel_fpu_begin();
 
-	__asm__ __volatile__ (
+	asm volatile(
 	" .align 32,0x90             ;\n"
 	" 1:                         ;\n"
 	"       movq   (%1), %%mm0   ;\n"
@@ -407,7 +407,7 @@
 	"       pxor 56(%4), %%mm7   ;\n"
 	"       movq %%mm6, 48(%1)   ;\n"
 	"       movq %%mm7, 56(%1)   ;\n"
-      
+
 	"       addl $64, %1         ;\n"
 	"       addl $64, %2         ;\n"
 	"       addl $64, %3         ;\n"
@@ -436,9 +436,9 @@
 	   because we modify p4 and p5 there, but we can't mark them
 	   as read/write, otherwise we'd overflow the 10-asm-operands
 	   limit of GCC < 3.1.  */
-	__asm__ ("" : "+r" (p4), "+r" (p5));
+	asm("" : "+r" (p4), "+r" (p5));
 
-	__asm__ __volatile__ (
+	asm volatile(
 	" .align 32,0x90             ;\n"
 	" 1:                         ;\n"
 	"       movq   (%1), %%mm0   ;\n"
@@ -489,7 +489,7 @@
 	"       pxor 56(%5), %%mm7   ;\n"
 	"       movq %%mm6, 48(%1)   ;\n"
 	"       movq %%mm7, 56(%1)   ;\n"
-      
+
 	"       addl $64, %1         ;\n"
 	"       addl $64, %2         ;\n"
 	"       addl $64, %3         ;\n"
@@ -505,7 +505,7 @@
 	/* p4 and p5 were modified, and now the variables are dead.
 	   Clobber them just to be sure nobody does something stupid
 	   like assuming they have some legal value.  */
-	__asm__ ("" : "=r" (p4), "=r" (p5));
+	asm("" : "=r" (p4), "=r" (p5));
 
 	kernel_fpu_end();
 }
@@ -531,11 +531,12 @@
  * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
  */
 
-#define XMMS_SAVE do {				\
+#define XMMS_SAVE				\
+do {						\
 	preempt_disable();			\
 	cr0 = read_cr0();			\
 	clts();					\
-	__asm__ __volatile__ ( 			\
+	asm volatile(				\
 		"movups %%xmm0,(%0)	;\n\t"	\
 		"movups %%xmm1,0x10(%0)	;\n\t"	\
 		"movups %%xmm2,0x20(%0)	;\n\t"	\
@@ -543,10 +544,11 @@
 		:				\
 		: "r" (xmm_save) 		\
 		: "memory");			\
-} while(0)
+} while (0)
 
-#define XMMS_RESTORE do {			\
-	__asm__ __volatile__ ( 			\
+#define XMMS_RESTORE				\
+do {						\
+	asm volatile(				\
 		"sfence			;\n\t"	\
 		"movups (%0),%%xmm0	;\n\t"	\
 		"movups 0x10(%0),%%xmm1	;\n\t"	\
@@ -557,76 +559,76 @@
 		: "memory");			\
 	write_cr0(cr0);				\
 	preempt_enable();			\
-} while(0)
+} while (0)
 
 #define ALIGN16 __attribute__((aligned(16)))
 
 #define OFFS(x)		"16*("#x")"
 #define PF_OFFS(x)	"256+16*("#x")"
 #define	PF0(x)		"	prefetchnta "PF_OFFS(x)"(%1)		;\n"
-#define LD(x,y)		"       movaps   "OFFS(x)"(%1), %%xmm"#y"	;\n"
-#define ST(x,y)		"       movaps %%xmm"#y",   "OFFS(x)"(%1)	;\n"
+#define LD(x, y)	"       movaps   "OFFS(x)"(%1), %%xmm"#y"	;\n"
+#define ST(x, y)	"       movaps %%xmm"#y",   "OFFS(x)"(%1)	;\n"
 #define PF1(x)		"	prefetchnta "PF_OFFS(x)"(%2)		;\n"
 #define PF2(x)		"	prefetchnta "PF_OFFS(x)"(%3)		;\n"
 #define PF3(x)		"	prefetchnta "PF_OFFS(x)"(%4)		;\n"
 #define PF4(x)		"	prefetchnta "PF_OFFS(x)"(%5)		;\n"
 #define PF5(x)		"	prefetchnta "PF_OFFS(x)"(%6)		;\n"
-#define XO1(x,y)	"       xorps   "OFFS(x)"(%2), %%xmm"#y"	;\n"
-#define XO2(x,y)	"       xorps   "OFFS(x)"(%3), %%xmm"#y"	;\n"
-#define XO3(x,y)	"       xorps   "OFFS(x)"(%4), %%xmm"#y"	;\n"
-#define XO4(x,y)	"       xorps   "OFFS(x)"(%5), %%xmm"#y"	;\n"
-#define XO5(x,y)	"       xorps   "OFFS(x)"(%6), %%xmm"#y"	;\n"
+#define XO1(x, y)	"       xorps   "OFFS(x)"(%2), %%xmm"#y"	;\n"
+#define XO2(x, y)	"       xorps   "OFFS(x)"(%3), %%xmm"#y"	;\n"
+#define XO3(x, y)	"       xorps   "OFFS(x)"(%4), %%xmm"#y"	;\n"
+#define XO4(x, y)	"       xorps   "OFFS(x)"(%5), %%xmm"#y"	;\n"
+#define XO5(x, y)	"       xorps   "OFFS(x)"(%6), %%xmm"#y"	;\n"
 
 
 static void
 xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
 {
-        unsigned long lines = bytes >> 8;
+	unsigned long lines = bytes >> 8;
 	char xmm_save[16*4] ALIGN16;
 	int cr0;
 
 	XMMS_SAVE;
 
-        __asm__ __volatile__ (
+	asm volatile(
 #undef BLOCK
-#define BLOCK(i) \
-		LD(i,0)					\
-			LD(i+1,1)			\
+#define BLOCK(i)					\
+		LD(i, 0)				\
+			LD(i + 1, 1)			\
 		PF1(i)					\
-				PF1(i+2)		\
-				LD(i+2,2)		\
-					LD(i+3,3)	\
-		PF0(i+4)				\
-				PF0(i+6)		\
-		XO1(i,0)				\
-			XO1(i+1,1)			\
-				XO1(i+2,2)		\
-					XO1(i+3,3)	\
-		ST(i,0)					\
-			ST(i+1,1)			\
-				ST(i+2,2)		\
-					ST(i+3,3)	\
+				PF1(i + 2)		\
+				LD(i + 2, 2)		\
+					LD(i + 3, 3)	\
+		PF0(i + 4)				\
+				PF0(i + 6)		\
+		XO1(i, 0)				\
+			XO1(i + 1, 1)			\
+				XO1(i + 2, 2)		\
+					XO1(i + 3, 3)	\
+		ST(i, 0)				\
+			ST(i + 1, 1)			\
+				ST(i + 2, 2)		\
+					ST(i + 3, 3)	\
 
 
 		PF0(0)
 				PF0(2)
 
 	" .align 32			;\n"
-        " 1:                            ;\n"
+	" 1:                            ;\n"
 
 		BLOCK(0)
 		BLOCK(4)
 		BLOCK(8)
 		BLOCK(12)
 
-        "       addl $256, %1           ;\n"
-        "       addl $256, %2           ;\n"
-        "       decl %0                 ;\n"
-        "       jnz 1b                  ;\n"
+	"       addl $256, %1           ;\n"
+	"       addl $256, %2           ;\n"
+	"       decl %0                 ;\n"
+	"       jnz 1b                  ;\n"
 	: "+r" (lines),
 	  "+r" (p1), "+r" (p2)
 	:
-        : "memory");
+	: "memory");
 
 	XMMS_RESTORE;
 }
@@ -635,59 +637,59 @@
 xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
 	  unsigned long *p3)
 {
-        unsigned long lines = bytes >> 8;
+	unsigned long lines = bytes >> 8;
 	char xmm_save[16*4] ALIGN16;
 	int cr0;
 
 	XMMS_SAVE;
 
-        __asm__ __volatile__ (
+	asm volatile(
 #undef BLOCK
 #define BLOCK(i) \
 		PF1(i)					\
-				PF1(i+2)		\
+				PF1(i + 2)		\
 		LD(i,0)					\
-			LD(i+1,1)			\
-				LD(i+2,2)		\
-					LD(i+3,3)	\
+			LD(i + 1, 1)			\
+				LD(i + 2, 2)		\
+					LD(i + 3, 3)	\
 		PF2(i)					\
-				PF2(i+2)		\
-		PF0(i+4)				\
-				PF0(i+6)		\
+				PF2(i + 2)		\
+		PF0(i + 4)				\
+				PF0(i + 6)		\
 		XO1(i,0)				\
-			XO1(i+1,1)			\
-				XO1(i+2,2)		\
-					XO1(i+3,3)	\
+			XO1(i + 1, 1)			\
+				XO1(i + 2, 2)		\
+					XO1(i + 3, 3)	\
 		XO2(i,0)				\
-			XO2(i+1,1)			\
-				XO2(i+2,2)		\
-					XO2(i+3,3)	\
+			XO2(i + 1, 1)			\
+				XO2(i + 2, 2)		\
+					XO2(i + 3, 3)	\
 		ST(i,0)					\
-			ST(i+1,1)			\
-				ST(i+2,2)		\
-					ST(i+3,3)	\
+			ST(i + 1, 1)			\
+				ST(i + 2, 2)		\
+					ST(i + 3, 3)	\
 
 
 		PF0(0)
 				PF0(2)
 
 	" .align 32			;\n"
-        " 1:                            ;\n"
+	" 1:                            ;\n"
 
 		BLOCK(0)
 		BLOCK(4)
 		BLOCK(8)
 		BLOCK(12)
 
-        "       addl $256, %1           ;\n"
-        "       addl $256, %2           ;\n"
-        "       addl $256, %3           ;\n"
-        "       decl %0                 ;\n"
-        "       jnz 1b                  ;\n"
+	"       addl $256, %1           ;\n"
+	"       addl $256, %2           ;\n"
+	"       addl $256, %3           ;\n"
+	"       decl %0                 ;\n"
+	"       jnz 1b                  ;\n"
 	: "+r" (lines),
 	  "+r" (p1), "+r"(p2), "+r"(p3)
 	:
-        : "memory" );
+	: "memory" );
 
 	XMMS_RESTORE;
 }
@@ -696,66 +698,66 @@
 xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
 	  unsigned long *p3, unsigned long *p4)
 {
-        unsigned long lines = bytes >> 8;
+	unsigned long lines = bytes >> 8;
 	char xmm_save[16*4] ALIGN16;
 	int cr0;
 
 	XMMS_SAVE;
 
-        __asm__ __volatile__ (
+	asm volatile(
 #undef BLOCK
 #define BLOCK(i) \
 		PF1(i)					\
-				PF1(i+2)		\
+				PF1(i + 2)		\
 		LD(i,0)					\
-			LD(i+1,1)			\
-				LD(i+2,2)		\
-					LD(i+3,3)	\
+			LD(i + 1, 1)			\
+				LD(i + 2, 2)		\
+					LD(i + 3, 3)	\
 		PF2(i)					\
-				PF2(i+2)		\
+				PF2(i + 2)		\
 		XO1(i,0)				\
-			XO1(i+1,1)			\
-				XO1(i+2,2)		\
-					XO1(i+3,3)	\
+			XO1(i + 1, 1)			\
+				XO1(i + 2, 2)		\
+					XO1(i + 3, 3)	\
 		PF3(i)					\
-				PF3(i+2)		\
-		PF0(i+4)				\
-				PF0(i+6)		\
+				PF3(i + 2)		\
+		PF0(i + 4)				\
+				PF0(i + 6)		\
 		XO2(i,0)				\
-			XO2(i+1,1)			\
-				XO2(i+2,2)		\
-					XO2(i+3,3)	\
+			XO2(i + 1, 1)			\
+				XO2(i + 2, 2)		\
+					XO2(i + 3, 3)	\
 		XO3(i,0)				\
-			XO3(i+1,1)			\
-				XO3(i+2,2)		\
-					XO3(i+3,3)	\
+			XO3(i + 1, 1)			\
+				XO3(i + 2, 2)		\
+					XO3(i + 3, 3)	\
 		ST(i,0)					\
-			ST(i+1,1)			\
-				ST(i+2,2)		\
-					ST(i+3,3)	\
+			ST(i + 1, 1)			\
+				ST(i + 2, 2)		\
+					ST(i + 3, 3)	\
 
 
 		PF0(0)
 				PF0(2)
 
 	" .align 32			;\n"
-        " 1:                            ;\n"
+	" 1:                            ;\n"
 
 		BLOCK(0)
 		BLOCK(4)
 		BLOCK(8)
 		BLOCK(12)
 
-        "       addl $256, %1           ;\n"
-        "       addl $256, %2           ;\n"
-        "       addl $256, %3           ;\n"
-        "       addl $256, %4           ;\n"
-        "       decl %0                 ;\n"
-        "       jnz 1b                  ;\n"
+	"       addl $256, %1           ;\n"
+	"       addl $256, %2           ;\n"
+	"       addl $256, %3           ;\n"
+	"       addl $256, %4           ;\n"
+	"       decl %0                 ;\n"
+	"       jnz 1b                  ;\n"
 	: "+r" (lines),
 	  "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
 	:
-        : "memory" );
+	: "memory" );
 
 	XMMS_RESTORE;
 }
@@ -764,7 +766,7 @@
 xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
 	  unsigned long *p3, unsigned long *p4, unsigned long *p5)
 {
-        unsigned long lines = bytes >> 8;
+	unsigned long lines = bytes >> 8;
 	char xmm_save[16*4] ALIGN16;
 	int cr0;
 
@@ -776,65 +778,65 @@
 	   because we modify p4 and p5 there, but we can't mark them
 	   as read/write, otherwise we'd overflow the 10-asm-operands
 	   limit of GCC < 3.1.  */
-	__asm__ ("" : "+r" (p4), "+r" (p5));
+	asm("" : "+r" (p4), "+r" (p5));
 
-        __asm__ __volatile__ (
+	asm volatile(
 #undef BLOCK
 #define BLOCK(i) \
 		PF1(i)					\
-				PF1(i+2)		\
+				PF1(i + 2)		\
 		LD(i,0)					\
-			LD(i+1,1)			\
-				LD(i+2,2)		\
-					LD(i+3,3)	\
+			LD(i + 1, 1)			\
+				LD(i + 2, 2)		\
+					LD(i + 3, 3)	\
 		PF2(i)					\
-				PF2(i+2)		\
+				PF2(i + 2)		\
 		XO1(i,0)				\
-			XO1(i+1,1)			\
-				XO1(i+2,2)		\
-					XO1(i+3,3)	\
+			XO1(i + 1, 1)			\
+				XO1(i + 2, 2)		\
+					XO1(i + 3, 3)	\
 		PF3(i)					\
-				PF3(i+2)		\
+				PF3(i + 2)		\
 		XO2(i,0)				\
-			XO2(i+1,1)			\
-				XO2(i+2,2)		\
-					XO2(i+3,3)	\
+			XO2(i + 1, 1)			\
+				XO2(i + 2, 2)		\
+					XO2(i + 3, 3)	\
 		PF4(i)					\
-				PF4(i+2)		\
-		PF0(i+4)				\
-				PF0(i+6)		\
+				PF4(i + 2)		\
+		PF0(i + 4)				\
+				PF0(i + 6)		\
 		XO3(i,0)				\
-			XO3(i+1,1)			\
-				XO3(i+2,2)		\
-					XO3(i+3,3)	\
+			XO3(i + 1, 1)			\
+				XO3(i + 2, 2)		\
+					XO3(i + 3, 3)	\
 		XO4(i,0)				\
-			XO4(i+1,1)			\
-				XO4(i+2,2)		\
-					XO4(i+3,3)	\
+			XO4(i + 1, 1)			\
+				XO4(i + 2, 2)		\
+					XO4(i + 3, 3)	\
 		ST(i,0)					\
-			ST(i+1,1)			\
-				ST(i+2,2)		\
-					ST(i+3,3)	\
+			ST(i + 1, 1)			\
+				ST(i + 2, 2)		\
+					ST(i + 3, 3)	\
 
 
 		PF0(0)
 				PF0(2)
 
 	" .align 32			;\n"
-        " 1:                            ;\n"
+	" 1:                            ;\n"
 
 		BLOCK(0)
 		BLOCK(4)
 		BLOCK(8)
 		BLOCK(12)
 
-        "       addl $256, %1           ;\n"
-        "       addl $256, %2           ;\n"
-        "       addl $256, %3           ;\n"
-        "       addl $256, %4           ;\n"
-        "       addl $256, %5           ;\n"
-        "       decl %0                 ;\n"
-        "       jnz 1b                  ;\n"
+	"       addl $256, %1           ;\n"
+	"       addl $256, %2           ;\n"
+	"       addl $256, %3           ;\n"
+	"       addl $256, %4           ;\n"
+	"       addl $256, %5           ;\n"
+	"       decl %0                 ;\n"
+	"       jnz 1b                  ;\n"
 	: "+r" (lines),
 	  "+r" (p1), "+r" (p2), "+r" (p3)
 	: "r" (p4), "r" (p5)
@@ -843,17 +845,17 @@
 	/* p4 and p5 were modified, and now the variables are dead.
 	   Clobber them just to be sure nobody does something stupid
 	   like assuming they have some legal value.  */
-	__asm__ ("" : "=r" (p4), "=r" (p5));
+	asm("" : "=r" (p4), "=r" (p5));
 
 	XMMS_RESTORE;
 }
 
 static struct xor_block_template xor_block_pIII_sse = {
-        .name = "pIII_sse",
-        .do_2 =  xor_sse_2,
-        .do_3 =  xor_sse_3,
-        .do_4 =  xor_sse_4,
-        .do_5 = xor_sse_5,
+	.name = "pIII_sse",
+	.do_2 = xor_sse_2,
+	.do_3 = xor_sse_3,
+	.do_4 = xor_sse_4,
+	.do_5 = xor_sse_5,
 };
 
 /* Also try the generic routines.  */
@@ -861,21 +863,21 @@
 
 #undef XOR_TRY_TEMPLATES
 #define XOR_TRY_TEMPLATES				\
-	do {						\
-		xor_speed(&xor_block_8regs);		\
-		xor_speed(&xor_block_8regs_p);		\
-		xor_speed(&xor_block_32regs);		\
-		xor_speed(&xor_block_32regs_p);		\
-	        if (cpu_has_xmm)			\
-			xor_speed(&xor_block_pIII_sse);	\
-	        if (cpu_has_mmx) {			\
-	                xor_speed(&xor_block_pII_mmx);	\
-	                xor_speed(&xor_block_p5_mmx);	\
-	        }					\
-	} while (0)
+do {							\
+	xor_speed(&xor_block_8regs);			\
+	xor_speed(&xor_block_8regs_p);			\
+	xor_speed(&xor_block_32regs);			\
+	xor_speed(&xor_block_32regs_p);			\
+	if (cpu_has_xmm)				\
+		xor_speed(&xor_block_pIII_sse);		\
+	if (cpu_has_mmx) {				\
+		xor_speed(&xor_block_pII_mmx);		\
+		xor_speed(&xor_block_p5_mmx);		\
+	}						\
+} while (0)
 
 /* We force the use of the SSE xor block because it can write around L2.
    We may also be able to load into the L1 only depending on how the cpu
    deals with a load to a line that is being prefetched.  */
-#define XOR_SELECT_TEMPLATE(FASTEST) \
+#define XOR_SELECT_TEMPLATE(FASTEST)			\
 	(cpu_has_xmm ? &xor_block_pIII_sse : FASTEST)
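
How these two macros plug into the calibration path, as an editorial, condensed sketch; the real benchmarking lives in the generic xor code, which supplies its own xor_speed() and tracks the fastest template for XOR_SELECT_TEMPLATE():

	static struct xor_block_template *fastest;

	static void xor_speed(struct xor_block_template *tmpl)
	{
		/* Benchmark tmpl (timing elided) and remember the winner. */
		if (!fastest || tmpl->speed > fastest->speed)
			fastest = tmpl;
	}

	static struct xor_block_template *pick_xor_template(void)
	{
		XOR_TRY_TEMPLATES;		/* calls xor_speed() per candidate */
		return XOR_SELECT_TEMPLATE(fastest); /* SSE wins when available */
	}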
diff --git a/include/asm-x86/xor_64.h b/include/asm-x86/xor_64.h
index 1eee7fc..24957e3 100644
--- a/include/asm-x86/xor_64.h
+++ b/include/asm-x86/xor_64.h
@@ -24,20 +24,23 @@
  */
 
 /*
- * x86-64 changes / gcc fixes from Andi Kleen. 
+ * x86-64 changes / gcc fixes from Andi Kleen.
  * Copyright 2002 Andi Kleen, SuSE Labs.
  *
  * This hasn't been optimized for the hammer yet, but there are likely
  * no advantages to be gotten from x86-64 here anyways.
  */
 
-typedef struct { unsigned long a,b; } __attribute__((aligned(16))) xmm_store_t;
+typedef struct {
+	unsigned long a, b;
+} __attribute__((aligned(16))) xmm_store_t;
 
-/* Doesn't use gcc to save the XMM registers, because there is no easy way to 
+/* Doesn't use gcc to save the XMM registers, because there is no easy way to
    tell it to do a clts before the register saving. */
-#define XMMS_SAVE do {				\
+#define XMMS_SAVE				\
+do {						\
 	preempt_disable();			\
-	asm volatile (				\
+	asm volatile(				\
 		"movq %%cr0,%0		;\n\t"	\
 		"clts			;\n\t"	\
 		"movups %%xmm0,(%1)	;\n\t"	\
@@ -47,10 +50,11 @@
 		: "=&r" (cr0)			\
 		: "r" (xmm_save) 		\
 		: "memory");			\
-} while(0)
+} while (0)
 
-#define XMMS_RESTORE do {			\
-	asm volatile (				\
+#define XMMS_RESTORE				\
+do {						\
+	asm volatile(				\
 		"sfence			;\n\t"	\
 		"movups (%1),%%xmm0	;\n\t"	\
 		"movups 0x10(%1),%%xmm1	;\n\t"	\
@@ -61,72 +65,72 @@
 		: "r" (cr0), "r" (xmm_save)	\
 		: "memory");			\
 	preempt_enable();			\
-} while(0)
+} while (0)
 
 #define OFFS(x)		"16*("#x")"
 #define PF_OFFS(x)	"256+16*("#x")"
 #define	PF0(x)		"	prefetchnta "PF_OFFS(x)"(%[p1])		;\n"
-#define LD(x,y)		"       movaps   "OFFS(x)"(%[p1]), %%xmm"#y"	;\n"
-#define ST(x,y)		"       movaps %%xmm"#y",   "OFFS(x)"(%[p1])	;\n"
+#define LD(x, y)	"       movaps   "OFFS(x)"(%[p1]), %%xmm"#y"	;\n"
+#define ST(x, y)	"       movaps %%xmm"#y",   "OFFS(x)"(%[p1])	;\n"
 #define PF1(x)		"	prefetchnta "PF_OFFS(x)"(%[p2])		;\n"
 #define PF2(x)		"	prefetchnta "PF_OFFS(x)"(%[p3])		;\n"
 #define PF3(x)		"	prefetchnta "PF_OFFS(x)"(%[p4])		;\n"
 #define PF4(x)		"	prefetchnta "PF_OFFS(x)"(%[p5])		;\n"
 #define PF5(x)		"	prefetchnta "PF_OFFS(x)"(%[p6])		;\n"
-#define XO1(x,y)	"       xorps   "OFFS(x)"(%[p2]), %%xmm"#y"	;\n"
-#define XO2(x,y)	"       xorps   "OFFS(x)"(%[p3]), %%xmm"#y"	;\n"
-#define XO3(x,y)	"       xorps   "OFFS(x)"(%[p4]), %%xmm"#y"	;\n"
-#define XO4(x,y)	"       xorps   "OFFS(x)"(%[p5]), %%xmm"#y"	;\n"
-#define XO5(x,y)	"       xorps   "OFFS(x)"(%[p6]), %%xmm"#y"	;\n"
+#define XO1(x, y)	"       xorps   "OFFS(x)"(%[p2]), %%xmm"#y"	;\n"
+#define XO2(x, y)	"       xorps   "OFFS(x)"(%[p3]), %%xmm"#y"	;\n"
+#define XO3(x, y)	"       xorps   "OFFS(x)"(%[p4]), %%xmm"#y"	;\n"
+#define XO4(x, y)	"       xorps   "OFFS(x)"(%[p5]), %%xmm"#y"	;\n"
+#define XO5(x, y)	"       xorps   "OFFS(x)"(%[p6]), %%xmm"#y"	;\n"
 
 
 static void
 xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
 {
-        unsigned int lines = bytes >> 8;
+	unsigned int lines = bytes >> 8;
 	unsigned long cr0;
 	xmm_store_t xmm_save[4];
 
 	XMMS_SAVE;
 
-        asm volatile (
+	asm volatile(
 #undef BLOCK
 #define BLOCK(i) \
-		LD(i,0)					\
-			LD(i+1,1)			\
+		LD(i, 0)				\
+			LD(i + 1, 1)			\
 		PF1(i)					\
-				PF1(i+2)		\
-				LD(i+2,2)		\
-					LD(i+3,3)	\
-		PF0(i+4)				\
-				PF0(i+6)		\
-		XO1(i,0)				\
-			XO1(i+1,1)			\
-				XO1(i+2,2)		\
-					XO1(i+3,3)	\
-		ST(i,0)					\
-			ST(i+1,1)			\
-				ST(i+2,2)		\
-					ST(i+3,3)	\
+				PF1(i + 2)		\
+				LD(i + 2, 2)		\
+					LD(i + 3, 3)	\
+		PF0(i + 4)				\
+				PF0(i + 6)		\
+		XO1(i, 0)				\
+			XO1(i + 1, 1)			\
+				XO1(i + 2, 2)		\
+					XO1(i + 3, 3)	\
+		ST(i, 0)				\
+			ST(i + 1, 1)			\
+				ST(i + 2, 2)		\
+					ST(i + 3, 3)	\
 
 
 		PF0(0)
 				PF0(2)
 
 	" .align 32			;\n"
-        " 1:                            ;\n"
+	" 1:                            ;\n"
 
 		BLOCK(0)
 		BLOCK(4)
 		BLOCK(8)
 		BLOCK(12)
 
-        "       addq %[inc], %[p1]           ;\n"
-        "       addq %[inc], %[p2]           ;\n"
+	"       addq %[inc], %[p1]           ;\n"
+	"       addq %[inc], %[p2]           ;\n"
 		"		decl %[cnt] ; jnz 1b"
 	: [p1] "+r" (p1), [p2] "+r" (p2), [cnt] "+r" (lines)
-	: [inc] "r" (256UL) 
-        : "memory");
+	: [inc] "r" (256UL)
+	: "memory");
 
 	XMMS_RESTORE;
 }
@@ -141,52 +145,52 @@
 
 	XMMS_SAVE;
 
-        __asm__ __volatile__ (
+	asm volatile(
 #undef BLOCK
 #define BLOCK(i) \
 		PF1(i)					\
-				PF1(i+2)		\
-		LD(i,0)					\
-			LD(i+1,1)			\
-				LD(i+2,2)		\
-					LD(i+3,3)	\
+				PF1(i + 2)		\
+		LD(i, 0)					\
+			LD(i + 1, 1)			\
+				LD(i + 2, 2)		\
+					LD(i + 3, 3)	\
 		PF2(i)					\
-				PF2(i+2)		\
-		PF0(i+4)				\
-				PF0(i+6)		\
-		XO1(i,0)				\
-			XO1(i+1,1)			\
-				XO1(i+2,2)		\
-					XO1(i+3,3)	\
-		XO2(i,0)				\
-			XO2(i+1,1)			\
-				XO2(i+2,2)		\
-					XO2(i+3,3)	\
-		ST(i,0)					\
-			ST(i+1,1)			\
-				ST(i+2,2)		\
-					ST(i+3,3)	\
+				PF2(i + 2)		\
+		PF0(i + 4)				\
+				PF0(i + 6)		\
+		XO1(i, 0)				\
+			XO1(i + 1, 1)			\
+				XO1(i + 2, 2)		\
+					XO1(i + 3, 3)	\
+		XO2(i, 0)				\
+			XO2(i + 1, 1)			\
+				XO2(i + 2, 2)		\
+					XO2(i + 3, 3)	\
+		ST(i, 0)				\
+			ST(i + 1, 1)			\
+				ST(i + 2, 2)		\
+					ST(i + 3, 3)	\
 
 
 		PF0(0)
 				PF0(2)
 
 	" .align 32			;\n"
-        " 1:                            ;\n"
+	" 1:                            ;\n"
 
 		BLOCK(0)
 		BLOCK(4)
 		BLOCK(8)
 		BLOCK(12)
 
-        "       addq %[inc], %[p1]           ;\n"
-        "       addq %[inc], %[p2]          ;\n"
-        "       addq %[inc], %[p3]           ;\n"
+	"       addq %[inc], %[p1]           ;\n"
+	"       addq %[inc], %[p2]          ;\n"
+	"       addq %[inc], %[p3]           ;\n"
 		"		decl %[cnt] ; jnz 1b"
 	: [cnt] "+r" (lines),
 	  [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)
 	: [inc] "r" (256UL)
-	: "memory"); 
+	: "memory");
 	XMMS_RESTORE;
 }
 
@@ -195,64 +199,64 @@
 	  unsigned long *p3, unsigned long *p4)
 {
 	unsigned int lines = bytes >> 8;
-	xmm_store_t xmm_save[4]; 
+	xmm_store_t xmm_save[4];
 	unsigned long cr0;
 
 	XMMS_SAVE;
 
-        __asm__ __volatile__ (
+	asm volatile(
 #undef BLOCK
 #define BLOCK(i) \
 		PF1(i)					\
-				PF1(i+2)		\
-		LD(i,0)					\
-			LD(i+1,1)			\
-				LD(i+2,2)		\
-					LD(i+3,3)	\
+				PF1(i + 2)		\
+		LD(i, 0)				\
+			LD(i + 1, 1)			\
+				LD(i + 2, 2)		\
+					LD(i + 3, 3)	\
 		PF2(i)					\
-				PF2(i+2)		\
-		XO1(i,0)				\
-			XO1(i+1,1)			\
-				XO1(i+2,2)		\
-					XO1(i+3,3)	\
+				PF2(i + 2)		\
+		XO1(i, 0)				\
+			XO1(i + 1, 1)			\
+				XO1(i + 2, 2)		\
+					XO1(i + 3, 3)	\
 		PF3(i)					\
-				PF3(i+2)		\
-		PF0(i+4)				\
-				PF0(i+6)		\
-		XO2(i,0)				\
-			XO2(i+1,1)			\
-				XO2(i+2,2)		\
-					XO2(i+3,3)	\
-		XO3(i,0)				\
-			XO3(i+1,1)			\
-				XO3(i+2,2)		\
-					XO3(i+3,3)	\
-		ST(i,0)					\
-			ST(i+1,1)			\
-				ST(i+2,2)		\
-					ST(i+3,3)	\
+				PF3(i + 2)		\
+		PF0(i + 4)				\
+				PF0(i + 6)		\
+		XO2(i, 0)				\
+			XO2(i + 1, 1)			\
+				XO2(i + 2, 2)		\
+					XO2(i + 3, 3)	\
+		XO3(i, 0)				\
+			XO3(i + 1, 1)			\
+				XO3(i + 2, 2)		\
+					XO3(i + 3, 3)	\
+		ST(i, 0)				\
+			ST(i + 1, 1)			\
+				ST(i + 2, 2)		\
+					ST(i + 3, 3)	\
 
 
 		PF0(0)
 				PF0(2)
 
 	" .align 32			;\n"
-        " 1:                            ;\n"
+	" 1:                            ;\n"
 
 		BLOCK(0)
 		BLOCK(4)
 		BLOCK(8)
 		BLOCK(12)
 
-        "       addq %[inc], %[p1]           ;\n"
-        "       addq %[inc], %[p2]           ;\n"
-        "       addq %[inc], %[p3]           ;\n"
-        "       addq %[inc], %[p4]           ;\n"
+	"       addq %[inc], %[p1]           ;\n"
+	"       addq %[inc], %[p2]           ;\n"
+	"       addq %[inc], %[p3]           ;\n"
+	"       addq %[inc], %[p4]           ;\n"
 	"	decl %[cnt] ; jnz 1b"
 	: [cnt] "+c" (lines),
 	  [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4)
 	: [inc] "r" (256UL)
-        : "memory" );
+	: "memory" );
 
 	XMMS_RESTORE;
 }
@@ -261,70 +265,70 @@
 xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
 	  unsigned long *p3, unsigned long *p4, unsigned long *p5)
 {
-        unsigned int lines = bytes >> 8;
+	unsigned int lines = bytes >> 8;
 	xmm_store_t xmm_save[4];
 	unsigned long cr0;
 
 	XMMS_SAVE;
 
-        __asm__ __volatile__ (
+	asm volatile(
 #undef BLOCK
 #define BLOCK(i) \
 		PF1(i)					\
-				PF1(i+2)		\
-		LD(i,0)					\
-			LD(i+1,1)			\
-				LD(i+2,2)		\
-					LD(i+3,3)	\
+				PF1(i + 2)		\
+		LD(i, 0)				\
+			LD(i + 1, 1)			\
+				LD(i + 2, 2)		\
+					LD(i + 3, 3)	\
 		PF2(i)					\
-				PF2(i+2)		\
-		XO1(i,0)				\
-			XO1(i+1,1)			\
-				XO1(i+2,2)		\
-					XO1(i+3,3)	\
+				PF2(i + 2)		\
+		XO1(i, 0)				\
+			XO1(i + 1, 1)			\
+				XO1(i + 2, 2)		\
+					XO1(i + 3, 3)	\
 		PF3(i)					\
-				PF3(i+2)		\
-		XO2(i,0)				\
-			XO2(i+1,1)			\
-				XO2(i+2,2)		\
-					XO2(i+3,3)	\
+				PF3(i + 2)		\
+		XO2(i, 0)				\
+			XO2(i + 1, 1)			\
+				XO2(i + 2, 2)		\
+					XO2(i + 3, 3)	\
 		PF4(i)					\
-				PF4(i+2)		\
-		PF0(i+4)				\
-				PF0(i+6)		\
-		XO3(i,0)				\
-			XO3(i+1,1)			\
-				XO3(i+2,2)		\
-					XO3(i+3,3)	\
-		XO4(i,0)				\
-			XO4(i+1,1)			\
-				XO4(i+2,2)		\
-					XO4(i+3,3)	\
-		ST(i,0)					\
-			ST(i+1,1)			\
-				ST(i+2,2)		\
-					ST(i+3,3)	\
+				PF4(i + 2)		\
+		PF0(i + 4)				\
+				PF0(i + 6)		\
+		XO3(i, 0)				\
+			XO3(i + 1, 1)			\
+				XO3(i + 2, 2)		\
+					XO3(i + 3, 3)	\
+		XO4(i, 0)				\
+			XO4(i + 1, 1)			\
+				XO4(i + 2, 2)		\
+					XO4(i + 3, 3)	\
+		ST(i, 0)				\
+			ST(i + 1, 1)			\
+				ST(i + 2, 2)		\
+					ST(i + 3, 3)	\
 
 
 		PF0(0)
 				PF0(2)
 
 	" .align 32			;\n"
-        " 1:                            ;\n"
+	" 1:                            ;\n"
 
 		BLOCK(0)
 		BLOCK(4)
 		BLOCK(8)
 		BLOCK(12)
 
-        "       addq %[inc], %[p1]           ;\n"
-        "       addq %[inc], %[p2]           ;\n"
-        "       addq %[inc], %[p3]           ;\n"
-        "       addq %[inc], %[p4]           ;\n"
-        "       addq %[inc], %[p5]           ;\n"
+	"       addq %[inc], %[p1]           ;\n"
+	"       addq %[inc], %[p2]           ;\n"
+	"       addq %[inc], %[p3]           ;\n"
+	"       addq %[inc], %[p4]           ;\n"
+	"       addq %[inc], %[p5]           ;\n"
 	"	decl %[cnt] ; jnz 1b"
 	: [cnt] "+c" (lines),
-  	  [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4), 
+	  [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4),
 	  [p5] "+r" (p5)
 	: [inc] "r" (256UL)
 	: "memory");
@@ -333,18 +337,18 @@
 }
 
 static struct xor_block_template xor_block_sse = {
-        .name = "generic_sse",
-        .do_2 = xor_sse_2,
-        .do_3 = xor_sse_3,
-        .do_4 = xor_sse_4,
-        .do_5 = xor_sse_5,
+	.name = "generic_sse",
+	.do_2 = xor_sse_2,
+	.do_3 = xor_sse_3,
+	.do_4 = xor_sse_4,
+	.do_5 = xor_sse_5,
 };
 
 #undef XOR_TRY_TEMPLATES
-#define XOR_TRY_TEMPLATES				\
-	do {						\
-		xor_speed(&xor_block_sse);	\
-	} while (0)
+#define XOR_TRY_TEMPLATES			\
+do {						\
+	xor_speed(&xor_block_sse);		\
+} while (0)
 
 /* We force the use of the SSE xor block because it can write around L2.
    We may also be able to load into the L1 only depending on how the cpu
diff --git a/include/asm-xtensa/semaphore.h b/include/asm-xtensa/semaphore.h
index 3e04167..d9b2034 100644
--- a/include/asm-xtensa/semaphore.h
+++ b/include/asm-xtensa/semaphore.h
@@ -1,99 +1 @@
-/*
- * linux/include/asm-xtensa/semaphore.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_SEMAPHORE_H
-#define _XTENSA_SEMAPHORE_H
-
-#include <asm/atomic.h>
-#include <asm/system.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-struct semaphore {
-	atomic_t count;
-	int sleepers;
-	wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name,n)					\
-{									\
-	.count		= ATOMIC_INIT(n),				\
-	.sleepers	= 0,						\
-	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)	\
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) 			\
-	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
-	atomic_set(&sem->count, val);
-	sem->sleepers = 0;
-	init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
-	sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
-	sema_init(sem, 0);
-}
-
-asmlinkage void __down(struct semaphore * sem);
-asmlinkage int  __down_interruptible(struct semaphore * sem);
-asmlinkage int  __down_trylock(struct semaphore * sem);
-asmlinkage void __up(struct semaphore * sem);
-
-extern spinlock_t semaphore_wake_lock;
-
-static inline void down(struct semaphore * sem)
-{
-	might_sleep();
-
-	if (atomic_sub_return(1, &sem->count) < 0)
-		__down(sem);
-}
-
-static inline int down_interruptible(struct semaphore * sem)
-{
-	int ret = 0;
-
-	might_sleep();
-
-	if (atomic_sub_return(1, &sem->count) < 0)
-		ret = __down_interruptible(sem);
-	return ret;
-}
-
-static inline int down_trylock(struct semaphore * sem)
-{
-	int ret = 0;
-
-	if (atomic_sub_return(1, &sem->count) < 0)
-		ret = __down_trylock(sem);
-	return ret;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- */
-static inline void up(struct semaphore * sem)
-{
-	if (atomic_add_return(1, &sem->count) <= 0)
-		__up(sem);
-}
-
-#endif /* _XTENSA_SEMAPHORE_H */
+#include <linux/semaphore.h>
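
The generic header supplies the same API the removed code open-coded; an editorial usage sketch (DECLARE_MUTEX here is the count-of-1 initializer from the generic header):

	#include <linux/semaphore.h>

	static DECLARE_MUTEX(example_sem);	/* counting semaphore, count = 1 */

	static int do_serialized_work(void)
	{
		if (down_interruptible(&example_sem))
			return -EINTR;		/* interrupted while sleeping */
		/* ... critical section ... */
		up(&example_sem);
		return 0;
	}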
diff --git a/include/linux/attribute_container.h b/include/linux/attribute_container.h
index f558233..574b201 100644
--- a/include/linux/attribute_container.h
+++ b/include/linux/attribute_container.h
@@ -37,7 +37,7 @@
 }
 
 int attribute_container_register(struct attribute_container *cont);
-int attribute_container_unregister(struct attribute_container *cont);
+int __must_check attribute_container_unregister(struct attribute_container *cont);
 void attribute_container_create_device(struct device *dev,
 				       int (*fn)(struct attribute_container *,
 						 struct device *,
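
The new __must_check means callers can no longer ignore the result; an editorial sketch of a compliant caller (reading a nonzero return as "still busy" is an assumption):

	static void example_teardown(struct attribute_container *cont)
	{
		if (attribute_container_unregister(cont))
			printk(KERN_WARNING
			       "attribute container busy, not unregistered\n");
	}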
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 85778a4..3509447 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -216,6 +216,7 @@
 /* used to install a new clocksource */
 extern int clocksource_register(struct clocksource*);
 extern void clocksource_unregister(struct clocksource*);
+extern void clocksource_touch_watchdog(void);
 extern struct clocksource* clocksource_get_next(void);
 extern void clocksource_change_rating(struct clocksource *cs, int rating);
 extern void clocksource_resume(void);
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 34d4406..b4d84ed 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -95,12 +95,17 @@
 #define DMA_TX_TYPE_END (DMA_INTERRUPT + 1)
 
 /**
- * enum dma_prep_flags - DMA flags to augment operation preparation
+ * enum dma_ctrl_flags - DMA flags to augment operation preparation,
+ * 	control completion, and communicate status.
  * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
  * 	this transaction
+ * @DMA_CTRL_ACK - the descriptor cannot be reused until the client
+ * 	acknowledges receipt, i.e. has a chance to establish any
+ * 	dependency chains
  */
-enum dma_prep_flags {
+enum dma_ctrl_flags {
 	DMA_PREP_INTERRUPT = (1 << 0),
+	DMA_CTRL_ACK = (1 << 1),
 };
 
 /**
@@ -211,8 +216,8 @@
  * ---dma generic offload fields---
  * @cookie: tracking cookie for this transaction, set to -EBUSY if
  *	this tx is sitting on a dependency list
- * @ack: the descriptor can not be reused until the client acknowledges
- *	receipt, i.e. has has a chance to establish any dependency chains
+ * @flags: flags to augment operation preparation, control completion, and
+ * 	communicate status
  * @phys: physical address of the descriptor
  * @tx_list: driver common field for operations that require multiple
  *	descriptors
@@ -221,23 +226,20 @@
  * @callback: routine to call after this operation is complete
  * @callback_param: general parameter to pass to the callback routine
  * ---async_tx api specific fields---
- * @depend_list: at completion this list of transactions are submitted
- * @depend_node: allow this transaction to be executed after another
- *	transaction has completed, possibly on another channel
+ * @next: at completion submit this descriptor
  * @parent: pointer to the next level up in the dependency chain
- * @lock: protect the dependency list
+ * @lock: protect the parent and next pointers
  */
 struct dma_async_tx_descriptor {
 	dma_cookie_t cookie;
-	int ack;
+	enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */
 	dma_addr_t phys;
 	struct list_head tx_list;
 	struct dma_chan *chan;
 	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
 	dma_async_tx_callback callback;
 	void *callback_param;
-	struct list_head depend_list;
-	struct list_head depend_node;
+	struct dma_async_tx_descriptor *next;
 	struct dma_async_tx_descriptor *parent;
 	spinlock_t lock;
 };
@@ -261,7 +263,6 @@
  * @device_prep_dma_zero_sum: prepares a zero_sum operation
  * @device_prep_dma_memset: prepares a memset operation
  * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
- * @device_dependency_added: async_tx notifies the channel about new deps
  * @device_issue_pending: push pending transactions to hardware
  */
 struct dma_device {
@@ -294,9 +295,8 @@
 		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
 		unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
-		struct dma_chan *chan);
+		struct dma_chan *chan, unsigned long flags);
 
-	void (*device_dependency_added)(struct dma_chan *chan);
 	enum dma_status (*device_is_tx_complete)(struct dma_chan *chan,
 			dma_cookie_t cookie, dma_cookie_t *last,
 			dma_cookie_t *used);
@@ -321,7 +321,13 @@
 static inline void
 async_tx_ack(struct dma_async_tx_descriptor *tx)
 {
-	tx->ack = 1;
+	tx->flags |= DMA_CTRL_ACK;
+}
+
+static inline int
+async_tx_test_ack(struct dma_async_tx_descriptor *tx)
+{
+	return tx->flags & DMA_CTRL_ACK;
 }
 
 #define first_dma_cap(mask) __first_dma_cap(&(mask))
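Under the new scheme a client sets DMA_CTRL_ACK, directly at prep time or later via async_tx_ack(), once it no longer needs the descriptor, and the driver may then recycle it. A hedged sketch of a client submission path; the channel, addresses, and error convention are assumed for illustration:

/* Sketch, not in-tree client code: prepare a memcpy, mark the
 * descriptor reusable, and submit it. */
static dma_cookie_t issue_copy(struct dma_chan *chan, dma_addr_t dst,
			       dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;

	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						  DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	async_tx_ack(tx);		/* sets DMA_CTRL_ACK in tx->flags */
	return tx->tx_submit(tx);
}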
diff --git a/include/linux/hdreg.h b/include/linux/hdreg.h
index e38e759..c37e924 100644
--- a/include/linux/hdreg.h
+++ b/include/linux/hdreg.h
@@ -422,9 +422,11 @@
 #define HDIO_SET_NOWERR		0x0325	/* change ignore-write-error flag */
 #define HDIO_SET_DMA		0x0326	/* change use-dma flag */
 #define HDIO_SET_PIO_MODE	0x0327	/* reconfig interface to new speed */
+#ifndef __KERNEL__
 #define HDIO_SCAN_HWIF		0x0328	/* register and (re)scan interface */
-#define HDIO_SET_NICE		0x0329	/* set nice flags */
 #define HDIO_UNREGISTER_HWIF	0x032a  /* unregister interface */
+#endif
+#define HDIO_SET_NICE		0x0329	/* set nice flags */
 #define HDIO_SET_WCACHE		0x032b	/* change write cache enable-disable */
 #define HDIO_SET_ACOUSTIC	0x032c	/* change acoustic behavior */
 #define HDIO_SET_BUSSTATE	0x032d	/* set the bus state of the hwif */
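HDIO_SCAN_HWIF and HDIO_UNREGISTER_HWIF stay visible to user space (their numbers remain reserved) while the kernel side no longer sees them. The surviving ioctls are still driven from user space in the usual way; a sketch, with the device path and flag value assumed:

/* User-space sketch: set the drive's "nice" flags via HDIO_SET_NICE. */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hdreg.h>

static int set_nice_flags(const char *dev, unsigned long flags)
{
	int fd = open(dev, O_RDONLY | O_NONBLOCK);
	int ret;

	if (fd < 0)
		return -1;
	ret = ioctl(fd, HDIO_SET_NICE, flags);
	close(fd);
	return ret;
}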
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 1ad56a7..56f3236 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -173,7 +173,6 @@
  * struct hrtimer_cpu_base - the per cpu clock bases
  * @lock:		lock protecting the base and associated clock bases
  *			and timers
- * @lock_key:		the lock_class_key for use with lockdep
  * @clock_base:		array of clock bases for this cpu
  * @curr_timer:		the timer which is executing a callback right now
  * @expires_next:	absolute time of the next event which was scheduled
@@ -189,7 +188,6 @@
  */
 struct hrtimer_cpu_base {
 	spinlock_t			lock;
-	struct lock_class_key		lock_key;
 	struct hrtimer_clock_base	clock_base[HRTIMER_MAX_CLOCK_BASES];
 	struct list_head		cb_pending;
 #ifdef CONFIG_HIGH_RES_TIMERS
diff --git a/include/linux/ide.h b/include/linux/ide.h
index bc26b2f..6c39482 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -82,24 +82,10 @@
 
 #define IDE_FEATURE_OFFSET	IDE_ERROR_OFFSET
 #define IDE_COMMAND_OFFSET	IDE_STATUS_OFFSET
-
-#define IDE_DATA_REG		(HWIF(drive)->io_ports[IDE_DATA_OFFSET])
-#define IDE_ERROR_REG		(HWIF(drive)->io_ports[IDE_ERROR_OFFSET])
-#define IDE_NSECTOR_REG		(HWIF(drive)->io_ports[IDE_NSECTOR_OFFSET])
-#define IDE_SECTOR_REG		(HWIF(drive)->io_ports[IDE_SECTOR_OFFSET])
-#define IDE_LCYL_REG		(HWIF(drive)->io_ports[IDE_LCYL_OFFSET])
-#define IDE_HCYL_REG		(HWIF(drive)->io_ports[IDE_HCYL_OFFSET])
-#define IDE_SELECT_REG		(HWIF(drive)->io_ports[IDE_SELECT_OFFSET])
-#define IDE_STATUS_REG		(HWIF(drive)->io_ports[IDE_STATUS_OFFSET])
-#define IDE_CONTROL_REG		(HWIF(drive)->io_ports[IDE_CONTROL_OFFSET])
-#define IDE_IRQ_REG		(HWIF(drive)->io_ports[IDE_IRQ_OFFSET])
-
-#define IDE_FEATURE_REG		IDE_ERROR_REG
-#define IDE_COMMAND_REG		IDE_STATUS_REG
-#define IDE_ALTSTATUS_REG	IDE_CONTROL_REG
-#define IDE_IREASON_REG		IDE_NSECTOR_REG
-#define IDE_BCOUNTL_REG		IDE_LCYL_REG
-#define IDE_BCOUNTH_REG		IDE_HCYL_REG
+#define IDE_ALTSTATUS_OFFSET	IDE_CONTROL_OFFSET
+#define IDE_IREASON_OFFSET	IDE_NSECTOR_OFFSET
+#define IDE_BCOUNTL_OFFSET	IDE_LCYL_OFFSET
+#define IDE_BCOUNTH_OFFSET	IDE_HCYL_OFFSET
 
 #define OK_STAT(stat,good,bad)	(((stat)&((good)|(bad)))==(good))
 #define BAD_R_STAT		(BUSY_STAT   | ERR_STAT)
@@ -169,7 +155,7 @@
 		ide_rz1000,	ide_trm290,
 		ide_cmd646,	ide_cy82c693,	ide_4drives,
 		ide_pmac,	ide_etrax100,	ide_acorn,
-		ide_au1xxx,	ide_palm3710,	ide_forced
+		ide_au1xxx,	ide_palm3710
 };
 
 typedef u8 hwif_chipset_t;
@@ -186,14 +172,9 @@
 } hw_regs_t;
 
 struct hwif_s * ide_find_port(unsigned long);
-struct hwif_s *ide_deprecated_find_port(unsigned long);
 void ide_init_port_data(struct hwif_s *, unsigned int);
 void ide_init_port_hw(struct hwif_s *, hw_regs_t *);
 
-struct ide_drive_s;
-int ide_register_hw(hw_regs_t *, void (*)(struct ide_drive_s *),
-		    struct hwif_s **);
-
 static inline void ide_std_init_ports(hw_regs_t *hw,
 				      unsigned long io_addr,
 				      unsigned long ctl_addr)
@@ -213,45 +194,6 @@
 #define MAX_HWIFS	CONFIG_IDE_MAX_HWIFS
 #endif
 
-/* needed on alpha, x86/x86_64, ia64, mips, ppc32 and sh */
-#ifndef IDE_ARCH_OBSOLETE_DEFAULTS
-# define ide_default_io_base(index)	(0)
-# define ide_default_irq(base)		(0)
-# define ide_init_default_irq(base)	(0)
-#endif
-
-#ifdef CONFIG_IDE_ARCH_OBSOLETE_INIT
-static inline void ide_init_hwif_ports(hw_regs_t *hw,
-				       unsigned long io_addr,
-				       unsigned long ctl_addr,
-				       int *irq)
-{
-	if (!ctl_addr)
-		ide_std_init_ports(hw, io_addr, ide_default_io_ctl(io_addr));
-	else
-		ide_std_init_ports(hw, io_addr, ctl_addr);
-
-	if (irq)
-		*irq = 0;
-
-	hw->io_ports[IDE_IRQ_OFFSET] = 0;
-
-#ifdef CONFIG_PPC32
-	if (ppc_ide_md.ide_init_hwif)
-		ppc_ide_md.ide_init_hwif(hw, io_addr, ctl_addr, irq);
-#endif
-}
-#else
-static inline void ide_init_hwif_ports(hw_regs_t *hw,
-				       unsigned long io_addr,
-				       unsigned long ctl_addr,
-				       int *irq)
-{
-	if (io_addr || ctl_addr)
-		printk(KERN_WARNING "%s: must not be called\n", __FUNCTION__);
-}
-#endif /* CONFIG_IDE_ARCH_OBSOLETE_INIT */
-
 /* Currently only m68k, apus and m8xx need it */
 #ifndef IDE_ARCH_ACK_INTR
 # define ide_ack_intr(hwif) (1)
@@ -406,7 +348,7 @@
         u8	wcache;		/* status of write cache */
 	u8	acoustic;	/* acoustic management */
 	u8	media;		/* disk, cdrom, tape, floppy, ... */
-	u8	ctl;		/* "normal" value for IDE_CONTROL_REG */
+	u8	ctl;		/* "normal" value for Control register */
 	u8	ready_stat;	/* min status value for drive ready */
 	u8	mult_count;	/* current multiple sector setting */
 	u8	mult_req;	/* requested multiple sector setting */
@@ -507,8 +449,6 @@
 	void	(*maskproc)(ide_drive_t *, int);
 	/* check host's drive quirk list */
 	void	(*quirkproc)(ide_drive_t *);
-	/* driver soft-power interface */
-	int	(*busproc)(ide_drive_t *, int);
 #endif
 	u8 (*mdma_filter)(ide_drive_t *);
 	u8 (*udma_filter)(ide_drive_t *);
@@ -578,7 +518,6 @@
 
 	unsigned	noprobe    : 1;	/* don't probe for this interface */
 	unsigned	present    : 1;	/* this interface exists */
-	unsigned	hold       : 1; /* this interface is always present */
 	unsigned	serialized : 1;	/* serialized all channel operation */
 	unsigned	sharing_irq: 1;	/* 1 = sharing irq with another hwif */
 	unsigned	reset      : 1;	/* reset after probe */
@@ -586,7 +525,9 @@
 	unsigned	mmio       : 1; /* host uses MMIO */
 	unsigned	straight8  : 1;	/* Alan's straight 8 check */
 
-	struct device	gendev;
+	struct device		gendev;
+	struct device		*portdev;
+
 	struct completion gendev_rel_comp; /* To deal with device release() */
 
 	void		*hwif_data;	/* extra hwif data */
@@ -647,6 +588,68 @@
 int set_pio_mode(ide_drive_t *, int);
 int set_using_dma(ide_drive_t *, int);
 
+/* ATAPI packet command flags */
+enum {
+	/* set when an error is considered normal - no retry (ide-tape) */
+	PC_FLAG_ABORT			= (1 << 0),
+	PC_FLAG_SUPPRESS_ERROR		= (1 << 1),
+	PC_FLAG_WAIT_FOR_DSC		= (1 << 2),
+	PC_FLAG_DMA_OK			= (1 << 3),
+	PC_FLAG_DMA_RECOMMENDED		= (1 << 4),
+	PC_FLAG_DMA_IN_PROGRESS		= (1 << 5),
+	PC_FLAG_DMA_ERROR		= (1 << 6),
+	PC_FLAG_WRITING			= (1 << 7),
+	/* command timed out */
+	PC_FLAG_TIMEDOUT		= (1 << 8),
+};
+
+struct ide_atapi_pc {
+	/* actual packet bytes */
+	u8 c[12];
+	/* incremented on each retry */
+	int retries;
+	int error;
+
+	/* bytes to transfer */
+	int req_xfer;
+	/* bytes actually transferred */
+	int xferred;
+
+	/* data buffer */
+	u8 *buf;
+	/* current buffer position */
+	u8 *cur_pos;
+	int buf_size;
+	/* missing/available data in the current buffer */
+	int b_count;
+
+	/* the corresponding request */
+	struct request *rq;
+
+	unsigned long flags;
+
+	/*
+	 * These are more or less driver-specific and some of them are subject
+	 * to change/removal later.
+	 */
+	u8 pc_buf[256];
+	void (*idefloppy_callback) (ide_drive_t *);
+	ide_startstop_t (*idetape_callback) (ide_drive_t *);
+
+	/* idetape only */
+	struct idetape_bh *bh;
+	char *b_data;
+
+	/* idescsi only for now */
+	struct scatterlist *sg;
+	unsigned int sg_cnt;
+
+	struct scsi_cmnd *scsi_cmd;
+	void (*done) (struct scsi_cmnd *);
+
+	unsigned long timeout;
+};
+
 #ifdef CONFIG_IDE_PROC_FS
 /*
  * configurable drive settings
@@ -691,6 +694,7 @@
 void proc_ide_destroy(void);
 void ide_proc_register_port(ide_hwif_t *);
 void ide_proc_port_register_devices(ide_hwif_t *);
+void ide_proc_unregister_device(ide_drive_t *);
 void ide_proc_unregister_port(ide_hwif_t *);
 void ide_proc_register_driver(ide_drive_t *, ide_driver_t *);
 void ide_proc_unregister_driver(ide_drive_t *, ide_driver_t *);
@@ -724,6 +728,7 @@
 static inline void proc_ide_destroy(void) { ; }
 static inline void ide_proc_register_port(ide_hwif_t *hwif) { ; }
 static inline void ide_proc_port_register_devices(ide_hwif_t *hwif) { ; }
+static inline void ide_proc_unregister_device(ide_drive_t *drive) { ; }
 static inline void ide_proc_unregister_port(ide_hwif_t *hwif) { ; }
 static inline void ide_proc_register_driver(ide_drive_t *drive, ide_driver_t *driver) { ; }
 static inline void ide_proc_unregister_driver(ide_drive_t *drive, ide_driver_t *driver) { ; }
@@ -990,7 +995,6 @@
 void ide_init_disk(struct gendisk *, ide_drive_t *);
 
 #ifdef CONFIG_IDEPCI_PCIBUS_ORDER
-extern int ide_scan_direction;
 extern int __ide_pci_register_driver(struct pci_driver *driver, struct module *owner, const char *mod_name);
 #define ide_pci_register_driver(d) __ide_pci_register_driver(d, THIS_MODULE, KBUILD_MODNAME)
 #else
@@ -1195,7 +1199,7 @@
 void ide_remove_port_from_hwgroup(ide_hwif_t *);
 extern int ide_hwif_request_regions(ide_hwif_t *hwif);
 extern void ide_hwif_release_regions(ide_hwif_t* hwif);
-void ide_unregister(unsigned int, int, int);
+void ide_unregister(unsigned int);
 
 void ide_register_region(struct gendisk *);
 void ide_unregister_region(struct gendisk *);
@@ -1204,6 +1208,8 @@
 
 int ide_device_add_all(u8 *idx, const struct ide_port_info *);
 int ide_device_add(u8 idx[4], const struct ide_port_info *);
+void ide_port_unregister_devices(ide_hwif_t *);
+void ide_port_scan(ide_hwif_t *);
 
 static inline void *ide_get_hwifdata (ide_hwif_t * hwif)
 {
@@ -1279,6 +1285,7 @@
 #define local_irq_set(flags)	do { local_save_flags((flags)); local_irq_enable_in_hardirq(); } while (0)
 
 extern struct bus_type ide_bus_type;
+extern struct class *ide_port_class;
 
 /* check if CACHE FLUSH (EXT) command is supported (bits defined in ATA-6) */
 #define ide_id_has_flush_cache(id)	((id)->cfs_enable_2 & 0x3000)
@@ -1307,7 +1314,10 @@
 
 static inline void ide_set_irq(ide_drive_t *drive, int on)
 {
-	drive->hwif->OUTB(drive->ctl | (on ? 0 : 2), IDE_CONTROL_REG);
+	ide_hwif_t *hwif = drive->hwif;
+
+	hwif->OUTB(drive->ctl | (on ? 0 : 2),
+		   hwif->io_ports[IDE_CONTROL_OFFSET]);
 }
 
 static inline u8 ide_read_status(ide_drive_t *drive)
@@ -1331,4 +1341,26 @@
 	return hwif->INB(hwif->io_ports[IDE_ERROR_OFFSET]);
 }
 
+/*
+ * Too bad. The drive wants to send us data which we are not ready to accept.
+ * Just throw it away.
+ */
+static inline void ide_atapi_discard_data(ide_drive_t *drive, unsigned bcount)
+{
+	ide_hwif_t *hwif = drive->hwif;
+
+	/* FIXME: use ->atapi_input_bytes */
+	while (bcount--)
+		(void)hwif->INB(hwif->io_ports[IDE_DATA_OFFSET]);
+}
+
+static inline void ide_atapi_write_zeros(ide_drive_t *drive, unsigned bcount)
+{
+	ide_hwif_t *hwif = drive->hwif;
+
+	/* FIXME: use ->atapi_output_bytes */
+	while (bcount--)
+		hwif->OUTB(0, hwif->io_ports[IDE_DATA_OFFSET]);
+}
+
 #endif /* _IDE_H */
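With ide_register_hw() and the IDE_*_REG macros gone, a host driver fills a hw_regs_t itself and hands it to the core, and register access always goes through hwif->io_ports[] as ide_set_irq() and the two ATAPI helpers above already do. A hedged sketch of the probe sequence; the I/O base, control address, and IRQ are illustrative legacy-port values, not a real driver:

/* Sketch of the replacement for the removed ide_register_hw(). */
static int __init my_ide_probe(void)
{
	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
	hw_regs_t hw;
	ide_hwif_t *hwif;

	memset(&hw, 0, sizeof(hw));
	ide_std_init_ports(&hw, 0x1f0, 0x3f6);	/* assumed base/ctl */
	hw.irq = 14;				/* assumed IRQ */

	hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
	if (hwif == NULL)
		return -ENODEV;

	ide_init_port_hw(hwif, &hw);
	idx[0] = hwif->index;

	return ide_device_add(idx, NULL);
}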
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index f8ab4ce..b5fef13 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -102,6 +102,25 @@
 extern void disable_irq(unsigned int irq);
 extern void enable_irq(unsigned int irq);
 
+#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
+
+extern int irq_set_affinity(unsigned int irq, cpumask_t cpumask);
+extern int irq_can_set_affinity(unsigned int irq);
+
+#else /* CONFIG_SMP */
+
+static inline int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
+{
+	return -EINVAL;
+}
+
+static inline int irq_can_set_affinity(unsigned int irq)
+{
+	return 0;
+}
+
+#endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */
+
 #ifdef CONFIG_GENERIC_HARDIRQS
 /*
  * Special lockdep variants of irq disabling/enabling.
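Moving these declarations here, with !SMP stubs, lets ordinary drivers steer interrupts without including linux/irq.h. A small sketch; the irq number and target CPU are assumptions, and cpumask_of_cpu() is the era's helper for building a single-CPU mask:

/* Sketch: pin an interrupt to one CPU when the architecture allows it. */
static void pin_irq(unsigned int irq, int cpu)
{
	if (irq_can_set_affinity(irq))
		irq_set_affinity(irq, cpumask_of_cpu(cpu));
}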
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 176e5e7..1883a85 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -228,21 +228,11 @@
 
 #endif /* CONFIG_GENERIC_PENDING_IRQ */
 
-extern int irq_set_affinity(unsigned int irq, cpumask_t cpumask);
-extern int irq_can_set_affinity(unsigned int irq);
-
 #else /* CONFIG_SMP */
 
 #define move_native_irq(x)
 #define move_masked_irq(x)
 
-static inline int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
-{
-	return -EINVAL;
-}
-
-static inline int irq_can_set_affinity(unsigned int irq) { return 0; }
-
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_IRQBALANCE
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
new file mode 100644
index 0000000..9757b1a6
--- /dev/null
+++ b/include/linux/kgdb.h
@@ -0,0 +1,281 @@
+/*
+ * This provides the callbacks and functions that KGDB needs to share between
+ * the core, I/O and arch-specific portions.
+ *
+ * Author: Amit Kale <amitkale@linsyssoft.com> and
+ *         Tom Rini <trini@kernel.crashing.org>
+ *
+ * 2001-2004 (c) Amit S. Kale and 2003-2005 (c) MontaVista Software, Inc.
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+#ifndef _KGDB_H_
+#define _KGDB_H_
+
+#include <linux/serial_8250.h>
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+#include <asm/atomic.h>
+#include <asm/kgdb.h>
+
+struct pt_regs;
+
+/**
+ *	kgdb_skipexception - (optional) exit kgdb_handle_exception early
+ *	@exception: Exception vector number
+ *	@regs: Current &struct pt_regs.
+ *
+ *	On some architectures it is required to skip a breakpoint
+ *	exception when it occurs after a breakpoint has been removed.
+ *	This can be implemented in the architecture-specific portion
+ *	of kgdb.
+ */
+extern int kgdb_skipexception(int exception, struct pt_regs *regs);
+
+/**
+ *	kgdb_post_primary_code - (optional) Save error vector/code numbers.
+ *	@regs: Original pt_regs.
+ *	@e_vector: Original error vector.
+ *	@err_code: Original error code.
+ *
+ *	This is usually needed on architectures which support SMP and
+ *	KGDB.  This function is called after all the secondary cpus have
+ *	been put to a known spin state and the primary CPU has control over
+ *	KGDB.
+ */
+extern void kgdb_post_primary_code(struct pt_regs *regs, int e_vector,
+				  int err_code);
+
+/**
+ *	kgdb_disable_hw_debug - (optional) Disable hardware debugging hook
+ *	@regs: Current &struct pt_regs.
+ *
+ *	This function will be called if the particular architecture must
+ *	disable hardware debugging while it is processing gdb packets or
+ *	handling an exception.
+ */
+extern void kgdb_disable_hw_debug(struct pt_regs *regs);
+
+struct tasklet_struct;
+struct task_struct;
+struct uart_port;
+
+/**
+ *	kgdb_breakpoint - compiled-in breakpoint
+ *
+ *	This will be implemented as a static inline per architecture.  This
+ *	function is called by the kgdb core to execute an architecture
+ *	specific trap to cause kgdb to enter the exception processing.
+ *
+ */
+void kgdb_breakpoint(void);
+
+extern int kgdb_connected;
+
+extern atomic_t			kgdb_setting_breakpoint;
+extern atomic_t			kgdb_cpu_doing_single_step;
+
+extern struct task_struct	*kgdb_usethread;
+extern struct task_struct	*kgdb_contthread;
+
+enum kgdb_bptype {
+	BP_BREAKPOINT = 0,
+	BP_HARDWARE_BREAKPOINT,
+	BP_WRITE_WATCHPOINT,
+	BP_READ_WATCHPOINT,
+	BP_ACCESS_WATCHPOINT
+};
+
+enum kgdb_bpstate {
+	BP_UNDEFINED = 0,
+	BP_REMOVED,
+	BP_SET,
+	BP_ACTIVE
+};
+
+struct kgdb_bkpt {
+	unsigned long		bpt_addr;
+	unsigned char		saved_instr[BREAK_INSTR_SIZE];
+	enum kgdb_bptype	type;
+	enum kgdb_bpstate	state;
+};
+
+#ifndef KGDB_MAX_BREAKPOINTS
+# define KGDB_MAX_BREAKPOINTS	1000
+#endif
+
+#define KGDB_HW_BREAKPOINT	1
+
+/*
+ * Functions each KGDB-supporting architecture must provide:
+ */
+
+/**
+ *	kgdb_arch_init - Perform any architecture-specific initialization.
+ *
+ *	This function will handle the initialization of any architecture
+ *	specific callbacks.
+ */
+extern int kgdb_arch_init(void);
+
+/**
+ *	kgdb_arch_exit - Perform any architecture-specific uninitialization.
+ *
+ *	This function will handle the uninitialization of any architecture
+ *	specific callbacks, for dynamic registration and unregistration.
+ */
+extern void kgdb_arch_exit(void);
+
+/**
+ *	pt_regs_to_gdb_regs - Convert ptrace regs to GDB regs
+ *	@gdb_regs: A pointer to hold the registers in the order GDB wants.
+ *	@regs: The &struct pt_regs of the current process.
+ *
+ *	Convert the pt_regs in @regs into the format for registers that
+ *	GDB expects, stored in @gdb_regs.
+ */
+extern void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs);
+
+/**
+ *	sleeping_thread_to_gdb_regs - Convert ptrace regs to GDB regs
+ *	@gdb_regs: A pointer to hold the registers in the order GDB wants.
+ *	@p: The &struct task_struct of the desired process.
+ *
+ *	Convert the register values of the sleeping process in @p to
+ *	the format that GDB expects.
+ *	This function is called when kgdb does not have access to the
+ *	&struct pt_regs and therefore it should fill the gdb registers
+ *	@gdb_regs with what has been saved in the &struct thread_struct
+ *	thread field during switch_to.
+ */
+extern void
+sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p);
+
+/**
+ *	gdb_regs_to_pt_regs - Convert GDB regs to ptrace regs.
+ *	@gdb_regs: A pointer to hold the registers we've received from GDB.
+ *	@regs: A pointer to a &struct pt_regs to hold these values in.
+ *
+ *	Convert the GDB regs in @gdb_regs into the pt_regs, and store them
+ *	in @regs.
+ */
+extern void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs);
+
+/**
+ *	kgdb_arch_handle_exception - Handle architecture specific GDB packets.
+ *	@vector: The error vector of the exception that happened.
+ *	@signo: The signal number of the exception that happened.
+ *	@err_code: The error code of the exception that happened.
+ *	@remcom_in_buffer: The buffer of the packet we have read.
+ *	@remcom_out_buffer: The buffer of %BUFMAX bytes to write a packet into.
+ *	@regs: The &struct pt_regs of the current process.
+ *
+ *	This function MUST handle the 'c' and 's' command packets,
+ *	as well as packets to set/remove a hardware breakpoint, if used.
+ *	If there are additional packets which the hardware needs to handle,
+ *	they are handled here.  The code should return -1 if it wants to
+ *	process more packets, and %0 or %1 if it wants to exit from the
+ *	kgdb callback.
+ */
+extern int
+kgdb_arch_handle_exception(int vector, int signo, int err_code,
+			   char *remcom_in_buffer,
+			   char *remcom_out_buffer,
+			   struct pt_regs *regs);
+
+/**
+ *	kgdb_roundup_cpus - Get other CPUs into a holding pattern
+ *	@flags: Current IRQ state
+ *
+ *	On SMP systems, we need to get the attention of the other CPUs
+ *	and get them into a known state.  This should do what is needed
+ *	to get the other CPUs to call kgdb_wait(). Note that on some arches,
+ *	the NMI approach is not used for rounding up all the CPUs. For example,
+ *	in the case of MIPS, smp_call_function() is used to round up CPUs. In
+ *	this case, we have to make sure that interrupts are enabled before
+ *	calling smp_call_function(). The argument to this function is
+ *	the flags that will be used when restoring the interrupts. There is
+ *	a local_irq_save() call before kgdb_roundup_cpus().
+ *
+ *	On non-SMP systems, this is not called.
+ */
+extern void kgdb_roundup_cpus(unsigned long flags);
+
+/* Optional functions. */
+extern int kgdb_validate_break_address(unsigned long addr);
+extern int kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr);
+extern int kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle);
+
+/**
+ * struct kgdb_arch - Describe architecture specific values.
+ * @gdb_bpt_instr: The instruction to trigger a breakpoint.
+ * @flags: Flags for the breakpoint, currently just %KGDB_HW_BREAKPOINT.
+ * @set_breakpoint: Allow an architecture to specify how to set a software
+ * breakpoint.
+ * @remove_breakpoint: Allow an architecture to specify how to remove a
+ * software breakpoint.
+ * @set_hw_breakpoint: Allow an architecture to specify how to set a hardware
+ * breakpoint.
+ * @remove_hw_breakpoint: Allow an architecture to specify how to remove a
+ * hardware breakpoint.
+ * @remove_all_hw_break: Allow an architecture to specify how to remove all
+ * hardware breakpoints.
+ * @correct_hw_break: Allow an architecture to specify how to correct the
+ * hardware debug registers.
+ */
+struct kgdb_arch {
+	unsigned char		gdb_bpt_instr[BREAK_INSTR_SIZE];
+	unsigned long		flags;
+
+	int	(*set_breakpoint)(unsigned long, char *);
+	int	(*remove_breakpoint)(unsigned long, char *);
+	int	(*set_hw_breakpoint)(unsigned long, int, enum kgdb_bptype);
+	int	(*remove_hw_breakpoint)(unsigned long, int, enum kgdb_bptype);
+	void	(*remove_all_hw_break)(void);
+	void	(*correct_hw_break)(void);
+};
+
+/**
+ * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
+ * @name: Name of the I/O driver.
+ * @read_char: Pointer to a function that will return one char.
+ * @write_char: Pointer to a function that will write one char.
+ * @flush: Pointer to a function that will flush any pending writes.
+ * @init: Pointer to a function that will initialize the device.
+ * @pre_exception: Pointer to a function that will do any prep work for
+ * the I/O driver.
+ * @post_exception: Pointer to a function that will do any cleanup work
+ * for the I/O driver.
+ */
+struct kgdb_io {
+	const char		*name;
+	int			(*read_char) (void);
+	void			(*write_char) (u8);
+	void			(*flush) (void);
+	int			(*init) (void);
+	void			(*pre_exception) (void);
+	void			(*post_exception) (void);
+};
+
+extern struct kgdb_arch		arch_kgdb_ops;
+
+extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
+extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
+
+extern int kgdb_hex2long(char **ptr, long *long_val);
+extern int kgdb_mem2hex(char *mem, char *buf, int count);
+extern int kgdb_hex2mem(char *buf, char *mem, int count);
+
+extern int kgdb_isremovedbreak(unsigned long addr);
+
+extern int
+kgdb_handle_exception(int ex_vector, int signo, int err_code,
+		      struct pt_regs *regs);
+extern int kgdb_nmicallback(int cpu, void *regs);
+
+extern int			kgdb_single_step;
+extern atomic_t			kgdb_active;
+
+#endif /* _KGDB_H_ */
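An I/O driver implements polled character routines, fills a struct kgdb_io, and registers it with the core; of the hooks above, only read_char and write_char are strictly required. A hedged sketch around a hypothetical polled UART; the my_uart_* helpers are assumptions, not a real API:

/* Sketch of a kgdb I/O module built on the interface above. */
extern int my_uart_rx_poll(void);	/* assumed: blocks until a byte arrives */
extern void my_uart_tx_poll(u8 c);	/* assumed: busy-waits the byte out */

static int my_kgdb_read_char(void)
{
	return my_uart_rx_poll();
}

static void my_kgdb_write_char(u8 c)
{
	my_uart_tx_poll(c);
}

static struct kgdb_io my_kgdb_io_ops = {
	.name		= "my_uart",
	.read_char	= my_kgdb_read_char,
	.write_char	= my_kgdb_write_char,
};

static int __init my_kgdb_init(void)
{
	return kgdb_register_io_module(&my_kgdb_io_ops);
}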
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 37ee881..165734a 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -122,6 +122,8 @@
 
 	ATAPI_MAX_DRAIN		= 16 << 10,
 
+	ATA_ALL_DEVICES		= (1 << ATA_MAX_DEVICES) - 1,
+
 	ATA_SHT_EMULATED	= 1,
 	ATA_SHT_CMD_PER_LUN	= 1,
 	ATA_SHT_THIS_ID		= -1,
@@ -163,9 +165,6 @@
 	ATA_DEV_NONE		= 9,	/* no device */
 
 	/* struct ata_link flags */
-	ATA_LFLAG_HRST_TO_RESUME = (1 << 0), /* hardreset to resume link */
-	ATA_LFLAG_SKIP_D2H_BSY	= (1 << 1), /* can't wait for the first D2H
-					     * Register FIS clearing BSY */
 	ATA_LFLAG_NO_SRST	= (1 << 2), /* avoid softreset */
 	ATA_LFLAG_ASSUME_ATA	= (1 << 3), /* assume ATA class */
 	ATA_LFLAG_ASSUME_SEMB	= (1 << 4), /* assume SEMB class */
@@ -225,6 +224,7 @@
 	ATA_QCFLAG_RESULT_TF	= (1 << 4), /* result TF requested */
 	ATA_QCFLAG_CLEAR_EXCL	= (1 << 5), /* clear excl_link on completion */
 	ATA_QCFLAG_QUIET	= (1 << 6), /* don't report device error */
+	ATA_QCFLAG_RETRY	= (1 << 7), /* retry after failure */
 
 	ATA_QCFLAG_FAILED	= (1 << 16), /* cmd failed and is owned by EH */
 	ATA_QCFLAG_SENSE_VALID	= (1 << 17), /* sense data valid */
@@ -249,6 +249,25 @@
 	 */
 	ATA_TMOUT_FF_WAIT	= 4 * HZ / 5,
 
+	/* The spec mandates waiting ">= 2ms" before checking status
+	 * after reset.  We wait 150ms, because that was the magic
+	 * delay used for ATAPI devices in Hale Landis's ATADRVR, as
+	 * the period of time between when the ATA command register is
+	 * written and when status is first checked.  Because waiting
+	 * for "a while" before checking status is fine, post SRST, we
+	 * perform this magic delay here as well.
+	 *
+	 * The old drivers/ide code uses the 2ms rule and then waits for ready.
+	 */
+	ATA_WAIT_AFTER_RESET_MSECS = 150,
+
+	/* If PMP is supported, we have to do a follow-up SRST.  As some
+	 * PMPs don't send D2H Reg FIS after hardreset, LLDs are
+	 * advised to wait only for the following duration before
+	 * doing SRST.
+	 */
+	ATA_TMOUT_PMP_SRST_WAIT	= 1 * HZ,
+
 	/* ATA bus states */
 	BUS_UNKNOWN		= 0,
 	BUS_DMA			= 1,
@@ -292,17 +311,16 @@
 
 	/* reset / recovery action types */
 	ATA_EH_REVALIDATE	= (1 << 0),
-	ATA_EH_SOFTRESET	= (1 << 1),
-	ATA_EH_HARDRESET	= (1 << 2),
+	ATA_EH_SOFTRESET	= (1 << 1), /* meaningful only in ->prereset */
+	ATA_EH_HARDRESET	= (1 << 2), /* meaningful only in ->prereset */
+	ATA_EH_RESET		= ATA_EH_SOFTRESET | ATA_EH_HARDRESET,
 	ATA_EH_ENABLE_LINK	= (1 << 3),
 	ATA_EH_LPM		= (1 << 4),  /* link power management action */
 
-	ATA_EH_RESET_MASK	= ATA_EH_SOFTRESET | ATA_EH_HARDRESET,
 	ATA_EH_PERDEV_MASK	= ATA_EH_REVALIDATE,
 
 	/* ata_eh_info->flags */
 	ATA_EHI_HOTPLUGGED	= (1 << 0),  /* could have been hotplugged */
-	ATA_EHI_RESUME_LINK	= (1 << 1),  /* resume link (reset modifier) */
 	ATA_EHI_NO_AUTOPSY	= (1 << 2),  /* no autopsy */
 	ATA_EHI_QUIET		= (1 << 3),  /* be quiet */
 
@@ -313,7 +331,6 @@
 	ATA_EHI_POST_SETMODE	= (1 << 20), /* revalidating after setmode */
 
 	ATA_EHI_DID_RESET	= ATA_EHI_DID_SOFTRESET | ATA_EHI_DID_HARDRESET,
-	ATA_EHI_RESET_MODIFIER_MASK = ATA_EHI_RESUME_LINK,
 
 	/* max tries if error condition is still set after ->error_handler */
 	ATA_EH_MAX_TRIES	= 5,
@@ -352,6 +369,22 @@
 	ATAPI_READ_CD		= 2,		/* READ CD [MSF] */
 	ATAPI_PASS_THRU		= 3,		/* SAT pass-thru */
 	ATAPI_MISC		= 4,		/* the rest */
+
+	/* Timing constants */
+	ATA_TIMING_SETUP	= (1 << 0),
+	ATA_TIMING_ACT8B	= (1 << 1),
+	ATA_TIMING_REC8B	= (1 << 2),
+	ATA_TIMING_CYC8B	= (1 << 3),
+	ATA_TIMING_8BIT		= ATA_TIMING_ACT8B | ATA_TIMING_REC8B |
+				  ATA_TIMING_CYC8B,
+	ATA_TIMING_ACTIVE	= (1 << 4),
+	ATA_TIMING_RECOVER	= (1 << 5),
+	ATA_TIMING_CYCLE	= (1 << 6),
+	ATA_TIMING_UDMA		= (1 << 7),
+	ATA_TIMING_ALL		= ATA_TIMING_SETUP | ATA_TIMING_ACT8B |
+				  ATA_TIMING_REC8B | ATA_TIMING_CYC8B |
+				  ATA_TIMING_ACTIVE | ATA_TIMING_RECOVER |
+				  ATA_TIMING_CYCLE | ATA_TIMING_UDMA,
 };
 
 enum ata_xfer_mask {
@@ -412,6 +445,7 @@
 };
 extern struct class_device_attribute class_device_attr_link_power_management_policy;
 
+#ifdef CONFIG_ATA_SFF
 struct ata_ioports {
 	void __iomem		*cmd_addr;
 	void __iomem		*data_addr;
@@ -429,6 +463,7 @@
 	void __iomem		*bmdma_addr;
 	void __iomem		*scr_addr;
 };
+#endif /* CONFIG_ATA_SFF */
 
 struct ata_host {
 	spinlock_t		lock;
@@ -436,7 +471,7 @@
 	void __iomem * const	*iomap;
 	unsigned int		n_ports;
 	void			*private_data;
-	const struct ata_port_operations *ops;
+	struct ata_port_operations *ops;
 	unsigned long		flags;
 #ifdef CONFIG_ATA_ACPI
 	acpi_handle		acpi_handle;
@@ -605,7 +640,7 @@
 
 struct ata_port {
 	struct Scsi_Host	*scsi_host; /* our co-allocated scsi host */
-	const struct ata_port_operations *ops;
+	struct ata_port_operations *ops;
 	spinlock_t		*lock;
 	unsigned long		flags;	/* ATA_FLAG_xxx */
 	unsigned int		pflags; /* ATA_PFLAG_xxx */
@@ -615,7 +650,9 @@
 	struct ata_prd		*prd;	 /* our SG list */
 	dma_addr_t		prd_dma; /* and its DMA mapping */
 
+#ifdef CONFIG_ATA_SFF
 	struct ata_ioports	ioaddr;	/* ATA cmd/ctl/dma register blocks */
+#endif /* CONFIG_ATA_SFF */
 
 	u8			ctl;	/* cache of ATA control register */
 	u8			last_ctl;	/* Cache last written value */
@@ -667,81 +704,108 @@
 	u8			sector_buf[ATA_SECT_SIZE]; /* owned by EH */
 };
 
+/* The following initializer overrides a method to NULL whether or not
+ * one of its parents has the method defined.  This is equivalent to
+ * ERR_PTR(-ENOENT).  Unfortunately, ERR_PTR() doesn't yield a constant
+ * expression and thus can't be used as an initializer.
+ */
+#define ATA_OP_NULL		(void *)(unsigned long)(-ENOENT)
+
 struct ata_port_operations {
-	void (*dev_config) (struct ata_device *);
-
-	void (*set_piomode) (struct ata_port *, struct ata_device *);
-	void (*set_dmamode) (struct ata_port *, struct ata_device *);
-	unsigned long (*mode_filter) (struct ata_device *, unsigned long);
-
-	void (*tf_load) (struct ata_port *ap, const struct ata_taskfile *tf);
-	void (*tf_read) (struct ata_port *ap, struct ata_taskfile *tf);
-
-	void (*exec_command)(struct ata_port *ap, const struct ata_taskfile *tf);
-	u8   (*check_status)(struct ata_port *ap);
-	u8   (*check_altstatus)(struct ata_port *ap);
-	void (*dev_select)(struct ata_port *ap, unsigned int device);
-
-	void (*phy_reset) (struct ata_port *ap); /* obsolete */
-	int  (*set_mode) (struct ata_link *link, struct ata_device **r_failed_dev);
-
-	int (*cable_detect) (struct ata_port *ap);
-
-	int  (*check_atapi_dma) (struct ata_queued_cmd *qc);
-
-	void (*bmdma_setup) (struct ata_queued_cmd *qc);
-	void (*bmdma_start) (struct ata_queued_cmd *qc);
-
-	unsigned int (*data_xfer) (struct ata_device *dev, unsigned char *buf,
-				   unsigned int buflen, int rw);
-
-	int (*qc_defer) (struct ata_queued_cmd *qc);
-	void (*qc_prep) (struct ata_queued_cmd *qc);
-	unsigned int (*qc_issue) (struct ata_queued_cmd *qc);
-
-	/* port multiplier */
-	void (*pmp_attach) (struct ata_port *ap);
-	void (*pmp_detach) (struct ata_port *ap);
-
-	/* Error handlers.  ->error_handler overrides ->eng_timeout and
-	 * indicates that new-style EH is in place.
+	/*
+	 * Command execution
 	 */
-	void (*eng_timeout) (struct ata_port *ap); /* obsolete */
+	int  (*qc_defer)(struct ata_queued_cmd *qc);
+	int  (*check_atapi_dma)(struct ata_queued_cmd *qc);
+	void (*qc_prep)(struct ata_queued_cmd *qc);
+	unsigned int (*qc_issue)(struct ata_queued_cmd *qc);
+	bool (*qc_fill_rtf)(struct ata_queued_cmd *qc);
 
-	void (*freeze) (struct ata_port *ap);
-	void (*thaw) (struct ata_port *ap);
-	void (*error_handler) (struct ata_port *ap);
-	void (*post_internal_cmd) (struct ata_queued_cmd *qc);
+	/*
+	 * Configuration and exception handling
+	 */
+	int  (*cable_detect)(struct ata_port *ap);
+	unsigned long (*mode_filter)(struct ata_device *dev, unsigned long xfer_mask);
+	void (*set_piomode)(struct ata_port *ap, struct ata_device *dev);
+	void (*set_dmamode)(struct ata_port *ap, struct ata_device *dev);
+	int  (*set_mode)(struct ata_link *link, struct ata_device **r_failed_dev);
 
-	irq_handler_t irq_handler;
-	void (*irq_clear) (struct ata_port *);
-	u8 (*irq_on) (struct ata_port *);
+	void (*dev_config)(struct ata_device *dev);
 
-	int (*scr_read) (struct ata_port *ap, unsigned int sc_reg, u32 *val);
-	int (*scr_write) (struct ata_port *ap, unsigned int sc_reg, u32 val);
+	void (*freeze)(struct ata_port *ap);
+	void (*thaw)(struct ata_port *ap);
+	ata_prereset_fn_t	prereset;
+	ata_reset_fn_t		softreset;
+	ata_reset_fn_t		hardreset;
+	ata_postreset_fn_t	postreset;
+	ata_prereset_fn_t	pmp_prereset;
+	ata_reset_fn_t		pmp_softreset;
+	ata_reset_fn_t		pmp_hardreset;
+	ata_postreset_fn_t	pmp_postreset;
+	void (*error_handler)(struct ata_port *ap);
+	void (*post_internal_cmd)(struct ata_queued_cmd *qc);
 
-	int (*port_suspend) (struct ata_port *ap, pm_message_t mesg);
-	int (*port_resume) (struct ata_port *ap);
-	int (*enable_pm) (struct ata_port *ap, enum link_pm policy);
-	void (*disable_pm) (struct ata_port *ap);
-	int (*port_start) (struct ata_port *ap);
-	void (*port_stop) (struct ata_port *ap);
+	/*
+	 * Optional features
+	 */
+	int  (*scr_read)(struct ata_port *ap, unsigned int sc_reg, u32 *val);
+	int  (*scr_write)(struct ata_port *ap, unsigned int sc_reg, u32 val);
+	void (*pmp_attach)(struct ata_port *ap);
+	void (*pmp_detach)(struct ata_port *ap);
+	int  (*enable_pm)(struct ata_port *ap, enum link_pm policy);
+	void (*disable_pm)(struct ata_port *ap);
 
-	void (*host_stop) (struct ata_host *host);
+	/*
+	 * Start, stop, suspend and resume
+	 */
+	int  (*port_suspend)(struct ata_port *ap, pm_message_t mesg);
+	int  (*port_resume)(struct ata_port *ap);
+	int  (*port_start)(struct ata_port *ap);
+	void (*port_stop)(struct ata_port *ap);
+	void (*host_stop)(struct ata_host *host);
 
-	void (*bmdma_stop) (struct ata_queued_cmd *qc);
-	u8   (*bmdma_status) (struct ata_port *ap);
+#ifdef CONFIG_ATA_SFF
+	/*
+	 * SFF / taskfile oriented ops
+	 */
+	void (*sff_dev_select)(struct ata_port *ap, unsigned int device);
+	u8   (*sff_check_status)(struct ata_port *ap);
+	u8   (*sff_check_altstatus)(struct ata_port *ap);
+	void (*sff_tf_load)(struct ata_port *ap, const struct ata_taskfile *tf);
+	void (*sff_tf_read)(struct ata_port *ap, struct ata_taskfile *tf);
+	void (*sff_exec_command)(struct ata_port *ap,
+				 const struct ata_taskfile *tf);
+	unsigned int (*sff_data_xfer)(struct ata_device *dev,
+			unsigned char *buf, unsigned int buflen, int rw);
+	u8   (*sff_irq_on)(struct ata_port *);
+	void (*sff_irq_clear)(struct ata_port *);
+
+	void (*bmdma_setup)(struct ata_queued_cmd *qc);
+	void (*bmdma_start)(struct ata_queued_cmd *qc);
+	void (*bmdma_stop)(struct ata_queued_cmd *qc);
+	u8   (*bmdma_status)(struct ata_port *ap);
+#endif /* CONFIG_ATA_SFF */
+
+	/*
+	 * Obsolete
+	 */
+	void (*phy_reset)(struct ata_port *ap);
+	void (*eng_timeout)(struct ata_port *ap);
+
+	/*
+	 * ->inherits must be the last field and all the preceding
+	 * fields must be pointers.
+	 */
+	const struct ata_port_operations	*inherits;
 };
 
 struct ata_port_info {
-	struct scsi_host_template	*sht;
 	unsigned long		flags;
 	unsigned long		link_flags;
 	unsigned long		pio_mask;
 	unsigned long		mwdma_mask;
 	unsigned long		udma_mask;
-	const struct ata_port_operations *port_ops;
-	irq_handler_t		irq_handler;
+	struct ata_port_operations *port_ops;
 	void 			*private_data;
 };
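Under the new model a LLD names a parent table in ->inherits and spells out only the differences, with ATA_OP_NULL (defined above) explicitly masking an inherited method. A hedged sketch; the my_* names are illustrative, not an in-tree driver:

/* Sketch of the inheritance idiom enabled by ->inherits and ATA_OP_NULL. */
static void my_set_piomode(struct ata_port *ap, struct ata_device *adev);

static struct ata_port_operations my_port_ops = {
	.inherits	= &ata_bmdma_port_ops,
	.cable_detect	= ata_cable_40wire,
	.set_piomode	= my_set_piomode,
	.mode_filter	= ATA_OP_NULL,	/* drop the inherited filter */
};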
 
@@ -759,11 +823,14 @@
 
 #define FIT(v, vmin, vmax)	max_t(short, min_t(short, v, vmax), vmin)
 
+/*
+ * Core layer - drivers/ata/libata-core.c
+ */
 extern const unsigned long sata_deb_timing_normal[];
 extern const unsigned long sata_deb_timing_hotplug[];
 extern const unsigned long sata_deb_timing_long[];
 
-extern const struct ata_port_operations ata_dummy_port_ops;
+extern struct ata_port_operations ata_dummy_port_ops;
 extern const struct ata_port_info ata_dummy_port_info;
 
 static inline const unsigned long *
@@ -782,22 +849,21 @@
 
 extern void sata_print_link_status(struct ata_link *link);
 extern void ata_port_probe(struct ata_port *);
-extern void ata_bus_reset(struct ata_port *ap);
 extern int sata_set_spd(struct ata_link *link);
+extern int ata_std_prereset(struct ata_link *link, unsigned long deadline);
+extern int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
+				int (*check_ready)(struct ata_link *link));
 extern int sata_link_debounce(struct ata_link *link,
 			const unsigned long *params, unsigned long deadline);
 extern int sata_link_resume(struct ata_link *link, const unsigned long *params,
 			    unsigned long deadline);
-extern int ata_std_prereset(struct ata_link *link, unsigned long deadline);
-extern int ata_std_softreset(struct ata_link *link, unsigned int *classes,
-			     unsigned long deadline);
 extern int sata_link_hardreset(struct ata_link *link,
-			const unsigned long *timing, unsigned long deadline);
+			const unsigned long *timing, unsigned long deadline,
+			bool *online, int (*check_ready)(struct ata_link *));
 extern int sata_std_hardreset(struct ata_link *link, unsigned int *class,
 			      unsigned long deadline);
 extern void ata_std_postreset(struct ata_link *link, unsigned int *classes);
 extern void ata_port_disable(struct ata_port *);
-extern void ata_std_ports(struct ata_ioports *ioaddr);
 
 extern struct ata_host *ata_host_alloc(struct device *dev, int max_ports);
 extern struct ata_host *ata_host_alloc_pinfo(struct device *dev,
@@ -810,7 +876,7 @@
 			     struct scsi_host_template *sht);
 extern void ata_host_detach(struct ata_host *host);
 extern void ata_host_init(struct ata_host *, struct device *,
-			  unsigned long, const struct ata_port_operations *);
+			  unsigned long, struct ata_port_operations *);
 extern int ata_scsi_detect(struct scsi_host_template *sht);
 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
@@ -823,7 +889,6 @@
 extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *);
 extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *),
 			    struct ata_port *ap);
-extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
 extern int sata_scr_valid(struct ata_link *link);
 extern int sata_scr_read(struct ata_link *link, int reg, u32 *val);
 extern int sata_scr_write(struct ata_link *link, int reg, u32 val);
@@ -835,21 +900,9 @@
 extern void ata_host_resume(struct ata_host *host);
 #endif
 extern int ata_ratelimit(void);
-extern int ata_busy_sleep(struct ata_port *ap,
-			  unsigned long timeout_pat, unsigned long timeout);
-extern void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline);
-extern int ata_wait_ready(struct ata_port *ap, unsigned long deadline);
 extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
 			     unsigned long interval_msec,
 			     unsigned long timeout_msec);
-extern unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
-					 u8 *r_err);
-
-/*
- * Default driver ops implementations
- */
-extern void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
-extern void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
 extern int atapi_cmd_type(u8 opcode);
 extern void ata_tf_to_fis(const struct ata_taskfile *tf,
 			  u8 pmp, int is_cmd, u8 *fis);
@@ -864,23 +917,9 @@
 extern int ata_xfer_mode2shift(unsigned long xfer_mode);
 extern const char *ata_mode_string(unsigned long xfer_mask);
 extern unsigned long ata_id_xfermask(const u16 *id);
-extern void ata_noop_dev_select(struct ata_port *ap, unsigned int device);
-extern void ata_std_dev_select(struct ata_port *ap, unsigned int device);
-extern u8 ata_check_status(struct ata_port *ap);
-extern u8 ata_altstatus(struct ata_port *ap);
-extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf);
 extern int ata_port_start(struct ata_port *ap);
-extern int ata_sff_port_start(struct ata_port *ap);
-extern irqreturn_t ata_interrupt(int irq, void *dev_instance);
-extern unsigned int ata_data_xfer(struct ata_device *dev,
-			unsigned char *buf, unsigned int buflen, int rw);
-extern unsigned int ata_data_xfer_noirq(struct ata_device *dev,
-			unsigned char *buf, unsigned int buflen, int rw);
 extern int ata_std_qc_defer(struct ata_queued_cmd *qc);
-extern void ata_dumb_qc_prep(struct ata_queued_cmd *qc);
-extern void ata_qc_prep(struct ata_queued_cmd *qc);
 extern void ata_noop_qc_prep(struct ata_queued_cmd *qc);
-extern unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc);
 extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
 		 unsigned int n_elem);
 extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
@@ -889,24 +928,8 @@
 			  unsigned int ofs, unsigned int len);
 extern void ata_id_c_string(const u16 *id, unsigned char *s,
 			    unsigned int ofs, unsigned int len);
-extern void ata_bmdma_setup(struct ata_queued_cmd *qc);
-extern void ata_bmdma_start(struct ata_queued_cmd *qc);
-extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
-extern u8   ata_bmdma_status(struct ata_port *ap);
-extern void ata_bmdma_irq_clear(struct ata_port *ap);
-extern void ata_bmdma_freeze(struct ata_port *ap);
-extern void ata_bmdma_thaw(struct ata_port *ap);
-extern void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
-			       ata_reset_fn_t softreset,
-			       ata_reset_fn_t hardreset,
-			       ata_postreset_fn_t postreset);
-extern void ata_bmdma_error_handler(struct ata_port *ap);
-extern void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc);
-extern int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
-			u8 status, int in_wq);
 extern void ata_qc_complete(struct ata_queued_cmd *qc);
-extern int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
-				    void (*finish_qc)(struct ata_queued_cmd *));
+extern int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active);
 extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
 			      void (*done)(struct scsi_cmnd *));
 extern int ata_std_bios_param(struct scsi_device *sdev,
@@ -918,7 +941,6 @@
 				       int queue_depth);
 extern struct ata_device *ata_dev_pair(struct ata_device *adev);
 extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
-extern u8 ata_irq_on(struct ata_port *ap);
 
 extern int ata_cable_40wire(struct ata_port *ap);
 extern int ata_cable_80wire(struct ata_port *ap);
@@ -926,10 +948,7 @@
 extern int ata_cable_ignore(struct ata_port *ap);
 extern int ata_cable_unknown(struct ata_port *ap);
 
-/*
- * Timing helpers
- */
-
+/* Timing helpers */
 extern unsigned int ata_pio_need_iordy(const struct ata_device *);
 extern const struct ata_timing *ata_timing_find_mode(u8 xfer_mode);
 extern int ata_timing_compute(struct ata_device *, unsigned short,
@@ -939,24 +958,31 @@
 			     unsigned int);
 extern u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle);
 
-enum {
-	ATA_TIMING_SETUP	= (1 << 0),
-	ATA_TIMING_ACT8B	= (1 << 1),
-	ATA_TIMING_REC8B	= (1 << 2),
-	ATA_TIMING_CYC8B	= (1 << 3),
-	ATA_TIMING_8BIT		= ATA_TIMING_ACT8B | ATA_TIMING_REC8B |
-				  ATA_TIMING_CYC8B,
-	ATA_TIMING_ACTIVE	= (1 << 4),
-	ATA_TIMING_RECOVER	= (1 << 5),
-	ATA_TIMING_CYCLE	= (1 << 6),
-	ATA_TIMING_UDMA		= (1 << 7),
-	ATA_TIMING_ALL		= ATA_TIMING_SETUP | ATA_TIMING_ACT8B |
-				  ATA_TIMING_REC8B | ATA_TIMING_CYC8B |
-				  ATA_TIMING_ACTIVE | ATA_TIMING_RECOVER |
-				  ATA_TIMING_CYCLE | ATA_TIMING_UDMA,
+/* PCI */
+#ifdef CONFIG_PCI
+struct pci_dev;
+
+struct pci_bits {
+	unsigned int		reg;	/* PCI config register to read */
+	unsigned int		width;	/* 1 (8 bit), 2 (16 bit), 4 (32 bit) */
+	unsigned long		mask;
+	unsigned long		val;
 };
 
-/* libata-acpi.c */
+extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);
+extern void ata_pci_remove_one(struct pci_dev *pdev);
+
+#ifdef CONFIG_PM
+extern void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg);
+extern int __must_check ata_pci_device_do_resume(struct pci_dev *pdev);
+extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
+extern int ata_pci_device_resume(struct pci_dev *pdev);
+#endif /* CONFIG_PM */
+#endif /* CONFIG_PCI */
+
+/*
+ * ACPI - drivers/ata/libata-acpi.c
+ */
 #ifdef CONFIG_ATA_ACPI
 static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap)
 {
@@ -1000,56 +1026,8 @@
 }
 #endif
 
-#ifdef CONFIG_PCI
-struct pci_dev;
-
-extern int ata_pci_init_one(struct pci_dev *pdev,
-			     const struct ata_port_info * const * ppi);
-extern void ata_pci_remove_one(struct pci_dev *pdev);
-#ifdef CONFIG_PM
-extern void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg);
-extern int __must_check ata_pci_device_do_resume(struct pci_dev *pdev);
-extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
-extern int ata_pci_device_resume(struct pci_dev *pdev);
-#endif
-extern int ata_pci_clear_simplex(struct pci_dev *pdev);
-
-struct pci_bits {
-	unsigned int		reg;	/* PCI config register to read */
-	unsigned int		width;	/* 1 (8 bit), 2 (16 bit), 4 (32 bit) */
-	unsigned long		mask;
-	unsigned long		val;
-};
-
-extern int ata_pci_init_sff_host(struct ata_host *host);
-extern int ata_pci_init_bmdma(struct ata_host *host);
-extern int ata_pci_prepare_sff_host(struct pci_dev *pdev,
-				    const struct ata_port_info * const * ppi,
-				    struct ata_host **r_host);
-extern int ata_pci_activate_sff_host(struct ata_host *host,
-				     irq_handler_t irq_handler,
-				     struct scsi_host_template *sht);
-extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);
-extern unsigned long ata_pci_default_filter(struct ata_device *dev,
-					    unsigned long xfer_mask);
-#endif /* CONFIG_PCI */
-
 /*
- * PMP
- */
-extern int sata_pmp_qc_defer_cmd_switch(struct ata_queued_cmd *qc);
-extern int sata_pmp_std_prereset(struct ata_link *link, unsigned long deadline);
-extern int sata_pmp_std_hardreset(struct ata_link *link, unsigned int *class,
-				  unsigned long deadline);
-extern void sata_pmp_std_postreset(struct ata_link *link, unsigned int *class);
-extern void sata_pmp_do_eh(struct ata_port *ap,
-		ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
-		ata_reset_fn_t hardreset, ata_postreset_fn_t postreset,
-		ata_prereset_fn_t pmp_prereset, ata_reset_fn_t pmp_softreset,
-		ata_reset_fn_t pmp_hardreset, ata_postreset_fn_t pmp_postreset);
-
-/*
- * EH
+ * EH - drivers/ata/libata-eh.c
  */
 extern void ata_port_schedule_eh(struct ata_port *ap);
 extern int ata_link_abort(struct ata_link *link);
@@ -1066,6 +1044,92 @@
 extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
 		      ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
 		      ata_postreset_fn_t postreset);
+extern void ata_std_error_handler(struct ata_port *ap);
+
+/*
+ * Base operations to inherit from and initializers for sht
+ *
+ * Operations
+ *
+ * base  : Common to all libata drivers.
+ * sata  : SATA controllers w/ native interface.
+ * pmp   : SATA controllers w/ PMP support.
+ * sff   : SFF ATA controllers w/o BMDMA support.
+ * bmdma : SFF ATA controllers w/ BMDMA support.
+ *
+ * sht initializers
+ *
+ * BASE  : Common to all libata drivers.  The user must set
+ *	   sg_tablesize and dma_boundary.
+ * PIO   : SFF ATA controllers w/ only PIO support.
+ * BMDMA : SFF ATA controllers w/ BMDMA support.  sg_tablesize and
+ *	   dma_boundary are set to BMDMA limits.
+ * NCQ   : SATA controllers supporting NCQ.  The user must set
+ *	   sg_tablesize, dma_boundary and can_queue.
+ */
+extern const struct ata_port_operations ata_base_port_ops;
+extern const struct ata_port_operations sata_port_ops;
+
+#define ATA_BASE_SHT(drv_name)					\
+	.module			= THIS_MODULE,			\
+	.name			= drv_name,			\
+	.ioctl			= ata_scsi_ioctl,		\
+	.queuecommand		= ata_scsi_queuecmd,		\
+	.can_queue		= ATA_DEF_QUEUE,		\
+	.this_id		= ATA_SHT_THIS_ID,		\
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,		\
+	.emulated		= ATA_SHT_EMULATED,		\
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,	\
+	.proc_name		= drv_name,			\
+	.slave_configure	= ata_scsi_slave_config,	\
+	.slave_destroy		= ata_scsi_slave_destroy,	\
+	.bios_param		= ata_std_bios_param
+
+#define ATA_NCQ_SHT(drv_name)					\
+	ATA_BASE_SHT(drv_name),					\
+	.change_queue_depth	= ata_scsi_change_queue_depth
+
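A driver's scsi_host_template then shrinks to the initializer plus the fields the comment above leaves to the user; overriding a field after the macro is the intended idiom, since a later designated initializer wins. A sketch with assumed limits:

/* Sketch: an NCQ-capable driver's template built on ATA_NCQ_SHT. */
static struct scsi_host_template my_sht = {
	ATA_NCQ_SHT("my_sata"),
	.can_queue	= 31,			/* assumed hardware queue depth */
	.sg_tablesize	= 128,			/* assumed controller limit */
	.dma_boundary	= 0xffffffffUL,
};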
+/*
+ * PMP helpers
+ */
+#ifdef CONFIG_SATA_PMP
+static inline bool sata_pmp_supported(struct ata_port *ap)
+{
+	return ap->flags & ATA_FLAG_PMP;
+}
+
+static inline bool sata_pmp_attached(struct ata_port *ap)
+{
+	return ap->nr_pmp_links != 0;
+}
+
+static inline int ata_is_host_link(const struct ata_link *link)
+{
+	return link == &link->ap->link;
+}
+#else /* CONFIG_SATA_PMP */
+static inline bool sata_pmp_supported(struct ata_port *ap)
+{
+	return false;
+}
+
+static inline bool sata_pmp_attached(struct ata_port *ap)
+{
+	return false;
+}
+
+static inline int ata_is_host_link(const struct ata_link *link)
+{
+	return 1;
+}
+#endif /* CONFIG_SATA_PMP */
+
+static inline int sata_srst_pmp(struct ata_link *link)
+{
+	if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
+		return SATA_PMP_CTRL_PORT;
+	return link->pmp;
+}
 
 /*
  * printk helpers
@@ -1074,7 +1138,7 @@
 	printk("%sata%u: "fmt, lv, (ap)->print_id , ##args)
 
 #define ata_link_printk(link, lv, fmt, args...) do { \
-	if ((link)->ap->nr_pmp_links) \
+	if (sata_pmp_attached((link)->ap)) \
 		printk("%sata%u.%02u: "fmt, lv, (link)->ap->print_id,	\
 		       (link)->pmp , ##args); \
 	else \
@@ -1094,18 +1158,11 @@
 	__attribute__ ((format (printf, 2, 3)));
 extern void ata_ehi_clear_desc(struct ata_eh_info *ehi);
 
-static inline void ata_ehi_schedule_probe(struct ata_eh_info *ehi)
-{
-	ehi->flags |= ATA_EHI_RESUME_LINK;
-	ehi->action |= ATA_EH_SOFTRESET;
-	ehi->probe_mask |= (1 << ATA_MAX_DEVICES) - 1;
-}
-
 static inline void ata_ehi_hotplugged(struct ata_eh_info *ehi)
 {
-	ata_ehi_schedule_probe(ehi);
+	ehi->probe_mask |= (1 << ATA_MAX_DEVICES) - 1;
 	ehi->flags |= ATA_EHI_HOTPLUGGED;
-	ehi->action |= ATA_EH_ENABLE_LINK;
+	ehi->action |= ATA_EH_RESET | ATA_EH_ENABLE_LINK;
 	ehi->err_mask |= AC_ERR_ATA_BUS;
 }
 
@@ -1126,7 +1183,7 @@
 
 static inline unsigned int ata_tag_internal(unsigned int tag)
 {
-	return tag == ATA_MAX_QUEUE - 1;
+	return tag == ATA_TAG_INTERNAL;
 }
 
 /*
@@ -1167,11 +1224,6 @@
 /*
  * link helpers
  */
-static inline int ata_is_host_link(const struct ata_link *link)
-{
-	return link == &link->ap->link;
-}
-
 static inline int ata_link_max_devices(const struct ata_link *link)
 {
 	if (ata_is_host_link(link) && link->ap->flags & ATA_FLAG_SLAVE_POSS)
@@ -1186,7 +1238,7 @@
 
 static inline struct ata_link *ata_port_first_link(struct ata_port *ap)
 {
-	if (ap->nr_pmp_links)
+	if (sata_pmp_attached(ap))
 		return ap->pmp_link;
 	return &ap->link;
 }
@@ -1195,8 +1247,8 @@
 {
 	struct ata_port *ap = link->ap;
 
-	if (link == &ap->link) {
-		if (!ap->nr_pmp_links)
+	if (ata_is_host_link(link)) {
+		if (!sata_pmp_attached(ap))
 			return NULL;
 		return ap->pmp_link;
 	}
@@ -1222,11 +1274,6 @@
 	for ((dev) = (link)->device + ata_link_max_devices(link) - 1; \
 	     (dev) >= (link)->device || ((dev) = NULL); (dev)--)
 
-static inline u8 ata_chk_status(struct ata_port *ap)
-{
-	return ap->ops->check_status(ap);
-}
-
 /**
  *	ata_ncq_enabled - Test whether NCQ is enabled
  *	@dev: ATA device to test for
@@ -1243,74 +1290,6 @@
 			      ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ;
 }
 
-/**
- *	ata_pause - Flush writes and pause 400 nanoseconds.
- *	@ap: Port to wait for.
- *
- *	LOCKING:
- *	Inherited from caller.
- */
-
-static inline void ata_pause(struct ata_port *ap)
-{
-	ata_altstatus(ap);
-	ndelay(400);
-}
-
-
-/**
- *	ata_busy_wait - Wait for a port status register
- *	@ap: Port to wait for.
- *	@bits: bits that must be clear
- *	@max: number of 10uS waits to perform
- *
- *	Waits up to max*10 microseconds for the selected bits in the port's
- *	status register to be cleared.
- *	Returns final value of status register.
- *
- *	LOCKING:
- *	Inherited from caller.
- */
-
-static inline u8 ata_busy_wait(struct ata_port *ap, unsigned int bits,
-			       unsigned int max)
-{
-	u8 status;
-
-	do {
-		udelay(10);
-		status = ata_chk_status(ap);
-		max--;
-	} while (status != 0xff && (status & bits) && (max > 0));
-
-	return status;
-}
-
-
-/**
- *	ata_wait_idle - Wait for a port to be idle.
- *	@ap: Port to wait for.
- *
- *	Waits up to 10ms for port's BUSY and DRQ signals to clear.
- *	Returns final value of status register.
- *
- *	LOCKING:
- *	Inherited from caller.
- */
-
-static inline u8 ata_wait_idle(struct ata_port *ap)
-{
-	u8 status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
-
-#ifdef ATA_DEBUG
-	if (status != 0xff && (status & (ATA_BUSY | ATA_DRQ)))
-		ata_port_printk(ap, KERN_DEBUG, "abnormal Status 0x%X\n",
-				status);
-#endif
-
-	return status;
-}
-
 static inline void ata_qc_set_polling(struct ata_queued_cmd *qc)
 {
 	qc->tf.ctl |= ATA_NIEN;
@@ -1403,4 +1382,171 @@
 	return *(struct ata_port **)&host->hostdata[0];
 }
 
+
+/**************************************************************************
+ * PMP - drivers/ata/libata-pmp.c
+ */
+#ifdef CONFIG_SATA_PMP
+
+extern const struct ata_port_operations sata_pmp_port_ops;
+
+extern int sata_pmp_qc_defer_cmd_switch(struct ata_queued_cmd *qc);
+extern void sata_pmp_error_handler(struct ata_port *ap);
+
+#else /* CONFIG_SATA_PMP */
+
+#define sata_pmp_port_ops		sata_port_ops
+#define sata_pmp_qc_defer_cmd_switch	ata_std_qc_defer
+#define sata_pmp_error_handler		ata_std_error_handler
+
+#endif /* CONFIG_SATA_PMP */
+
+
+/**************************************************************************
+ * SFF - drivers/ata/libata-sff.c
+ */
+#ifdef CONFIG_ATA_SFF
+
+extern const struct ata_port_operations ata_sff_port_ops;
+extern const struct ata_port_operations ata_bmdma_port_ops;
+
+/* PIO only, sg_tablesize and dma_boundary limits can be removed */
+#define ATA_PIO_SHT(drv_name)					\
+	ATA_BASE_SHT(drv_name),					\
+	.sg_tablesize		= LIBATA_MAX_PRD,		\
+	.dma_boundary		= ATA_DMA_BOUNDARY
+
+#define ATA_BMDMA_SHT(drv_name)					\
+	ATA_BASE_SHT(drv_name),					\
+	.sg_tablesize		= LIBATA_MAX_PRD,		\
+	.dma_boundary		= ATA_DMA_BOUNDARY
+
+extern void ata_sff_qc_prep(struct ata_queued_cmd *qc);
+extern void ata_sff_dumb_qc_prep(struct ata_queued_cmd *qc);
+extern void ata_sff_dev_select(struct ata_port *ap, unsigned int device);
+extern u8 ata_sff_check_status(struct ata_port *ap);
+extern u8 ata_sff_altstatus(struct ata_port *ap);
+extern int ata_sff_busy_sleep(struct ata_port *ap,
+			      unsigned long timeout_pat, unsigned long timeout);
+extern int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline);
+extern void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
+extern void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
+extern void ata_sff_exec_command(struct ata_port *ap,
+				 const struct ata_taskfile *tf);
+extern unsigned int ata_sff_data_xfer(struct ata_device *dev,
+			unsigned char *buf, unsigned int buflen, int rw);
+extern unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev,
+			unsigned char *buf, unsigned int buflen, int rw);
+extern u8 ata_sff_irq_on(struct ata_port *ap);
+extern void ata_sff_irq_clear(struct ata_port *ap);
+extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
+			    u8 status, int in_wq);
+extern unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc);
+extern bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc);
+extern unsigned int ata_sff_host_intr(struct ata_port *ap,
+				      struct ata_queued_cmd *qc);
+extern irqreturn_t ata_sff_interrupt(int irq, void *dev_instance);
+extern void ata_sff_freeze(struct ata_port *ap);
+extern void ata_sff_thaw(struct ata_port *ap);
+extern int ata_sff_prereset(struct ata_link *link, unsigned long deadline);
+extern unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
+					  u8 *r_err);
+extern int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask,
+				    unsigned long deadline);
+extern int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
+			     unsigned long deadline);
+extern int sata_sff_hardreset(struct ata_link *link, unsigned int *class,
+			       unsigned long deadline);
+extern void ata_sff_postreset(struct ata_link *link, unsigned int *classes);
+extern void ata_sff_error_handler(struct ata_port *ap);
+extern void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc);
+extern int ata_sff_port_start(struct ata_port *ap);
+extern void ata_sff_std_ports(struct ata_ioports *ioaddr);
+extern unsigned long ata_bmdma_mode_filter(struct ata_device *dev,
+					   unsigned long xfer_mask);
+extern void ata_bmdma_setup(struct ata_queued_cmd *qc);
+extern void ata_bmdma_start(struct ata_queued_cmd *qc);
+extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
+extern u8 ata_bmdma_status(struct ata_port *ap);
+extern void ata_bus_reset(struct ata_port *ap);
+
+#ifdef CONFIG_PCI
+extern int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev);
+extern int ata_pci_bmdma_init(struct ata_host *host);
+extern int ata_pci_sff_init_host(struct ata_host *host);
+extern int ata_pci_sff_prepare_host(struct pci_dev *pdev,
+				    const struct ata_port_info * const * ppi,
+				    struct ata_host **r_host);
+extern int ata_pci_sff_activate_host(struct ata_host *host,
+				     irq_handler_t irq_handler,
+				     struct scsi_host_template *sht);
+extern int ata_pci_sff_init_one(struct pci_dev *pdev,
+				const struct ata_port_info * const * ppi,
+				struct scsi_host_template *sht, void *host_priv);
+#endif /* CONFIG_PCI */
+
+/**
+ *	ata_sff_pause - Flush writes and pause 400 nanoseconds.
+ *	@ap: Port to wait for.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+static inline void ata_sff_pause(struct ata_port *ap)
+{
+	ata_sff_altstatus(ap);
+	ndelay(400);
+}
+
+/**
+ *	ata_sff_busy_wait - Wait for a port status register
+ *	@ap: Port to wait for.
+ *	@bits: bits that must be clear
+ *	@max: number of 10us waits to perform
+ *
+ *	Waits up to max*10 microseconds for the selected bits in the port's
+ *	status register to be cleared.
+ *	Returns final value of status register.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+static inline u8 ata_sff_busy_wait(struct ata_port *ap, unsigned int bits,
+				   unsigned int max)
+{
+	u8 status;
+
+	do {
+		udelay(10);
+		status = ap->ops->sff_check_status(ap);
+		max--;
+	} while (status != 0xff && (status & bits) && (max > 0));
+
+	return status;
+}
+
+/**
+ *	ata_wait_idle - Wait for a port to be idle.
+ *	@ap: Port to wait for.
+ *
+ *	Waits up to 10ms for port's BUSY and DRQ signals to clear.
+ *	Returns final value of status register.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+static inline u8 ata_wait_idle(struct ata_port *ap)
+{
+	u8 status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
+
+#ifdef ATA_DEBUG
+	if (status != 0xff && (status & (ATA_BUSY | ATA_DRQ)))
+		ata_port_printk(ap, KERN_DEBUG, "abnormal Status 0x%X\n",
+				status);
+#endif
+
+	return status;
+}
+#endif /* CONFIG_ATA_SFF */
+
 #endif /* __LINUX_LIBATA_H__ */
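The SFF helpers above encode a common polling idiom: flush posted writes with
ata_sff_pause(), then spin on the status register until BSY and DRQ drop. A
minimal sketch of how a PIO driver might combine them after issuing a command
(the function name is hypothetical; real callers sit inside the SFF HSM):

	#include <linux/libata.h>

	/* Hypothetical sketch: wait for the device to go idle after a
	 * command, then check for an error.  ap is assumed to be a
	 * fully initialized SFF port.
	 */
	static int example_wait_cmd(struct ata_port *ap)
	{
		u8 status;

		ata_sff_pause(ap);		/* flush writes, wait 400ns */
		status = ata_wait_idle(ap);	/* poll BSY|DRQ for up to 10ms */
		if (status & (ATA_BUSY | ATA_DRQ))
			return -EBUSY;		/* device is still busy */
		if (status & ATA_ERR)
			return -EIO;
		return 0;
	}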
diff --git a/include/linux/lm_interface.h b/include/linux/lm_interface.h
index 1418fdc..f274997 100644
--- a/include/linux/lm_interface.h
+++ b/include/linux/lm_interface.h
@@ -21,9 +21,15 @@
  * modify the filesystem.  The lock module shouldn't assign a journal to the FS
  * mount.  It shouldn't send recovery callbacks to the FS mount.  If the node
  * dies or withdraws, all locks can be wiped immediately.
+ *
+ * LM_MFLAG_CONV_NODROP
+ * Do not allow the dlm to internally resolve conversion deadlocks by demoting
+ * the lock to unlocked and then reacquiring it in the requested mode. Instead,
+ * it should cancel the request and return LM_OUT_CONV_DEADLK.
  */
 
 #define LM_MFLAG_SPECTATOR	0x00000001
+#define LM_MFLAG_CONV_NODROP	0x00000002
 
 /*
  * lm_lockstruct flags
@@ -110,6 +116,9 @@
  *
  * LM_OUT_ASYNC
  * The result of the request will be returned in an LM_CB_ASYNC callback.
+ *
+ * LM_OUT_CONV_DEADLK
+ * The lock request was canceled due to a conversion deadlock.
  */
 
 #define LM_OUT_ST_MASK		0x00000003
@@ -117,6 +126,7 @@
 #define LM_OUT_CANCELED		0x00000008
 #define LM_OUT_ASYNC		0x00000080
 #define LM_OUT_ERROR		0x00000100
+#define LM_OUT_CONV_DEADLK	0x00000200
 
 /*
  * lm_callback_t types
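With LM_MFLAG_CONV_NODROP set, a conversion deadlock is no longer resolved by
silently dropping the lock to unlocked; the request is canceled and completes
with LM_OUT_CONV_DEADLK. A hedged sketch of the status handling a filesystem's
completion callback might do (the callback name and fsdata type are hypothetical):

	/* Hypothetical async-lock completion: distinguish a canceled
	 * conversion from a normal grant.
	 */
	static void example_lock_done(void *fsdata, unsigned int lm_ret)
	{
		if (lm_ret & LM_OUT_CONV_DEADLK) {
			/* conversion canceled to break a deadlock;
			 * back off and re-request from scratch */
			return;
		}
		if (lm_ret & LM_OUT_CANCELED)
			return;		/* an explicit cancel completed */

		/* granted: the new state is in (lm_ret & LM_OUT_ST_MASK) */
	}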
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 7d1eaa9..77323a7 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -81,7 +81,7 @@
 	MLX4_CMD_SW2HW_CQ	 = 0x16,
 	MLX4_CMD_HW2SW_CQ	 = 0x17,
 	MLX4_CMD_QUERY_CQ	 = 0x18,
-	MLX4_CMD_RESIZE_CQ	 = 0x2c,
+	MLX4_CMD_MODIFY_CQ	 = 0x2c,
 
 	/* SRQ commands */
 	MLX4_CMD_SW2HW_SRQ	 = 0x35,
diff --git a/include/linux/mlx4/cq.h b/include/linux/mlx4/cq.h
index 0181e0a..071cf96 100644
--- a/include/linux/mlx4/cq.h
+++ b/include/linux/mlx4/cq.h
@@ -45,11 +45,11 @@
 	u8			sl;
 	u8			reserved1;
 	__be16			rlid;
-	u32			reserved2;
+	__be32			ipoib_status;
 	__be32			byte_cnt;
 	__be16			wqe_index;
 	__be16			checksum;
-	u8			reserved3[3];
+	u8			reserved2[3];
 	u8			owner_sr_opcode;
 };
 
@@ -85,6 +85,16 @@
 	MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR		= 0x22,
 };
 
+enum {
+	MLX4_CQE_IPOIB_STATUS_IPV4			= 1 << 22,
+	MLX4_CQE_IPOIB_STATUS_IPV4F			= 1 << 23,
+	MLX4_CQE_IPOIB_STATUS_IPV6			= 1 << 24,
+	MLX4_CQE_IPOIB_STATUS_IPV4OPT			= 1 << 25,
+	MLX4_CQE_IPOIB_STATUS_TCP			= 1 << 26,
+	MLX4_CQE_IPOIB_STATUS_UDP			= 1 << 27,
+	MLX4_CQE_IPOIB_STATUS_IPOK			= 1 << 28,
+};
+
 static inline void mlx4_cq_arm(struct mlx4_cq *cq, u32 cmd,
 			       void __iomem *uar_page,
 			       spinlock_t *doorbell_lock)
@@ -120,4 +130,9 @@
 	MLX4_CQ_DB_REQ_NOT		= 2 << 24
 };
 
+int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
+		   u16 count, u16 period);
+int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
+		   int entries, struct mlx4_mtt *mtt);
+
 #endif /* MLX4_CQ_H */
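mlx4_cq_modify() maps onto the renamed MLX4_CMD_MODIFY_CQ firmware command and
adjusts event moderation: the completion count and period (in usec) that gate
event generation. A minimal sketch with illustrative values only:

	#include <linux/mlx4/cq.h>

	/* Hypothetical: coalesce events to at most one per 64 CQEs
	 * or one per 16 usec.
	 */
	static int example_tune_moderation(struct mlx4_dev *dev,
					   struct mlx4_cq *cq)
	{
		return mlx4_cq_modify(dev, cq, 64, 16);
	}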
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 6cdf813..ff7df1a 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -186,6 +186,7 @@
 	u32			flags;
 	u16			stat_rate_support;
 	u8			port_width_cap[MLX4_MAX_PORTS + 1];
+	int			max_gso_sz;
 };
 
 struct mlx4_buf_list {
diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h
index 1b835ca..53c5fdb 100644
--- a/include/linux/mlx4/driver.h
+++ b/include/linux/mlx4/driver.h
@@ -48,8 +48,7 @@
 	void *			(*add)	 (struct mlx4_dev *dev);
 	void			(*remove)(struct mlx4_dev *dev, void *context);
 	void			(*event) (struct mlx4_dev *dev, void *context,
-					  enum mlx4_dev_event event, int subtype,
-					  int port);
+					  enum mlx4_dev_event event, int port);
 	struct list_head	list;
 };
 
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 09a2230..a5e43fe 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -158,10 +158,12 @@
 #define MLX4_FW_VER_WQE_CTRL_NEC mlx4_fw_ver(2, 2, 232)
 
 enum {
-	MLX4_WQE_CTRL_NEC	= 1 << 29,
-	MLX4_WQE_CTRL_FENCE	= 1 << 6,
-	MLX4_WQE_CTRL_CQ_UPDATE	= 3 << 2,
-	MLX4_WQE_CTRL_SOLICITED	= 1 << 1,
+	MLX4_WQE_CTRL_NEC		= 1 << 29,
+	MLX4_WQE_CTRL_FENCE		= 1 << 6,
+	MLX4_WQE_CTRL_CQ_UPDATE		= 3 << 2,
+	MLX4_WQE_CTRL_SOLICITED		= 1 << 1,
+	MLX4_WQE_CTRL_IP_CSUM		= 1 << 4,
+	MLX4_WQE_CTRL_TCP_UDP_CSUM	= 1 << 5,
 };
 
 struct mlx4_wqe_ctrl_seg {
@@ -217,6 +219,11 @@
 	__be32			reservd[2];
 };
 
+struct mlx4_lso_seg {
+	__be32			mss_hdr_size;
+	__be32			header[0];
+};
+
 struct mlx4_wqe_bind_seg {
 	__be32			flags1;
 	__be32			flags2;
diff --git a/include/linux/mtio.h b/include/linux/mtio.h
index 6f8d2d4..ef01d6a 100644
--- a/include/linux/mtio.h
+++ b/include/linux/mtio.h
@@ -192,6 +192,7 @@
 #define MT_ST_SCSI2LOGICAL      0x800
 #define MT_ST_SYSV              0x1000
 #define MT_ST_NOWAIT            0x2000
+#define MT_ST_SILI		0x4000
 
 /* The mode parameters to be controlled. Parameter chosen with bits 20-28 */
 #define MT_ST_CLEAR_DEFAULT	0xfffff
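MT_ST_SILI asks the st tape driver to set the SILI bit on variable-block reads,
suppressing the incorrect-length indication and the extra round trip it costs.
An untested user-space sketch of enabling it through the usual MTSETDRVBUFFER
path (the device path is illustrative):

	#include <sys/ioctl.h>
	#include <sys/mtio.h>
	#include <fcntl.h>
	#include <unistd.h>

	/* MT_ST_BOOLEANS marks mt_count as a bitmap of boolean
	 * driver options to set.
	 */
	int example_enable_sili(const char *dev)
	{
		struct mtop op = {
			.mt_op    = MTSETDRVBUFFER,
			.mt_count = MT_ST_BOOLEANS | MT_ST_SILI,
		};
		int fd = open(dev, O_RDONLY);
		int ret;

		if (fd < 0)
			return -1;
		ret = ioctl(fd, MTIOCTOP, &op);
		close(fd);
		return ret;
	}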
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 6e0393a..eb560d0 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -160,14 +160,18 @@
 
 
 #ifdef __KERNEL__
-#include <linux/spinlock.h>
-#include <linux/rwsem.h>
+#include <linux/list.h>
 #include <linux/mutex.h>
+#include <linux/rwsem.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
 
 #include <linux/dqblk_xfs.h>
 #include <linux/dqblk_v1.h>
 #include <linux/dqblk_v2.h>
 
+#include <asm/atomic.h>
+
 extern spinlock_t dq_data_lock;
 
 /* Maximal numbers of writes for quota operation (insert/delete/update)
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index a3d567a..71fc813 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -213,6 +213,11 @@
 		     sg_alloc_fn *);
 int sg_alloc_table(struct sg_table *, unsigned int, gfp_t);
 
+size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
+			   void *buf, size_t buflen);
+size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
+			 void *buf, size_t buflen);
+
 /*
  * Maximum number of entries that will be allocated in one piece, if
  * a list larger than this is required then chaining will be utilized.
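sg_copy_from_buffer()/sg_copy_to_buffer() move a linear buffer into or out of a
scatterlist without the caller open-coding a page walk; both return the number
of bytes actually copied. A minimal sketch (the sg list is assumed to be
already set up; names are illustrative):

	#include <linux/scatterlist.h>

	/* Hypothetical: stage a linear response into an sg list and
	 * fail if the list cannot hold all of it.
	 */
	static int example_fill_sg(struct scatterlist *sgl,
				   unsigned int nents,
				   void *resp, size_t resp_len)
	{
		size_t copied = sg_copy_from_buffer(sgl, nents,
						    resp, resp_len);

		return copied == resp_len ? 0 : -ENOSPC;
	}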
diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
new file mode 100644
index 0000000..9cae64b
--- /dev/null
+++ b/include/linux/semaphore.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2008 Intel Corporation
+ * Author: Matthew Wilcox <willy@linux.intel.com>
+ *
+ * Distributed under the terms of the GNU GPL, version 2
+ *
+ * Please see kernel/semaphore.c for documentation of these functions
+ */
+#ifndef __LINUX_SEMAPHORE_H
+#define __LINUX_SEMAPHORE_H
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+/* Please don't access any members of this structure directly */
+struct semaphore {
+	spinlock_t		lock;
+	unsigned int		count;
+	struct list_head	wait_list;
+};
+
+#define __SEMAPHORE_INITIALIZER(name, n)				\
+{									\
+	.lock		= __SPIN_LOCK_UNLOCKED((name).lock),		\
+	.count		= n,						\
+	.wait_list	= LIST_HEAD_INIT((name).wait_list),		\
+}
+
+#define __DECLARE_SEMAPHORE_GENERIC(name, count) \
+	struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)
+
+#define DECLARE_MUTEX(name)	__DECLARE_SEMAPHORE_GENERIC(name, 1)
+
+static inline void sema_init(struct semaphore *sem, int val)
+{
+	static struct lock_class_key __key;
+	*sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val);
+	lockdep_init_map(&sem->lock.dep_map, "semaphore->lock", &__key, 0);
+}
+
+#define init_MUTEX(sem)		sema_init(sem, 1)
+#define init_MUTEX_LOCKED(sem)	sema_init(sem, 0)
+
+extern void down(struct semaphore *sem);
+extern int __must_check down_interruptible(struct semaphore *sem);
+extern int __must_check down_killable(struct semaphore *sem);
+extern int __must_check down_trylock(struct semaphore *sem);
+extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
+extern void up(struct semaphore *sem);
+
+#endif /* __LINUX_SEMAPHORE_H */
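The API surface is deliberately small: sema_init() sets the count, the down()
family consumes one unit (sleeping as needed), up() releases one. A minimal
sketch limiting a resource to four concurrent users (all names are illustrative):

	#include <linux/semaphore.h>
	#include <linux/errno.h>

	static struct semaphore example_sem;

	static void example_setup(void)
	{
		sema_init(&example_sem, 4);	/* four units available */
	}

	static int example_use(void)
	{
		/* sleeps until a unit is free; fails if interrupted */
		if (down_interruptible(&example_sem))
			return -EINTR;

		/* ... at most four tasks run this section at once ... */

		up(&example_sem);
		return 0;
	}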
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 289942f..7cb094a8 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -213,6 +213,10 @@
 	void		(*config_port)(struct uart_port *, int);
 	int		(*verify_port)(struct uart_port *, struct serial_struct *);
 	int		(*ioctl)(struct uart_port *, unsigned int, unsigned long);
+#ifdef CONFIG_CONSOLE_POLL
+	void		(*poll_put_char)(struct uart_port *, unsigned char);
+	int		(*poll_get_char)(struct uart_port *);
+#endif
 };
 
 #define UART_CONFIG_TYPE	(1 << 0)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index b00c1c7..79d59c9 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -45,9 +45,9 @@
 struct kmem_cache_node {
 	spinlock_t list_lock;	/* Protect partial list and nr_partial */
 	unsigned long nr_partial;
-	atomic_long_t nr_slabs;
 	struct list_head partial;
 #ifdef CONFIG_SLUB_DEBUG
+	atomic_long_t nr_slabs;
 	struct list_head full;
 #endif
 };
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 1129ee0..d311a09 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -296,43 +296,6 @@
 })
 
 /*
- * Locks two spinlocks l1 and l2.
- * l1_first indicates if spinlock l1 should be taken first.
- */
-static inline void double_spin_lock(spinlock_t *l1, spinlock_t *l2,
-				    bool l1_first)
-	__acquires(l1)
-	__acquires(l2)
-{
-	if (l1_first) {
-		spin_lock(l1);
-		spin_lock(l2);
-	} else {
-		spin_lock(l2);
-		spin_lock(l1);
-	}
-}
-
-/*
- * Unlocks two spinlocks l1 and l2.
- * l1_taken_first indicates if spinlock l1 was taken first and therefore
- * should be released after spinlock l2.
- */
-static inline void double_spin_unlock(spinlock_t *l1, spinlock_t *l2,
-				      bool l1_taken_first)
-	__releases(l1)
-	__releases(l2)
-{
-	if (l1_taken_first) {
-		spin_unlock(l2);
-		spin_unlock(l1);
-	} else {
-		spin_unlock(l1);
-		spin_unlock(l2);
-	}
-}
-
-/*
  * Pull the atomic_t declaration:
  * (asm-mips/atomic.h needs above definitions)
  */
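Callers of the removed helpers switch to taking the two locks in a fixed order
and annotating the inner acquisition for lockdep, exactly as the hrtimer
migration code does later in this merge. A sketch of the replacement pattern
(names are illustrative):

	/* Open-coded replacement for double_spin_lock(): both locks
	 * share a lockdep class, so the inner one needs the
	 * SINGLE_DEPTH_NESTING annotation.
	 */
	static void example_lock_pair(spinlock_t *outer, spinlock_t *inner)
	{
		spin_lock(outer);
		spin_lock_nested(inner, SINGLE_DEPTH_NESTING);
	}

	static void example_unlock_pair(spinlock_t *outer, spinlock_t *inner)
	{
		spin_unlock(inner);
		spin_unlock(outer);
	}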
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 421323e..accd7ba 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -9,6 +9,9 @@
 
 #include <linux/types.h>
 
+struct timespec;
+struct compat_timespec;
+
 /*
  * System call restart block.
  */
@@ -26,6 +29,15 @@
 			u32 bitset;
 			u64 time;
 		} futex;
+		/* For nanosleep */
+		struct {
+			clockid_t index;
+			struct timespec __user *rmtp;
+#ifdef CONFIG_COMPAT
+			struct compat_timespec __user *compat_rmtp;
+#endif
+			u64 expires;
+		} nanosleep;
 	};
 };
 
diff --git a/include/linux/transport_class.h b/include/linux/transport_class.h
index 1d6cc22..6696cf7 100644
--- a/include/linux/transport_class.h
+++ b/include/linux/transport_class.h
@@ -86,9 +86,10 @@
 	return attribute_container_register(&tc->ac);
 }
 
-static inline int transport_container_unregister(struct transport_container *tc)
+static inline void transport_container_unregister(struct transport_container *tc)
 {
-	return attribute_container_unregister(&tc->ac);
+	if (unlikely(attribute_container_unregister(&tc->ac)))
+		BUG();
 }
 
 int transport_class_register(struct transport_class *);
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index 85c95cd..21f69ac 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -125,6 +125,7 @@
 #include <linux/cdev.h>
 
 struct tty_struct;
+struct tty_driver;
 
 struct tty_operations {
 	int  (*open)(struct tty_struct * tty, struct file * filp);
@@ -157,6 +158,11 @@
 	int (*tiocmget)(struct tty_struct *tty, struct file *file);
 	int (*tiocmset)(struct tty_struct *tty, struct file *file,
 			unsigned int set, unsigned int clear);
+#ifdef CONFIG_CONSOLE_POLL
+	int (*poll_init)(struct tty_driver *driver, int line, char *options);
+	int (*poll_get_char)(struct tty_driver *driver, int line);
+	void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
+#endif
 };
 
 struct tty_driver {
@@ -220,6 +226,11 @@
 	int (*tiocmget)(struct tty_struct *tty, struct file *file);
 	int (*tiocmset)(struct tty_struct *tty, struct file *file,
 			unsigned int set, unsigned int clear);
+#ifdef CONFIG_CONSOLE_POLL
+	int (*poll_init)(struct tty_driver *driver, int line, char *options);
+	int (*poll_get_char)(struct tty_driver *driver, int line);
+	void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
+#endif
 
 	struct list_head tty_drivers;
 };
@@ -230,6 +241,7 @@
 void put_tty_driver(struct tty_driver *driver);
 void tty_set_operations(struct tty_driver *driver,
 			const struct tty_operations *op);
+extern struct tty_driver *tty_find_polling_driver(char *name, int *line);
 
 /* tty driver magic number */
 #define TTY_DRIVER_MAGIC		0x5402
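tty_find_polling_driver() resolves a name such as "ttyS0" to a driver that
implements the new poll hooks, which a debugger front end (kgdboc-style) can
then drive with interrupts off. A hedged sketch, assuming CONFIG_CONSOLE_POLL
and a driver that accepts a NULL options string (names are illustrative):

	/* Hypothetical: bind to a polled console and echo one character. */
	static int example_poll_echo(char *name)
	{
		int line, ch;
		struct tty_driver *drv = tty_find_polling_driver(name, &line);

		if (!drv)
			return -ENODEV;
		if (drv->poll_init(drv, line, NULL))
			return -EIO;

		ch = drv->poll_get_char(drv, line);	/* busy-waits for input */
		drv->poll_put_char(drv, line, ch);
		return 0;
	}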
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 975c963..fec6dec 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -84,4 +84,26 @@
 		ret;					\
 	})
 
+/*
+ * probe_kernel_read(): safely attempt to read from a location
+ * @dst: pointer to the buffer that shall take the data
+ * @src: address to read from
+ * @size: size of the data chunk
+ *
+ * Safely read from address @src to the buffer at @dst.  If a kernel fault
+ * happens, handle that and return -EFAULT.
+ */
+extern long probe_kernel_read(void *dst, void *src, size_t size);
+
+/*
+ * probe_kernel_write(): safely attempt to write to a location
+ * @dst: address to write to
+ * @src: pointer to the data that shall be written
+ * @size: size of the data chunk
+ *
+ * Safely write to address @dst from the buffer at @src.  If a kernel fault
+ * happens, handle that and return -EFAULT.
+ */
+extern long probe_kernel_write(void *dst, void *src, size_t size);
+
 #endif		/* __LINUX_UACCESS_H__ */
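probe_kernel_read()/probe_kernel_write() are what let the KGDB core, added
later in this merge, touch arbitrary kernel addresses without oopsing: a fault
is caught and comes back as -EFAULT. A minimal sketch (the function name is
hypothetical):

	#include <linux/uaccess.h>

	/* Hypothetical: fetch one word from an untrusted kernel
	 * address; returns -EFAULT instead of faulting.
	 */
	static long example_peek(unsigned long addr, unsigned long *val)
	{
		return probe_kernel_read(val, (void *)addr, sizeof(*val));
	}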
diff --git a/include/rdma/ib_user_verbs.h b/include/rdma/ib_user_verbs.h
index 64a721f..8d65bf0 100644
--- a/include/rdma/ib_user_verbs.h
+++ b/include/rdma/ib_user_verbs.h
@@ -533,7 +533,10 @@
 	__u32 num_sge;
 	__u32 opcode;
 	__u32 send_flags;
-	__u32 imm_data;
+	union {
+		__u32 imm_data;
+		__u32 invalidate_rkey;
+	} ex;
 	union {
 		struct {
 			__u64 remote_addr;
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 701e7b4..95bf4ba 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -94,7 +94,7 @@
 	IB_DEVICE_SRQ_RESIZE		= (1<<13),
 	IB_DEVICE_N_NOTIFY_CQ		= (1<<14),
 	IB_DEVICE_ZERO_STAG		= (1<<15),
-	IB_DEVICE_SEND_W_INV		= (1<<16),
+	IB_DEVICE_RESERVED		= (1<<16), /* old SEND_W_INV */
 	IB_DEVICE_MEM_WINDOW		= (1<<17),
 	/*
 	 * Devices should set IB_DEVICE_UD_IP_SUM if they support
@@ -104,6 +104,8 @@
 	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
 	 */
 	IB_DEVICE_UD_IP_CSUM		= (1<<18),
+	IB_DEVICE_UD_TSO		= (1<<19),
+	IB_DEVICE_SEND_W_INV		= (1<<21),
 };
 
 enum ib_atomic_cap {
@@ -411,6 +413,7 @@
 	IB_WC_COMP_SWAP,
 	IB_WC_FETCH_ADD,
 	IB_WC_BIND_MW,
+	IB_WC_LSO,
 /*
  * Set value of IB_WC_RECV so consumers can test if a completion is a
  * receive by testing (opcode & IB_WC_RECV).
@@ -495,6 +498,10 @@
 	IB_QPT_RAW_ETY
 };
 
+enum ib_qp_create_flags {
+	IB_QP_CREATE_IPOIB_UD_LSO	= 1 << 0,
+};
+
 struct ib_qp_init_attr {
 	void                  (*event_handler)(struct ib_event *, void *);
 	void		       *qp_context;
@@ -504,6 +511,7 @@
 	struct ib_qp_cap	cap;
 	enum ib_sig_type	sq_sig_type;
 	enum ib_qp_type		qp_type;
+	enum ib_qp_create_flags	create_flags;
 	u8			port_num; /* special QP types only */
 };
 
@@ -617,7 +625,9 @@
 	IB_WR_SEND_WITH_IMM,
 	IB_WR_RDMA_READ,
 	IB_WR_ATOMIC_CMP_AND_SWP,
-	IB_WR_ATOMIC_FETCH_AND_ADD
+	IB_WR_ATOMIC_FETCH_AND_ADD,
+	IB_WR_LSO,
+	IB_WR_SEND_WITH_INV,
 };
 
 enum ib_send_flags {
@@ -641,7 +651,10 @@
 	int			num_sge;
 	enum ib_wr_opcode	opcode;
 	int			send_flags;
-	__be32			imm_data;
+	union {
+		__be32		imm_data;
+		u32		invalidate_rkey;
+	} ex;
 	union {
 		struct {
 			u64	remote_addr;
@@ -655,6 +668,9 @@
 		} atomic;
 		struct {
 			struct ib_ah *ah;
+			void   *header;
+			int     hlen;
+			int     mss;
 			u32	remote_qpn;
 			u32	remote_qkey;
 			u16	pkey_index; /* valid for GSI only */
@@ -730,7 +746,7 @@
 	struct ib_ucontext     *context;	/* associated user context */
 	void		       *object;		/* containing object */
 	struct list_head	list;		/* link to context's list */
-	u32			id;		/* index into kernel idr */
+	int			id;		/* index into kernel idr */
 	struct kref		ref;
 	struct rw_semaphore	mutex;		/* protects .live */
 	int			live;
@@ -971,6 +987,8 @@
 						int comp_vector,
 						struct ib_ucontext *context,
 						struct ib_udata *udata);
+	int                        (*modify_cq)(struct ib_cq *cq, u16 cq_count,
+						u16 cq_period);
 	int                        (*destroy_cq)(struct ib_cq *cq);
 	int                        (*resize_cq)(struct ib_cq *cq, int cqe,
 						struct ib_udata *udata);
@@ -1376,6 +1394,15 @@
 int ib_resize_cq(struct ib_cq *cq, int cqe);
 
 /**
+ * ib_modify_cq - Modifies the moderation parameters of the CQ
+ * @cq: The CQ to modify.
+ * @cq_count: number of CQEs that will trigger an event
+ * @cq_period: max period of time in usec before triggering an event
+ */
+int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
+
+/**
  * ib_destroy_cq - Destroys the specified CQ.
  * @cq: The CQ to destroy.
  */
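ib_modify_cq() dispatches to the new modify_cq device method, so a ULP can tune
completion-event moderation at run time; the call presumably fails on drivers
that do not implement the method. A minimal sketch with illustrative values:

	#include <rdma/ib_verbs.h>

	/* Hypothetical: request at most one event per 32 CQEs or 10 usec. */
	static int example_moderate(struct ib_cq *cq)
	{
		return ib_modify_cq(cq, 32, 10);
	}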
diff --git a/include/scsi/iscsi_proto.h b/include/scsi/iscsi_proto.h
index 5ffec8a..e0593bf 100644
--- a/include/scsi/iscsi_proto.h
+++ b/include/scsi/iscsi_proto.h
@@ -112,6 +112,7 @@
 
 #define ISCSI_AHSTYPE_CDB		1
 #define ISCSI_AHSTYPE_RLENGTH		2
+#define ISCSI_CDB_SIZE			16
 
 /* iSCSI PDU Header */
 struct iscsi_cmd {
@@ -125,7 +126,7 @@
 	__be32 data_length;
 	__be32 cmdsn;
 	__be32 exp_statsn;
-	uint8_t cdb[16];	/* SCSI Command Block */
+	uint8_t cdb[ISCSI_CDB_SIZE];	/* SCSI Command Block */
 	/* Additional Data (Command Dependent) */
 };
 
@@ -154,7 +155,8 @@
 	__be16 ahslength;	/* CDB length - 15, including reserved byte */
 	uint8_t ahstype;
 	uint8_t reserved;
-	uint8_t ecdb[260 - 16];	/* 4-byte aligned extended CDB spillover */
+	/* 4-byte aligned extended CDB spillover */
+	uint8_t ecdb[260 - ISCSI_CDB_SIZE];
 };
 
 /* SCSI Response Header */
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 39e1cac..98724ba 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -677,4 +677,6 @@
 				  struct ssp_response_iu *iu);
 struct sas_phy *sas_find_local_phy(struct domain_device *dev);
 
+int sas_request_addr(struct Scsi_Host *shost, u8 *addr);
+
 #endif /* _SASLIB_H_ */
diff --git a/include/scsi/sas_ata.h b/include/scsi/sas_ata.h
index dd5edc9..c583193 100644
--- a/include/scsi/sas_ata.h
+++ b/include/scsi/sas_ata.h
@@ -47,12 +47,12 @@
 {
 	return 0;
 }
-int sas_ata_init_host_and_port(struct domain_device *found_dev,
+static inline int sas_ata_init_host_and_port(struct domain_device *found_dev,
 			       struct scsi_target *starget)
 {
 	return 0;
 }
-void sas_ata_task_abort(struct sas_task *task)
+static inline void sas_ata_task_abort(struct sas_task *task)
 {
 }
 #endif
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index de28aab..8d20e60 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -130,6 +130,9 @@
 extern int scsi_dma_map(struct scsi_cmnd *cmd);
 extern void scsi_dma_unmap(struct scsi_cmnd *cmd);
 
+struct scsi_cmnd *scsi_allocate_command(gfp_t gfp_mask);
+void scsi_free_command(gfp_t gfp_mask, struct scsi_cmnd *cmd);
+
 static inline unsigned scsi_sg_count(struct scsi_cmnd *cmd)
 {
 	return cmd->sdb.table.nents;
@@ -175,4 +178,18 @@
 	return &cmd->sdb;
 }
 
+static inline int scsi_sg_copy_from_buffer(struct scsi_cmnd *cmd,
+					   void *buf, int buflen)
+{
+	return sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
+				   buf, buflen);
+}
+
+static inline int scsi_sg_copy_to_buffer(struct scsi_cmnd *cmd,
+					 void *buf, int buflen)
+{
+	return sg_copy_to_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
+				 buf, buflen);
+}
+
 #endif /* _SCSI_SCSI_CMND_H */
diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
index 25071d5..d3a133b 100644
--- a/include/scsi/scsi_eh.h
+++ b/include/scsi/scsi_eh.h
@@ -57,13 +57,16 @@
 
 extern int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
 				   u64 * info_out);
- 
+
+extern void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq);
+
 /*
  * Reset request from external source
  */
 #define SCSI_TRY_RESET_DEVICE	1
 #define SCSI_TRY_RESET_BUS	2
 #define SCSI_TRY_RESET_HOST	3
+#define SCSI_TRY_RESET_TARGET	4
 
 extern int scsi_reset_provider(struct scsi_device *, int);
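scsi_build_sense_buffer() fills a sense buffer from a key/asc/ascq triple in
either fixed (desc == 0) or descriptor (desc == 1) format. A minimal sketch
emulating the standard ILLEGAL REQUEST / INVALID FIELD IN CDB response
(0x05/0x24/0x00; the function name is hypothetical):

	#include <scsi/scsi.h>
	#include <scsi/scsi_cmnd.h>
	#include <scsi/scsi_eh.h>

	/* Hypothetical: set fixed-format sense data on a command. */
	static void example_set_sense(struct scsi_cmnd *cmd)
	{
		scsi_build_sense_buffer(0, cmd->sense_buffer,
					ILLEGAL_REQUEST, 0x24, 0x00);
	}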
 
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 530ff4c..4913286 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -172,6 +172,7 @@
 	 */
 	int (* eh_abort_handler)(struct scsi_cmnd *);
 	int (* eh_device_reset_handler)(struct scsi_cmnd *);
+	int (* eh_target_reset_handler)(struct scsi_cmnd *);
 	int (* eh_bus_reset_handler)(struct scsi_cmnd *);
 	int (* eh_host_reset_handler)(struct scsi_cmnd *);
 
diff --git a/init/Kconfig b/init/Kconfig
index a97924b..7fccf09 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -763,7 +763,7 @@
 config SLABINFO
 	bool
 	depends on PROC_FS
-	depends on SLAB || SLUB
+	depends on SLAB || SLUB_DEBUG
 	default y
 
 config RT_MUTEXES
diff --git a/kernel/Makefile b/kernel/Makefile
index 6c584c5..6c5f081 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -8,7 +8,7 @@
 	    signal.o sys.o kmod.o workqueue.o pid.o \
 	    rcupdate.o extable.o params.o posix-timers.o \
 	    kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
-	    hrtimer.o rwsem.o nsproxy.o srcu.o \
+	    hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
 	    notifier.o ksysfs.o pm_qos_params.o
 
 obj-$(CONFIG_SYSCTL) += sysctl_check.o
@@ -53,6 +53,7 @@
 obj-$(CONFIG_AUDITSYSCALL) += auditsc.o
 obj-$(CONFIG_AUDIT_TREE) += audit_tree.o
 obj-$(CONFIG_KPROBES) += kprobes.o
+obj-$(CONFIG_KGDB) += kgdb.o
 obj-$(CONFIG_DETECT_SOFTLOCKUP) += softlockup.o
 obj-$(CONFIG_GENERIC_HARDIRQS) += irq/
 obj-$(CONFIG_SECCOMP) += seccomp.o
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 2727f92..6d8de05 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1722,7 +1722,12 @@
 	use_task_css_set_links = 1;
 	do_each_thread(g, p) {
 		task_lock(p);
-		if (list_empty(&p->cg_list))
+		/*
+		 * We should check if the process is exiting, otherwise
+		 * it will race with cgroup_exit(): the list entry won't
+		 * be deleted even though the process has exited.
+		 */
+		if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list))
 			list_add(&p->cg_list, &p->cgroups->tasks);
 		task_unlock(p);
 	} while_each_thread(g, p);
diff --git a/kernel/compat.c b/kernel/compat.c
index 5f0e201..9c48abf 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -47,15 +47,14 @@
 	mm_segment_t oldfs;
 	long ret;
 
-	rmtp = (struct compat_timespec __user *)(restart->arg1);
-	restart->arg1 = (unsigned long)&rmt;
+	restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
 	oldfs = get_fs();
 	set_fs(KERNEL_DS);
 	ret = hrtimer_nanosleep_restart(restart);
 	set_fs(oldfs);
 
 	if (ret) {
-		restart->arg1 = (unsigned long)rmtp;
+		rmtp = restart->nanosleep.compat_rmtp;
 
 		if (rmtp && put_compat_timespec(&rmt, rmtp))
 			return -EFAULT;
@@ -89,7 +88,7 @@
 			= &current_thread_info()->restart_block;
 
 		restart->fn = compat_nanosleep_restart;
-		restart->arg1 = (unsigned long)rmtp;
+		restart->nanosleep.compat_rmtp = rmtp;
 
 		if (rmtp && put_compat_timespec(&rmt, rmtp))
 			return -EFAULT;
@@ -607,9 +606,9 @@
 	long err;
 	mm_segment_t oldfs;
 	struct timespec tu;
-	struct compat_timespec *rmtp = (struct compat_timespec *)(restart->arg1);
+	struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
 
-	restart->arg1 = (unsigned long) &tu;
+	restart->nanosleep.rmtp = (struct timespec __user *) &tu;
 	oldfs = get_fs();
 	set_fs(KERNEL_DS);
 	err = clock_nanosleep_restart(restart);
@@ -621,7 +620,7 @@
 
 	if (err == -ERESTART_RESTARTBLOCK) {
 		restart->fn = compat_clock_nanosleep_restart;
-		restart->arg1 = (unsigned long) rmtp;
+		restart->nanosleep.compat_rmtp = rmtp;
 	}
 	return err;
 }
@@ -652,7 +651,7 @@
 	if (err == -ERESTART_RESTARTBLOCK) {
 		restart = &current_thread_info()->restart_block;
 		restart->fn = compat_clock_nanosleep_restart;
-		restart->arg1 = (unsigned long) rmtp;
+		restart->nanosleep.compat_rmtp = rmtp;
 	}
 	return err;
 }
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 98bee01..c642ef7 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1354,13 +1354,13 @@
 	struct hrtimer_sleeper t;
 	struct timespec __user  *rmtp;
 
-	hrtimer_init(&t.timer, restart->arg0, HRTIMER_MODE_ABS);
-	t.timer.expires.tv64 = ((u64)restart->arg3 << 32) | (u64) restart->arg2;
+	hrtimer_init(&t.timer, restart->nanosleep.index, HRTIMER_MODE_ABS);
+	t.timer.expires.tv64 = restart->nanosleep.expires;
 
 	if (do_nanosleep(&t, HRTIMER_MODE_ABS))
 		return 0;
 
-	rmtp = (struct timespec __user *)restart->arg1;
+	rmtp = restart->nanosleep.rmtp;
 	if (rmtp) {
 		int ret = update_rmtp(&t.timer, rmtp);
 		if (ret <= 0)
@@ -1394,10 +1394,9 @@
 
 	restart = &current_thread_info()->restart_block;
 	restart->fn = hrtimer_nanosleep_restart;
-	restart->arg0 = (unsigned long) t.timer.base->index;
-	restart->arg1 = (unsigned long) rmtp;
-	restart->arg2 = t.timer.expires.tv64 & 0xFFFFFFFF;
-	restart->arg3 = t.timer.expires.tv64 >> 32;
+	restart->nanosleep.index = t.timer.base->index;
+	restart->nanosleep.rmtp = rmtp;
+	restart->nanosleep.expires = t.timer.expires.tv64;
 
 	return -ERESTART_RESTARTBLOCK;
 }
@@ -1425,7 +1424,6 @@
 	int i;
 
 	spin_lock_init(&cpu_base->lock);
-	lockdep_set_class(&cpu_base->lock, &cpu_base->lock_key);
 
 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
 		cpu_base->clock_base[i].cpu_base = cpu_base;
@@ -1466,16 +1464,16 @@
 	tick_cancel_sched_timer(cpu);
 
 	local_irq_disable();
-	double_spin_lock(&new_base->lock, &old_base->lock,
-			 smp_processor_id() < cpu);
+	spin_lock(&new_base->lock);
+	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
 		migrate_hrtimer_list(&old_base->clock_base[i],
 				     &new_base->clock_base[i]);
 	}
 
-	double_spin_unlock(&new_base->lock, &old_base->lock,
-			   smp_processor_id() < cpu);
+	spin_unlock(&old_base->lock);
+	spin_unlock(&new_base->lock);
 	local_irq_enable();
 	put_cpu_var(hrtimer_bases);
 }
diff --git a/kernel/kgdb.c b/kernel/kgdb.c
new file mode 100644
index 0000000..1bd0ec1
--- /dev/null
+++ b/kernel/kgdb.c
@@ -0,0 +1,1700 @@
+/*
+ * KGDB stub.
+ *
+ * Maintainer: Jason Wessel <jason.wessel@windriver.com>
+ *
+ * Copyright (C) 2000-2001 VERITAS Software Corporation.
+ * Copyright (C) 2002-2004 Timesys Corporation
+ * Copyright (C) 2003-2004 Amit S. Kale <amitkale@linsyssoft.com>
+ * Copyright (C) 2004 Pavel Machek <pavel@suse.cz>
+ * Copyright (C) 2004-2006 Tom Rini <trini@kernel.crashing.org>
+ * Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd.
+ * Copyright (C) 2005-2008 Wind River Systems, Inc.
+ * Copyright (C) 2007 MontaVista Software, Inc.
+ * Copyright (C) 2008 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *
+ * Contributors at various stages not listed above:
+ *  Jason Wessel ( jason.wessel@windriver.com )
+ *  George Anzinger <george@mvista.com>
+ *  Anurekh Saxena (anurekh.saxena@timesys.com)
+ *  Lake Stevens Instrument Division (Glenn Engel)
+ *  Jim Kingdon, Cygnus Support.
+ *
+ * Original KGDB stub: David Grothe <dave@gcom.com>,
+ * Tigran Aivazian <tigran@sco.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+#include <linux/pid_namespace.h>
+#include <linux/clocksource.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/console.h>
+#include <linux/threads.h>
+#include <linux/uaccess.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ptrace.h>
+#include <linux/reboot.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/sysrq.h>
+#include <linux/init.h>
+#include <linux/kgdb.h>
+#include <linux/pid.h>
+#include <linux/smp.h>
+#include <linux/mm.h>
+
+#include <asm/cacheflush.h>
+#include <asm/byteorder.h>
+#include <asm/atomic.h>
+#include <asm/system.h>
+
+static int kgdb_break_asap;
+
+struct kgdb_state {
+	int			ex_vector;
+	int			signo;
+	int			err_code;
+	int			cpu;
+	int			pass_exception;
+	long			threadid;
+	long			kgdb_usethreadid;
+	struct pt_regs		*linux_regs;
+};
+
+static struct debuggerinfo_struct {
+	void			*debuggerinfo;
+	struct task_struct	*task;
+} kgdb_info[NR_CPUS];
+
+/**
+ * kgdb_connected - Is a host GDB connected to us?
+ */
+int				kgdb_connected;
+EXPORT_SYMBOL_GPL(kgdb_connected);
+
+/* All the KGDB handlers are installed */
+static int			kgdb_io_module_registered;
+
+/* Guard for recursive entry */
+static int			exception_level;
+
+static struct kgdb_io		*kgdb_io_ops;
+static DEFINE_SPINLOCK(kgdb_registration_lock);
+
+/* kgdb console driver is loaded */
+static int kgdb_con_registered;
+/* determine if kgdb console output should be used */
+static int kgdb_use_con;
+
+static int __init opt_kgdb_con(char *str)
+{
+	kgdb_use_con = 1;
+	return 0;
+}
+
+early_param("kgdbcon", opt_kgdb_con);
+
+module_param(kgdb_use_con, int, 0644);
+
+/*
+ * Holds information about breakpoints in a kernel. These breakpoints are
+ * added and removed by gdb.
+ */
+static struct kgdb_bkpt		kgdb_break[KGDB_MAX_BREAKPOINTS] = {
+	[0 ... KGDB_MAX_BREAKPOINTS-1] = { .state = BP_UNDEFINED }
+};
+
+/*
+ * The CPU# of the active CPU, or -1 if none:
+ */
+atomic_t			kgdb_active = ATOMIC_INIT(-1);
+
+/*
+ * We use NR_CPUS not PERCPU, in case kgdb is used to debug early
+ * bootup code (which might not have percpu set up yet):
+ */
+static atomic_t			passive_cpu_wait[NR_CPUS];
+static atomic_t			cpu_in_kgdb[NR_CPUS];
+atomic_t			kgdb_setting_breakpoint;
+
+struct task_struct		*kgdb_usethread;
+struct task_struct		*kgdb_contthread;
+
+int				kgdb_single_step;
+
+/* Our I/O buffers. */
+static char			remcom_in_buffer[BUFMAX];
+static char			remcom_out_buffer[BUFMAX];
+
+/* Storage for the registers, in GDB format. */
+static unsigned long		gdb_regs[(NUMREGBYTES +
+					sizeof(unsigned long) - 1) /
+					sizeof(unsigned long)];
+
+/* To keep track of the CPU which is doing the single stepping */
+atomic_t			kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
+
+/*
+ * If you are debugging a problem where roundup (the collection of
+ * all other CPUs) is a problem [this should be extremely rare],
+ * then use the nokgdbroundup option to avoid roundup. In that case
+ * the other CPUs might interfere with your debugging context, so
+ * use this with care:
+ */
+int				kgdb_do_roundup = 1;
+
+static int __init opt_nokgdbroundup(char *str)
+{
+	kgdb_do_roundup = 0;
+
+	return 0;
+}
+
+early_param("nokgdbroundup", opt_nokgdbroundup);
+
+/*
+ * Finally, some KGDB code :-)
+ */
+
+/*
+ * Weak aliases for breakpoint management,
+ * can be overridden by architectures when needed:
+ */
+int __weak kgdb_validate_break_address(unsigned long addr)
+{
+	char tmp_variable[BREAK_INSTR_SIZE];
+
+	return probe_kernel_read(tmp_variable, (char *)addr, BREAK_INSTR_SIZE);
+}
+
+int __weak kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr)
+{
+	int err;
+
+	err = probe_kernel_read(saved_instr, (char *)addr, BREAK_INSTR_SIZE);
+	if (err)
+		return err;
+
+	return probe_kernel_write((char *)addr, arch_kgdb_ops.gdb_bpt_instr,
+				  BREAK_INSTR_SIZE);
+}
+
+int __weak kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle)
+{
+	return probe_kernel_write((char *)addr,
+				  (char *)bundle, BREAK_INSTR_SIZE);
+}
+
+unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs)
+{
+	return instruction_pointer(regs);
+}
+
+int __weak kgdb_arch_init(void)
+{
+	return 0;
+}
+
+int __weak kgdb_skipexception(int exception, struct pt_regs *regs)
+{
+	return 0;
+}
+
+void __weak
+kgdb_post_primary_code(struct pt_regs *regs, int e_vector, int err_code)
+{
+	return;
+}
+
+/**
+ *	kgdb_disable_hw_debug - Disable hardware debugging while we are in kgdb.
+ *	@regs: Current &struct pt_regs.
+ *
+ *	This function will be called if the particular architecture must
+ *	disable hardware debugging while it is processing gdb packets or
+ *	handling an exception.
+ */
+void __weak kgdb_disable_hw_debug(struct pt_regs *regs)
+{
+}
+
+/*
+ * GDB remote protocol parser:
+ */
+
+static const char	hexchars[] = "0123456789abcdef";
+
+static int hex(char ch)
+{
+	if ((ch >= 'a') && (ch <= 'f'))
+		return ch - 'a' + 10;
+	if ((ch >= '0') && (ch <= '9'))
+		return ch - '0';
+	if ((ch >= 'A') && (ch <= 'F'))
+		return ch - 'A' + 10;
+	return -1;
+}
+
+/* scan for the sequence $<data>#<checksum> */
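+/* e.g. the reply "OK" travels as "$OK#9a": checksum 0x4f + 0x4b = 0x9a */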
+static void get_packet(char *buffer)
+{
+	unsigned char checksum;
+	unsigned char xmitcsum;
+	int count;
+	char ch;
+
+	do {
+		/*
+		 * Spin and wait around for the start character, ignore all
+		 * other characters:
+		 */
+		while ((ch = (kgdb_io_ops->read_char())) != '$')
+			/* nothing */;
+
+		kgdb_connected = 1;
+		checksum = 0;
+		xmitcsum = -1;
+
+		count = 0;
+
+		/*
+		 * now, read until a # or end of buffer is found:
+		 */
+		while (count < (BUFMAX - 1)) {
+			ch = kgdb_io_ops->read_char();
+			if (ch == '#')
+				break;
+			checksum = checksum + ch;
+			buffer[count] = ch;
+			count = count + 1;
+		}
+		buffer[count] = 0;
+
+		if (ch == '#') {
+			xmitcsum = hex(kgdb_io_ops->read_char()) << 4;
+			xmitcsum += hex(kgdb_io_ops->read_char());
+
+			if (checksum != xmitcsum)
+				/* failed checksum */
+				kgdb_io_ops->write_char('-');
+			else
+				/* successful transfer */
+				kgdb_io_ops->write_char('+');
+			if (kgdb_io_ops->flush)
+				kgdb_io_ops->flush();
+		}
+	} while (checksum != xmitcsum);
+}
+
+/*
+ * Send the packet in buffer.
+ * Check for gdb connection if asked for.
+ */
+static void put_packet(char *buffer)
+{
+	unsigned char checksum;
+	int count;
+	char ch;
+
+	/*
+	 * $<packet info>#<checksum>.
+	 */
+	while (1) {
+		kgdb_io_ops->write_char('$');
+		checksum = 0;
+		count = 0;
+
+		while ((ch = buffer[count])) {
+			kgdb_io_ops->write_char(ch);
+			checksum += ch;
+			count++;
+		}
+
+		kgdb_io_ops->write_char('#');
+		kgdb_io_ops->write_char(hexchars[checksum >> 4]);
+		kgdb_io_ops->write_char(hexchars[checksum & 0xf]);
+		if (kgdb_io_ops->flush)
+			kgdb_io_ops->flush();
+
+		/* Now see what we get in reply. */
+		ch = kgdb_io_ops->read_char();
+
+		if (ch == 3)
+			ch = kgdb_io_ops->read_char();
+
+		/* If we get an ACK, we are done. */
+		if (ch == '+')
+			return;
+
+		/*
+		 * If we get the start of another packet, this means
+		 * that GDB is attempting to reconnect.  We will NAK
+		 * the packet being sent, and stop trying to send this
+		 * packet.
+		 */
+		if (ch == '$') {
+			kgdb_io_ops->write_char('-');
+			if (kgdb_io_ops->flush)
+				kgdb_io_ops->flush();
+			return;
+		}
+	}
+}
+
+static char *pack_hex_byte(char *pkt, u8 byte)
+{
+	*pkt++ = hexchars[byte >> 4];
+	*pkt++ = hexchars[byte & 0xf];
+
+	return pkt;
+}
+
+/*
+ * Convert the memory pointed to by mem into hex, placing result in buf.
+ * Return 0 on success, or the error from probe_kernel_read().
+ */
+int kgdb_mem2hex(char *mem, char *buf, int count)
+{
+	char *tmp;
+	int err;
+
+	/*
+	 * We use the upper half of buf as an intermediate buffer for the
+	 * raw memory copy.  Hex conversion will work against this one.
+	 */
+	tmp = buf + count;
+
+	err = probe_kernel_read(tmp, mem, count);
+	if (!err) {
+		while (count > 0) {
+			buf = pack_hex_byte(buf, *tmp);
+			tmp++;
+			count--;
+		}
+
+		*buf = 0;
+	}
+
+	return err;
+}
+
+/*
+ * Copy the binary array pointed to by buf into mem.  The characters
+ * $, #, and 0x7d are escaped with 0x7d.  Return 0 on success, or the
+ * error from probe_kernel_write().
+ */
+static int kgdb_ebin2mem(char *buf, char *mem, int count)
+{
+	int err = 0;
+	char c;
+
+	while (count-- > 0) {
+		c = *buf++;
+		if (c == 0x7d)
+			c = *buf++ ^ 0x20;
+
+		err = probe_kernel_write(mem, &c, 1);
+		if (err)
+			break;
+
+		mem++;
+	}
+
+	return err;
+}
+
+/*
+ * Convert the hex array pointed to by buf into binary to be placed in mem.
+ * Return 0 on success, or the error from probe_kernel_write().
+ */
+int kgdb_hex2mem(char *buf, char *mem, int count)
+{
+	char *tmp_raw;
+	char *tmp_hex;
+
+	/*
+	 * We use the upper half of buf as an intermediate buffer for the
+	 * raw memory that is converted from hex.
+	 */
+	tmp_raw = buf + count * 2;
+
+	tmp_hex = tmp_raw - 1;
+	while (tmp_hex >= buf) {
+		tmp_raw--;
+		*tmp_raw = hex(*tmp_hex--);
+		*tmp_raw |= hex(*tmp_hex--) << 4;
+	}
+
+	return probe_kernel_write(mem, tmp_raw, count);
+}
+
+/*
+ * While we find nice hex chars, build a long_val.
+ * Return number of chars processed.
+ */
+int kgdb_hex2long(char **ptr, long *long_val)
+{
+	int hex_val;
+	int num = 0;
+
+	*long_val = 0;
+
+	while (**ptr) {
+		hex_val = hex(**ptr);
+		if (hex_val < 0)
+			break;
+
+		*long_val = (*long_val << 4) | hex_val;
+		num++;
+		(*ptr)++;
+	}
+
+	return num;
+}
+
+/* Write memory due to an 'M' or 'X' packet. */
+static int write_mem_msg(int binary)
+{
+	char *ptr = &remcom_in_buffer[1];
+	unsigned long addr;
+	unsigned long length;
+	int err;
+
+	if (kgdb_hex2long(&ptr, &addr) > 0 && *(ptr++) == ',' &&
+	    kgdb_hex2long(&ptr, &length) > 0 && *(ptr++) == ':') {
+		if (binary)
+			err = kgdb_ebin2mem(ptr, (char *)addr, length);
+		else
+			err = kgdb_hex2mem(ptr, (char *)addr, length);
+		if (err)
+			return err;
+		if (CACHE_FLUSH_IS_SAFE)
+			flush_icache_range(addr, addr + length + 1);
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static void error_packet(char *pkt, int error)
+{
+	error = -error;
+	pkt[0] = 'E';
+	pkt[1] = hexchars[(error / 10)];
+	pkt[2] = hexchars[(error % 10)];
+	pkt[3] = '\0';
+}
+
+/*
+ * Thread ID accessors. We represent a flat TID space to GDB, where
+ * the per CPU idle threads (which under Linux all have PID 0) are
+ * remapped to negative TIDs.
+ */
+
+#define BUF_THREAD_ID_SIZE	16
+
+static char *pack_threadid(char *pkt, unsigned char *id)
+{
+	char *limit;
+
+	limit = pkt + BUF_THREAD_ID_SIZE;
+	while (pkt < limit)
+		pkt = pack_hex_byte(pkt, *id++);
+
+	return pkt;
+}
+
+static void int_to_threadref(unsigned char *id, int value)
+{
+	unsigned char *scan;
+	int i = 4;
+
+	scan = (unsigned char *)id;
+	while (i--)
+		*scan++ = 0;
+	*scan++ = (value >> 24) & 0xff;
+	*scan++ = (value >> 16) & 0xff;
+	*scan++ = (value >> 8) & 0xff;
+	*scan++ = (value & 0xff);
+}
+
+static struct task_struct *getthread(struct pt_regs *regs, int tid)
+{
+	/*
+	 * Non-positive TIDs are remapped to idle tasks:
+	 */
+	if (tid <= 0)
+		return idle_task(-tid);
+
+	/*
+	 * find_task_by_pid_ns() does not take the tasklist lock anymore
+	 * but is nicely RCU locked - hence is a pretty resilient
+	 * thing to use:
+	 */
+	return find_task_by_pid_ns(tid, &init_pid_ns);
+}
+
+/*
+ * CPU debug state control:
+ */
+
+#ifdef CONFIG_SMP
+static void kgdb_wait(struct pt_regs *regs)
+{
+	unsigned long flags;
+	int cpu;
+
+	local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	kgdb_info[cpu].debuggerinfo = regs;
+	kgdb_info[cpu].task = current;
+	/*
+	 * Make sure the above info reaches the primary CPU before
+	 * our cpu_in_kgdb[] flag setting does:
+	 */
+	smp_wmb();
+	atomic_set(&cpu_in_kgdb[cpu], 1);
+
+	/* Wait till primary CPU is done with debugging */
+	while (atomic_read(&passive_cpu_wait[cpu]))
+		cpu_relax();
+
+	kgdb_info[cpu].debuggerinfo = NULL;
+	kgdb_info[cpu].task = NULL;
+
+	/* fix up hardware debug registers on local cpu */
+	if (arch_kgdb_ops.correct_hw_break)
+		arch_kgdb_ops.correct_hw_break();
+
+	/* Signal the primary CPU that we are done: */
+	atomic_set(&cpu_in_kgdb[cpu], 0);
+	clocksource_touch_watchdog();
+	local_irq_restore(flags);
+}
+#endif
+
+/*
+ * Some architectures need cache flushes when we set/clear a
+ * breakpoint:
+ */
+static void kgdb_flush_swbreak_addr(unsigned long addr)
+{
+	if (!CACHE_FLUSH_IS_SAFE)
+		return;
+
+	if (current->mm && current->mm->mmap_cache) {
+		flush_cache_range(current->mm->mmap_cache,
+				  addr, addr + BREAK_INSTR_SIZE);
+	}
+	/* Force flush instruction cache if it was outside the mm */
+	flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
+}
+
+/*
+ * SW breakpoint management:
+ */
+static int kgdb_activate_sw_breakpoints(void)
+{
+	unsigned long addr;
+	int error = 0;
+	int i;
+
+	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+		if (kgdb_break[i].state != BP_SET)
+			continue;
+
+		addr = kgdb_break[i].bpt_addr;
+		error = kgdb_arch_set_breakpoint(addr,
+				kgdb_break[i].saved_instr);
+		if (error)
+			return error;
+
+		kgdb_flush_swbreak_addr(addr);
+		kgdb_break[i].state = BP_ACTIVE;
+	}
+	return 0;
+}
+
+static int kgdb_set_sw_break(unsigned long addr)
+{
+	int err = kgdb_validate_break_address(addr);
+	int breakno = -1;
+	int i;
+
+	if (err)
+		return err;
+
+	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+		if ((kgdb_break[i].state == BP_SET) &&
+					(kgdb_break[i].bpt_addr == addr))
+			return -EEXIST;
+	}
+	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+		if (kgdb_break[i].state == BP_REMOVED &&
+					kgdb_break[i].bpt_addr == addr) {
+			breakno = i;
+			break;
+		}
+	}
+
+	if (breakno == -1) {
+		for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+			if (kgdb_break[i].state == BP_UNDEFINED) {
+				breakno = i;
+				break;
+			}
+		}
+	}
+
+	if (breakno == -1)
+		return -E2BIG;
+
+	kgdb_break[breakno].state = BP_SET;
+	kgdb_break[breakno].type = BP_BREAKPOINT;
+	kgdb_break[breakno].bpt_addr = addr;
+
+	return 0;
+}
+
+static int kgdb_deactivate_sw_breakpoints(void)
+{
+	unsigned long addr;
+	int error = 0;
+	int i;
+
+	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+		if (kgdb_break[i].state != BP_ACTIVE)
+			continue;
+		addr = kgdb_break[i].bpt_addr;
+		error = kgdb_arch_remove_breakpoint(addr,
+					kgdb_break[i].saved_instr);
+		if (error)
+			return error;
+
+		kgdb_flush_swbreak_addr(addr);
+		kgdb_break[i].state = BP_SET;
+	}
+	return 0;
+}
+
+static int kgdb_remove_sw_break(unsigned long addr)
+{
+	int i;
+
+	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+		if ((kgdb_break[i].state == BP_SET) &&
+				(kgdb_break[i].bpt_addr == addr)) {
+			kgdb_break[i].state = BP_REMOVED;
+			return 0;
+		}
+	}
+	return -ENOENT;
+}
+
+int kgdb_isremovedbreak(unsigned long addr)
+{
+	int i;
+
+	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+		if ((kgdb_break[i].state == BP_REMOVED) &&
+					(kgdb_break[i].bpt_addr == addr))
+			return 1;
+	}
+	return 0;
+}
+
+int remove_all_break(void)
+{
+	unsigned long addr;
+	int error;
+	int i;
+
+	/* Clear memory breakpoints. */
+	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+		if (kgdb_break[i].state != BP_ACTIVE)
+			goto setundefined;
+		addr = kgdb_break[i].bpt_addr;
+		error = kgdb_arch_remove_breakpoint(addr,
+				kgdb_break[i].saved_instr);
+		if (error)
+			printk(KERN_ERR "KGDB: breakpoint remove failed: %lx\n",
+			   addr);
+setundefined:
+		kgdb_break[i].state = BP_UNDEFINED;
+	}
+
+	/* Clear hardware breakpoints. */
+	if (arch_kgdb_ops.remove_all_hw_break)
+		arch_kgdb_ops.remove_all_hw_break();
+
+	return 0;
+}
+
+/*
+ * Remap normal tasks to their real PID, idle tasks to -1 ... -NR_CPUs:
+ */
+static inline int shadow_pid(int realpid)
+{
+	if (realpid)
+		return realpid;
+
+	return -1-raw_smp_processor_id();
+}
+
+static char gdbmsgbuf[BUFMAX + 1];
+
+static void kgdb_msg_write(const char *s, int len)
+{
+	char *bufptr;
+	int wcount;
+	int i;
+
+	/* 'O'utput */
+	gdbmsgbuf[0] = 'O';
+
+	/* Fill and send buffers... */
+	while (len > 0) {
+		bufptr = gdbmsgbuf + 1;
+
+		/* Calculate how many this time */
+		if ((len << 1) > (BUFMAX - 2))
+			wcount = (BUFMAX - 2) >> 1;
+		else
+			wcount = len;
+
+		/* Pack in hex chars */
+		for (i = 0; i < wcount; i++)
+			bufptr = pack_hex_byte(bufptr, s[i]);
+		*bufptr = '\0';
+
+		/* Move up */
+		s += wcount;
+		len -= wcount;
+
+		/* Write packet */
+		put_packet(gdbmsgbuf);
+	}
+}
+
+/*
+ * Return true if there is a valid kgdb I/O module.  Also, if no
+ * debugger is attached, a message can be printed to the console about
+ * waiting for the debugger to attach.
+ *
+ * The print_wait argument should only be true when called from inside
+ * the core kgdb_handle_exception, because it will wait for the
+ * debugger to attach.
+ */
+static int kgdb_io_ready(int print_wait)
+{
+	if (!kgdb_io_ops)
+		return 0;
+	if (kgdb_connected)
+		return 1;
+	if (atomic_read(&kgdb_setting_breakpoint))
+		return 1;
+	if (print_wait)
+		printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
+	return 1;
+}
+
+/*
+ * All the functions that start with gdb_cmd are the various
+ * operations to implement the handlers for the gdbserial protocol
+ * where KGDB is communicating with an external debugger
+ */
+
+/* Handle the '?' status packets */
+static void gdb_cmd_status(struct kgdb_state *ks)
+{
+	/*
+	 * We know that this packet is only sent
+	 * during initial connect.  So to be safe,
+	 * we clear out our breakpoints now in case
+	 * GDB is reconnecting.
+	 */
+	remove_all_break();
+
+	remcom_out_buffer[0] = 'S';
+	pack_hex_byte(&remcom_out_buffer[1], ks->signo);
+}
+
+/* Handle the 'g' get registers request */
+static void gdb_cmd_getregs(struct kgdb_state *ks)
+{
+	struct task_struct *thread;
+	void *local_debuggerinfo;
+	int i;
+
+	thread = kgdb_usethread;
+	if (!thread) {
+		thread = kgdb_info[ks->cpu].task;
+		local_debuggerinfo = kgdb_info[ks->cpu].debuggerinfo;
+	} else {
+		local_debuggerinfo = NULL;
+		for (i = 0; i < NR_CPUS; i++) {
+			/*
+			 * Try to find the task on some other
+			 * or possibly this node.  If we do not
+			 * find the matching task then we try
+			 * to approximate the results.
+			 */
+			if (thread == kgdb_info[i].task)
+				local_debuggerinfo = kgdb_info[i].debuggerinfo;
+		}
+	}
+
+	/*
+	 * All threads that don't have debuggerinfo should be
+	 * in __schedule() sleeping, since all other CPUs
+	 * are in kgdb_wait, and thus have debuggerinfo.
+	 */
+	if (local_debuggerinfo) {
+		pt_regs_to_gdb_regs(gdb_regs, local_debuggerinfo);
+	} else {
+		/*
+		 * Pull stuff saved during switch_to; nothing
+		 * else is accessible (or even particularly
+		 * relevant).
+		 *
+		 * This should be enough for a stack trace.
+		 */
+		sleeping_thread_to_gdb_regs(gdb_regs, thread);
+	}
+	kgdb_mem2hex((char *)gdb_regs, remcom_out_buffer, NUMREGBYTES);
+}
+
+/* Handle the 'G' set registers request */
+static void gdb_cmd_setregs(struct kgdb_state *ks)
+{
+	kgdb_hex2mem(&remcom_in_buffer[1], (char *)gdb_regs, NUMREGBYTES);
+
+	if (kgdb_usethread && kgdb_usethread != current) {
+		error_packet(remcom_out_buffer, -EINVAL);
+	} else {
+		gdb_regs_to_pt_regs(gdb_regs, ks->linux_regs);
+		strcpy(remcom_out_buffer, "OK");
+	}
+}
+
+/* Handle the 'm' memory read bytes */
+static void gdb_cmd_memread(struct kgdb_state *ks)
+{
+	char *ptr = &remcom_in_buffer[1];
+	unsigned long length;
+	unsigned long addr;
+	int err;
+
+	if (kgdb_hex2long(&ptr, &addr) > 0 && *ptr++ == ',' &&
+					kgdb_hex2long(&ptr, &length) > 0) {
+		err = kgdb_mem2hex((char *)addr, remcom_out_buffer, length);
+		if (err)
+			error_packet(remcom_out_buffer, err);
+	} else {
+		error_packet(remcom_out_buffer, -EINVAL);
+	}
+}
+
+/* Handle the 'M' memory write bytes */
+static void gdb_cmd_memwrite(struct kgdb_state *ks)
+{
+	int err = write_mem_msg(0);
+
+	if (err)
+		error_packet(remcom_out_buffer, err);
+	else
+		strcpy(remcom_out_buffer, "OK");
+}
+
+/* Handle the 'X' memory binary write bytes */
+static void gdb_cmd_binwrite(struct kgdb_state *ks)
+{
+	int err = write_mem_msg(1);
+
+	if (err)
+		error_packet(remcom_out_buffer, err);
+	else
+		strcpy(remcom_out_buffer, "OK");
+}
+
+/* Handle the 'D' or 'k', detach or kill packets */
+static void gdb_cmd_detachkill(struct kgdb_state *ks)
+{
+	int error;
+
+	/* The detach case */
+	if (remcom_in_buffer[0] == 'D') {
+		error = remove_all_break();
+		if (error < 0) {
+			error_packet(remcom_out_buffer, error);
+		} else {
+			strcpy(remcom_out_buffer, "OK");
+			kgdb_connected = 0;
+		}
+		put_packet(remcom_out_buffer);
+	} else {
+		/*
+		 * Assume the kill case, with no exit code checking,
+		 * trying to force detach the debugger:
+		 */
+		remove_all_break();
+		kgdb_connected = 0;
+	}
+}
+
+/* Handle the 'R' reboot packets */
+static int gdb_cmd_reboot(struct kgdb_state *ks)
+{
+	/* For now, only honor R0 */
+	if (strcmp(remcom_in_buffer, "R0") == 0) {
+		printk(KERN_CRIT "Executing emergency reboot\n");
+		strcpy(remcom_out_buffer, "OK");
+		put_packet(remcom_out_buffer);
+
+		/*
+		 * Execution should not return from
+		 * machine_emergency_restart()
+		 */
+		machine_emergency_restart();
+		kgdb_connected = 0;
+
+		return 1;
+	}
+	return 0;
+}
+
+/* Handle the 'q' query packets */
+static void gdb_cmd_query(struct kgdb_state *ks)
+{
+	struct task_struct *thread;
+	unsigned char thref[8];
+	char *ptr;
+	int i;
+
+	switch (remcom_in_buffer[1]) {
+	case 's':
+	case 'f':
+		if (memcmp(remcom_in_buffer + 2, "ThreadInfo", 10)) {
+			error_packet(remcom_out_buffer, -EINVAL);
+			break;
+		}
+
+		if (remcom_in_buffer[1] == 'f')
+			ks->threadid = 1;
+
+		remcom_out_buffer[0] = 'm';
+		ptr = remcom_out_buffer + 1;
+
+		for (i = 0; i < 17; ks->threadid++) {
+			thread = getthread(ks->linux_regs, ks->threadid);
+			if (thread) {
+				int_to_threadref(thref, ks->threadid);
+				pack_threadid(ptr, thref);
+				ptr += BUF_THREAD_ID_SIZE;
+				*(ptr++) = ',';
+				i++;
+			}
+		}
+		*(--ptr) = '\0';
+		break;
+
+	case 'C':
+		/* Current thread id */
+		strcpy(remcom_out_buffer, "QC");
+		ks->threadid = shadow_pid(current->pid);
+		int_to_threadref(thref, ks->threadid);
+		pack_threadid(remcom_out_buffer + 2, thref);
+		break;
+	case 'T':
+		if (memcmp(remcom_in_buffer + 1, "ThreadExtraInfo,", 16)) {
+			error_packet(remcom_out_buffer, -EINVAL);
+			break;
+		}
+		ks->threadid = 0;
+		ptr = remcom_in_buffer + 17;
+		kgdb_hex2long(&ptr, &ks->threadid);
+		if (!getthread(ks->linux_regs, ks->threadid)) {
+			error_packet(remcom_out_buffer, -EINVAL);
+			break;
+		}
+		if (ks->threadid > 0) {
+			kgdb_mem2hex(getthread(ks->linux_regs,
+					ks->threadid)->comm,
+					remcom_out_buffer, 16);
+		} else {
+			static char tmpstr[23 + BUF_THREAD_ID_SIZE];
+
+			sprintf(tmpstr, "Shadow task %d for pid 0",
+					(int)(-ks->threadid-1));
+			kgdb_mem2hex(tmpstr, remcom_out_buffer, strlen(tmpstr));
+		}
+		break;
+	}
+}
+
+/* Handle the 'H' task query packets */
+static void gdb_cmd_task(struct kgdb_state *ks)
+{
+	struct task_struct *thread;
+	char *ptr;
+
+	switch (remcom_in_buffer[1]) {
+	case 'g':
+		ptr = &remcom_in_buffer[2];
+		kgdb_hex2long(&ptr, &ks->threadid);
+		thread = getthread(ks->linux_regs, ks->threadid);
+		if (!thread && ks->threadid > 0) {
+			error_packet(remcom_out_buffer, -EINVAL);
+			break;
+		}
+		kgdb_usethread = thread;
+		ks->kgdb_usethreadid = ks->threadid;
+		strcpy(remcom_out_buffer, "OK");
+		break;
+	case 'c':
+		ptr = &remcom_in_buffer[2];
+		kgdb_hex2long(&ptr, &ks->threadid);
+		if (!ks->threadid) {
+			kgdb_contthread = NULL;
+		} else {
+			thread = getthread(ks->linux_regs, ks->threadid);
+			if (!thread && ks->threadid > 0) {
+				error_packet(remcom_out_buffer, -EINVAL);
+				break;
+			}
+			kgdb_contthread = thread;
+		}
+		strcpy(remcom_out_buffer, "OK");
+		break;
+	}
+}
+
+/* Handle the 'T' thread query packets */
+static void gdb_cmd_thread(struct kgdb_state *ks)
+{
+	char *ptr = &remcom_in_buffer[1];
+	struct task_struct *thread;
+
+	kgdb_hex2long(&ptr, &ks->threadid);
+	thread = getthread(ks->linux_regs, ks->threadid);
+	if (thread)
+		strcpy(remcom_out_buffer, "OK");
+	else
+		error_packet(remcom_out_buffer, -EINVAL);
+}
+
+/* Handle the 'z' or 'Z' breakpoint remove or set packets */
+static void gdb_cmd_break(struct kgdb_state *ks)
+{
+	/*
+	 * Since GDB-5.3, it's been drafted that '0' is a software
+	 * breakpoint, '1' is a hardware breakpoint, so let's do that.
+	 */
+	char *bpt_type = &remcom_in_buffer[1];
+	char *ptr = &remcom_in_buffer[2];
+	unsigned long addr;
+	unsigned long length;
+	int error = 0;
+
+	if (arch_kgdb_ops.set_hw_breakpoint && *bpt_type >= '1') {
+		/* Unsupported */
+		if (*bpt_type > '4')
+			return;
+	} else {
+		if (*bpt_type != '0' && *bpt_type != '1')
+			/* Unsupported. */
+			return;
+	}
+
+	/*
+	 * Test if this is a hardware breakpoint, and
+	 * if we support it:
+	 */
+	if (*bpt_type == '1' && !(arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT))
+		/* Unsupported. */
+		return;
+
+	if (*(ptr++) != ',') {
+		error_packet(remcom_out_buffer, -EINVAL);
+		return;
+	}
+	if (!kgdb_hex2long(&ptr, &addr)) {
+		error_packet(remcom_out_buffer, -EINVAL);
+		return;
+	}
+	if (*(ptr++) != ',' ||
+		!kgdb_hex2long(&ptr, &length)) {
+		error_packet(remcom_out_buffer, -EINVAL);
+		return;
+	}
+
+	if (remcom_in_buffer[0] == 'Z' && *bpt_type == '0')
+		error = kgdb_set_sw_break(addr);
+	else if (remcom_in_buffer[0] == 'z' && *bpt_type == '0')
+		error = kgdb_remove_sw_break(addr);
+	else if (remcom_in_buffer[0] == 'Z')
+		error = arch_kgdb_ops.set_hw_breakpoint(addr,
+			(int)length, *bpt_type - '0');
+	else if (remcom_in_buffer[0] == 'z')
+		error = arch_kgdb_ops.remove_hw_breakpoint(addr,
+			(int) length, *bpt_type - '0');
+
+	if (error == 0)
+		strcpy(remcom_out_buffer, "OK");
+	else
+		error_packet(remcom_out_buffer, error);
+}
+
+/* Handle the 'C' signal / exception passing packets */
+static int gdb_cmd_exception_pass(struct kgdb_state *ks)
+{
+	/* C09 == pass exception
+	 * C15 == detach kgdb, pass exception
+	 */
+	if (remcom_in_buffer[1] == '0' && remcom_in_buffer[2] == '9') {
+
+		ks->pass_exception = 1;
+		remcom_in_buffer[0] = 'c';
+
+	} else if (remcom_in_buffer[1] == '1' && remcom_in_buffer[2] == '5') {
+
+		ks->pass_exception = 1;
+		remcom_in_buffer[0] = 'D';
+		remove_all_break();
+		kgdb_connected = 0;
+		return 1;
+
+	} else {
+		error_packet(remcom_out_buffer, -EINVAL);
+		return 0;
+	}
+
+	/* Indicate fall through */
+	return -1;
+}
+
+/*
+ * This function performs all gdbserial command processing
+ */
+static int gdb_serial_stub(struct kgdb_state *ks)
+{
+	int error = 0;
+	int tmp;
+
+	/* Clear the out buffer. */
+	memset(remcom_out_buffer, 0, sizeof(remcom_out_buffer));
+
+	if (kgdb_connected) {
+		unsigned char thref[8];
+		char *ptr;
+
+		/* Reply to host that an exception has occurred */
+		ptr = remcom_out_buffer;
+		*ptr++ = 'T';
+		ptr = pack_hex_byte(ptr, ks->signo);
+		ptr += strlen(strcpy(ptr, "thread:"));
+		int_to_threadref(thref, shadow_pid(current->pid));
+		ptr = pack_threadid(ptr, thref);
+		*ptr++ = ';';
+		put_packet(remcom_out_buffer);
+	}
+
+	kgdb_usethread = kgdb_info[ks->cpu].task;
+	ks->kgdb_usethreadid = shadow_pid(kgdb_info[ks->cpu].task->pid);
+	ks->pass_exception = 0;
+
+	while (1) {
+		error = 0;
+
+		/* Clear the out buffer. */
+		memset(remcom_out_buffer, 0, sizeof(remcom_out_buffer));
+
+		get_packet(remcom_in_buffer);
+
+		switch (remcom_in_buffer[0]) {
+		case '?': /* gdbserial status */
+			gdb_cmd_status(ks);
+			break;
+		case 'g': /* return the value of the CPU registers */
+			gdb_cmd_getregs(ks);
+			break;
+		case 'G': /* set the value of the CPU registers - return OK */
+			gdb_cmd_setregs(ks);
+			break;
+		case 'm': /* mAA..AA,LLLL  Read LLLL bytes at address AA..AA */
+			gdb_cmd_memread(ks);
+			break;
+		case 'M': /* MAA..AA,LLLL: Write LLLL bytes at address AA..AA */
+			gdb_cmd_memwrite(ks);
+			break;
+		case 'X': /* XAA..AA,LLLL: Write LLLL bytes at address AA..AA */
+			gdb_cmd_binwrite(ks);
+			break;
+			/* kill or detach. KGDB should treat this like a
+			 * continue.
+			 */
+		case 'D': /* Debugger detach */
+		case 'k': /* Debugger detach via kill */
+			gdb_cmd_detachkill(ks);
+			goto default_handle;
+		case 'R': /* Reboot */
+			if (gdb_cmd_reboot(ks))
+				goto default_handle;
+			break;
+		case 'q': /* query command */
+			gdb_cmd_query(ks);
+			break;
+		case 'H': /* task related */
+			gdb_cmd_task(ks);
+			break;
+		case 'T': /* Query thread status */
+			gdb_cmd_thread(ks);
+			break;
+		case 'z': /* Breakpoint remove */
+		case 'Z': /* Breakpoint set */
+			gdb_cmd_break(ks);
+			break;
+		case 'C': /* Exception passing */
+			tmp = gdb_cmd_exception_pass(ks);
+			if (tmp > 0)
+				goto default_handle;
+			if (tmp == 0)
+				break;
+			/* Fall through on tmp < 0 */
+		case 'c': /* Continue packet */
+		case 's': /* Single step packet */
+			if (kgdb_contthread && kgdb_contthread != current) {
+				/* Can't switch threads in kgdb */
+				error_packet(remcom_out_buffer, -EINVAL);
+				break;
+			}
+			kgdb_activate_sw_breakpoints();
+			/* Fall through to default processing */
+		default:
+default_handle:
+			error = kgdb_arch_handle_exception(ks->ex_vector,
+						ks->signo,
+						ks->err_code,
+						remcom_in_buffer,
+						remcom_out_buffer,
+						ks->linux_regs);
+			/*
+			 * Leave cmd processing on error, detach,
+			 * kill, continue, or single step.
+			 */
+			if (error >= 0 || remcom_in_buffer[0] == 'D' ||
+			    remcom_in_buffer[0] == 'k') {
+				error = 0;
+				goto kgdb_exit;
+			}
+
+		}
+
+		/* reply to the request */
+		put_packet(remcom_out_buffer);
+	}
+
+kgdb_exit:
+	if (ks->pass_exception)
+		error = 1;
+	return error;
+}
+
+static int kgdb_reenter_check(struct kgdb_state *ks)
+{
+	unsigned long addr;
+
+	if (atomic_read(&kgdb_active) != raw_smp_processor_id())
+		return 0;
+
+	/* Panic on recursive debugger calls: */
+	exception_level++;
+	addr = kgdb_arch_pc(ks->ex_vector, ks->linux_regs);
+	kgdb_deactivate_sw_breakpoints();
+
+	/*
+	 * If the breakpoint was removed successfully at the place the
+	 * exception occurred, try to recover and print a warning to the
+	 * end user, because the user planted a breakpoint in a place
+	 * that KGDB needs in order to function.
+	 */
+	if (kgdb_remove_sw_break(addr) == 0) {
+		exception_level = 0;
+		kgdb_skipexception(ks->ex_vector, ks->linux_regs);
+		kgdb_activate_sw_breakpoints();
+		printk(KERN_CRIT "KGDB: re-enter error: breakpoint removed %lx\n",
+			addr);
+		WARN_ON_ONCE(1);
+
+		return 1;
+	}
+	remove_all_break();
+	kgdb_skipexception(ks->ex_vector, ks->linux_regs);
+
+	if (exception_level > 1) {
+		dump_stack();
+		panic("Recursive entry to debugger");
+	}
+
+	printk(KERN_CRIT "KGDB: re-enter exception: ALL breakpoints killed\n");
+	dump_stack();
+	panic("Recursive entry to debugger");
+
+	return 1;
+}
+
+/*
+ * kgdb_handle_exception() - main entry point from a kernel exception
+ *
+ * Locking hierarchy:
+ *	interface locks, if any (begin_session)
+ *	kgdb lock (kgdb_active)
+ */
+int
+kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
+{
+	struct kgdb_state kgdb_var;
+	struct kgdb_state *ks = &kgdb_var;
+	unsigned long flags;
+	int error = 0;
+	int i, cpu;
+
+	ks->cpu			= raw_smp_processor_id();
+	ks->ex_vector		= evector;
+	ks->signo		= signo;
+	ks->err_code		= ecode;
+	ks->kgdb_usethreadid	= 0;
+	ks->linux_regs		= regs;
+
+	if (kgdb_reenter_check(ks))
+		return 0; /* Ouch, double exception ! */
+
+acquirelock:
+	/*
+	 * Interrupts will be restored by the 'trap return' code, except when
+	 * single stepping.
+	 */
+	local_irq_save(flags);
+
+	cpu = raw_smp_processor_id();
+
+	/*
+	 * Acquire the kgdb_active lock:
+	 */
+	while (atomic_cmpxchg(&kgdb_active, -1, cpu) != -1)
+		cpu_relax();
+
+	/*
+	 * Do not start the debugger connection on this CPU if the last
+	 * instance of the exception handler wanted to come into the
+	 * debugger on a different CPU via a single step
+	 */
+	if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
+	    atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
+
+		atomic_set(&kgdb_active, -1);
+		clocksource_touch_watchdog();
+		local_irq_restore(flags);
+
+		goto acquirelock;
+	}
+
+	if (!kgdb_io_ready(1)) {
+		error = 1;
+		goto kgdb_restore; /* No I/O connection, so resume the system */
+	}
+
+	/*
+	 * Don't enter if we have hit a removed breakpoint.
+	 */
+	if (kgdb_skipexception(ks->ex_vector, ks->linux_regs))
+		goto kgdb_restore;
+
+	/* Call the I/O driver's pre_exception routine */
+	if (kgdb_io_ops->pre_exception)
+		kgdb_io_ops->pre_exception();
+
+	kgdb_info[ks->cpu].debuggerinfo = ks->linux_regs;
+	kgdb_info[ks->cpu].task = current;
+
+	kgdb_disable_hw_debug(ks->linux_regs);
+
+	/*
+	 * Get the passive CPU lock which will hold all the non-primary
+	 * CPU in a spin state while the debugger is active
+	 */
+	if (!kgdb_single_step || !kgdb_contthread) {
+		for (i = 0; i < NR_CPUS; i++)
+			atomic_set(&passive_cpu_wait[i], 1);
+	}
+
+	/*
+	 * spin_lock code is good enough as a barrier so we don't
+	 * need one here:
+	 */
+	atomic_set(&cpu_in_kgdb[ks->cpu], 1);
+
+#ifdef CONFIG_SMP
+	/* Signal the other CPUs to enter kgdb_wait() */
+	if ((!kgdb_single_step || !kgdb_contthread) && kgdb_do_roundup)
+		kgdb_roundup_cpus(flags);
+#endif
+
+	/*
+	 * Wait for the other CPUs to be notified and be waiting for us:
+	 */
+	for_each_online_cpu(i) {
+		while (!atomic_read(&cpu_in_kgdb[i]))
+			cpu_relax();
+	}
+
+	/*
+	 * At this point the primary processor is completely
+	 * in the debugger and all secondary CPUs are quiescent
+	 */
+	kgdb_post_primary_code(ks->linux_regs, ks->ex_vector, ks->err_code);
+	kgdb_deactivate_sw_breakpoints();
+	kgdb_single_step = 0;
+	kgdb_contthread = NULL;
+	exception_level = 0;
+
+	/* Talk to debugger with gdbserial protocol */
+	error = gdb_serial_stub(ks);
+
+	/* Call the I/O driver's post_exception routine */
+	if (kgdb_io_ops->post_exception)
+		kgdb_io_ops->post_exception();
+
+	kgdb_info[ks->cpu].debuggerinfo = NULL;
+	kgdb_info[ks->cpu].task = NULL;
+	atomic_set(&cpu_in_kgdb[ks->cpu], 0);
+
+	if (!kgdb_single_step || !kgdb_contthread) {
+		for (i = NR_CPUS-1; i >= 0; i--)
+			atomic_set(&passive_cpu_wait[i], 0);
+		/*
+		 * Wait till all the CPUs have quit
+		 * from the debugger.
+		 */
+		for_each_online_cpu(i) {
+			while (atomic_read(&cpu_in_kgdb[i]))
+				cpu_relax();
+		}
+	}
+
+kgdb_restore:
+	/* Free kgdb_active */
+	atomic_set(&kgdb_active, -1);
+	clocksource_touch_watchdog();
+	local_irq_restore(flags);
+
+	return error;
+}
+
+int kgdb_nmicallback(int cpu, void *regs)
+{
+#ifdef CONFIG_SMP
+	if (!atomic_read(&cpu_in_kgdb[cpu]) &&
+			atomic_read(&kgdb_active) != cpu &&
+			atomic_read(&cpu_in_kgdb[atomic_read(&kgdb_active)])) {
+		kgdb_wait((struct pt_regs *)regs);
+		return 0;
+	}
+#endif
+	return 1;
+}
+
+void kgdb_console_write(struct console *co, const char *s, unsigned count)
+{
+	unsigned long flags;
+
+	/* If we're debugging, or KGDB has not connected, don't try
+	 * to print. */
+	if (!kgdb_connected || atomic_read(&kgdb_active) != -1)
+		return;
+
+	local_irq_save(flags);
+	kgdb_msg_write(s, count);
+	local_irq_restore(flags);
+}
+
+static struct console kgdbcons = {
+	.name		= "kgdb",
+	.write		= kgdb_console_write,
+	.flags		= CON_PRINTBUFFER | CON_ENABLED,
+	.index		= -1,
+};
+
+#ifdef CONFIG_MAGIC_SYSRQ
+static void sysrq_handle_gdb(int key, struct tty_struct *tty)
+{
+	if (!kgdb_io_ops) {
+		printk(KERN_CRIT "ERROR: No KGDB I/O module available\n");
+		return;
+	}
+	if (!kgdb_connected)
+		printk(KERN_CRIT "Entering KGDB\n");
+
+	kgdb_breakpoint();
+}
+
+static struct sysrq_key_op sysrq_gdb_op = {
+	.handler	= sysrq_handle_gdb,
+	.help_msg	= "Gdb",
+	.action_msg	= "GDB",
+};
+#endif
+
+static void kgdb_register_callbacks(void)
+{
+	if (!kgdb_io_module_registered) {
+		kgdb_io_module_registered = 1;
+		kgdb_arch_init();
+#ifdef CONFIG_MAGIC_SYSRQ
+		register_sysrq_key('g', &sysrq_gdb_op);
+#endif
+		if (kgdb_use_con && !kgdb_con_registered) {
+			register_console(&kgdbcons);
+			kgdb_con_registered = 1;
+		}
+	}
+}
+
+static void kgdb_unregister_callbacks(void)
+{
+	/*
+	 * When this routine is called KGDB should unregister from the
+	 * panic handler and clean up, making sure it is not handling any
+	 * break exceptions at the time.
+	 */
+	if (kgdb_io_module_registered) {
+		kgdb_io_module_registered = 0;
+		kgdb_arch_exit();
+#ifdef CONFIG_MAGIC_SYSRQ
+		unregister_sysrq_key('g', &sysrq_gdb_op);
+#endif
+		if (kgdb_con_registered) {
+			unregister_console(&kgdbcons);
+			kgdb_con_registered = 0;
+		}
+	}
+}
+
+static void kgdb_initial_breakpoint(void)
+{
+	kgdb_break_asap = 0;
+
+	printk(KERN_CRIT "kgdb: Waiting for connection from remote gdb...\n");
+	kgdb_breakpoint();
+}
+
+/**
+ *	kgdb_register_io_module - register KGDB IO module
+ *	@new_kgdb_io_ops: the io ops vector
+ *
+ *	Register it with the KGDB core.
+ */
+int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
+{
+	int err;
+
+	spin_lock(&kgdb_registration_lock);
+
+	if (kgdb_io_ops) {
+		spin_unlock(&kgdb_registration_lock);
+
+		printk(KERN_ERR "kgdb: Another I/O driver is already "
+				"registered with KGDB.\n");
+		return -EBUSY;
+	}
+
+	if (new_kgdb_io_ops->init) {
+		err = new_kgdb_io_ops->init();
+		if (err) {
+			spin_unlock(&kgdb_registration_lock);
+			return err;
+		}
+	}
+
+	kgdb_io_ops = new_kgdb_io_ops;
+
+	spin_unlock(&kgdb_registration_lock);
+
+	printk(KERN_INFO "kgdb: Registered I/O driver %s.\n",
+	       new_kgdb_io_ops->name);
+
+	/* Arm KGDB now. */
+	kgdb_register_callbacks();
+
+	if (kgdb_break_asap)
+		kgdb_initial_breakpoint();
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kgdb_register_io_module);
+
+/**
+ *	kgdb_unregister_io_module - unregister KGDB IO module
+ *	@old_kgdb_io_ops: the io ops vector
+ *
+ *	Unregister it with the KGDB core.
+ */
+void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
+{
+	BUG_ON(kgdb_connected);
+
+	/*
+	 * KGDB is no longer able to communicate out, so
+	 * unregister our callbacks and reset state.
+	 */
+	kgdb_unregister_callbacks();
+
+	spin_lock(&kgdb_registration_lock);
+
+	WARN_ON_ONCE(kgdb_io_ops != old_kgdb_io_ops);
+	kgdb_io_ops = NULL;
+
+	spin_unlock(&kgdb_registration_lock);
+
+	printk(KERN_INFO
+		"kgdb: Unregistered I/O driver %s, debugger disabled.\n",
+		old_kgdb_io_ops->name);
+}
+EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
+
+/**
+ * kgdb_breakpoint - generate breakpoint exception
+ *
+ * This function will generate a breakpoint exception.  It is used at the
+ * beginning of a program to sync up with a debugger and can be used
+ * otherwise as a quick means to stop program execution and "break" into
+ * the debugger.
+ */
+void kgdb_breakpoint(void)
+{
+	atomic_set(&kgdb_setting_breakpoint, 1);
+	wmb(); /* Sync point before breakpoint */
+	arch_kgdb_breakpoint();
+	wmb(); /* Sync point after breakpoint */
+	atomic_set(&kgdb_setting_breakpoint, 0);
+}
+EXPORT_SYMBOL_GPL(kgdb_breakpoint);
+
+static int __init opt_kgdb_wait(char *str)
+{
+	kgdb_break_asap = 1;
+
+	if (kgdb_io_module_registered)
+		kgdb_initial_breakpoint();
+
+	return 0;
+}
+
+early_param("kgdbwait", opt_kgdb_wait);
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 2eae91f..ae5c6c1 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -1087,45 +1087,45 @@
 	maxfire = 20;
 	prof_expires = cputime_zero;
 	while (!list_empty(timers)) {
-		struct cpu_timer_list *t = list_first_entry(timers,
+		struct cpu_timer_list *tl = list_first_entry(timers,
 						      struct cpu_timer_list,
 						      entry);
-		if (!--maxfire || cputime_lt(ptime, t->expires.cpu)) {
-			prof_expires = t->expires.cpu;
+		if (!--maxfire || cputime_lt(ptime, tl->expires.cpu)) {
+			prof_expires = tl->expires.cpu;
 			break;
 		}
-		t->firing = 1;
-		list_move_tail(&t->entry, firing);
+		tl->firing = 1;
+		list_move_tail(&tl->entry, firing);
 	}
 
 	++timers;
 	maxfire = 20;
 	virt_expires = cputime_zero;
 	while (!list_empty(timers)) {
-		struct cpu_timer_list *t = list_first_entry(timers,
+		struct cpu_timer_list *tl = list_first_entry(timers,
 						      struct cpu_timer_list,
 						      entry);
-		if (!--maxfire || cputime_lt(utime, t->expires.cpu)) {
-			virt_expires = t->expires.cpu;
+		if (!--maxfire || cputime_lt(utime, tl->expires.cpu)) {
+			virt_expires = tl->expires.cpu;
 			break;
 		}
-		t->firing = 1;
-		list_move_tail(&t->entry, firing);
+		tl->firing = 1;
+		list_move_tail(&tl->entry, firing);
 	}
 
 	++timers;
 	maxfire = 20;
 	sched_expires = 0;
 	while (!list_empty(timers)) {
-		struct cpu_timer_list *t = list_first_entry(timers,
+		struct cpu_timer_list *tl = list_first_entry(timers,
 						      struct cpu_timer_list,
 						      entry);
-		if (!--maxfire || sum_sched_runtime < t->expires.sched) {
-			sched_expires = t->expires.sched;
+		if (!--maxfire || sum_sched_runtime < tl->expires.sched) {
+			sched_expires = tl->expires.sched;
 			break;
 		}
-		t->firing = 1;
-		list_move_tail(&t->entry, firing);
+		tl->firing = 1;
+		list_move_tail(&tl->entry, firing);
 	}
 
 	/*
diff --git a/kernel/semaphore.c b/kernel/semaphore.c
new file mode 100644
index 0000000..5c2942e
--- /dev/null
+++ b/kernel/semaphore.c
@@ -0,0 +1,264 @@
+/*
+ * Copyright (c) 2008 Intel Corporation
+ * Author: Matthew Wilcox <willy@linux.intel.com>
+ *
+ * Distributed under the terms of the GNU GPL, version 2
+ *
+ * This file implements counting semaphores.
+ * A counting semaphore may be acquired 'n' times before sleeping.
+ * See mutex.c for single-acquisition sleeping locks that enforce
+ * rules allowing code to be debugged more easily.
+ */
+
+/*
+ * Some notes on the implementation:
+ *
+ * The spinlock controls access to the other members of the semaphore.
+ * down_trylock() and up() can be called from interrupt context, so we
+ * have to disable interrupts when taking the lock.  It turns out various
+ * parts of the kernel expect to be able to use down() on a semaphore in
+ * interrupt context when they know it will succeed, so we have to use
+ * irqsave variants for down(), down_interruptible() and down_killable()
+ * too.
+ *
+ * The ->count variable represents how many more tasks can acquire this
+ * semaphore.  If it's zero, there may be tasks waiting on the wait_list.
+ */
+
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/semaphore.h>
+#include <linux/spinlock.h>
+
+static noinline void __down(struct semaphore *sem);
+static noinline int __down_interruptible(struct semaphore *sem);
+static noinline int __down_killable(struct semaphore *sem);
+static noinline int __down_timeout(struct semaphore *sem, long jiffies);
+static noinline void __up(struct semaphore *sem);
+
+/**
+ * down - acquire the semaphore
+ * @sem: the semaphore to be acquired
+ *
+ * Acquires the semaphore.  If no more tasks are allowed to acquire the
+ * semaphore, calling this function will put the task to sleep until the
+ * semaphore is released.
+ *
+ * Use of this function is deprecated, please use down_interruptible() or
+ * down_killable() instead.
+ */
+void down(struct semaphore *sem)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sem->lock, flags);
+	if (likely(sem->count > 0))
+		sem->count--;
+	else
+		__down(sem);
+	spin_unlock_irqrestore(&sem->lock, flags);
+}
+EXPORT_SYMBOL(down);
+
+/**
+ * down_interruptible - acquire the semaphore unless interrupted
+ * @sem: the semaphore to be acquired
+ *
+ * Attempts to acquire the semaphore.  If no more tasks are allowed to
+ * acquire the semaphore, calling this function will put the task to sleep.
+ * If the sleep is interrupted by a signal, this function will return -EINTR.
+ * If the semaphore is successfully acquired, this function returns 0.
+ */
+int down_interruptible(struct semaphore *sem)
+{
+	unsigned long flags;
+	int result = 0;
+
+	spin_lock_irqsave(&sem->lock, flags);
+	if (likely(sem->count > 0))
+		sem->count--;
+	else
+		result = __down_interruptible(sem);
+	spin_unlock_irqrestore(&sem->lock, flags);
+
+	return result;
+}
+EXPORT_SYMBOL(down_interruptible);
+
+/**
+ * down_killable - acquire the semaphore unless killed
+ * @sem: the semaphore to be acquired
+ *
+ * Attempts to acquire the semaphore.  If no more tasks are allowed to
+ * acquire the semaphore, calling this function will put the task to sleep.
+ * If the sleep is interrupted by a fatal signal, this function will return
+ * -EINTR.  If the semaphore is successfully acquired, this function returns
+ * 0.
+ */
+int down_killable(struct semaphore *sem)
+{
+	unsigned long flags;
+	int result = 0;
+
+	spin_lock_irqsave(&sem->lock, flags);
+	if (likely(sem->count > 0))
+		sem->count--;
+	else
+		result = __down_killable(sem);
+	spin_unlock_irqrestore(&sem->lock, flags);
+
+	return result;
+}
+EXPORT_SYMBOL(down_killable);
+
+/**
+ * down_trylock - try to acquire the semaphore, without waiting
+ * @sem: the semaphore to be acquired
+ *
+ * Try to acquire the semaphore atomically.  Returns 0 if the semaphore
+ * has been acquired successfully or 1 if it cannot be acquired.
+ *
+ * NOTE: This return value is inverted from both spin_trylock and
+ * mutex_trylock!  Be careful about this when converting code.
+ *
+ * Unlike mutex_trylock, this function can be used from interrupt context,
+ * and the semaphore can be released by any task or interrupt.
+ */
+int down_trylock(struct semaphore *sem)
+{
+	unsigned long flags;
+	int count;
+
+	spin_lock_irqsave(&sem->lock, flags);
+	count = sem->count - 1;
+	if (likely(count >= 0))
+		sem->count = count;
+	spin_unlock_irqrestore(&sem->lock, flags);
+
+	return (count < 0);
+}
+EXPORT_SYMBOL(down_trylock);
+
+/**
+ * down_timeout - acquire the semaphore within a specified time
+ * @sem: the semaphore to be acquired
+ * @jiffies: how long to wait before failing
+ *
+ * Attempts to acquire the semaphore.  If no more tasks are allowed to
+ * acquire the semaphore, calling this function will put the task to sleep.
+ * If the semaphore is not released within the specified number of jiffies,
+ * this function returns -ETIME.  It returns 0 if the semaphore was acquired.
+ */
+int down_timeout(struct semaphore *sem, long jiffies)
+{
+	unsigned long flags;
+	int result = 0;
+
+	spin_lock_irqsave(&sem->lock, flags);
+	if (likely(sem->count > 0))
+		sem->count--;
+	else
+		result = __down_timeout(sem, jiffies);
+	spin_unlock_irqrestore(&sem->lock, flags);
+
+	return result;
+}
+EXPORT_SYMBOL(down_timeout);
+
+/**
+ * up - release the semaphore
+ * @sem: the semaphore to release
+ *
+ * Release the semaphore.  Unlike mutexes, up() may be called from any
+ * context and even by tasks which have never called down().
+ */
+void up(struct semaphore *sem)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sem->lock, flags);
+	if (likely(list_empty(&sem->wait_list)))
+		sem->count++;
+	else
+		__up(sem);
+	spin_unlock_irqrestore(&sem->lock, flags);
+}
+EXPORT_SYMBOL(up);
+
+/* Functions for the contended case */
+
+struct semaphore_waiter {
+	struct list_head list;
+	struct task_struct *task;
+	int up;
+};
+
+/*
+ * Because this function is inlined, the 'state' parameter will be
+ * constant, and thus optimised away by the compiler.  Likewise the
+ * 'timeout' parameter for the cases without timeouts.
+ */
+static inline int __sched __down_common(struct semaphore *sem, long state,
+								long timeout)
+{
+	struct task_struct *task = current;
+	struct semaphore_waiter waiter;
+
+	list_add_tail(&waiter.list, &sem->wait_list);
+	waiter.task = task;
+	waiter.up = 0;
+
+	for (;;) {
+		if (state == TASK_INTERRUPTIBLE && signal_pending(task))
+			goto interrupted;
+		if (state == TASK_KILLABLE && fatal_signal_pending(task))
+			goto interrupted;
+		if (timeout <= 0)
+			goto timed_out;
+		__set_task_state(task, state);
+		spin_unlock_irq(&sem->lock);
+		timeout = schedule_timeout(timeout);
+		spin_lock_irq(&sem->lock);
+		if (waiter.up)
+			return 0;
+	}
+
+ timed_out:
+	list_del(&waiter.list);
+	return -ETIME;
+
+ interrupted:
+	list_del(&waiter.list);
+	return -EINTR;
+}
+
+static noinline void __sched __down(struct semaphore *sem)
+{
+	__down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
+}
+
+static noinline int __sched __down_interruptible(struct semaphore *sem)
+{
+	return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
+}
+
+static noinline int __sched __down_killable(struct semaphore *sem)
+{
+	return __down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT);
+}
+
+static noinline int __sched __down_timeout(struct semaphore *sem, long jiffies)
+{
+	return __down_common(sem, TASK_UNINTERRUPTIBLE, jiffies);
+}
+
+static noinline void __sched __up(struct semaphore *sem)
+{
+	struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
+						struct semaphore_waiter, list);
+	list_del(&waiter->list);
+	waiter->up = 1;
+	wake_up_process(waiter->task);
+}
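
The file above replaces the per-architecture semaphores with one generic, spinlock-protected counter. As a usage illustration, here is a minimal sketch of a resource pool guarded by a counting semaphore; all names are hypothetical, and note the inverted down_trylock() return convention called out in its kernel-doc.

#include <linux/semaphore.h>
#include <linux/errno.h>

static struct semaphore pool_sem;	/* counts free pool slots */

static void pool_init(void)
{
	sema_init(&pool_sem, 4);	/* allow four concurrent holders */
}

static int pool_take(void)
{
	/* Sleeps when the pool is exhausted; returns -EINTR on a signal. */
	return down_interruptible(&pool_sem);
}

static int pool_take_atomic(void)
{
	/* Inverted convention: non-zero means NOT acquired. */
	if (down_trylock(&pool_sem))
		return -EBUSY;
	return 0;
}

static void pool_put(void)
{
	up(&pool_sem);	/* legal from any context, even interrupts */
}
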
diff --git a/kernel/signal.c b/kernel/signal.c
index 6af1210..cc8303c 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1757,6 +1757,45 @@
 	return 1;
 }
 
+static int ptrace_signal(int signr, siginfo_t *info,
+			 struct pt_regs *regs, void *cookie)
+{
+	if (!(current->ptrace & PT_PTRACED))
+		return signr;
+
+	ptrace_signal_deliver(regs, cookie);
+
+	/* Let the debugger run.  */
+	ptrace_stop(signr, 0, info);
+
+	/* We're back.  Did the debugger cancel the sig?  */
+	signr = current->exit_code;
+	if (signr == 0)
+		return signr;
+
+	current->exit_code = 0;
+
+	/* Update the siginfo structure if the signal has
+	   changed.  If the debugger wanted something
+	   specific in the siginfo structure then it should
+	   have updated *info via PTRACE_SETSIGINFO.  */
+	if (signr != info->si_signo) {
+		info->si_signo = signr;
+		info->si_errno = 0;
+		info->si_code = SI_USER;
+		info->si_pid = task_pid_vnr(current->parent);
+		info->si_uid = current->parent->uid;
+	}
+
+	/* If the (new) signal is now blocked, requeue it.  */
+	if (sigismember(&current->blocked, signr)) {
+		specific_send_sig_info(signr, info, current);
+		signr = 0;
+	}
+
+	return signr;
+}
+
 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
 			  struct pt_regs *regs, void *cookie)
 {
@@ -1785,36 +1824,10 @@
 		if (!signr)
 			break; /* will return 0 */
 
-		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
-			ptrace_signal_deliver(regs, cookie);
-
-			/* Let the debugger run.  */
-			ptrace_stop(signr, 0, info);
-
-			/* We're back.  Did the debugger cancel the sig?  */
-			signr = current->exit_code;
-			if (signr == 0)
+		if (signr != SIGKILL) {
+			signr = ptrace_signal(signr, info, regs, cookie);
+			if (!signr)
 				continue;
-
-			current->exit_code = 0;
-
-			/* Update the siginfo structure if the signal has
-			   changed.  If the debugger wanted something
-			   specific in the siginfo structure then it should
-			   have updated *info via PTRACE_SETSIGINFO.  */
-			if (signr != info->si_signo) {
-				info->si_signo = signr;
-				info->si_errno = 0;
-				info->si_code = SI_USER;
-				info->si_pid = task_pid_vnr(current->parent);
-				info->si_uid = current->parent->uid;
-			}
-
-			/* If the (new) signal is now blocked, requeue it.  */
-			if (sigismember(&current->blocked, signr)) {
-				specific_send_sig_info(signr, info, current);
-				continue;
-			}
 		}
 
 		ka = &current->sighand->action[signr-1];
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 7f60097..73961f3 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -141,8 +141,16 @@
 	}
 
 	if (!list_empty(&watchdog_list)) {
-		__mod_timer(&watchdog_timer,
-			    watchdog_timer.expires + WATCHDOG_INTERVAL);
+		/*
+		 * Cycle through CPUs to check if the CPUs stay
+		 * synchronized to each other.
+		 */
+		int next_cpu = next_cpu(raw_smp_processor_id(), cpu_online_map);
+
+		if (next_cpu >= NR_CPUS)
+			next_cpu = first_cpu(cpu_online_map);
+		watchdog_timer.expires += WATCHDOG_INTERVAL;
+		add_timer_on(&watchdog_timer, next_cpu);
 	}
 	spin_unlock(&watchdog_lock);
 }
@@ -164,7 +172,8 @@
 		if (!started && watchdog) {
 			watchdog_last = watchdog->read();
 			watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
-			add_timer(&watchdog_timer);
+			add_timer_on(&watchdog_timer,
+				     first_cpu(cpu_online_map));
 		}
 	} else {
 		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
@@ -185,7 +194,8 @@
 				watchdog_last = watchdog->read();
 				watchdog_timer.expires =
 					jiffies + WATCHDOG_INTERVAL;
-				add_timer(&watchdog_timer);
+				add_timer_on(&watchdog_timer,
+					     first_cpu(cpu_online_map));
 			}
 		}
 	}
@@ -222,6 +232,18 @@
 }
 
 /**
+ * clocksource_touch_watchdog - Update watchdog
+ *
+ * Update the watchdog after exception contexts such as kgdb so as not
+ * to incorrectly trip the watchdog.
+ *
+ */
+void clocksource_touch_watchdog(void)
+{
+	clocksource_resume_watchdog();
+}
+
+/**
  * clocksource_get_next - Returns the selected clocksource
  *
  */
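
The hunk above makes the clocksource watchdog timer hop across the online CPUs with add_timer_on(), so a per-CPU clocksource such as the TSC is verified on every CPU instead of only wherever the timer happens to run. A condensed sketch of that self-rearming, CPU-hopping pattern, with hypothetical names and the cpumask API of this kernel era:

#include <linux/timer.h>
#include <linux/cpumask.h>
#include <linux/jiffies.h>
#include <linux/smp.h>

static struct timer_list check_timer;

static void check_fn(unsigned long data)
{
	int cpu;

	/* ... perform the per-CPU check here ... */

	cpu = next_cpu(raw_smp_processor_id(), cpu_online_map);
	if (cpu >= NR_CPUS)
		cpu = first_cpu(cpu_online_map);

	/* Re-arm on the next online CPU so each one gets visited. */
	check_timer.expires += HZ;
	add_timer_on(&check_timer, cpu);
}

static void check_start(void)
{
	setup_timer(&check_timer, check_fn, 0);
	check_timer.expires = jiffies + HZ;
	add_timer_on(&check_timer, first_cpu(cpu_online_map));
}
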
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index e1bd50c..fdfa0c7 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -14,7 +14,7 @@
 #include <linux/cpu.h>
 #include <linux/err.h>
 #include <linux/hrtimer.h>
-#include <linux/irq.h>
+#include <linux/interrupt.h>
 #include <linux/percpu.h>
 #include <linux/profile.h>
 #include <linux/sched.h>
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 1bea399..4f38865 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -14,12 +14,14 @@
 #include <linux/cpu.h>
 #include <linux/err.h>
 #include <linux/hrtimer.h>
-#include <linux/irq.h>
+#include <linux/interrupt.h>
 #include <linux/percpu.h>
 #include <linux/profile.h>
 #include <linux/sched.h>
 #include <linux/tick.h>
 
+#include <asm/irq_regs.h>
+
 #include "tick-internal.h"
 
 /*
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index 0258d31..450c049 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -14,7 +14,7 @@
 #include <linux/cpu.h>
 #include <linux/err.h>
 #include <linux/hrtimer.h>
-#include <linux/irq.h>
+#include <linux/interrupt.h>
 #include <linux/percpu.h>
 #include <linux/profile.h>
 #include <linux/sched.h>
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 686da82..69dba0c 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -158,9 +158,8 @@
 	}
 }
 
-static ktime_t tick_nohz_start_idle(int cpu)
+static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
 {
-	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 	ktime_t now, delta;
 
 	now = ktime_get();
@@ -201,8 +200,8 @@
 	local_irq_save(flags);
 
 	cpu = smp_processor_id();
-	now = tick_nohz_start_idle(cpu);
 	ts = &per_cpu(tick_cpu_sched, cpu);
+	now = tick_nohz_start_idle(ts);
 
 	/*
 	 * If this cpu is offline and it is the one which updates
@@ -222,7 +221,6 @@
 	if (need_resched())
 		goto end;
 
-	cpu = smp_processor_id();
 	if (unlikely(local_softirq_pending())) {
 		static int ratelimit;
 
diff --git a/kernel/timer.c b/kernel/timer.c
index b024106..f3d35d4 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1228,13 +1228,6 @@
 	return 0;
 }
 
-/*
- * lockdep: we want to track each per-CPU base as a separate lock-class,
- * but timer-bases are kmalloc()-ed, so we need to attach separate
- * keys to them:
- */
-static struct lock_class_key base_lock_keys[NR_CPUS];
-
 static int __cpuinit init_timers_cpu(int cpu)
 {
 	int j;
@@ -1277,7 +1270,6 @@
 	}
 
 	spin_lock_init(&base->lock);
-	lockdep_set_class(&base->lock, base_lock_keys + cpu);
 
 	for (j = 0; j < TVN_SIZE; j++) {
 		INIT_LIST_HEAD(base->tv5.vec + j);
@@ -1316,8 +1308,8 @@
 	new_base = get_cpu_var(tvec_bases);
 
 	local_irq_disable();
-	double_spin_lock(&new_base->lock, &old_base->lock,
-			 smp_processor_id() < cpu);
+	spin_lock(&new_base->lock);
+	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
 	BUG_ON(old_base->running_timer);
 
@@ -1330,8 +1322,8 @@
 		migrate_timer_list(new_base, old_base->tv5.vec + i);
 	}
 
-	double_spin_unlock(&new_base->lock, &old_base->lock,
-			   smp_processor_id() < cpu);
+	spin_unlock(&old_base->lock);
+	spin_unlock(&new_base->lock);
 	local_irq_enable();
 	put_cpu_var(tvec_bases);
 }
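
The migration path above replaces double_spin_lock() with a plain spin_lock() followed by spin_lock_nested(): SINGLE_DEPTH_NESTING tells lockdep that taking a second lock of the same lock class is intentional, which is also why the per-base lock_class_key array could be deleted. A generic sketch of the idiom, with hypothetical names; in real code the locking order must still be globally consistent (here the source CPU is already dead, so no ABBA deadlock is possible):

#include <linux/spinlock.h>

struct base {
	spinlock_t lock;
	/* ... */
};

static void migrate(struct base *dst, struct base *src)
{
	spin_lock(&dst->lock);
	/* Second lock of the same class: annotate for lockdep. */
	spin_lock_nested(&src->lock, SINGLE_DEPTH_NESTING);

	/* ... move entries from src to dst ... */

	spin_unlock(&src->lock);
	spin_unlock(&dst->lock);
}
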
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ff06611..00ff4d0 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -219,6 +219,7 @@
 	struct timer_list *timer = &dwork->timer;
 	struct work_struct *work = &dwork->work;
 
+	timer_stats_timer_set_start_info(&dwork->timer);
 	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
 		BUG_ON(timer_pending(timer));
 		BUG_ON(!list_empty(&work->entry));
@@ -580,6 +581,7 @@
 int schedule_delayed_work_on(int cpu,
 			struct delayed_work *dwork, unsigned long delay)
 {
+	timer_stats_timer_set_start_info(&dwork->timer);
 	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work_on);
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 0796c1a..95de310 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -211,7 +211,7 @@
 config SLUB_STATS
 	default n
 	bool "Enable SLUB performance statistics"
-	depends on SLUB
+	depends on SLUB && SLUB_DEBUG && SYSFS
 	help
 	  SLUB statistics are useful to debug SLUBs allocation behavior in
 	  order find ways to optimize the allocator. This should never be
@@ -265,16 +265,6 @@
 	 This feature allows mutex semantics violations to be detected and
 	 reported.
 
-config DEBUG_SEMAPHORE
-	bool "Semaphore debugging"
-	depends on DEBUG_KERNEL
-	depends on ALPHA || FRV
-	default n
-	help
-	  If you say Y here then semaphore processing will issue lots of
-	  verbose debugging messages.  If you suspect a semaphore problem or a
-	  kernel hacker asks for this option then say Y.  Otherwise say N.
-
 config DEBUG_LOCK_ALLOC
 	bool "Lock debugging: detect incorrect freeing of live locks"
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
@@ -593,7 +583,7 @@
 	  to find out which userspace is blocking on what kernel operations.
 
 config PROVIDE_OHCI1394_DMA_INIT
-	bool "Provide code for enabling DMA over FireWire early on boot"
+	bool "Remote debugging over FireWire early on boot"
 	depends on PCI && X86
 	help
 	  If you want to debug problems which hang or crash the kernel early
@@ -621,4 +611,17 @@
 
 	  See Documentation/debugging-via-ohci1394.txt for more information.
 
+config FIREWIRE_OHCI_REMOTE_DMA
+	bool "Remote debugging over FireWire with firewire-ohci"
+	depends on FIREWIRE_OHCI
+	help
+	  This option lets you use the FireWire bus for remote debugging
+	  with the help of the firewire-ohci driver. It enables unfiltered
+	  remote DMA in firewire-ohci.
+	  See Documentation/debugging-via-ohci1394.txt for more information.
+
+	  If unsure, say N.
+
 source "samples/Kconfig"
+
+source "lib/Kconfig.kgdb"
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
new file mode 100644
index 0000000..f2e01ac
--- /dev/null
+++ b/lib/Kconfig.kgdb
@@ -0,0 +1,58 @@
+
+menuconfig KGDB
+	bool "KGDB: kernel debugging with remote gdb"
+	select FRAME_POINTER
+	depends on HAVE_ARCH_KGDB
+	depends on DEBUG_KERNEL && EXPERIMENTAL
+	help
+	  If you say Y here, it will be possible to remotely debug the
+	  kernel using gdb.  Documentation of kernel debugger is available
+	  at http://kgdb.sourceforge.net as well as in DocBook form
+	  in Documentation/DocBook/.  If unsure, say N.
+
+config HAVE_ARCH_KGDB_SHADOW_INFO
+	bool
+
+config HAVE_ARCH_KGDB
+	bool
+
+config KGDB_SERIAL_CONSOLE
+	tristate "KGDB: use kgdb over the serial console"
+	depends on KGDB
+	select CONSOLE_POLL
+	select MAGIC_SYSRQ
+	default y
+	help
+	  Share a serial console with kgdb. Sysrq-g must be used
+	  to break in initially.
+
+config KGDB_TESTS
+	bool "KGDB: internal test suite"
+	depends on KGDB
+	default n
+	help
+	  This is a kgdb I/O module specifically designed to test
+	  kgdb's internal functions.  This kgdb I/O module is
+	  intended for the development of new kgdb stubs
+	  as well as regression testing the kgdb internals.
+	  See drivers/misc/kgdbts.c for details about
+	  the tests.  The most basic use of this I/O module is to boot
+	  a kernel with the boot arguments "kgdbwait kgdbts=V1F100".
+
+config KGDB_TESTS_ON_BOOT
+	bool "KGDB: Run tests on boot"
+	depends on KGDB_TESTS
+	default n
+	help
+	  Run the kgdb tests on boot up automatically without the need
+	  to pass in a kernel parameter
+
+config KGDB_TESTS_BOOT_STRING
+	string "KGDB: which internal kgdb tests to run"
+	depends on KGDB_TESTS_ON_BOOT
+	default "V1F100"
+	help
+	  This is the command string to send to the kgdb test suite on
+	  boot.  See drivers/misc/kgdbts.c for detailed
+	  information about other strings you could use beyond the
+	  default of V1F100.
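
As the help texts above suggest, the simplest smoke test is to boot with the kernel arguments "kgdbwait kgdbts=V1F100": "kgdbwait" makes the core stop in kgdb_initial_breakpoint() as soon as an I/O module registers, and the kgdbts test module then runs its default V1F100 string against the stub.
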
diff --git a/lib/Makefile b/lib/Makefile
index 4d059d4..4d7649c 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -29,7 +29,6 @@
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
 lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
-lib-$(CONFIG_SEMAPHORE_SLEEPERS) += semaphore-sleepers.o
 lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
 obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
 obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 812dbf0..fbc11a3 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -8,6 +8,7 @@
 #include <linux/smp_lock.h>
 #include <linux/module.h>
 #include <linux/kallsyms.h>
+#include <asm/semaphore.h>
 
 /*
  * The 'big kernel semaphore'
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index acca490..b80c211 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -8,6 +8,7 @@
  */
 #include <linux/module.h>
 #include <linux/scatterlist.h>
+#include <linux/highmem.h>
 
 /**
  * sg_next - return the next scatterlist entry in a list
@@ -292,3 +293,104 @@
 	return ret;
 }
 EXPORT_SYMBOL(sg_alloc_table);
+
+/**
+ * sg_copy_buffer - Copy data between a linear buffer and an SG list
+ * @sgl:		 The SG list
+ * @nents:		 Number of SG entries
+ * @buf:		 The buffer to copy to or from
+ * @buflen:		 The number of bytes to copy
+ * @to_buffer: 		 transfer direction (non-zero == from an sg list to a
+ * 			 buffer, 0 == from a buffer to an sg list)
+ *
+ * Returns the number of copied bytes.
+ *
+ **/
+static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
+			     void *buf, size_t buflen, int to_buffer)
+{
+	struct scatterlist *sg;
+	size_t buf_off = 0;
+	int i;
+
+	WARN_ON(!irqs_disabled());
+
+	for_each_sg(sgl, sg, nents, i) {
+		struct page *page;
+		int n = 0;
+		unsigned int sg_off = sg->offset;
+		unsigned int sg_copy = sg->length;
+
+		if (sg_copy > buflen)
+			sg_copy = buflen;
+		buflen -= sg_copy;
+
+		while (sg_copy > 0) {
+			unsigned int page_copy;
+			void *p;
+
+			page_copy = PAGE_SIZE - sg_off;
+			if (page_copy > sg_copy)
+				page_copy = sg_copy;
+
+			page = nth_page(sg_page(sg), n);
+			p = kmap_atomic(page, KM_BIO_SRC_IRQ);
+
+			if (to_buffer)
+				memcpy(buf + buf_off, p + sg_off, page_copy);
+			else {
+				memcpy(p + sg_off, buf + buf_off, page_copy);
+				flush_kernel_dcache_page(page);
+			}
+
+			kunmap_atomic(p, KM_BIO_SRC_IRQ);
+
+			buf_off += page_copy;
+			sg_off += page_copy;
+			if (sg_off == PAGE_SIZE) {
+				sg_off = 0;
+				n++;
+			}
+			sg_copy -= page_copy;
+		}
+
+		if (!buflen)
+			break;
+	}
+
+	return buf_off;
+}
+
+/**
+ * sg_copy_from_buffer - Copy from a linear buffer to an SG list
+ * @sgl:		 The SG list
+ * @nents:		 Number of SG entries
+ * @buf:		 Where to copy from
+ * @buflen:		 The number of bytes to copy
+ *
+ * Returns the number of copied bytes.
+ *
+ **/
+size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
+			   void *buf, size_t buflen)
+{
+	return sg_copy_buffer(sgl, nents, buf, buflen, 0);
+}
+EXPORT_SYMBOL(sg_copy_from_buffer);
+
+/**
+ * sg_copy_to_buffer - Copy from an SG list to a linear buffer
+ * @sgl:		 The SG list
+ * @nents:		 Number of SG entries
+ * @buf:		 Where to copy to
+ * @buflen:		 The number of bytes to copy
+ *
+ * Returns the number of copied bytes.
+ *
+ **/
+size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
+			 void *buf, size_t buflen)
+{
+	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
+}
+EXPORT_SYMBOL(sg_copy_to_buffer);
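
As a usage illustration for the two new exports, here is a minimal sketch (hypothetical names) that stages a linear buffer into a one-entry SG list. Note that sg_copy_buffer() WARNs unless interrupts are disabled, because it relies on KM_BIO_SRC_IRQ atomic kmaps:

#include <linux/scatterlist.h>
#include <linux/irqflags.h>

static void stage_buffer(struct page *pg, void *buf, size_t len)
{
	struct scatterlist sg;
	unsigned long flags;
	size_t copied;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, pg, len, 0);

	/* The copy helpers use atomic kmaps; IRQs must be off. */
	local_irq_save(flags);
	copied = sg_copy_from_buffer(&sg, 1, buf, len);
	local_irq_restore(flags);

	/* copied may be short if the SG list is smaller than len. */
}
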
diff --git a/lib/semaphore-sleepers.c b/lib/semaphore-sleepers.c
deleted file mode 100644
index 0198782..0000000
--- a/lib/semaphore-sleepers.c
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * i386 and x86-64 semaphore implementation.
- *
- * (C) Copyright 1999 Linus Torvalds
- *
- * Portions Copyright 1999 Red Hat, Inc.
- *
- *	This program is free software; you can redistribute it and/or
- *	modify it under the terms of the GNU General Public License
- *	as published by the Free Software Foundation; either version
- *	2 of the License, or (at your option) any later version.
- *
- * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
- */
-#include <linux/sched.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <asm/semaphore.h>
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to acquire the semaphore, while the "sleeping"
- * variable is a count of such acquires.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * "sleeping" and the contention routine ordering is protected
- * by the spinlock in the semaphore's waitqueue head.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-
-/*
- * Logic:
- *  - only on a boundary condition do we need to care. When we go
- *    from a negative count to a non-negative, we wake people up.
- *  - when we go from a non-negative count to a negative do we
- *    (a) synchronize with the "sleeper" count and (b) make sure
- *    that we're on the wakeup list before we synchronize so that
- *    we cannot lose wakeup events.
- */
-
-void __up(struct semaphore *sem)
-{
-	wake_up(&sem->wait);
-}
-
-void __sched __down(struct semaphore *sem)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	unsigned long flags;
-
-	tsk->state = TASK_UNINTERRUPTIBLE;
-	spin_lock_irqsave(&sem->wait.lock, flags);
-	add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-	sem->sleepers++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock in
-		 * the wait_queue_head.
-		 */
-		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-		schedule();
-
-		spin_lock_irqsave(&sem->wait.lock, flags);
-		tsk->state = TASK_UNINTERRUPTIBLE;
-	}
-	remove_wait_queue_locked(&sem->wait, &wait);
-	wake_up_locked(&sem->wait);
-	spin_unlock_irqrestore(&sem->wait.lock, flags);
-	tsk->state = TASK_RUNNING;
-}
-
-int __sched __down_interruptible(struct semaphore *sem)
-{
-	int retval = 0;
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	unsigned long flags;
-
-	tsk->state = TASK_INTERRUPTIBLE;
-	spin_lock_irqsave(&sem->wait.lock, flags);
-	add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-	sem->sleepers++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * With signals pending, this turns into
-		 * the trylock failure case - we won't be
-		 * sleeping, and we* can't get the lock as
-		 * it has contention. Just correct the count
-		 * and exit.
-		 */
-		if (signal_pending(current)) {
-			retval = -EINTR;
-			sem->sleepers = 0;
-			atomic_add(sleepers, &sem->count);
-			break;
-		}
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock in
-		 * wait_queue_head. The "-1" is because we're
-		 * still hoping to get the semaphore.
-		 */
-		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-		schedule();
-
-		spin_lock_irqsave(&sem->wait.lock, flags);
-		tsk->state = TASK_INTERRUPTIBLE;
-	}
-	remove_wait_queue_locked(&sem->wait, &wait);
-	wake_up_locked(&sem->wait);
-	spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-	tsk->state = TASK_RUNNING;
-	return retval;
-}
-
-/*
- * Trylock failed - make sure we correct for
- * having decremented the count.
- *
- * We could have done the trylock with a
- * single "cmpxchg" without failure cases,
- * but then it wouldn't work on a 386.
- */
-int __down_trylock(struct semaphore *sem)
-{
-	int sleepers;
-	unsigned long flags;
-
-	spin_lock_irqsave(&sem->wait.lock, flags);
-	sleepers = sem->sleepers + 1;
-	sem->sleepers = 0;
-
-	/*
-	 * Add "everybody else" and us into it. They aren't
-	 * playing, because we own the spinlock in the
-	 * wait_queue_head.
-	 */
-	if (!atomic_add_negative(sleepers, &sem->count)) {
-		wake_up_locked(&sem->wait);
-	}
-
-	spin_unlock_irqrestore(&sem->wait.lock, flags);
-	return 1;
-}
diff --git a/mm/Makefile b/mm/Makefile
index a5b0dd9..18c143b 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -8,7 +8,7 @@
 			   vmalloc.o
 
 obj-y			:= bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
-			   page_alloc.o page-writeback.o pdflush.o \
+			   maccess.o page_alloc.o page-writeback.o pdflush.o \
 			   readahead.o swap.o truncate.o vmscan.o \
 			   prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \
 			   page_isolation.o $(mmu-y)
diff --git a/mm/maccess.c b/mm/maccess.c
new file mode 100644
index 0000000..ac40796
--- /dev/null
+++ b/mm/maccess.c
@@ -0,0 +1,55 @@
+/*
+ * Access kernel memory without faulting.
+ */
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+
+/**
+ * probe_kernel_read(): safely attempt to read from a location
+ * @dst: pointer to the buffer that shall take the data
+ * @src: address to read from
+ * @size: size of the data chunk
+ *
+ * Safely read from address @src to the buffer at @dst.  If a kernel fault
+ * happens, handle that and return -EFAULT.
+ */
+long probe_kernel_read(void *dst, void *src, size_t size)
+{
+	long ret;
+	mm_segment_t old_fs = get_fs();
+
+	set_fs(KERNEL_DS);
+	pagefault_disable();
+	ret = __copy_from_user_inatomic(dst,
+			(__force const void __user *)src, size);
+	pagefault_enable();
+	set_fs(old_fs);
+
+	return ret ? -EFAULT : 0;
+}
+EXPORT_SYMBOL_GPL(probe_kernel_read);
+
+/**
+ * probe_kernel_write(): safely attempt to write to a location
+ * @dst: address to write to
+ * @src: pointer to the data that shall be written
+ * @size: size of the data chunk
+ *
+ * Safely write to address @dst from the buffer at @src.  If a kernel fault
+ * happens, handle that and return -EFAULT.
+ */
+long probe_kernel_write(void *dst, void *src, size_t size)
+{
+	long ret;
+	mm_segment_t old_fs = get_fs();
+
+	set_fs(KERNEL_DS);
+	pagefault_disable();
+	ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
+	pagefault_enable();
+	set_fs(old_fs);
+
+	return ret ? -EFAULT : 0;
+}
+EXPORT_SYMBOL_GPL(probe_kernel_write);
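
A brief usage sketch for the new helpers (the caller is hypothetical): peek at an arbitrary kernel address without risking a fault-induced oops, which is exactly what a debugger memory-read path needs:

#include <linux/uaccess.h>
#include <linux/errno.h>

static int peek_word(void *addr, unsigned long *val)
{
	/* Returns -EFAULT instead of oopsing on a bad address. */
	if (probe_kernel_read(val, addr, sizeof(*val)))
		return -EFAULT;
	return 0;
}
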
diff --git a/mm/slub.c b/mm/slub.c
index acc975f..7f8aaa2 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -837,6 +837,35 @@
 	spin_unlock(&n->list_lock);
 }
 
+/* Tracking of the number of slabs for debugging purposes */
+static inline unsigned long slabs_node(struct kmem_cache *s, int node)
+{
+	struct kmem_cache_node *n = get_node(s, node);
+
+	return atomic_long_read(&n->nr_slabs);
+}
+
+static inline void inc_slabs_node(struct kmem_cache *s, int node)
+{
+	struct kmem_cache_node *n = get_node(s, node);
+
+	/*
+	 * May be called early in order to allocate a slab for the
+	 * kmem_cache_node structure. Solve the chicken-egg
+	 * dilemma by deferring the increment of the count during
+	 * bootstrap (see early_kmem_cache_node_alloc).
+	 */
+	if (!NUMA_BUILD || n)
+		atomic_long_inc(&n->nr_slabs);
+}
+static inline void dec_slabs_node(struct kmem_cache *s, int node)
+{
+	struct kmem_cache_node *n = get_node(s, node);
+
+	atomic_long_dec(&n->nr_slabs);
+}
+
+/* Object debug checks for alloc/free paths */
 static void setup_object_debug(struct kmem_cache *s, struct page *page,
 								void *object)
 {
@@ -1028,6 +1057,11 @@
 	return flags;
 }
 #define slub_debug 0
+
+static inline unsigned long slabs_node(struct kmem_cache *s, int node)
+							{ return 0; }
+static inline void inc_slabs_node(struct kmem_cache *s, int node) {}
+static inline void dec_slabs_node(struct kmem_cache *s, int node) {}
 #endif
 /*
  * Slab allocation and freeing
@@ -1066,7 +1100,6 @@
 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 {
 	struct page *page;
-	struct kmem_cache_node *n;
 	void *start;
 	void *last;
 	void *p;
@@ -1078,9 +1111,7 @@
 	if (!page)
 		goto out;
 
-	n = get_node(s, page_to_nid(page));
-	if (n)
-		atomic_long_inc(&n->nr_slabs);
+	inc_slabs_node(s, page_to_nid(page));
 	page->slab = s;
 	page->flags |= 1 << PG_slab;
 	if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
@@ -1125,6 +1156,8 @@
 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
 		-pages);
 
+	__ClearPageSlab(page);
+	reset_page_mapcount(page);
 	__free_pages(page, s->order);
 }
 
@@ -1151,11 +1184,7 @@
 
 static void discard_slab(struct kmem_cache *s, struct page *page)
 {
-	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
-
-	atomic_long_dec(&n->nr_slabs);
-	reset_page_mapcount(page);
-	__ClearPageSlab(page);
+	dec_slabs_node(s, page_to_nid(page));
 	free_slab(s, page);
 }
 
@@ -1886,15 +1915,18 @@
 	c->node = 0;
 	c->offset = s->offset / sizeof(void *);
 	c->objsize = s->objsize;
+#ifdef CONFIG_SLUB_STATS
+	memset(c->stat, 0, NR_SLUB_STAT_ITEMS * sizeof(unsigned));
+#endif
 }
 
 static void init_kmem_cache_node(struct kmem_cache_node *n)
 {
 	n->nr_partial = 0;
-	atomic_long_set(&n->nr_slabs, 0);
 	spin_lock_init(&n->list_lock);
 	INIT_LIST_HEAD(&n->partial);
 #ifdef CONFIG_SLUB_DEBUG
+	atomic_long_set(&n->nr_slabs, 0);
 	INIT_LIST_HEAD(&n->full);
 #endif
 }
@@ -2063,7 +2095,7 @@
 	init_tracking(kmalloc_caches, n);
 #endif
 	init_kmem_cache_node(n);
-	atomic_long_inc(&n->nr_slabs);
+	inc_slabs_node(kmalloc_caches, node);
 
 	/*
 	 * lockdep requires consistent irq usage for each lock
@@ -2376,7 +2408,7 @@
 		struct kmem_cache_node *n = get_node(s, node);
 
 		n->nr_partial -= free_list(s, n, &n->partial);
-		if (atomic_long_read(&n->nr_slabs))
+		if (slabs_node(s, node))
 			return 1;
 	}
 	free_kmem_cache_nodes(s);
@@ -2409,10 +2441,6 @@
 struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
-#ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
-#endif
-
 static int __init setup_slub_min_order(char *str)
 {
 	get_option(&str, &slub_min_order);
@@ -2472,6 +2500,7 @@
 }
 
 #ifdef CONFIG_ZONE_DMA
+static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
 
 static void sysfs_add_func(struct work_struct *w)
 {
@@ -2688,21 +2717,6 @@
 }
 EXPORT_SYMBOL(kfree);
 
-#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SLABINFO)
-static unsigned long count_partial(struct kmem_cache_node *n)
-{
-	unsigned long flags;
-	unsigned long x = 0;
-	struct page *page;
-
-	spin_lock_irqsave(&n->list_lock, flags);
-	list_for_each_entry(page, &n->partial, lru)
-		x += page->inuse;
-	spin_unlock_irqrestore(&n->list_lock, flags);
-	return x;
-}
-#endif
-
 /*
  * kmem_cache_shrink removes empty slabs from the partial lists and sorts
  * the remaining slabs by the number of items in use. The slabs with the
@@ -2816,7 +2830,7 @@
 			 * and offline_pages() function shouldn't call this
 			 * callback. So, we must fail.
 			 */
-			BUG_ON(atomic_long_read(&n->nr_slabs));
+			BUG_ON(slabs_node(s, offline_node));
 
 			s->node[offline_node] = NULL;
 			kmem_cache_free(kmalloc_caches, n);
@@ -3181,6 +3195,21 @@
 	return slab_alloc(s, gfpflags, node, caller);
 }
 
+#if (defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)) || defined(CONFIG_SLABINFO)
+static unsigned long count_partial(struct kmem_cache_node *n)
+{
+	unsigned long flags;
+	unsigned long x = 0;
+	struct page *page;
+
+	spin_lock_irqsave(&n->list_lock, flags);
+	list_for_each_entry(page, &n->partial, lru)
+		x += page->inuse;
+	spin_unlock_irqrestore(&n->list_lock, flags);
+	return x;
+}
+#endif
+
 #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
 static int validate_slab(struct kmem_cache *s, struct page *page,
 						unsigned long *map)
@@ -3979,10 +4008,12 @@
 
 	len = sprintf(buf, "%lu", sum);
 
+#ifdef CONFIG_SMP
 	for_each_online_cpu(cpu) {
 		if (data[cpu] && len < PAGE_SIZE - 20)
-			len += sprintf(buf + len, " c%d=%u", cpu, data[cpu]);
+			len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
 	}
+#endif
 	kfree(data);
 	return len + sprintf(buf + len, "\n");
 }
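
The slabs_node()/inc_slabs_node()/dec_slabs_node() wrappers introduced above follow a common kernel idiom: the real accounting is compiled under CONFIG_SLUB_DEBUG while the other branch supplies inline no-op stubs, so call sites such as new_slab() and discard_slab() stay free of #ifdefs. A generic sketch of the idiom, with hypothetical names:

#include <asm/atomic.h>

struct foo_node {
	atomic_long_t nr_items;
};

#ifdef CONFIG_FOO_DEBUG
/* Real accounting when the debug option is configured in. */
static inline void inc_items(struct foo_node *n)
{
	atomic_long_inc(&n->nr_items);
}
#else
/* No-op stub: callers need no #ifdef clutter. */
static inline void inc_items(struct foo_node *n) { }
#endif
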
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index ffbf22a..8ea283e 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -1573,7 +1573,6 @@
 	send_wr.sg_list = req->rl_send_iov;
 	send_wr.num_sge = req->rl_niovs;
 	send_wr.opcode = IB_WR_SEND;
-	send_wr.imm_data = 0;
 	if (send_wr.num_sge == 4)	/* no need to sync any pad (constant) */
 		ib_dma_sync_single_for_device(ia->ri_id->device,
 			req->rl_send_iov[3].addr, req->rl_send_iov[3].length,
diff --git a/security/Kconfig b/security/Kconfig
index 5dfc206..49b51f9 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -113,10 +113,12 @@
 	  from userspace allocation.  Keeping a user from writing to low pages
 	  can help reduce the impact of kernel NULL pointer bugs.
 
-	  For most users with lots of address space a value of 65536 is
-	  reasonable and should cause no problems.  Programs which use vm86
-	  functionality would either need additional permissions from either
-	  the LSM or the capabilities module or have this protection disabled.
+	  For most ia64, ppc64 and x86 users with lots of address space
+	  a value of 65536 is reasonable and should cause no problems.
+	  On arm and other archs it should not be higher than 32768.
+	  Programs which use vm86 functionality would either need additional
+	  permissions from either the LSM or the capabilities module or have
+	  this protection disabled.
 
 	  This value can be changed after boot using the
 	  /proc/sys/vm/mmap_min_addr tunable.
diff --git a/security/commoncap.c b/security/commoncap.c
index 06d5c94..8529057 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -267,7 +267,7 @@
 	rc = cap_from_disk(&vcaps, bprm, rc);
 	if (rc)
 		printk(KERN_NOTICE "%s: cap_from_disk returned %d for %s\n",
-			__FUNCTION__, rc, bprm->filename);
+			__func__, rc, bprm->filename);
 
 out:
 	dput(dentry);
@@ -302,7 +302,7 @@
 	ret = get_file_caps(bprm);
 	if (ret)
 		printk(KERN_NOTICE "%s: get_file_caps returned %d for %s\n",
-			__FUNCTION__, ret, bprm->filename);
+			__func__, ret, bprm->filename);
 
 	/*  To support inheritance of root-permissions and suid-root
 	 *  executables under compatibility mode, we raise all three
diff --git a/security/keys/internal.h b/security/keys/internal.h
index d36d693..7d894ef 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -22,16 +22,16 @@
 
 #ifdef __KDEBUG
 #define kenter(FMT, ...) \
-	printk(KERN_DEBUG "==> %s("FMT")\n", __FUNCTION__, ##__VA_ARGS__)
+	printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
 #define kleave(FMT, ...) \
-	printk(KERN_DEBUG "<== %s()"FMT"\n", __FUNCTION__, ##__VA_ARGS__)
+	printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
 #define kdebug(FMT, ...) \
 	printk(KERN_DEBUG "xxx" FMT"yyy\n", ##__VA_ARGS__)
 #else
 #define kenter(FMT, ...) \
-	no_printk(KERN_DEBUG "==> %s("FMT")\n", __FUNCTION__, ##__VA_ARGS__)
+	no_printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
 #define kleave(FMT, ...) \
-	no_printk(KERN_DEBUG "<== %s()"FMT"\n", __FUNCTION__, ##__VA_ARGS__)
+	no_printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
 #define kdebug(FMT, ...) \
 	no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
 #endif
diff --git a/security/root_plug.c b/security/root_plug.c
index 870f130..6112d14 100644
--- a/security/root_plug.c
+++ b/security/root_plug.c
@@ -49,7 +49,7 @@
 	do {							\
 		if (debug)					\
 			printk(KERN_DEBUG "%s: %s: " fmt ,	\
-				MY_NAME , __FUNCTION__ , 	\
+				MY_NAME , __func__ , 	\
 				## arg);			\
 	} while (0)
 
diff --git a/security/security.c b/security/security.c
index c9ff7d1..54affd0 100644
--- a/security/security.c
+++ b/security/security.c
@@ -57,7 +57,7 @@
 
 	if (verify(&dummy_security_ops)) {
 		printk(KERN_ERR "%s could not verify "
-		       "dummy_security_ops structure.\n", __FUNCTION__);
+		       "dummy_security_ops structure.\n", __func__);
 		return -EIO;
 	}
 
@@ -82,7 +82,7 @@
 {
 	if (verify(ops)) {
 		printk(KERN_DEBUG "%s could not verify "
-		       "security_operations structure.\n", __FUNCTION__);
+		       "security_operations structure.\n", __func__);
 		return -EINVAL;
 	}
 
@@ -110,13 +110,13 @@
 {
 	if (verify(ops)) {
 		printk(KERN_INFO "%s could not verify "
-		       "security operations.\n", __FUNCTION__);
+		       "security operations.\n", __func__);
 		return -EINVAL;
 	}
 
 	if (ops == security_ops) {
 		printk(KERN_INFO "%s security operations "
-		       "already registered.\n", __FUNCTION__);
+		       "already registered.\n", __func__);
 		return -EINVAL;
 	}
 
diff --git a/security/selinux/Kconfig b/security/selinux/Kconfig
index 2b517d6..a436d1c 100644
--- a/security/selinux/Kconfig
+++ b/security/selinux/Kconfig
@@ -145,7 +145,7 @@
 config SECURITY_SELINUX_POLICYDB_VERSION_MAX_VALUE
 	int "NSA SELinux maximum supported policy format version value"
 	depends on SECURITY_SELINUX_POLICYDB_VERSION_MAX
-	range 15 22
+	range 15 23
 	default 19
 	help
 	  This option sets the value for the maximum policy format version
diff --git a/security/selinux/Makefile b/security/selinux/Makefile
index 00afd85..d47fc5e 100644
--- a/security/selinux/Makefile
+++ b/security/selinux/Makefile
@@ -11,6 +11,7 @@
 	     nlmsgtab.o \
 	     netif.o \
 	     netnode.o \
+	     netport.o \
 	     exports.o
 
 selinux-$(CONFIG_SECURITY_NETWORK_XFRM) += xfrm.o
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 187964e..a4fc6e6 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -871,6 +871,8 @@
 	int rc = 0;
 	u32 denied;
 
+	BUG_ON(!requested);
+
 	rcu_read_lock();
 
 	node = avc_lookup(ssid, tsid, tclass, requested);
@@ -890,13 +892,14 @@
 
 	denied = requested & ~(p_ae->avd.allowed);
 
-	if (!requested || denied) {
-		if (selinux_enforcing || (flags & AVC_STRICT))
+	if (denied) {
+		if (flags & AVC_STRICT)
 			rc = -EACCES;
+		else if (!selinux_enforcing || security_permissive_sid(ssid))
+			avc_update_node(AVC_CALLBACK_GRANT, requested, ssid,
+					tsid, tclass);
 		else
-			if (node)
-				avc_update_node(AVC_CALLBACK_GRANT,requested,
-						ssid,tsid,tclass);
+			rc = -EACCES;
 	}
 
 	rcu_read_unlock();
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index d39b59c..34f2d46 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -80,6 +80,7 @@
 #include "objsec.h"
 #include "netif.h"
 #include "netnode.h"
+#include "netport.h"
 #include "xfrm.h"
 #include "netlabel.h"
 
@@ -161,8 +162,7 @@
 	if (!tsec)
 		return -ENOMEM;
 
-	tsec->task = task;
-	tsec->osid = tsec->sid = tsec->ptrace_sid = SECINITSID_UNLABELED;
+	tsec->osid = tsec->sid = SECINITSID_UNLABELED;
 	task->security = tsec;
 
 	return 0;
@@ -218,7 +218,6 @@
 	if (!fsec)
 		return -ENOMEM;
 
-	fsec->file = file;
 	fsec->sid = tsec->sid;
 	fsec->fown_sid = tsec->sid;
 	file->f_security = fsec;
@@ -275,12 +274,11 @@
 	if (!ssec)
 		return -ENOMEM;
 
-	ssec->sk = sk;
 	ssec->peer_sid = SECINITSID_UNLABELED;
 	ssec->sid = SECINITSID_UNLABELED;
 	sk->sk_security = ssec;
 
-	selinux_netlbl_sk_security_init(ssec, family);
+	selinux_netlbl_sk_security_reset(ssec, family);
 
 	return 0;
 }
@@ -324,10 +322,10 @@
 };
 
 static match_table_t tokens = {
-	{Opt_context, "context=%s"},
-	{Opt_fscontext, "fscontext=%s"},
-	{Opt_defcontext, "defcontext=%s"},
-	{Opt_rootcontext, "rootcontext=%s"},
+	{Opt_context, CONTEXT_STR "%s"},
+	{Opt_fscontext, FSCONTEXT_STR "%s"},
+	{Opt_defcontext, DEFCONTEXT_STR "%s"},
+	{Opt_rootcontext, ROOTCONTEXT_STR "%s"},
 	{Opt_error, NULL},
 };
 
@@ -671,7 +669,7 @@
 	rc = security_fs_use(sb->s_type->name, &sbsec->behavior, &sbsec->sid);
 	if (rc) {
 		printk(KERN_WARNING "%s: security_fs_use(%s) returned %d\n",
-		       __FUNCTION__, sb->s_type->name, rc);
+		       __func__, sb->s_type->name, rc);
 		goto out;
 	}
 
@@ -1137,7 +1135,7 @@
 		}
 		if (!dentry) {
 			printk(KERN_WARNING "%s:  no dentry for dev=%s "
-			       "ino=%ld\n", __FUNCTION__, inode->i_sb->s_id,
+			       "ino=%ld\n", __func__, inode->i_sb->s_id,
 			       inode->i_ino);
 			goto out_unlock;
 		}
@@ -1175,7 +1173,7 @@
 		if (rc < 0) {
 			if (rc != -ENODATA) {
 				printk(KERN_WARNING "%s:  getxattr returned "
-				       "%d for dev=%s ino=%ld\n", __FUNCTION__,
+				       "%d for dev=%s ino=%ld\n", __func__,
 				       -rc, inode->i_sb->s_id, inode->i_ino);
 				kfree(context);
 				goto out_unlock;
@@ -1190,7 +1188,7 @@
 			if (rc) {
 				printk(KERN_WARNING "%s:  context_to_sid(%s) "
 				       "returned %d for dev=%s ino=%ld\n",
-				       __FUNCTION__, context, -rc,
+				       __func__, context, -rc,
 				       inode->i_sb->s_id, inode->i_ino);
 				kfree(context);
 				/* Leave with the unlabeled SID */
@@ -1618,6 +1616,35 @@
 	return av;
 }
 
+/*
+ * Convert a file mask to an access vector and include the correct
+ * open permission.
+ */
+static inline u32 open_file_mask_to_av(int mode, int mask)
+{
+	u32 av = file_mask_to_av(mode, mask);
+
+	if (selinux_policycap_openperm) {
+		/*
+		 * lnk files and socks do not really have an 'open'
+		 */
+		if (S_ISREG(mode))
+			av |= FILE__OPEN;
+		else if (S_ISCHR(mode))
+			av |= CHR_FILE__OPEN;
+		else if (S_ISBLK(mode))
+			av |= BLK_FILE__OPEN;
+		else if (S_ISFIFO(mode))
+			av |= FIFO_FILE__OPEN;
+		else if (S_ISDIR(mode))
+			av |= DIR__OPEN;
+		else
+			printk(KERN_ERR "SELinux: WARNING: inside open_file_mask_to_av "
+				"with unknown mode:%x\n", mode);
+	}
+	return av;
+}
+
 /* Convert a Linux file to an access vector. */
 static inline u32 file_to_av(struct file *file)
 {
@@ -1645,19 +1672,13 @@
 
 static int selinux_ptrace(struct task_struct *parent, struct task_struct *child)
 {
-	struct task_security_struct *psec = parent->security;
-	struct task_security_struct *csec = child->security;
 	int rc;
 
 	rc = secondary_ops->ptrace(parent,child);
 	if (rc)
 		return rc;
 
-	rc = task_has_perm(parent, child, PROCESS__PTRACE);
-	/* Save the SID of the tracing process for later use in apply_creds. */
-	if (!(child->ptrace & PT_PTRACED) && !rc)
-		csec->ptrace_sid = psec->sid;
-	return rc;
+	return task_has_perm(parent, child, PROCESS__PTRACE);
 }
 
 static int selinux_capget(struct task_struct *target, kernel_cap_t *effective,
@@ -1879,6 +1900,22 @@
 	return __vm_enough_memory(mm, pages, cap_sys_admin);
 }
 
+/**
+ * task_tracer_task - return the task that is tracing the given task
+ * @task:		task to consider
+ *
+ * Returns NULL if no one is tracing @task, or the &struct task_struct
+ * pointer to its tracer.
+ *
+ * Must be called under rcu_read_lock().
+ */
+static struct task_struct *task_tracer_task(struct task_struct *task)
+{
+	if (task->ptrace & PT_PTRACED)
+		return rcu_dereference(task->parent);
+	return NULL;
+}
+
 /* binprm security operations */
 
 static int selinux_bprm_alloc_security(struct linux_binprm *bprm)
@@ -1889,7 +1926,6 @@
 	if (!bsec)
 		return -ENOMEM;
 
-	bsec->bprm = bprm;
 	bsec->sid = SECINITSID_UNLABELED;
 	bsec->set = 0;
 
@@ -2126,12 +2162,25 @@
 		/* Check for ptracing, and update the task SID if ok.
 		   Otherwise, leave SID unchanged and kill. */
 		if (unsafe & (LSM_UNSAFE_PTRACE | LSM_UNSAFE_PTRACE_CAP)) {
-			rc = avc_has_perm(tsec->ptrace_sid, sid,
-					  SECCLASS_PROCESS, PROCESS__PTRACE,
-					  NULL);
-			if (rc) {
-				bsec->unsafe = 1;
-				return;
+			struct task_struct *tracer;
+			struct task_security_struct *sec;
+			u32 ptsid = 0;
+
+			rcu_read_lock();
+			tracer = task_tracer_task(current);
+			if (likely(tracer != NULL)) {
+				sec = tracer->security;
+				ptsid = sec->sid;
+			}
+			rcu_read_unlock();
+
+			if (ptsid != 0) {
+				rc = avc_has_perm(ptsid, sid, SECCLASS_PROCESS,
+						  PROCESS__PTRACE, NULL);
+				if (rc) {
+					bsec->unsafe = 1;
+					return;
+				}
 			}
 		}
 		tsec->sid = sid;
@@ -2239,10 +2288,10 @@
 
 static inline int selinux_option(char *option, int len)
 {
-	return (match_prefix("context=", sizeof("context=")-1, option, len) ||
-	        match_prefix("fscontext=", sizeof("fscontext=")-1, option, len) ||
-	        match_prefix("defcontext=", sizeof("defcontext=")-1, option, len) ||
-		match_prefix("rootcontext=", sizeof("rootcontext=")-1, option, len));
+	return (match_prefix(CONTEXT_STR, sizeof(CONTEXT_STR)-1, option, len) ||
+		match_prefix(FSCONTEXT_STR, sizeof(FSCONTEXT_STR)-1, option, len) ||
+		match_prefix(DEFCONTEXT_STR, sizeof(DEFCONTEXT_STR)-1, option, len) ||
+		match_prefix(ROOTCONTEXT_STR, sizeof(ROOTCONTEXT_STR)-1, option, len));
 }
 
 static inline void take_option(char **to, char *from, int *first, int len)
@@ -2412,7 +2461,7 @@
 			printk(KERN_WARNING "%s:  "
 			       "security_transition_sid failed, rc=%d (dev=%s "
 			       "ino=%ld)\n",
-			       __FUNCTION__,
+			       __func__,
 			       -rc, inode->i_sb->s_id, inode->i_ino);
 			return rc;
 		}
@@ -2536,7 +2585,7 @@
 	}
 
 	return inode_has_perm(current, inode,
-			       file_mask_to_av(inode->i_mode, mask), NULL);
+			       open_file_mask_to_av(inode->i_mode, mask), NULL);
 }
 
 static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr)
@@ -2646,7 +2695,7 @@
 	rc = security_context_to_sid(value, size, &newsid);
 	if (rc) {
 		printk(KERN_WARNING "%s:  unable to obtain SID for context "
-		       "%s, rc=%d\n", __FUNCTION__, (char*)value, -rc);
+		       "%s, rc=%d\n", __func__, (char *)value, -rc);
 		return;
 	}
 
@@ -3087,11 +3136,6 @@
 	tsec2->keycreate_sid = tsec1->keycreate_sid;
 	tsec2->sockcreate_sid = tsec1->sockcreate_sid;
 
-	/* Retain ptracer SID across fork, if any.
-	   This will be reset by the ptrace hook upon any
-	   subsequent ptrace_attach operations. */
-	tsec2->ptrace_sid = tsec1->ptrace_sid;
-
 	return 0;
 }
 
@@ -3627,10 +3671,8 @@
 			inet_get_local_port_range(&low, &high);
 
 			if (snum < max(PROT_SOCK, low) || snum > high) {
-				err = security_port_sid(sk->sk_family,
-							sk->sk_type,
-							sk->sk_protocol, snum,
-							&sid);
+				err = sel_netport_sid(sk->sk_protocol,
+						      snum, &sid);
 				if (err)
 					goto out;
 				AVC_AUDIT_DATA_INIT(&ad,NET);
@@ -3718,8 +3760,7 @@
 			snum = ntohs(addr6->sin6_port);
 		}
 
-		err = security_port_sid(sk->sk_family, sk->sk_type,
-					sk->sk_protocol, snum, &sid);
+		err = sel_netport_sid(sk->sk_protocol, snum, &sid);
 		if (err)
 			goto out;
 
@@ -3950,9 +3991,8 @@
 
 	if (!recv_perm)
 		return 0;
-	err = security_port_sid(sk->sk_family, sk->sk_type,
-				sk->sk_protocol, ntohs(ad->u.net.sport),
-				&port_sid);
+	err = sel_netport_sid(sk->sk_protocol,
+			      ntohs(ad->u.net.sport), &port_sid);
 	if (unlikely(err)) {
 		printk(KERN_WARNING
 		       "SELinux: failure in"
@@ -4139,7 +4179,7 @@
 	newssec->peer_sid = ssec->peer_sid;
 	newssec->sclass = ssec->sclass;
 
-	selinux_netlbl_sk_security_clone(ssec, newssec);
+	selinux_netlbl_sk_security_reset(newssec, newsk->sk_family);
 }
 
 static void selinux_sk_getsecid(struct sock *sk, u32 *secid)
@@ -4373,9 +4413,8 @@
 	if (send_perm != 0)
 		return 0;
 
-	err = security_port_sid(sk->sk_family, sk->sk_type,
-				sk->sk_protocol, ntohs(ad->u.net.dport),
-				&port_sid);
+	err = sel_netport_sid(sk->sk_protocol,
+			      ntohs(ad->u.net.dport), &port_sid);
 	if (unlikely(err)) {
 		printk(KERN_WARNING
 		       "SELinux: failure in"
@@ -4561,7 +4600,6 @@
 		return -ENOMEM;
 
 	isec->sclass = sclass;
-	isec->ipc_perm = perm;
 	isec->sid = tsec->sid;
 	perm->security = isec;
 
@@ -4583,7 +4621,6 @@
 	if (!msec)
 		return -ENOMEM;
 
-	msec->msg = msg;
 	msec->sid = SECINITSID_UNLABELED;
 	msg->security = msec;
 
@@ -4994,14 +5031,14 @@
 {
 	if (secondary_ops != original_ops) {
 		printk(KERN_ERR "%s:  There is already a secondary security "
-		       "module registered.\n", __FUNCTION__);
+		       "module registered.\n", __func__);
 		return -EINVAL;
  	}
 
 	secondary_ops = ops;
 
 	printk(KERN_INFO "%s:  Registering secondary module %s\n",
-	       __FUNCTION__,
+	       __func__,
 	       name);
 
 	return 0;
@@ -5057,6 +5094,7 @@
 			       char *name, void *value, size_t size)
 {
 	struct task_security_struct *tsec;
+	struct task_struct *tracer;
 	u32 sid = 0;
 	int error;
 	char *str = value;
@@ -5145,18 +5183,24 @@
 		/* Check for ptracing, and update the task SID if ok.
 		   Otherwise, leave SID unchanged and fail. */
 		task_lock(p);
-		if (p->ptrace & PT_PTRACED) {
-			error = avc_has_perm_noaudit(tsec->ptrace_sid, sid,
+		rcu_read_lock();
+		tracer = task_tracer_task(p);
+		if (tracer != NULL) {
+			struct task_security_struct *ptsec = tracer->security;
+			u32 ptsid = ptsec->sid;
+			rcu_read_unlock();
+			error = avc_has_perm_noaudit(ptsid, sid,
 						     SECCLASS_PROCESS,
 						     PROCESS__PTRACE, 0, &avd);
 			if (!error)
 				tsec->sid = sid;
 			task_unlock(p);
-			avc_audit(tsec->ptrace_sid, sid, SECCLASS_PROCESS,
+			avc_audit(ptsid, sid, SECCLASS_PROCESS,
 				  PROCESS__PTRACE, &avd, error, NULL);
 			if (error)
 				return error;
 		} else {
+			rcu_read_unlock();
 			tsec->sid = sid;
 			task_unlock(p);
 		}
@@ -5194,7 +5238,6 @@
 	if (!ksec)
 		return -ENOMEM;
 
-	ksec->obj = k;
 	if (tsec->keycreate_sid)
 		ksec->sid = tsec->keycreate_sid;
 	else
@@ -5631,5 +5674,3 @@
 	return 0;
 }
 #endif
-
-
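
Two themes in the hooks.c changes are worth a note. First,
open_file_mask_to_av() adds the class-specific "open" bit only when the
loaded policy declares the open_perms capability, so existing policies see
no new denials. A hypothetical standalone version of the mode-to-bit
mapping; the EX_*__OPEN constants are illustrative stand-ins for the
generated av_permissions.h values:

    #include <sys/stat.h>

    typedef unsigned int u32;

    /* stand-ins for the per-class "open" bits in av_permissions.h */
    #define EX_FILE__OPEN       0x00100000U
    #define EX_DIR__OPEN        0x00400000U
    #define EX_CHR_FILE__OPEN   0x00100000U
    #define EX_BLK_FILE__OPEN   0x00020000U
    #define EX_FIFO_FILE__OPEN  0x00020000U

    static u32 open_bit_for_mode(mode_t mode)
    {
        if (S_ISREG(mode))
            return EX_FILE__OPEN;
        if (S_ISDIR(mode))
            return EX_DIR__OPEN;
        if (S_ISCHR(mode))
            return EX_CHR_FILE__OPEN;
        if (S_ISBLK(mode))
            return EX_BLK_FILE__OPEN;
        if (S_ISFIFO(mode))
            return EX_FIFO_FILE__OPEN;
        return 0;  /* symlinks and sockets have no "open" permission */
    }

Second, the cached ptrace_sid is gone: both apply_creds and setprocattr now
look up the live tracer with task_tracer_task() under rcu_read_lock() and
read its SID directly, closing the window in which a cached SID could go
stale across detach/reattach.
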
diff --git a/security/selinux/include/av_perm_to_string.h b/security/selinux/include/av_perm_to_string.h
index d569669..1223b4f 100644
--- a/security/selinux/include/av_perm_to_string.h
+++ b/security/selinux/include/av_perm_to_string.h
@@ -14,12 +14,17 @@
    S_(SECCLASS_DIR, DIR__REPARENT, "reparent")
    S_(SECCLASS_DIR, DIR__SEARCH, "search")
    S_(SECCLASS_DIR, DIR__RMDIR, "rmdir")
+   S_(SECCLASS_DIR, DIR__OPEN, "open")
    S_(SECCLASS_FILE, FILE__EXECUTE_NO_TRANS, "execute_no_trans")
    S_(SECCLASS_FILE, FILE__ENTRYPOINT, "entrypoint")
    S_(SECCLASS_FILE, FILE__EXECMOD, "execmod")
+   S_(SECCLASS_FILE, FILE__OPEN, "open")
    S_(SECCLASS_CHR_FILE, CHR_FILE__EXECUTE_NO_TRANS, "execute_no_trans")
    S_(SECCLASS_CHR_FILE, CHR_FILE__ENTRYPOINT, "entrypoint")
    S_(SECCLASS_CHR_FILE, CHR_FILE__EXECMOD, "execmod")
+   S_(SECCLASS_CHR_FILE, CHR_FILE__OPEN, "open")
+   S_(SECCLASS_BLK_FILE, BLK_FILE__OPEN, "open")
+   S_(SECCLASS_FIFO_FILE, FIFO_FILE__OPEN, "open")
    S_(SECCLASS_FD, FD__USE, "use")
    S_(SECCLASS_TCP_SOCKET, TCP_SOCKET__CONNECTTO, "connectto")
    S_(SECCLASS_TCP_SOCKET, TCP_SOCKET__NEWCONN, "newconn")
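
The S_() entries form an X-macro table: each consumer defines S_() to the
shape it needs and then includes this header. A sketch of how the avc audit
code consumes it, assuming the SECCLASS_* and permission constants are in
scope (field names simplified for illustration):

    typedef unsigned short u16;
    typedef unsigned int u32;

    static const struct {
        u16 tclass;       /* security class the permission belongs to */
        u32 value;        /* single-bit access-vector value */
        const char *name; /* human-readable name for audit records */
    } av_perm_to_string[] = {
    #define S_(c, v, s) { c, v, s },
    #include "av_perm_to_string.h"
    #undef S_
    };
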
diff --git a/security/selinux/include/av_permissions.h b/security/selinux/include/av_permissions.h
index 75b4131..c4c5116 100644
--- a/security/selinux/include/av_permissions.h
+++ b/security/selinux/include/av_permissions.h
@@ -79,6 +79,7 @@
 #define DIR__REPARENT                             0x00080000UL
 #define DIR__SEARCH                               0x00100000UL
 #define DIR__RMDIR                                0x00200000UL
+#define DIR__OPEN                                 0x00400000UL
 #define FILE__IOCTL                               0x00000001UL
 #define FILE__READ                                0x00000002UL
 #define FILE__WRITE                               0x00000004UL
@@ -99,6 +100,7 @@
 #define FILE__EXECUTE_NO_TRANS                    0x00020000UL
 #define FILE__ENTRYPOINT                          0x00040000UL
 #define FILE__EXECMOD                             0x00080000UL
+#define FILE__OPEN                                0x00100000UL
 #define LNK_FILE__IOCTL                           0x00000001UL
 #define LNK_FILE__READ                            0x00000002UL
 #define LNK_FILE__WRITE                           0x00000004UL
@@ -136,6 +138,7 @@
 #define CHR_FILE__EXECUTE_NO_TRANS                0x00020000UL
 #define CHR_FILE__ENTRYPOINT                      0x00040000UL
 #define CHR_FILE__EXECMOD                         0x00080000UL
+#define CHR_FILE__OPEN                            0x00100000UL
 #define BLK_FILE__IOCTL                           0x00000001UL
 #define BLK_FILE__READ                            0x00000002UL
 #define BLK_FILE__WRITE                           0x00000004UL
@@ -153,6 +156,7 @@
 #define BLK_FILE__SWAPON                          0x00004000UL
 #define BLK_FILE__QUOTAON                         0x00008000UL
 #define BLK_FILE__MOUNTON                         0x00010000UL
+#define BLK_FILE__OPEN                            0x00020000UL
 #define SOCK_FILE__IOCTL                          0x00000001UL
 #define SOCK_FILE__READ                           0x00000002UL
 #define SOCK_FILE__WRITE                          0x00000004UL
@@ -187,6 +191,7 @@
 #define FIFO_FILE__SWAPON                         0x00004000UL
 #define FIFO_FILE__QUOTAON                        0x00008000UL
 #define FIFO_FILE__MOUNTON                        0x00010000UL
+#define FIFO_FILE__OPEN                           0x00020000UL
 #define FD__USE                                   0x00000001UL
 #define SOCKET__IOCTL                             0x00000001UL
 #define SOCKET__READ                              0x00000002UL
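
Each new *__OPEN definition claims the next free bit in its class's 32-bit
access vector, which is why the value differs per class: FILE already had
20 permissions (bits 0-19), so FILE__OPEN is bit 20, while BLK_FILE had 17,
putting BLK_FILE__OPEN at bit 17. A requested access vector is simply the
OR of such bits; for instance, assuming this header is included:

    typedef unsigned int u32;

    /* read plus open on a regular file: 0x00000002 | 0x00100000 */
    u32 requested = FILE__READ | FILE__OPEN;  /* == 0x00100002 */
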
diff --git a/security/selinux/include/netlabel.h b/security/selinux/include/netlabel.h
index 00a2809..9a9e7cd 100644
--- a/security/selinux/include/netlabel.h
+++ b/security/selinux/include/netlabel.h
@@ -41,10 +41,6 @@
 
 void selinux_netlbl_sk_security_reset(struct sk_security_struct *ssec,
 				      int family);
-void selinux_netlbl_sk_security_init(struct sk_security_struct *ssec,
-				     int family);
-void selinux_netlbl_sk_security_clone(struct sk_security_struct *ssec,
-				      struct sk_security_struct *newssec);
 
 int selinux_netlbl_skbuff_getsid(struct sk_buff *skb,
 				 u16 family,
@@ -73,18 +69,6 @@
 {
 	return;
 }
-static inline void selinux_netlbl_sk_security_init(
-	                                       struct sk_security_struct *ssec,
-					       int family)
-{
-	return;
-}
-static inline void selinux_netlbl_sk_security_clone(
-	                                    struct sk_security_struct *ssec,
-					    struct sk_security_struct *newssec)
-{
-	return;
-}
 
 static inline int selinux_netlbl_skbuff_getsid(struct sk_buff *skb,
 					       u16 family,
diff --git a/security/selinux/include/netport.h b/security/selinux/include/netport.h
new file mode 100644
index 0000000..8991752
--- /dev/null
+++ b/security/selinux/include/netport.h
@@ -0,0 +1,31 @@
+/*
+ * Network port table
+ *
+ * SELinux must keep a mapping of network ports to labels/SIDs.  This
+ * mapping is maintained as part of the normal policy but a fast cache is
+ * needed to reduce the lookup overhead.
+ *
+ * Author: Paul Moore <paul.moore@hp.com>
+ *
+ */
+
+/*
+ * (c) Copyright Hewlett-Packard Development Company, L.P., 2008
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _SELINUX_NETPORT_H
+#define _SELINUX_NETPORT_H
+
+int sel_netport_sid(u8 protocol, u16 pnum, u32 *sid);
+
+#endif
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index c6c2bb4..300b61b 100644
--- a/security/selinux/include/objsec.h
+++ b/security/selinux/include/objsec.h
@@ -28,14 +28,12 @@
 #include "avc.h"
 
 struct task_security_struct {
-	struct task_struct *task;      /* back pointer to task object */
 	u32 osid;            /* SID prior to last execve */
 	u32 sid;             /* current SID */
 	u32 exec_sid;        /* exec SID */
 	u32 create_sid;      /* fscreate SID */
 	u32 keycreate_sid;   /* keycreate SID */
 	u32 sockcreate_sid;  /* fscreate SID */
-	u32 ptrace_sid;      /* SID of ptrace parent */
 };
 
 struct inode_security_struct {
@@ -50,7 +48,6 @@
 };
 
 struct file_security_struct {
-	struct file *file;              /* back pointer to file object */
 	u32 sid;              /* SID of open file description */
 	u32 fown_sid;         /* SID of file owner (for SIGIO) */
 	u32 isid;             /* SID of inode at the time of file open */
@@ -73,18 +70,15 @@
 };
 
 struct msg_security_struct {
-	struct msg_msg *msg;		/* back pointer */
 	u32 sid;              /* SID of message */
 };
 
 struct ipc_security_struct {
-	struct kern_ipc_perm *ipc_perm; /* back pointer */
 	u16 sclass;	/* security class of this object */
 	u32 sid;              /* SID of IPC resource */
 };
 
 struct bprm_security_struct {
-	struct linux_binprm *bprm;     /* back pointer to bprm object */
 	u32 sid;                       /* SID for transformed process */
 	unsigned char set;
 
@@ -109,8 +103,13 @@
 	u16 family;			/* address family */
 };
 
+struct netport_security_struct {
+	u32 sid;			/* SID for this port */
+	u16 port;			/* port number */
+	u8 protocol;			/* transport protocol */
+};
+
 struct sk_security_struct {
-	struct sock *sk;		/* back pointer to sk object */
 	u32 sid;			/* SID of this object */
 	u32 peer_sid;			/* SID of peer */
 	u16 sclass;			/* sock security class */
@@ -120,12 +119,10 @@
 		NLBL_REQUIRE,
 		NLBL_LABELED,
 	} nlbl_state;
-	spinlock_t nlbl_lock;		/* protects nlbl_state */
 #endif
 };
 
 struct key_security_struct {
-	struct key *obj; /* back pointer */
 	u32 sid;         /* SID of key */
 };
 
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index 44e12ec..1904c46 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -26,13 +26,14 @@
 #define POLICYDB_VERSION_AVTAB		20
 #define POLICYDB_VERSION_RANGETRANS	21
 #define POLICYDB_VERSION_POLCAP		22
+#define POLICYDB_VERSION_PERMISSIVE	23
 
 /* Range of policy versions we understand*/
 #define POLICYDB_VERSION_MIN   POLICYDB_VERSION_BASE
 #ifdef CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX
 #define POLICYDB_VERSION_MAX	CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX_VALUE
 #else
-#define POLICYDB_VERSION_MAX	POLICYDB_VERSION_POLCAP
+#define POLICYDB_VERSION_MAX	POLICYDB_VERSION_PERMISSIVE
 #endif
 
 #define CONTEXT_MNT	0x01
@@ -40,6 +41,11 @@
 #define ROOTCONTEXT_MNT	0x04
 #define DEFCONTEXT_MNT	0x08
 
+#define CONTEXT_STR	"context="
+#define FSCONTEXT_STR	"fscontext="
+#define ROOTCONTEXT_STR	"rootcontext="
+#define DEFCONTEXT_STR	"defcontext="
+
 struct netlbl_lsm_secattr;
 
 extern int selinux_enabled;
@@ -48,11 +54,13 @@
 /* Policy capabilities */
 enum {
 	POLICYDB_CAPABILITY_NETPEER,
+	POLICYDB_CAPABILITY_OPENPERM,
 	__POLICYDB_CAPABILITY_MAX
 };
 #define POLICYDB_CAPABILITY_MAX (__POLICYDB_CAPABILITY_MAX - 1)
 
 extern int selinux_policycap_netpeer;
+extern int selinux_policycap_openperm;
 
 int security_load_policy(void * data, size_t len);
 
@@ -67,6 +75,8 @@
 	u32 seqno;
 };
 
+int security_permissive_sid(u32 sid);
+
 int security_compute_av(u32 ssid, u32 tsid,
 	u16 tclass, u32 requested,
 	struct av_decision *avd);
@@ -92,8 +102,7 @@
 int security_get_user_sids(u32 callsid, char *username,
 			   u32 **sids, u32 *nel);
 
-int security_port_sid(u16 domain, u16 type, u8 protocol, u16 port,
-	u32 *out_sid);
+int security_port_sid(u8 protocol, u16 port, u32 *out_sid);
 
 int security_netif_sid(char *name, u32 *if_sid);
 
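Two small points on the header changes above. The new *_STR macros let
hooks.c share one set of literals between its match_table_t tokens and
selinux_option(); because sizeof on a string literal counts the terminating
NUL, sizeof(CONTEXT_STR) - 1 yields the prefix length at compile time:

    #define CONTEXT_STR "context="
    /* sizeof(CONTEXT_STR) == 9 (eight characters plus the NUL), so the
     * compile-time prefix length is sizeof(CONTEXT_STR) - 1 == 8 */

And raising POLICYDB_VERSION_MAX to POLICYDB_VERSION_PERMISSIVE is what
allows policydb_read() to accept version 23 binaries at all; the Kconfig
range bump earlier in this patch keeps the build-time cap in sync.
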
diff --git a/security/selinux/netlabel.c b/security/selinux/netlabel.c
index 0fa2be4..e8ee91a 100644
--- a/security/selinux/netlabel.c
+++ b/security/selinux/netlabel.c
@@ -34,6 +34,7 @@
 
 #include "objsec.h"
 #include "security.h"
+#include "netlabel.h"
 
 /**
  * selinux_netlbl_sidlookup_cached - Cache a SID lookup
@@ -69,9 +70,7 @@
  *
  * Description:
  * Attempt to label a socket using the NetLabel mechanism using the given
- * SID.  Returns zero values on success, negative values on failure.  The
- * caller is responsibile for calling rcu_read_lock() before calling this
- * this function and rcu_read_unlock() after this function returns.
+ * SID.  Returns zero on success, negative values on failure.
  *
  */
 static int selinux_netlbl_sock_setsid(struct sock *sk, u32 sid)
@@ -86,11 +85,8 @@
 	if (rc != 0)
 		goto sock_setsid_return;
 	rc = netlbl_sock_setattr(sk, &secattr);
-	if (rc == 0) {
-		spin_lock_bh(&sksec->nlbl_lock);
+	if (rc == 0)
 		sksec->nlbl_state = NLBL_LABELED;
-		spin_unlock_bh(&sksec->nlbl_lock);
-	}
 
 sock_setsid_return:
 	netlbl_secattr_destroy(&secattr);
@@ -129,45 +125,6 @@
 }
 
 /**
- * selinux_netlbl_sk_security_init - Setup the NetLabel fields
- * @ssec: the sk_security_struct
- * @family: the socket family
- *
- * Description:
- * Called when a new sk_security_struct is allocated to initialize the NetLabel
- * fields.
- *
- */
-void selinux_netlbl_sk_security_init(struct sk_security_struct *ssec,
-				     int family)
-{
-	/* No locking needed, we are the only one who has access to ssec */
-	selinux_netlbl_sk_security_reset(ssec, family);
-	spin_lock_init(&ssec->nlbl_lock);
-}
-
-/**
- * selinux_netlbl_sk_security_clone - Copy the NetLabel fields
- * @ssec: the original sk_security_struct
- * @newssec: the cloned sk_security_struct
- *
- * Description:
- * Clone the NetLabel specific sk_security_struct fields from @ssec to
- * @newssec.
- *
- */
-void selinux_netlbl_sk_security_clone(struct sk_security_struct *ssec,
-				      struct sk_security_struct *newssec)
-{
-	/* We don't need to take newssec->nlbl_lock because we are the only
-	 * thread with access to newssec, but we do need to take the RCU read
-	 * lock as other threads could have access to ssec */
-	rcu_read_lock();
-	selinux_netlbl_sk_security_reset(newssec, ssec->sk->sk_family);
-	rcu_read_unlock();
-}
-
-/**
  * selinux_netlbl_skbuff_getsid - Get the sid of a packet using NetLabel
  * @skb: the packet
  * @family: protocol family
@@ -221,12 +178,8 @@
 	struct netlbl_lsm_secattr secattr;
 	u32 nlbl_peer_sid;
 
-	rcu_read_lock();
-
-	if (sksec->nlbl_state != NLBL_REQUIRE) {
-		rcu_read_unlock();
+	if (sksec->nlbl_state != NLBL_REQUIRE)
 		return;
-	}
 
 	netlbl_secattr_init(&secattr);
 	if (netlbl_sock_getattr(sk, &secattr) == 0 &&
@@ -239,8 +192,6 @@
 	 * here we will pick up the pieces in later calls to
 	 * selinux_netlbl_inode_permission(). */
 	selinux_netlbl_sock_setsid(sk, sksec->sid);
-
-	rcu_read_unlock();
 }
 
 /**
@@ -254,16 +205,13 @@
  */
 int selinux_netlbl_socket_post_create(struct socket *sock)
 {
-	int rc = 0;
 	struct sock *sk = sock->sk;
 	struct sk_security_struct *sksec = sk->sk_security;
 
-	rcu_read_lock();
-	if (sksec->nlbl_state == NLBL_REQUIRE)
-		rc = selinux_netlbl_sock_setsid(sk, sksec->sid);
-	rcu_read_unlock();
+	if (sksec->nlbl_state != NLBL_REQUIRE)
+		return 0;
 
-	return rc;
+	return selinux_netlbl_sock_setsid(sk, sksec->sid);
 }
 
 /**
@@ -288,21 +236,21 @@
 	if (!S_ISSOCK(inode->i_mode) ||
 	    ((mask & (MAY_WRITE | MAY_APPEND)) == 0))
 		return 0;
+
 	sock = SOCKET_I(inode);
 	sk = sock->sk;
 	sksec = sk->sk_security;
-
-	rcu_read_lock();
-	if (sksec->nlbl_state != NLBL_REQUIRE) {
-		rcu_read_unlock();
+	if (sksec->nlbl_state != NLBL_REQUIRE)
 		return 0;
-	}
+
 	local_bh_disable();
 	bh_lock_sock_nested(sk);
-	rc = selinux_netlbl_sock_setsid(sk, sksec->sid);
+	if (likely(sksec->nlbl_state == NLBL_REQUIRE))
+		rc = selinux_netlbl_sock_setsid(sk, sksec->sid);
+	else
+		rc = 0;
 	bh_unlock_sock(sk);
 	local_bh_enable();
-	rcu_read_unlock();
 
 	return rc;
 }
@@ -385,7 +333,6 @@
 	struct sk_security_struct *sksec = sk->sk_security;
 	struct netlbl_lsm_secattr secattr;
 
-	rcu_read_lock();
 	if (level == IPPROTO_IP && optname == IP_OPTIONS &&
 	    sksec->nlbl_state == NLBL_LABELED) {
 		netlbl_secattr_init(&secattr);
@@ -396,7 +343,6 @@
 			rc = -EACCES;
 		netlbl_secattr_destroy(&secattr);
 	}
-	rcu_read_unlock();
 
 	return rc;
 }
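
With nlbl_lock removed, nlbl_state is serialized by the socket lock
instead, and the inode_permission hook above takes the classic
unlocked-check/locked-recheck shape: the first test skips the lock in the
common case, and the second test, made while the lock is held, guards
against a concurrent labeler having run in between. The shape in isolation,
as a sketch with hypothetical lock helpers rather than the kernel API:

    /* sketch: double-checked state transition */
    if (state != NLBL_REQUIRE)      /* unlocked fast path */
        return 0;
    lock_socket();                  /* bh_lock_sock_nested() in the hook */
    if (state == NLBL_REQUIRE)      /* re-check under the lock */
        rc = label_socket();
    unlock_socket();
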
diff --git a/security/selinux/netlink.c b/security/selinux/netlink.c
index b59871d..6214a7a 100644
--- a/security/selinux/netlink.c
+++ b/security/selinux/netlink.c
@@ -89,7 +89,7 @@
 nlmsg_failure:
 	kfree_skb(skb);
 oom:
-	printk(KERN_ERR "SELinux:  OOM in %s\n", __FUNCTION__);
+	printk(KERN_ERR "SELinux:  OOM in %s\n", __func__);
 	goto out;
 }
 
diff --git a/security/selinux/netport.c b/security/selinux/netport.c
new file mode 100644
index 0000000..68ede3c
--- /dev/null
+++ b/security/selinux/netport.c
@@ -0,0 +1,286 @@
+/*
+ * Network port table
+ *
+ * SELinux must keep a mapping of network ports to labels/SIDs.  This
+ * mapping is maintained as part of the normal policy but a fast cache is
+ * needed to reduce the lookup overhead.
+ *
+ * Author: Paul Moore <paul.moore@hp.com>
+ *
+ * This code is heavily based on the "netif" concept originally developed by
+ * James Morris <jmorris@redhat.com>
+ *   (see security/selinux/netif.c for more information)
+ *
+ */
+
+/*
+ * (c) Copyright Hewlett-Packard Development Company, L.P., 2008
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/rcupdate.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <asm/bug.h>
+
+#include "netport.h"
+#include "objsec.h"
+
+#define SEL_NETPORT_HASH_SIZE       256
+#define SEL_NETPORT_HASH_BKT_LIMIT   16
+
+struct sel_netport_bkt {
+	int size;
+	struct list_head list;
+};
+
+struct sel_netport {
+	struct netport_security_struct psec;
+
+	struct list_head list;
+	struct rcu_head rcu;
+};
+
+/* NOTE: we are using a single hash table for all transport protocols, the
+ * reason for this is that I suspect most users will not make heavy use of
+ * more than one protocol at the same time so per-protocol tables would
+ * usually end up wasted, if this becomes a problem we can always add a hash
+ * table for each protocol later */
+
+static LIST_HEAD(sel_netport_list);
+static DEFINE_SPINLOCK(sel_netport_lock);
+static struct sel_netport_bkt sel_netport_hash[SEL_NETPORT_HASH_SIZE];
+
+/**
+ * sel_netport_free - Frees a port entry
+ * @p: the entry's RCU field
+ *
+ * Description:
+ * This function is designed to be used as a callback to the call_rcu()
+ * function so that memory allocated to a hash table port entry can be
+ * released safely.
+ *
+ */
+static void sel_netport_free(struct rcu_head *p)
+{
+	struct sel_netport *port = container_of(p, struct sel_netport, rcu);
+	kfree(port);
+}
+
+/**
+ * sel_netport_hashfn - Hashing function for the port table
+ * @pnum: port number
+ *
+ * Description:
+ * This is the hashing function for the port table; it returns the bucket
+ * number for the given port.
+ *
+ */
+static unsigned int sel_netport_hashfn(u16 pnum)
+{
+	return (pnum & (SEL_NETPORT_HASH_SIZE - 1));
+}
+
+/**
+ * sel_netport_find - Search for a port record
+ * @protocol: protocol
+ * @pnum: port number
+ *
+ * Description:
+ * Search the network port table and return the matching record.  If an entry
+ * cannot be found in the table, return NULL.
+ *
+ */
+static struct sel_netport *sel_netport_find(u8 protocol, u16 pnum)
+{
+	unsigned int idx;
+	struct sel_netport *port;
+
+	idx = sel_netport_hashfn(pnum);
+	list_for_each_entry_rcu(port, &sel_netport_hash[idx].list, list)
+		if (port->psec.port == pnum &&
+		    port->psec.protocol == protocol)
+			return port;
+
+	return NULL;
+}
+
+/**
+ * sel_netport_insert - Insert a new port into the table
+ * @port: the new port record
+ *
+ * Description:
+ * Add a new port record to the network port hash table.  Returns zero on
+ * success, negative values on failure.
+ *
+ */
+static int sel_netport_insert(struct sel_netport *port)
+{
+	unsigned int idx;
+
+	/* we need to impose a limit on the growth of the hash table so check
+	 * this bucket to make sure it is within the specified bounds */
+	idx = sel_netport_hashfn(port->psec.port);
+	list_add_rcu(&port->list, &sel_netport_hash[idx].list);
+	if (sel_netport_hash[idx].size == SEL_NETPORT_HASH_BKT_LIMIT) {
+		struct sel_netport *tail;
+		tail = list_entry(port->list.prev, struct sel_netport, list);
+		list_del_rcu(port->list.prev);
+		call_rcu(&tail->rcu, sel_netport_free);
+	} else
+		sel_netport_hash[idx].size++;
+
+	return 0;
+}
+
+/**
+ * sel_netport_sid_slow - Lookup the SID of a network port using the policy
+ * @protocol: protocol
+ * @pnum: port
+ * @sid: port SID
+ *
+ * Description:
+ * This function determines the SID of a network port by querying the security
+ * policy.  The result is added to the network port table to speed up future
+ * queries.  Returns zero on success, negative values on failure.
+ *
+ */
+static int sel_netport_sid_slow(u8 protocol, u16 pnum, u32 *sid)
+{
+	int ret;
+	struct sel_netport *port;
+	struct sel_netport *new = NULL;
+
+	spin_lock_bh(&sel_netport_lock);
+	port = sel_netport_find(protocol, pnum);
+	if (port != NULL) {
+		*sid = port->psec.sid;
+		ret = 0;
+		goto out;
+	}
+	new = kzalloc(sizeof(*new), GFP_ATOMIC);
+	if (new == NULL) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	ret = security_port_sid(protocol, pnum, &new->psec.sid);
+	if (ret != 0)
+		goto out;
+	new->psec.port = pnum;
+	new->psec.protocol = protocol;
+	ret = sel_netport_insert(new);
+	if (ret != 0)
+		goto out;
+	*sid = new->psec.sid;
+
+out:
+	spin_unlock_bh(&sel_netport_lock);
+	if (unlikely(ret)) {
+		printk(KERN_WARNING
+		       "SELinux: failure in sel_netport_sid_slow(),"
+		       " unable to determine network port label\n");
+		kfree(new);
+	}
+	return ret;
+}
+
+/**
+ * sel_netport_sid - Lookup the SID of a network port
+ * @protocol: protocol
+ * @pnum: port
+ * @sid: port SID
+ *
+ * Description:
+ * This function determines the SID of a network port using the fastest method
+ * possible.  First the port table is queried, but if an entry can't be found
+ * then the policy is queried and the result is added to the table to speed up
+ * future queries.  Returns zero on success, negative values on failure.
+ *
+ */
+int sel_netport_sid(u8 protocol, u16 pnum, u32 *sid)
+{
+	struct sel_netport *port;
+
+	rcu_read_lock();
+	port = sel_netport_find(protocol, pnum);
+	if (port != NULL) {
+		*sid = port->psec.sid;
+		rcu_read_unlock();
+		return 0;
+	}
+	rcu_read_unlock();
+
+	return sel_netport_sid_slow(protocol, pnum, sid);
+}
+
+/**
+ * sel_netport_flush - Flush the entire network port table
+ *
+ * Description:
+ * Remove all entries from the network port table.
+ *
+ */
+static void sel_netport_flush(void)
+{
+	unsigned int idx;
+	struct sel_netport *port;
+
+	spin_lock_bh(&sel_netport_lock);
+	for (idx = 0; idx < SEL_NETPORT_HASH_SIZE; idx++) {
+		list_for_each_entry(port, &sel_netport_hash[idx].list, list) {
+			list_del_rcu(&port->list);
+			call_rcu(&port->rcu, sel_netport_free);
+		}
+		sel_netport_hash[idx].size = 0;
+	}
+	spin_unlock_bh(&sel_netport_lock);
+}
+
+static int sel_netport_avc_callback(u32 event, u32 ssid, u32 tsid,
+				    u16 class, u32 perms, u32 *retained)
+{
+	if (event == AVC_CALLBACK_RESET) {
+		sel_netport_flush();
+		synchronize_net();
+	}
+	return 0;
+}
+
+static __init int sel_netport_init(void)
+{
+	int iter;
+	int ret;
+
+	if (!selinux_enabled)
+		return 0;
+
+	for (iter = 0; iter < SEL_NETPORT_HASH_SIZE; iter++) {
+		INIT_LIST_HEAD(&sel_netport_hash[iter].list);
+		sel_netport_hash[iter].size = 0;
+	}
+
+	ret = avc_add_callback(sel_netport_avc_callback, AVC_CALLBACK_RESET,
+	                       SECSID_NULL, SECSID_NULL, SECCLASS_NULL, 0);
+	if (ret != 0)
+		panic("avc_add_callback() failed, error %d\n", ret);
+
+	return ret;
+}
+
+__initcall(sel_netport_init);
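
sel_netport_sid() is the only exported entry point: the RCU-protected
lookup is lock-free, and only a cache miss falls through to
sel_netport_sid_slow(), which takes the spinlock, asks the policy via
security_port_sid(), and caches the answer, evicting the oldest entry once
a bucket reaches SEL_NETPORT_HASH_BKT_LIMIT. A hypothetical caller
fragment, mirroring the hooks.c conversions earlier in this patch:

    /* resolve the SID for tcp/80; IPPROTO_TCP from <netinet/in.h> */
    u32 sid;
    int err = sel_netport_sid(IPPROTO_TCP, 80, &sid);
    if (err)
        return err;  /* no label could be determined for the port */
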
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 0341567..26fabad 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -42,7 +42,8 @@
 
 /* Policy capability filenames */
 static char *policycap_names[] = {
-	"network_peer_controls"
+	"network_peer_controls",
+	"open_perms"
 };
 
 unsigned int selinux_checkreqprot = CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE;
@@ -391,7 +392,7 @@
 
 	if (len > SIMPLE_TRANSACTION_LIMIT) {
 		printk(KERN_ERR "%s:  context size (%u) exceeds payload "
-		       "max\n", __FUNCTION__, len);
+		       "max\n", __func__, len);
 		length = -ERANGE;
 		goto out;
 	}
@@ -644,7 +645,7 @@
 
 	if (len > SIMPLE_TRANSACTION_LIMIT) {
 		printk(KERN_ERR "%s:  context size (%u) exceeds payload "
-		       "max\n", __FUNCTION__, len);
+		       "max\n", __func__, len);
 		length = -ERANGE;
 		goto out3;
 	}
@@ -821,7 +822,7 @@
 
 	if (len > SIMPLE_TRANSACTION_LIMIT) {
 		printk(KERN_ERR "%s:  context size (%u) exceeds payload "
-		       "max\n", __FUNCTION__, len);
+		       "max\n", __func__, len);
 		length = -ERANGE;
 		goto out3;
 	}
@@ -1760,7 +1761,7 @@
 out:
 	return ret;
 err:
-	printk(KERN_ERR "%s:  failed while creating inodes\n", __FUNCTION__);
+	printk(KERN_ERR "%s:  failed while creating inodes\n", __func__);
 	goto out;
 }
 
diff --git a/security/selinux/ss/avtab.c b/security/selinux/ss/avtab.c
index cd10e27..916e73a 100644
--- a/security/selinux/ss/avtab.c
+++ b/security/selinux/ss/avtab.c
@@ -280,8 +280,8 @@
 	h->nel = 0;
 	h->nslot = nslot;
 	h->mask = mask;
-	printk(KERN_DEBUG "SELinux:%d avtab hash slots allocated. "
-	       "Num of rules:%d\n", h->nslot, nrules);
+	printk(KERN_DEBUG "SELinux: %d avtab hash slots, %d rules.\n",
+	       h->nslot, nrules);
 	return 0;
 }
 
@@ -345,18 +345,18 @@
 	if (vers < POLICYDB_VERSION_AVTAB) {
 		rc = next_entry(buf32, fp, sizeof(u32));
 		if (rc < 0) {
-			printk(KERN_ERR "security: avtab: truncated entry\n");
+			printk(KERN_ERR "SELinux: avtab: truncated entry\n");
 			return -1;
 		}
 		items2 = le32_to_cpu(buf32[0]);
 		if (items2 > ARRAY_SIZE(buf32)) {
-			printk(KERN_ERR "security: avtab: entry overflow\n");
+			printk(KERN_ERR "SELinux: avtab: entry overflow\n");
 			return -1;
 
 		}
 		rc = next_entry(buf32, fp, sizeof(u32)*items2);
 		if (rc < 0) {
-			printk(KERN_ERR "security: avtab: truncated entry\n");
+			printk(KERN_ERR "SELinux: avtab: truncated entry\n");
 			return -1;
 		}
 		items = 0;
@@ -364,19 +364,19 @@
 		val = le32_to_cpu(buf32[items++]);
 		key.source_type = (u16)val;
 		if (key.source_type != val) {
-			printk("security: avtab: truncated source type\n");
+			printk("SELinux: avtab: truncated source type\n");
 			return -1;
 		}
 		val = le32_to_cpu(buf32[items++]);
 		key.target_type = (u16)val;
 		if (key.target_type != val) {
-			printk("security: avtab: truncated target type\n");
+			printk("SELinux: avtab: truncated target type\n");
 			return -1;
 		}
 		val = le32_to_cpu(buf32[items++]);
 		key.target_class = (u16)val;
 		if (key.target_class != val) {
-			printk("security: avtab: truncated target class\n");
+			printk("SELinux: avtab: truncated target class\n");
 			return -1;
 		}
 
@@ -384,12 +384,12 @@
 		enabled = (val & AVTAB_ENABLED_OLD) ? AVTAB_ENABLED : 0;
 
 		if (!(val & (AVTAB_AV | AVTAB_TYPE))) {
-			printk("security: avtab: null entry\n");
+			printk("SELinux: avtab: null entry\n");
 			return -1;
 		}
 		if ((val & AVTAB_AV) &&
 		    (val & AVTAB_TYPE)) {
-			printk("security: avtab: entry has both access vectors and types\n");
+			printk("SELinux: avtab: entry has both access vectors and types\n");
 			return -1;
 		}
 
@@ -403,7 +403,7 @@
 		}
 
 		if (items != items2) {
-			printk("security: avtab: entry only had %d items, expected %d\n", items2, items);
+			printk("SELinux: avtab: entry only had %d items, expected %d\n", items2, items);
 			return -1;
 		}
 		return 0;
@@ -411,7 +411,7 @@
 
 	rc = next_entry(buf16, fp, sizeof(u16)*4);
 	if (rc < 0) {
-		printk("security: avtab: truncated entry\n");
+		printk("SELinux: avtab: truncated entry\n");
 		return -1;
 	}
 
@@ -424,7 +424,7 @@
 	if (!policydb_type_isvalid(pol, key.source_type) ||
 	    !policydb_type_isvalid(pol, key.target_type) ||
 	    !policydb_class_isvalid(pol, key.target_class)) {
-		printk(KERN_WARNING "security: avtab: invalid type or class\n");
+		printk(KERN_WARNING "SELinux: avtab: invalid type or class\n");
 		return -1;
 	}
 
@@ -435,19 +435,19 @@
 	}
 	if (!set || set > 1) {
 		printk(KERN_WARNING
-			"security:  avtab:  more than one specifier\n");
+			"SELinux:  avtab:  more than one specifier\n");
 		return -1;
 	}
 
 	rc = next_entry(buf32, fp, sizeof(u32));
 	if (rc < 0) {
-		printk("security: avtab: truncated entry\n");
+		printk("SELinux: avtab: truncated entry\n");
 		return -1;
 	}
 	datum.data = le32_to_cpu(*buf32);
 	if ((key.specified & AVTAB_TYPE) &&
 	    !policydb_type_isvalid(pol, datum.data)) {
-		printk(KERN_WARNING "security: avtab: invalid type\n");
+		printk(KERN_WARNING "SELinux: avtab: invalid type\n");
 		return -1;
 	}
 	return insertf(a, &key, &datum, p);
@@ -468,12 +468,12 @@
 
 	rc = next_entry(buf, fp, sizeof(u32));
 	if (rc < 0) {
-		printk(KERN_ERR "security: avtab: truncated table\n");
+		printk(KERN_ERR "SELinux: avtab: truncated table\n");
 		goto bad;
 	}
 	nel = le32_to_cpu(buf[0]);
 	if (!nel) {
-		printk(KERN_ERR "security: avtab: table is empty\n");
+		printk(KERN_ERR "SELinux: avtab: table is empty\n");
 		rc = -EINVAL;
 		goto bad;
 	}
@@ -486,9 +486,9 @@
 		rc = avtab_read_item(a, fp, pol, avtab_insertf, NULL);
 		if (rc) {
 			if (rc == -ENOMEM)
-				printk(KERN_ERR "security: avtab: out of memory\n");
+				printk(KERN_ERR "SELinux: avtab: out of memory\n");
 			else if (rc == -EEXIST)
-				printk(KERN_ERR "security: avtab: duplicate entry\n");
+				printk(KERN_ERR "SELinux: avtab: duplicate entry\n");
 			else
 				rc = -EINVAL;
 			goto bad;
diff --git a/security/selinux/ss/conditional.c b/security/selinux/ss/conditional.c
index 50ad85d..a996cf1 100644
--- a/security/selinux/ss/conditional.c
+++ b/security/selinux/ss/conditional.c
@@ -96,7 +96,7 @@
 	if (new_state != node->cur_state) {
 		node->cur_state = new_state;
 		if (new_state == -1)
-			printk(KERN_ERR "security: expression result was undefined - disabling all rules.\n");
+			printk(KERN_ERR "SELinux: expression result was undefined - disabling all rules.\n");
 		/* turn the rules on or off */
 		for (cur = node->true_list; cur != NULL; cur = cur->next) {
 			if (new_state <= 0) {
@@ -276,7 +276,7 @@
 	 */
 	if (k->specified & AVTAB_TYPE) {
 		if (avtab_search(&p->te_avtab, k)) {
-			printk("security: type rule already exists outside of a conditional.");
+			printk("SELinux: type rule already exists outside of a conditional.");
 			goto err;
 		}
 		/*
@@ -291,7 +291,7 @@
 			node_ptr = avtab_search_node(&p->te_cond_avtab, k);
 			if (node_ptr) {
 				if (avtab_search_node_next(node_ptr, k->specified)) {
-					printk("security: too many conflicting type rules.");
+					printk("SELinux: too many conflicting type rules.");
 					goto err;
 				}
 				found = 0;
@@ -302,13 +302,13 @@
 					}
 				}
 				if (!found) {
-					printk("security: conflicting type rules.\n");
+					printk("SELinux: conflicting type rules.\n");
 					goto err;
 				}
 			}
 		} else {
 			if (avtab_search(&p->te_cond_avtab, k)) {
-				printk("security: conflicting type rules when adding type rule for true.\n");
+				printk("SELinux: conflicting type rules when adding type rule for true.\n");
 				goto err;
 			}
 		}
@@ -316,7 +316,7 @@
 
 	node_ptr = avtab_insert_nonunique(&p->te_cond_avtab, k, d);
 	if (!node_ptr) {
-		printk("security: could not insert rule.");
+		printk("SELinux: could not insert rule.");
 		goto err;
 	}
 
@@ -376,12 +376,12 @@
 static int expr_isvalid(struct policydb *p, struct cond_expr *expr)
 {
 	if (expr->expr_type <= 0 || expr->expr_type > COND_LAST) {
-		printk("security: conditional expressions uses unknown operator.\n");
+		printk("SELinux: conditional expression uses unknown operator.\n");
 		return 0;
 	}
 
 	if (expr->bool > p->p_bools.nprim) {
-		printk("security: conditional expressions uses unknown bool.\n");
+		printk("SELinux: conditional expression uses unknown bool.\n");
 		return 0;
 	}
 	return 1;
diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c
index 920b5e3..e499af4 100644
--- a/security/selinux/ss/ebitmap.c
+++ b/security/selinux/ss/ebitmap.c
@@ -364,7 +364,7 @@
 	count = le32_to_cpu(buf[2]);
 
 	if (mapunit != sizeof(u64) * 8) {
-		printk(KERN_ERR "security: ebitmap: map size %u does not "
+		printk(KERN_ERR "SELinux: ebitmap: map size %u does not "
 		       "match my size %Zd (high bit was %d)\n",
 		       mapunit, sizeof(u64) * 8, e->highbit);
 		goto bad;
@@ -382,19 +382,19 @@
 	for (i = 0; i < count; i++) {
 		rc = next_entry(&startbit, fp, sizeof(u32));
 		if (rc < 0) {
-			printk(KERN_ERR "security: ebitmap: truncated map\n");
+			printk(KERN_ERR "SELinux: ebitmap: truncated map\n");
 			goto bad;
 		}
 		startbit = le32_to_cpu(startbit);
 
 		if (startbit & (mapunit - 1)) {
-			printk(KERN_ERR "security: ebitmap start bit (%d) is "
+			printk(KERN_ERR "SELinux: ebitmap start bit (%d) is "
 			       "not a multiple of the map unit size (%u)\n",
 			       startbit, mapunit);
 			goto bad;
 		}
 		if (startbit > e->highbit - mapunit) {
-			printk(KERN_ERR "security: ebitmap start bit (%d) is "
+			printk(KERN_ERR "SELinux: ebitmap start bit (%d) is "
 			       "beyond the end of the bitmap (%u)\n",
 			       startbit, (e->highbit - mapunit));
 			goto bad;
@@ -405,7 +405,7 @@
 			tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
 			if (!tmp) {
 				printk(KERN_ERR
-				       "security: ebitmap: out of memory\n");
+				       "SELinux: ebitmap: out of memory\n");
 				rc = -ENOMEM;
 				goto bad;
 			}
@@ -418,7 +418,7 @@
 			}
 			n = tmp;
 		} else if (startbit <= n->startbit) {
-			printk(KERN_ERR "security: ebitmap: start bit %d"
+			printk(KERN_ERR "SELinux: ebitmap: start bit %d"
 			       " comes after start bit %d\n",
 			       startbit, n->startbit);
 			goto bad;
@@ -426,7 +426,7 @@
 
 		rc = next_entry(&map, fp, sizeof(u64));
 		if (rc < 0) {
-			printk(KERN_ERR "security: ebitmap: truncated map\n");
+			printk(KERN_ERR "SELinux: ebitmap: truncated map\n");
 			goto bad;
 		}
 		map = le64_to_cpu(map);
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index bd7d6a0..6bdb0ff 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -111,6 +111,11 @@
 		.version	= POLICYDB_VERSION_POLCAP,
 		.sym_num	= SYM_NUM,
 		.ocon_num	= OCON_NUM,
+	},
+	{
+		.version	= POLICYDB_VERSION_PERMISSIVE,
+		.sym_num	= SYM_NUM,
+		.ocon_num	= OCON_NUM,
 	}
 };
 
@@ -194,6 +199,7 @@
 		goto out_free_symtab;
 
 	ebitmap_init(&p->policycaps);
+	ebitmap_init(&p->permissive_map);
 
 out:
 	return rc;
@@ -401,14 +407,14 @@
 {
 	int i, rc = 0;
 
-	printk(KERN_DEBUG "security:  %d users, %d roles, %d types, %d bools",
+	printk(KERN_DEBUG "SELinux:  %d users, %d roles, %d types, %d bools",
 	       p->p_users.nprim, p->p_roles.nprim, p->p_types.nprim, p->p_bools.nprim);
 	if (selinux_mls_enabled)
 		printk(", %d sens, %d cats", p->p_levels.nprim,
 		       p->p_cats.nprim);
 	printk("\n");
 
-	printk(KERN_DEBUG "security:  %d classes, %d rules\n",
+	printk(KERN_DEBUG "SELinux:  %d classes, %d rules\n",
 	       p->p_classes.nprim, p->te_avtab.nel);
 
 #ifdef DEBUG_HASHES
@@ -687,6 +693,7 @@
 	kfree(p->type_attr_map);
 	kfree(p->undefined_perms);
 	ebitmap_destroy(&p->policycaps);
+	ebitmap_destroy(&p->permissive_map);
 
 	return;
 }
@@ -702,20 +709,20 @@
 
 	rc = sidtab_init(s);
 	if (rc) {
-		printk(KERN_ERR "security:  out of memory on SID table init\n");
+		printk(KERN_ERR "SELinux:  out of memory on SID table init\n");
 		goto out;
 	}
 
 	head = p->ocontexts[OCON_ISID];
 	for (c = head; c; c = c->next) {
 		if (!c->context[0].user) {
-			printk(KERN_ERR "security:  SID %s was never "
+			printk(KERN_ERR "SELinux:  SID %s was never "
 			       "defined.\n", c->u.name);
 			rc = -EINVAL;
 			goto out;
 		}
 		if (sidtab_insert(s, c->sid[0], &c->context[0])) {
-			printk(KERN_ERR "security:  unable to load initial "
+			printk(KERN_ERR "SELinux:  unable to load initial "
 			       "SID %s.\n", c->u.name);
 			rc = -EINVAL;
 			goto out;
@@ -809,13 +816,13 @@
 
 	items = le32_to_cpu(buf[0]);
 	if (items > ARRAY_SIZE(buf)) {
-		printk(KERN_ERR "security: mls:  range overflow\n");
+		printk(KERN_ERR "SELinux: mls:  range overflow\n");
 		rc = -EINVAL;
 		goto out;
 	}
 	rc = next_entry(buf, fp, sizeof(u32) * items);
 	if (rc < 0) {
-		printk(KERN_ERR "security: mls:  truncated range\n");
+		printk(KERN_ERR "SELinux: mls:  truncated range\n");
 		goto out;
 	}
 	r->level[0].sens = le32_to_cpu(buf[0]);
@@ -826,21 +833,21 @@
 
 	rc = ebitmap_read(&r->level[0].cat, fp);
 	if (rc) {
-		printk(KERN_ERR "security: mls:  error reading low "
+		printk(KERN_ERR "SELinux: mls:  error reading low "
 		       "categories\n");
 		goto out;
 	}
 	if (items > 1) {
 		rc = ebitmap_read(&r->level[1].cat, fp);
 		if (rc) {
-			printk(KERN_ERR "security: mls:  error reading high "
+			printk(KERN_ERR "SELinux: mls:  error reading high "
 			       "categories\n");
 			goto bad_high;
 		}
 	} else {
 		rc = ebitmap_cpy(&r->level[1].cat, &r->level[0].cat);
 		if (rc) {
-			printk(KERN_ERR "security: mls:  out of memory\n");
+			printk(KERN_ERR "SELinux: mls:  out of memory\n");
 			goto bad_high;
 		}
 	}
@@ -866,7 +873,7 @@
 
 	rc = next_entry(buf, fp, sizeof buf);
 	if (rc < 0) {
-		printk(KERN_ERR "security: context truncated\n");
+		printk(KERN_ERR "SELinux: context truncated\n");
 		goto out;
 	}
 	c->user = le32_to_cpu(buf[0]);
@@ -874,7 +881,7 @@
 	c->type = le32_to_cpu(buf[2]);
 	if (p->policyvers >= POLICYDB_VERSION_MLS) {
 		if (mls_read_range_helper(&c->range, fp)) {
-			printk(KERN_ERR "security: error reading MLS range of "
+			printk(KERN_ERR "SELinux: error reading MLS range of "
 			       "context\n");
 			rc = -EINVAL;
 			goto out;
@@ -882,7 +889,7 @@
 	}
 
 	if (!policydb_context_isvalid(p, c)) {
-		printk(KERN_ERR "security:  invalid security context\n");
+		printk(KERN_ERR "SELinux:  invalid security context\n");
 		context_destroy(c);
 		rc = -EINVAL;
 	}
@@ -1128,7 +1135,7 @@
 		cladatum->comdatum = hashtab_search(p->p_commons.table,
 						    cladatum->comkey);
 		if (!cladatum->comdatum) {
-			printk(KERN_ERR "security:  unknown common %s\n",
+			printk(KERN_ERR "SELinux:  unknown common %s\n",
 			       cladatum->comkey);
 			rc = -EINVAL;
 			goto bad;
@@ -1283,13 +1290,13 @@
 
 	rc = next_entry(buf, fp, sizeof buf);
 	if (rc < 0) {
-		printk(KERN_ERR "security: mls: truncated level\n");
+		printk(KERN_ERR "SELinux: mls: truncated level\n");
 		goto bad;
 	}
 	lp->sens = le32_to_cpu(buf[0]);
 
 	if (ebitmap_read(&lp->cat, fp)) {
-		printk(KERN_ERR "security: mls:  error reading level "
+		printk(KERN_ERR "SELinux: mls:  error reading level "
 		       "categories\n");
 		goto bad;
 	}
@@ -1491,7 +1498,7 @@
 		goto bad;
 
 	if (le32_to_cpu(buf[0]) != POLICYDB_MAGIC) {
-		printk(KERN_ERR "security:  policydb magic number 0x%x does "
+		printk(KERN_ERR "SELinux:  policydb magic number 0x%x does "
 		       "not match expected magic number 0x%x\n",
 		       le32_to_cpu(buf[0]), POLICYDB_MAGIC);
 		goto bad;
@@ -1499,27 +1506,27 @@
 
 	len = le32_to_cpu(buf[1]);
 	if (len != strlen(POLICYDB_STRING)) {
-		printk(KERN_ERR "security:  policydb string length %d does not "
+		printk(KERN_ERR "SELinux:  policydb string length %d does not "
 		       "match expected length %Zu\n",
 		       len, strlen(POLICYDB_STRING));
 		goto bad;
 	}
 	policydb_str = kmalloc(len + 1,GFP_KERNEL);
 	if (!policydb_str) {
-		printk(KERN_ERR "security:  unable to allocate memory for policydb "
+		printk(KERN_ERR "SELinux:  unable to allocate memory for policydb "
 		       "string of length %d\n", len);
 		rc = -ENOMEM;
 		goto bad;
 	}
 	rc = next_entry(policydb_str, fp, len);
 	if (rc < 0) {
-		printk(KERN_ERR "security:  truncated policydb string identifier\n");
+		printk(KERN_ERR "SELinux:  truncated policydb string identifier\n");
 		kfree(policydb_str);
 		goto bad;
 	}
 	policydb_str[len] = 0;
 	if (strcmp(policydb_str, POLICYDB_STRING)) {
-		printk(KERN_ERR "security:  policydb string %s does not match "
+		printk(KERN_ERR "SELinux:  policydb string %s does not match "
 		       "my string %s\n", policydb_str, POLICYDB_STRING);
 		kfree(policydb_str);
 		goto bad;
@@ -1536,7 +1543,7 @@
 	p->policyvers = le32_to_cpu(buf[0]);
 	if (p->policyvers < POLICYDB_VERSION_MIN ||
 	    p->policyvers > POLICYDB_VERSION_MAX) {
-	    	printk(KERN_ERR "security:  policydb version %d does not match "
+		printk(KERN_ERR "SELinux:  policydb version %d does not match "
 	    	       "my version range %d-%d\n",
 	    	       le32_to_cpu(buf[0]), POLICYDB_VERSION_MIN, POLICYDB_VERSION_MAX);
 	    	goto bad;
@@ -1570,16 +1577,20 @@
 	    ebitmap_read(&p->policycaps, fp) != 0)
 		goto bad;
 
+	if (p->policyvers >= POLICYDB_VERSION_PERMISSIVE &&
+	    ebitmap_read(&p->permissive_map, fp) != 0)
+		goto bad;
+
 	info = policydb_lookup_compat(p->policyvers);
 	if (!info) {
-		printk(KERN_ERR "security:  unable to find policy compat info "
+		printk(KERN_ERR "SELinux:  unable to find policy compat info "
 		       "for version %d\n", p->policyvers);
 		goto bad;
 	}
 
 	if (le32_to_cpu(buf[2]) != info->sym_num ||
 		le32_to_cpu(buf[3]) != info->ocon_num) {
-		printk(KERN_ERR "security:  policydb table sizes (%d,%d) do "
+		printk(KERN_ERR "SELinux:  policydb table sizes (%d,%d) do "
 		       "not match mine (%d,%d)\n", le32_to_cpu(buf[2]),
 			le32_to_cpu(buf[3]),
 		       info->sym_num, info->ocon_num);
@@ -1823,7 +1834,7 @@
 		for (genfs_p = NULL, genfs = p->genfs; genfs;
 		     genfs_p = genfs, genfs = genfs->next) {
 			if (strcmp(newgenfs->fstype, genfs->fstype) == 0) {
-				printk(KERN_ERR "security:  dup genfs "
+				printk(KERN_ERR "SELinux:  dup genfs "
 				       "fstype %s\n", newgenfs->fstype);
 				kfree(newgenfs->fstype);
 				kfree(newgenfs);
@@ -1873,7 +1884,7 @@
 				if (!strcmp(newc->u.name, c->u.name) &&
 				    (!c->v.sclass || !newc->v.sclass ||
 				     newc->v.sclass == c->v.sclass)) {
-					printk(KERN_ERR "security:  dup genfs "
+					printk(KERN_ERR "SELinux:  dup genfs "
 					       "entry (%s,%s)\n",
 					       newgenfs->fstype, c->u.name);
 					goto bad_newc;
@@ -1931,7 +1942,7 @@
 			if (rc)
 				goto bad;
 			if (!mls_range_isvalid(p, &rt->target_range)) {
-				printk(KERN_WARNING "security:  rangetrans:  invalid range\n");
+				printk(KERN_WARNING "SELinux:  rangetrans:  invalid range\n");
 				goto bad;
 			}
 			lrt = rt;
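
The permissive map is read only when the binary policy is new enough, so
pre-version-23 policies load exactly as before. It is an ebitmap: a sparse
bitmap keyed here by type value, where a set bit marks that type as
permissive. A dense-bitmap sketch of the test, not the kernel's actual
ebitmap API:

    static inline int type_is_permissive(const unsigned long *map,
                                         unsigned int type)
    {
        /* select the word holding bit 'type', then test that bit */
        return (map[type / (8 * sizeof(long))] >>
                (type % (8 * sizeof(long)))) & 1;
    }
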
diff --git a/security/selinux/ss/policydb.h b/security/selinux/ss/policydb.h
index c4ce996..ba593a3 100644
--- a/security/selinux/ss/policydb.h
+++ b/security/selinux/ss/policydb.h
@@ -243,6 +243,8 @@
 
 	struct ebitmap policycaps;
 
+	struct ebitmap permissive_map;
+
 	unsigned int policyvers;
 
 	unsigned int reject_unknown : 1;
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 5fd54f2..33425b1 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -40,6 +40,7 @@
 #include <linux/sched.h>
 #include <linux/audit.h>
 #include <linux/mutex.h>
+#include <linux/selinux.h>
 #include <net/netlabel.h>
 
 #include "flask.h"
@@ -61,6 +62,7 @@
 unsigned int policydb_loaded_version;
 
 int selinux_policycap_netpeer;
+int selinux_policycap_openperm;
 
 /*
  * This is declared in avc.c
@@ -412,10 +414,35 @@
 	return 0;
 
 inval_class:
-	printk(KERN_ERR "%s:  unrecognized class %d\n", __FUNCTION__, tclass);
+	printk(KERN_ERR "%s:  unrecognized class %d\n", __func__, tclass);
 	return -EINVAL;
 }
 
+/*
+ * Given a SID, determine whether its type has the permissive flag set
+ */
+int security_permissive_sid(u32 sid)
+{
+	struct context *context;
+	u32 type;
+	int rc;
+
+	POLICY_RDLOCK;
+
+	context = sidtab_search(&sidtab, sid);
+	BUG_ON(!context);
+
+	type = context->type;
+	/*
+	 * we are intentionally using type here, not type-1; the 0th bit may
+	 * someday indicate that we are globally setting permissive in policy.
+	 */
+	rc = ebitmap_get_bit(&policydb.permissive_map, type);
+
+	POLICY_RDUNLOCK;
+	return rc;
+}
+
 static int security_validtrans_handle_fail(struct context *ocontext,
                                            struct context *ncontext,
                                            struct context *tcontext,
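
security_permissive_sid() is the policy-side half of permissive-domain
support: under the policy read lock it resolves the SID to its context and
tests the context's type against permissive_map. Its caller is the avc.c
hunk earlier in this patch, which condenses to roughly this hypothetical
fragment:

    /* denied requests from permissive domains are granted but audited */
    if (denied && !(flags & AVC_STRICT) &&
        (!selinux_enforcing || security_permissive_sid(ssid)))
        rc = 0;  /* instead of -EACCES */
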
@@ -1096,7 +1123,7 @@
 			continue;
 		if (i > p->p_classes.nprim) {
 			printk(KERN_INFO
-			       "security:  class %s not defined in policy\n",
+			       "SELinux:  class %s not defined in policy\n",
 			       def_class);
 			if (p->reject_unknown)
 				return -EINVAL;
@@ -1107,7 +1134,7 @@
 		pol_class = p->p_class_val_to_name[i-1];
 		if (strcmp(pol_class, def_class)) {
 			printk(KERN_ERR
-			       "security:  class %d is incorrect, found %s but should be %s\n",
+			       "SELinux:  class %d is incorrect, found %s but should be %s\n",
 			       i, pol_class, def_class);
 			return -EINVAL;
 		}
@@ -1125,7 +1152,7 @@
 		nprim = 1 << (perms->nprim - 1);
 		if (perm_val > nprim) {
 			printk(KERN_INFO
-			       "security:  permission %s in class %s not defined in policy\n",
+			       "SELinux:  permission %s in class %s not defined in policy\n",
 			       def_perm, pol_class);
 			if (p->reject_unknown)
 				return -EINVAL;
@@ -1136,14 +1163,14 @@
 		perdatum = hashtab_search(perms->table, def_perm);
 		if (perdatum == NULL) {
 			printk(KERN_ERR
-			       "security:  permission %s in class %s not found in policy, bad policy\n",
+			       "SELinux:  permission %s in class %s not found in policy, bad policy\n",
 			       def_perm, pol_class);
 			return -EINVAL;
 		}
 		pol_val = 1 << (perdatum->value - 1);
 		if (pol_val != perm_val) {
 			printk(KERN_ERR
-			       "security:  permission %s in class %s has incorrect value\n",
+			       "SELinux:  permission %s in class %s has incorrect value\n",
 			       def_perm, pol_class);
 			return -EINVAL;
 		}
@@ -1157,7 +1184,7 @@
 		BUG_ON(!cladatum);
 		if (!cladatum->comdatum) {
 			printk(KERN_ERR
-			       "security:  class %s should have an inherits clause but does not\n",
+			       "SELinux:  class %s should have an inherits clause but does not\n",
 			       pol_class);
 			return -EINVAL;
 		}
@@ -1172,7 +1199,7 @@
 			def_perm = kdefs->av_inherit[i].common_pts[j];
 			if (j >= perms->nprim) {
 				printk(KERN_INFO
-				       "security:  permission %s in class %s not defined in policy\n",
+				       "SELinux:  permission %s in class %s not defined in policy\n",
 				       def_perm, pol_class);
 				if (p->reject_unknown)
 					return -EINVAL;
@@ -1183,13 +1210,13 @@
 			perdatum = hashtab_search(perms->table, def_perm);
 			if (perdatum == NULL) {
 				printk(KERN_ERR
-				       "security:  permission %s in class %s not found in policy, bad policy\n",
+				       "SELinux:  permission %s in class %s not found in policy, bad policy\n",
 				       def_perm, pol_class);
 				return -EINVAL;
 			}
 			if (perdatum->value != j + 1) {
 				printk(KERN_ERR
-				       "security:  permission %s in class %s has incorrect value\n",
+				       "SELinux:  permission %s in class %s has incorrect value\n",
 				       def_perm, pol_class);
 				return -EINVAL;
 			}
@@ -1219,7 +1246,7 @@
 		u32 len;
 
 		context_struct_to_string(context, &s, &len);
-		printk(KERN_ERR "security:  context %s is invalid\n", s);
+		printk(KERN_ERR "SELinux:  context %s is invalid\n", s);
 		kfree(s);
 	}
 	return rc;
@@ -1299,7 +1326,7 @@
 bad:
 	context_struct_to_string(&oldc, &s, &len);
 	context_destroy(&oldc);
-	printk(KERN_ERR "security:  invalidating context %s\n", s);
+	printk(KERN_ERR "SELinux:  invalidating context %s\n", s);
 	kfree(s);
 	goto out;
 }
@@ -1308,6 +1335,8 @@
 {
 	selinux_policycap_netpeer = ebitmap_get_bit(&policydb.policycaps,
 						  POLICYDB_CAPABILITY_NETPEER);
+	selinux_policycap_openperm = ebitmap_get_bit(&policydb.policycaps,
+						  POLICYDB_CAPABILITY_OPENPERM);
 }
 
 extern void selinux_complete_init(void);
@@ -1350,7 +1379,7 @@
 		/* Verify that the kernel defined classes are correct. */
 		if (validate_classes(&policydb)) {
 			printk(KERN_ERR
-			       "security:  the definition of a class is incorrect\n");
+			       "SELinux:  the definition of a class is incorrect\n");
 			LOAD_UNLOCK;
 			sidtab_destroy(&sidtab);
 			policydb_destroy(&policydb);
@@ -1384,14 +1413,14 @@
 	/* Verify that the kernel defined classes are correct. */
 	if (validate_classes(&newpolicydb)) {
 		printk(KERN_ERR
-		       "security:  the definition of a class is incorrect\n");
+		       "SELinux:  the definition of a class is incorrect\n");
 		rc = -EINVAL;
 		goto err;
 	}
 
 	rc = security_preserve_bools(&newpolicydb);
 	if (rc) {
-		printk(KERN_ERR "security:  unable to preserve booleans\n");
+		printk(KERN_ERR "SELinux:  unable to preserve booleans\n");
 		goto err;
 	}
 
@@ -1443,17 +1472,11 @@
 
 /**
  * security_port_sid - Obtain the SID for a port.
- * @domain: communication domain aka address family
- * @type: socket type
  * @protocol: protocol number
  * @port: port number
  * @out_sid: security identifier
  */
-int security_port_sid(u16 domain,
-		      u16 type,
-		      u8 protocol,
-		      u16 port,
-		      u32 *out_sid)
+int security_port_sid(u8 protocol, u16 port, u32 *out_sid)
 {
 	struct ocontext *c;
 	int rc = 0;
@@ -2203,7 +2226,7 @@
 	match = hashtab_search(policydb.p_classes.table, class);
 	if (!match) {
 		printk(KERN_ERR "%s:  unrecognized class %s\n",
-			__FUNCTION__, class);
+			__func__, class);
 		rc = -EINVAL;
 		goto out;
 	}